/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <complib/cl_async_proc.h>
\r
34 #include <complib/cl_memory.h>
\r
35 #include <complib/cl_timer.h>
\r
40 #include "al_cm_cep.h"
\r
42 #include "al_debug.h"
\r
43 #if defined(EVENT_TRACING)
\r
47 #include "al_qp.tmh"
\r
50 #include "al_mad_pool.h"
\r
51 #include "al_mcast.h"
\r
57 #include "al_query.h"
\r
60 #endif /* CL_KERNEL */
\r
61 #include "al_verbs.h"
\r
63 #include "ib_common.h"
\r
66 #define UNBOUND_PORT_GUID 0
\r
69 extern ib_pool_handle_t gh_mad_pool;
\r
73 * Function prototypes.
\r
77 IN al_obj_t *p_obj );
\r
81 IN al_obj_t *p_obj );
\r
85 IN al_obj_t *p_obj );
\r
91 IN ib_qp_t* const p_qp,
\r
92 IN const void* const qp_context,
\r
93 IN const ib_pfn_event_cb_t pfn_qp_event_cb,
\r
94 IN OUT ci_umv_buf_t* const p_umv_buf );
\r
98 IN const ib_qp_handle_t h_qp,
\r
99 IN const ib_pd_handle_t h_pd,
\r
100 IN const ib_net64_t port_guid OPTIONAL,
\r
101 IN const ib_qp_create_t* const p_qp_create,
\r
102 IN OUT ci_umv_buf_t* const p_umv_buf );
\r
106 IN al_conn_qp_t* const p_conn_qp,
\r
107 IN const ib_pd_handle_t h_pd,
\r
108 IN const ib_qp_create_t* const p_qp_create,
\r
109 IN OUT ci_umv_buf_t* const p_umv_buf );
\r
113 IN al_dgrm_qp_t* const p_dgrm_qp,
\r
114 IN const ib_pd_handle_t h_pd,
\r
115 IN const ib_qp_create_t* const p_qp_create,
\r
116 IN OUT ci_umv_buf_t* const p_umv_buf );
\r
120 IN al_special_qp_t* const p_special_qp,
\r
121 IN const ib_pd_handle_t h_pd,
\r
122 IN const ib_net64_t port_guid,
\r
123 IN const ib_qp_create_t* const p_qp_create );
\r
127 IN al_qp_alias_t* const p_qp_alias,
\r
128 IN const ib_pd_handle_t h_pd,
\r
129 IN const ib_net64_t port_guid,
\r
130 IN const ib_qp_create_t* const p_qp_create );
\r
134 IN al_mad_qp_t* const p_mad_qp,
\r
135 IN const ib_pd_handle_t h_pd,
\r
136 IN const ib_qp_create_t* const p_qp_create,
\r
137 IN const ib_pfn_event_cb_t pfn_qp_event_cb );
\r
141 IN const ib_qp_handle_t h_qp,
\r
142 IN const ib_dgrm_info_t* const p_dgrm_info );
\r
147 IN const ib_qp_handle_t h_qp,
\r
148 IN const ib_qp_mod_t* const p_qp_mod,
\r
149 IN OUT ci_umv_buf_t* const p_umv_buf );
\r
154 IN const ib_qp_handle_t h_qp,
\r
155 IN const ib_dgrm_info_t* const p_dgrm_info );
\r
159 IN al_mad_qp_t* const p_mad_qp );
\r
163 IN const ib_qp_handle_t h_qp,
\r
164 IN ib_send_wr_t* const p_send_wr,
\r
165 OUT ib_send_wr_t **pp_send_failure );
\r
168 special_qp_post_send(
\r
169 IN const ib_qp_handle_t h_qp,
\r
170 IN ib_send_wr_t* const p_send_wr,
\r
171 OUT ib_send_wr_t **pp_send_failure );
\r
175 IN const ib_qp_handle_t h_qp,
\r
176 IN al_mad_wr_t* const p_mad_wr );
\r
179 mad_qp_resume_sends(
\r
180 IN ib_qp_handle_t h_qp );
\r
184 IN al_mad_qp_t* p_mad_qp,
\r
185 IN al_mad_wr_t* const p_mad_wr );
\r
189 IN const ib_cq_handle_t h_cq,
\r
190 IN void *cq_context );
\r
194 IN const ib_cq_handle_t h_cq,
\r
195 IN void *cq_context );
\r
199 IN al_mad_qp_t* p_mad_qp,
\r
200 IN const ib_cq_handle_t h_cq,
\r
201 IN ib_wc_type_t wc_type );
\r
204 mad_qp_cq_event_cb(
\r
205 IN ib_async_event_rec_t *p_event_rec );
\r
210 * Allocates a structure to store QP information.
\r
214 IN const ib_qp_type_t qp_type,
\r
215 OUT ib_qp_handle_t* const ph_qp )
\r
217 ib_qp_handle_t h_qp;
\r
221 case IB_QPT_RELIABLE_CONN:
\r
222 case IB_QPT_UNRELIABLE_CONN:
\r
223 h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_conn_qp_t ) );
\r
226 case IB_QPT_UNRELIABLE_DGRM:
\r
227 h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_dgrm_qp_t ) );
\r
232 h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_special_qp_t ) );
\r
235 case IB_QPT_RAW_IPV6:
\r
236 case IB_QPT_RAW_ETHER:
\r
237 h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( ib_qp_t ) );
\r
241 h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_mad_qp_t ) );
\r
244 case IB_QPT_QP0_ALIAS:
\r
245 case IB_QPT_QP1_ALIAS:
\r
246 h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_qp_alias_t ) );
\r
250 CL_ASSERT( qp_type == IB_QPT_RELIABLE_CONN ||
\r
251 qp_type == IB_QPT_UNRELIABLE_CONN ||
\r
252 qp_type == IB_QPT_UNRELIABLE_DGRM ||
\r
253 qp_type == IB_QPT_QP0 ||
\r
254 qp_type == IB_QPT_QP1 ||
\r
255 qp_type == IB_QPT_RAW_IPV6 ||
\r
256 qp_type == IB_QPT_RAW_ETHER ||
\r
257 qp_type == IB_QPT_MAD ||
\r
258 qp_type == IB_QPT_QP0_ALIAS ||
\r
259 qp_type == IB_QPT_QP1_ALIAS );
\r
260 return IB_INVALID_SETTING;
\r
265 return IB_INSUFFICIENT_MEMORY;
\r
268 h_qp->type = qp_type;
\r
277 * Initializes the QP information structure.
\r
281 IN const ib_pd_handle_t h_pd,
\r
282 IN const ib_qp_create_t* const p_qp_create,
\r
283 IN const void* const qp_context,
\r
284 IN const ib_pfn_event_cb_t pfn_qp_event_cb,
\r
285 OUT ib_qp_handle_t* const ph_qp,
\r
286 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
288 ib_api_status_t status;
\r
289 ib_qp_handle_t h_qp;
\r
291 if( !p_qp_create || !ph_qp )
\r
293 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
294 return IB_INVALID_PARAMETER;
\r
297 /* Allocate a QP. */
\r
298 status = alloc_qp( p_qp_create->qp_type, &h_qp );
\r
299 if( status != IB_SUCCESS )
\r
304 /* Init the base QP first. */
\r
305 status = init_base_qp( h_qp, qp_context, pfn_qp_event_cb, p_umv_buf );
\r
306 if( status != IB_SUCCESS )
\r
309 /* Initialize the QP based on its type. */
\r
310 switch( h_qp->type )
\r
312 case IB_QPT_RELIABLE_CONN:
\r
313 case IB_QPT_UNRELIABLE_CONN:
\r
314 if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) ||
\r
315 AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) )
\r
317 status = IB_INVALID_CQ_HANDLE;
\r
320 status = init_conn_qp( (al_conn_qp_t*)h_qp, h_pd, p_qp_create, p_umv_buf );
\r
323 case IB_QPT_UNRELIABLE_DGRM:
\r
324 if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) ||
\r
325 AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) )
\r
327 status = IB_INVALID_CQ_HANDLE;
\r
330 status = init_dgrm_qp( (al_dgrm_qp_t*)h_qp, h_pd, p_qp_create, p_umv_buf );
\r
334 if( p_qp_create->h_sq_cq || p_qp_create->h_rq_cq )
\r
336 status = IB_INVALID_CQ_HANDLE;
\r
339 status = init_mad_qp( (al_mad_qp_t*)h_qp, h_pd, p_qp_create,
\r
344 CL_ASSERT( h_qp->type == IB_QPT_RELIABLE_CONN ||
\r
345 h_qp->type == IB_QPT_UNRELIABLE_CONN ||
\r
346 h_qp->type == IB_QPT_UNRELIABLE_DGRM ||
\r
347 h_qp->type == IB_QPT_MAD );
\r
348 status = IB_INVALID_SETTING;
\r
352 if( status != IB_SUCCESS )
\r
354 h_qp->obj.pfn_destroy( &h_qp->obj, NULL );
\r
361 * Note that we don't release the reference taken in init_al_obj here.
\r
362 * For kernel clients, it is release in ib_create_qp. For user-mode
\r
363 * clients is is released by the proxy after the handle is extracted.
\r
372 IN const ib_pd_handle_t h_pd,
\r
373 IN const ib_net64_t port_guid,
\r
374 IN const ib_qp_create_t* const p_qp_create,
\r
375 IN const void* const qp_context,
\r
376 IN const ib_pfn_event_cb_t pfn_qp_event_cb,
\r
377 OUT ib_pool_key_t* const p_pool_key OPTIONAL,
\r
378 OUT ib_qp_handle_t* const ph_qp,
\r
379 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
381 ib_api_status_t status;
\r
382 ib_qp_handle_t h_qp;
\r
384 if( !p_qp_create || !ph_qp )
\r
386 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
387 return IB_INVALID_PARAMETER;
\r
390 /* Only allow creation of the special QP types. */
\r
391 switch( p_qp_create->qp_type )
\r
397 case IB_QPT_QP0_ALIAS:
\r
398 case IB_QPT_QP1_ALIAS:
\r
399 case IB_QPT_RAW_IPV6:
\r
400 case IB_QPT_RAW_ETHER:
\r
401 break; /* The QP type is valid. */
\r
404 return IB_INVALID_SETTING;
\r
407 /* Allocate a QP. */
\r
408 status = alloc_qp( p_qp_create->qp_type, &h_qp );
\r
409 if( status != IB_SUCCESS )
\r
414 /* Init the base QP first. */
\r
415 status = init_base_qp( h_qp, qp_context, pfn_qp_event_cb, p_umv_buf );
\r
416 if( status != IB_SUCCESS )
\r
419 /* Initialize the QP based on its type. */
\r
420 switch( h_qp->type )
\r
425 if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) ||
\r
426 AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) )
\r
428 status = IB_INVALID_CQ_HANDLE;
\r
431 status = init_special_qp( (al_special_qp_t*)h_qp, h_pd, port_guid,
\r
434 #endif /* CL_KERNEL */
\r
436 case IB_QPT_QP0_ALIAS:
\r
437 case IB_QPT_QP1_ALIAS:
\r
438 if( p_qp_create->h_sq_cq || p_qp_create->h_rq_cq )
\r
440 status = IB_INVALID_CQ_HANDLE;
\r
443 status = init_alias_qp( (al_qp_alias_t*)h_qp, h_pd, port_guid,
\r
445 if( status == IB_SUCCESS && p_pool_key )
\r
447 /* Create a pool_key to access to the global MAD pool. */
\r
448 status = ib_reg_mad_pool( gh_mad_pool, h_pd,
\r
449 &((al_qp_alias_t*)h_qp)->pool_key );
\r
450 if( status == IB_SUCCESS )
\r
451 *p_pool_key = ((al_qp_alias_t*)h_qp)->pool_key;
\r
455 case IB_QPT_RAW_IPV6:
\r
456 case IB_QPT_RAW_ETHER:
\r
457 if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) ||
\r
458 AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) )
\r
460 status = IB_INVALID_CQ_HANDLE;
\r
463 status = init_raw_qp( h_qp, h_pd, port_guid, p_qp_create, p_umv_buf );
\r
467 CL_ASSERT( h_qp->type == IB_QPT_QP0 ||
\r
468 h_qp->type == IB_QPT_QP1 ||
\r
469 h_qp->type == IB_QPT_QP0_ALIAS ||
\r
470 h_qp->type == IB_QPT_QP1_ALIAS ||
\r
471 h_qp->type == IB_QPT_RAW_IPV6 ||
\r
472 h_qp->type == IB_QPT_RAW_ETHER );
\r
474 status = IB_INVALID_SETTING;
\r
478 if( status != IB_SUCCESS )
\r
480 h_qp->obj.pfn_destroy( &h_qp->obj, NULL );
\r
490 static ib_api_status_t
\r
492 IN const ib_qp_handle_t h_qp,
\r
493 IN const ib_qp_mod_t* const p_qp_mod,
\r
494 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
496 UNUSED_PARAM( h_qp );
\r
497 UNUSED_PARAM( p_qp_mod );
\r
498 UNUSED_PARAM( p_umv_buf );
\r
499 return IB_INVALID_PARAMETER;
\r
503 static ib_api_status_t
\r
505 IN const ib_qp_handle_t h_qp,
\r
506 IN ib_send_wr_t* const p_send_wr,
\r
507 IN ib_send_wr_t **pp_send_failure OPTIONAL )
\r
509 UNUSED_PARAM( h_qp );
\r
510 UNUSED_PARAM( p_send_wr );
\r
511 UNUSED_PARAM( pp_send_failure );
\r
512 return IB_INVALID_PARAMETER;
\r
516 static ib_api_status_t
\r
518 IN const ib_qp_handle_t h_qp,
\r
519 IN ib_recv_wr_t* const p_recv_wr,
\r
520 IN ib_recv_wr_t **p_recv_failure OPTIONAL )
\r
522 UNUSED_PARAM( h_qp );
\r
523 UNUSED_PARAM( p_recv_wr );
\r
524 UNUSED_PARAM( p_recv_failure );
\r
525 return IB_INVALID_PARAMETER;
\r
529 static ib_api_status_t
\r
530 al_bad_init_dgrm_svc(
\r
531 IN const ib_qp_handle_t h_qp,
\r
532 IN const ib_dgrm_info_t* const p_dgrm_info )
\r
534 UNUSED_PARAM( h_qp );
\r
535 UNUSED_PARAM( p_dgrm_info );
\r
536 return IB_INVALID_PARAMETER;
\r
540 static ib_api_status_t
\r
541 al_bad_reg_mad_svc(
\r
542 IN const ib_qp_handle_t h_qp,
\r
543 IN const ib_mad_svc_t* const p_mad_svc,
\r
544 OUT ib_mad_svc_handle_t* const ph_mad_svc )
\r
546 UNUSED_PARAM( h_qp );
\r
547 UNUSED_PARAM( p_mad_svc );
\r
548 UNUSED_PARAM( ph_mad_svc );
\r
549 return IB_INVALID_PARAMETER;
\r
553 static ib_api_status_t
\r
554 al_bad_dereg_mad_svc(
\r
555 IN const ib_mad_svc_handle_t h_mad_svc )
\r
557 UNUSED_PARAM( h_mad_svc );
\r
558 return IB_INVALID_PARAMETER;
\r
564 IN const ib_qp_handle_t h_qp,
\r
565 IN al_mad_wr_t* const p_mad_wr )
\r
567 UNUSED_PARAM( h_qp );
\r
568 UNUSED_PARAM( p_mad_wr );
\r
574 IN const ib_qp_handle_t h_qp )
\r
576 UNUSED_PARAM( h_qp );
\r
581 static ib_api_status_t
\r
583 IN const ib_qp_handle_t h_qp,
\r
584 IN const ib_mcast_req_t* const p_mcast_req )
\r
586 UNUSED_PARAM( h_qp );
\r
587 UNUSED_PARAM( p_mcast_req );
\r
588 return IB_INVALID_PARAMETER;
\r
594 IN ib_qp_t* const p_qp,
\r
595 IN const void* const qp_context,
\r
596 IN const ib_pfn_event_cb_t pfn_qp_event_cb,
\r
597 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
599 ib_api_status_t status;
\r
600 al_obj_type_t obj_type = AL_OBJ_TYPE_H_QP;
\r
605 obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT;
\r
607 construct_al_obj( &p_qp->obj, obj_type );
\r
608 status = init_al_obj( &p_qp->obj, qp_context, TRUE,
\r
609 destroying_qp, cleanup_qp, free_qp );
\r
610 if( status != IB_SUCCESS )
\r
612 free_qp( &p_qp->obj );
\r
616 p_qp->pfn_event_cb = pfn_qp_event_cb;
\r
619 * All function pointers should be invalid. They will be set by
\r
620 * derived QP types where appropriate.
\r
622 p_qp->pfn_modify_qp = al_bad_modify_qp;
\r
623 p_qp->pfn_post_recv = al_bad_post_recv;
\r
624 p_qp->pfn_post_send = al_bad_post_send;
\r
625 p_qp->pfn_reg_mad_svc = al_bad_reg_mad_svc;
\r
626 p_qp->pfn_dereg_mad_svc = al_bad_dereg_mad_svc;
\r
627 p_qp->pfn_queue_mad = al_bad_queue_mad;
\r
628 p_qp->pfn_resume_mad = al_bad_resume_mad;
\r
629 p_qp->pfn_init_dgrm_svc = al_bad_init_dgrm_svc;
\r
630 p_qp->pfn_join_mcast = al_bad_join_mcast;
\r
632 if( p_qp->type == IB_QPT_RELIABLE_CONN ||
\r
633 p_qp->type == IB_QPT_UNRELIABLE_CONN )
\r
635 ((al_conn_qp_t*)p_qp)->cid = AL_INVALID_CID;
\r
645 IN const ib_qp_handle_t h_qp,
\r
646 IN const ib_pd_handle_t h_pd,
\r
647 IN const ib_net64_t port_guid OPTIONAL,
\r
648 IN const ib_qp_create_t* const p_qp_create,
\r
649 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
651 ib_api_status_t status;
\r
652 ib_qp_create_t qp_create;
\r
653 ib_qp_attr_t qp_attr;
\r
656 status = attach_al_obj( &h_pd->obj, &h_qp->obj );
\r
657 if( status != IB_SUCCESS )
\r
660 /* Convert AL handles to CI handles. */
\r
661 qp_create = *p_qp_create;
\r
662 convert_qp_handle( qp_create );
\r
664 /* Clear the QP attributes to ensure non-set values are 0. */
\r
665 cl_memclr( &qp_attr, sizeof( ib_qp_attr_t ) );
\r
667 h_qp->port_guid = port_guid;
\r
670 * Allocate a QP from the channel adapter. Note that these calls
\r
671 * set the send and receive pointers appropriately for posting
\r
674 if( port_guid == UNBOUND_PORT_GUID )
\r
677 verbs_create_qp( h_pd, h_qp, &qp_create, &qp_attr, p_umv_buf );
\r
681 status = get_port_num( h_pd->obj.p_ci_ca, port_guid, &port_num );
\r
682 if( status == IB_SUCCESS )
\r
684 status = verbs_get_spl_qp( h_pd, port_num, h_qp,
\r
685 &qp_create, &qp_attr );
\r
688 if( status != IB_SUCCESS )
\r
693 /* Override function pointers. */
\r
694 h_qp->pfn_modify_qp = al_modify_qp;
\r
696 if( h_qp->type == IB_QPT_UNRELIABLE_DGRM ||
\r
697 h_qp->type == IB_QPT_QP0 ||
\r
698 h_qp->type == IB_QPT_QP1 )
\r
700 /* We have to mess with the AV handles. */
\r
701 h_qp->pfn_ud_post_send = h_qp->pfn_post_send;
\r
702 h_qp->h_ud_send_qp = h_qp->h_send_qp;
\r
704 h_qp->pfn_post_send = ud_post_send;
\r
705 h_qp->h_send_qp = h_qp;
\r
708 h_qp->h_recv_cq = p_qp_create->h_rq_cq;
\r
709 h_qp->h_send_cq = p_qp_create->h_sq_cq;
\r
711 h_qp->recv_cq_rel.p_child_obj = (cl_obj_t*)h_qp;
\r
712 h_qp->send_cq_rel.p_child_obj = (cl_obj_t*)h_qp;
\r
714 cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );
\r
715 cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );
\r
717 h_qp->num = qp_attr.num;
\r
726 IN al_conn_qp_t* const p_conn_qp,
\r
727 IN const ib_pd_handle_t h_pd,
\r
728 IN const ib_qp_create_t* const p_qp_create,
\r
729 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
731 ib_api_status_t status;
\r
732 CL_ASSERT( p_conn_qp );
\r
734 /* Initialize the inherited QP first. */
\r
735 status = init_raw_qp( &p_conn_qp->qp, h_pd, UNBOUND_PORT_GUID,
\r
736 p_qp_create, p_umv_buf );
\r
746 IN al_dgrm_qp_t* const p_dgrm_qp,
\r
747 IN const ib_pd_handle_t h_pd,
\r
748 IN const ib_qp_create_t* const p_qp_create,
\r
749 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
751 ib_api_status_t status;
\r
752 CL_ASSERT( p_dgrm_qp );
\r
754 /* Initialize the inherited QP first. */
\r
755 status = init_raw_qp( p_dgrm_qp, h_pd, UNBOUND_PORT_GUID,
\r
756 p_qp_create, p_umv_buf );
\r
757 if( status != IB_SUCCESS )
\r
762 /* Override function pointers. */
\r
763 p_dgrm_qp->pfn_init_dgrm_svc = init_dgrm_svc;
\r
764 p_dgrm_qp->pfn_join_mcast = al_join_mcast;
\r
773 IN al_special_qp_t* const p_special_qp,
\r
774 IN const ib_pd_handle_t h_pd,
\r
775 IN const ib_net64_t port_guid,
\r
776 IN const ib_qp_create_t* const p_qp_create )
\r
778 ib_api_status_t status;
\r
779 CL_ASSERT( p_special_qp );
\r
781 /* Construct the special QP. */
\r
782 cl_qlist_init( &p_special_qp->to_send_queue );
\r
784 /* Initialize the inherited QP first. */
\r
786 init_raw_qp( &p_special_qp->qp, h_pd, port_guid, p_qp_create, NULL );
\r
787 if( status != IB_SUCCESS )
\r
792 /* Override function pointers. */
\r
793 p_special_qp->qp.pfn_init_dgrm_svc = init_dgrm_svc;
\r
794 p_special_qp->qp.pfn_queue_mad = special_qp_queue_mad;
\r
795 p_special_qp->qp.pfn_resume_mad = special_qp_resume_sends;
\r
803 IN al_qp_alias_t* const p_qp_alias,
\r
804 IN const ib_pd_handle_t h_pd,
\r
805 IN const ib_net64_t port_guid,
\r
806 IN const ib_qp_create_t* const p_qp_create )
\r
808 ib_api_status_t status;
\r
810 CL_ASSERT( p_qp_alias );
\r
811 UNUSED_PARAM( p_qp_create );
\r
813 if( h_pd->type != IB_PDT_ALIAS )
\r
815 return IB_INVALID_PD_HANDLE;
\r
818 status = attach_al_obj( &h_pd->obj, &p_qp_alias->qp.obj );
\r
819 if( status != IB_SUCCESS )
\r
822 switch( p_qp_alias->qp.type )
\r
824 case IB_QPT_QP0_ALIAS:
\r
825 status = acquire_smi_disp( port_guid, &p_qp_alias->h_mad_disp );
\r
828 case IB_QPT_QP1_ALIAS:
\r
829 status = acquire_gsi_disp( port_guid, &p_qp_alias->h_mad_disp );
\r
833 CL_ASSERT( p_qp_alias->qp.type == IB_QPT_QP0_ALIAS ||
\r
834 p_qp_alias->qp.type == IB_QPT_QP1_ALIAS );
\r
838 if( status != IB_SUCCESS )
\r
841 /* Get a copy of the QP used by the MAD dispatcher. */
\r
842 ref_al_obj( &p_qp_alias->h_mad_disp->h_qp->obj );
\r
843 p_qp_alias->qp.h_ci_qp = p_qp_alias->h_mad_disp->h_qp->h_ci_qp;
\r
845 /* Override function pointers. */
\r
846 p_qp_alias->qp.pfn_reg_mad_svc = reg_mad_svc;
\r
850 #endif /* CL_KERNEL */
\r
856 IN al_mad_qp_t* const p_mad_qp,
\r
857 IN const ib_pd_handle_t h_pd,
\r
858 IN const ib_qp_create_t* const p_qp_create,
\r
859 IN const ib_pfn_event_cb_t pfn_qp_event_cb )
\r
861 ib_cq_create_t cq_create;
\r
862 ib_qp_create_t qp_create;
\r
863 ib_al_handle_t h_al;
\r
864 ib_ca_handle_t h_ca;
\r
865 ib_api_status_t status;
\r
867 CL_ASSERT( p_mad_qp );
\r
869 /* Initialize the send and receive tracking queues. */
\r
870 cl_qlist_init( &p_mad_qp->to_send_queue );
\r
871 cl_qlist_init( &p_mad_qp->send_queue );
\r
872 cl_qlist_init( &p_mad_qp->recv_queue );
\r
874 /* The CQ handles must be NULL when creating a MAD queue pair. */
\r
875 if( p_qp_create->h_sq_cq || p_qp_create->h_rq_cq )
\r
877 return IB_INVALID_SETTING;
\r
880 /* Initialize the CQs used with the MAD QP. */
\r
881 cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
\r
883 /* Create the send CQ. */
\r
884 cq_create.size = p_qp_create->sq_depth;
\r
885 cq_create.pfn_comp_cb = mad_send_comp_cb;
\r
887 status = ib_create_cq( h_pd->obj.p_ci_ca->h_ca, &cq_create,
\r
888 p_mad_qp, mad_qp_cq_event_cb, &p_mad_qp->h_send_cq );
\r
890 if( status != IB_SUCCESS )
\r
895 /* Reference the MAD QP on behalf of ib_create_cq. */
\r
896 ref_al_obj( &p_mad_qp->qp.obj );
\r
898 /* Create the receive CQ. */
\r
899 cq_create.size = p_qp_create->rq_depth;
\r
900 cq_create.pfn_comp_cb = mad_recv_comp_cb;
\r
902 h_ca = PARENT_STRUCT( h_pd->obj.p_parent_obj, ib_ca_t, obj );
\r
903 status = ib_create_cq( h_ca, &cq_create, p_mad_qp, mad_qp_cq_event_cb,
\r
904 &p_mad_qp->h_recv_cq );
\r
906 if( status != IB_SUCCESS )
\r
911 /* Reference the MAD QP on behalf of ib_create_cq. */
\r
912 ref_al_obj( &p_mad_qp->qp.obj );
\r
914 /* Save the requested receive queue depth. This is used to post MADs. */
\r
915 p_mad_qp->max_rq_depth = p_qp_create->rq_depth;
\r
917 /* Allocate a datagram QP for the MAD QP. */
\r
918 qp_create = *p_qp_create;
\r
919 qp_create.qp_type = IB_QPT_UNRELIABLE_DGRM;
\r
920 qp_create.sq_sge = 1;
\r
921 qp_create.rq_sge = 1;
\r
922 qp_create.h_rq_cq = p_mad_qp->h_recv_cq;
\r
923 qp_create.h_sq_cq = p_mad_qp->h_send_cq;
\r
925 status = ib_create_qp( h_pd, &qp_create, p_mad_qp, pfn_qp_event_cb,
\r
926 &p_mad_qp->h_dgrm_qp );
\r
928 if( status != IB_SUCCESS )
\r
933 /* Reference the MAD QP on behalf of ib_create_qp. */
\r
934 ref_al_obj( &p_mad_qp->qp.obj );
\r
936 /* Create the MAD dispatch service. */
\r
937 status = create_mad_disp( &p_mad_qp->qp.obj, &p_mad_qp->qp,
\r
938 &p_mad_qp->h_mad_disp );
\r
939 if( status != IB_SUCCESS )
\r
944 /* Override function pointers. */
\r
945 p_mad_qp->qp.pfn_init_dgrm_svc = init_mad_dgrm_svc;
\r
946 p_mad_qp->qp.pfn_queue_mad = mad_qp_queue_mad;
\r
947 p_mad_qp->qp.pfn_resume_mad = mad_qp_resume_sends;
\r
948 p_mad_qp->qp.pfn_reg_mad_svc = reg_mad_svc;
\r
950 /* The client's AL handle is the grandparent of the PD. */
\r
951 h_al = PARENT_STRUCT( h_pd->obj.p_parent_obj->p_parent_obj, ib_al_t, obj );
\r
953 /* Create a receive MAD pool. */
\r
954 status = ib_create_mad_pool( h_al, p_mad_qp->max_rq_depth + 16, 0, 16,
\r
955 &p_mad_qp->h_pool );
\r
957 if (status != IB_SUCCESS)
\r
963 * The MAD pool is a child of the client's AL instance. If the client
\r
964 * closes AL, the MAD pool will be destroyed before the MAD queue pair.
\r
965 * Therefore, we hold a reference on the MAD pool to keep it from being
\r
966 * destroyed until the MAD queue pair is destroyed. Refer to the MAD
\r
967 * queue pair cleanup code.
\r
969 ref_al_obj( &p_mad_qp->h_pool->obj );
\r
971 /* Register the MAD pool with the PD. */
\r
972 status = ib_reg_mad_pool( p_mad_qp->h_pool, h_pd, &p_mad_qp->pool_key );
\r
974 if (status != IB_SUCCESS)
\r
980 * Attach the MAD queue pair to the protection domain. This must be
\r
981 * done after creating the datagram queue pair and the MAD pool to set
\r
982 * the correct order of object destruction.
\r
984 status = attach_al_obj( &h_pd->obj, &p_mad_qp->qp.obj );
\r
986 /* Get a copy of the CI datagram QP for ib_query_qp. */
\r
987 p_mad_qp->qp.h_ci_qp = p_mad_qp->h_dgrm_qp->h_ci_qp;
\r
996 IN const ib_qp_handle_t h_qp,
\r
997 IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL )
\r
999 AL_ENTER( AL_DBG_QP );
\r
1001 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1003 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1004 return IB_INVALID_QP_HANDLE;
\r
1007 ref_al_obj( &h_qp->obj );
\r
1008 h_qp->obj.pfn_destroy( &h_qp->obj, pfn_destroy_cb );
\r
1010 AL_EXIT( AL_DBG_QP );
\r
1011 return IB_SUCCESS;
\r
1017 * Release any resources that must be cleaned up immediately, such as
\r
1018 * any AL resources acquired by calling through the main API.
\r
1022 IN al_obj_t *p_obj )
\r
1024 ib_qp_handle_t h_qp;
\r
1025 al_mad_qp_t *p_mad_qp;
\r
1026 al_qp_alias_t *p_qp_alias;
\r
1029 CL_ASSERT( p_obj );
\r
1030 h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj );
\r
1032 switch( h_qp->type )
\r
1035 /* Destroy QP and CQ services required for MAD QP support. */
\r
1036 p_mad_qp = PARENT_STRUCT( h_qp, al_mad_qp_t, qp );
\r
1038 if( p_mad_qp->h_dgrm_qp )
\r
1040 ib_destroy_qp( p_mad_qp->h_dgrm_qp,
\r
1041 (ib_pfn_destroy_cb_t)deref_al_obj );
\r
1042 p_mad_qp->qp.h_ci_qp = NULL;
\r
1045 if( p_mad_qp->h_recv_cq )
\r
1047 ib_destroy_cq( p_mad_qp->h_recv_cq,
\r
1048 (ib_pfn_destroy_cb_t)deref_al_obj );
\r
1051 if( p_mad_qp->h_send_cq )
\r
1053 ib_destroy_cq( p_mad_qp->h_send_cq,
\r
1054 (ib_pfn_destroy_cb_t)deref_al_obj );
\r
1058 case IB_QPT_QP0_ALIAS:
\r
1059 case IB_QPT_QP1_ALIAS:
\r
1060 p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp );
\r
1062 if( p_qp_alias->pool_key )
\r
1064 ib_api_status_t status;
\r
1065 /* Deregister the pool_key. */
\r
1066 ref_al_obj( &p_qp_alias->pool_key->obj );
\r
1067 status = dereg_mad_pool( p_qp_alias->pool_key, AL_KEY_ALIAS );
\r
1068 if( status != IB_SUCCESS )
\r
1070 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("dereg_mad_pool returned %s.\n",
\r
1071 ib_get_err_str(status)) );
\r
1072 deref_al_obj( &p_qp_alias->pool_key->obj );
\r
1074 p_qp_alias->pool_key = NULL;
\r
1077 if( p_qp_alias->qp.h_ci_qp )
\r
1079 deref_al_obj( &p_qp_alias->h_mad_disp->h_qp->obj );
\r
1080 p_qp_alias->qp.h_ci_qp = NULL;
\r
1084 * If the pool_key still exists here, then the QP is being destroyed
\r
1085 * by destroying its parent (the PD). Destruction of the PD will also
\r
1086 * destroy the pool_key.
\r
1089 if( p_qp_alias->h_mad_disp )
\r
1090 deref_al_obj( &p_qp_alias->h_mad_disp->obj );
\r
1093 case IB_QPT_RELIABLE_CONN:
\r
1094 case IB_QPT_UNRELIABLE_CONN:
\r
1095 cid = cl_atomic_xchg(
\r
1096 &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );
\r
1097 if( cid != AL_INVALID_CID )
\r
1099 ref_al_obj( &h_qp->obj );
\r
1100 if( al_destroy_cep(
\r
1101 h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )
\r
1103 deref_al_obj( &h_qp->obj );
\r
1107 /* Fall through. */
\r
1108 case IB_QPT_UNRELIABLE_DGRM:
\r
1110 /* Multicast membership gets cleaned up by object hierarchy. */
\r
1111 cq_detach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );
\r
1112 cq_detach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );
\r
1119 * Release any HW resources.
\r
1123 IN al_obj_t *p_obj )
\r
1125 ib_qp_handle_t h_qp;
\r
1126 al_mad_qp_t* p_mad_qp;
\r
1127 al_mad_wr_t* p_mad_wr;
\r
1128 cl_list_item_t* p_list_item;
\r
1129 al_mad_element_t* p_al_mad;
\r
1130 ib_api_status_t status;
\r
1132 CL_ASSERT( p_obj );
\r
1133 h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj );
\r
1135 if( verbs_check_qp( h_qp ) )
\r
1137 status = verbs_destroy_qp( h_qp );
\r
1138 if( status != IB_SUCCESS )
\r
1140 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
1141 ("verbs_destroy_qp failed with status %s.\n",
\r
1142 ib_get_err_str(status)) );
\r
1144 h_qp->h_ci_qp = NULL;
\r
1147 if( h_qp->type == IB_QPT_MAD )
\r
1149 /* All MAD queue pair operations are complete. */
\r
1150 p_mad_qp = PARENT_STRUCT( h_qp, al_mad_qp_t, qp );
\r
1152 /* Append the pending MAD send queue to the posted MAD send queue. */
\r
1153 cl_qlist_insert_list_tail( &p_mad_qp->send_queue,
\r
1154 &p_mad_qp->to_send_queue );
\r
1156 /* Complete all MAD sends as "flushed". */
\r
1157 for( p_list_item = cl_qlist_remove_head( &p_mad_qp->send_queue );
\r
1158 p_list_item != cl_qlist_end( &p_mad_qp->send_queue );
\r
1159 p_list_item = cl_qlist_remove_head( &p_mad_qp->send_queue ) )
\r
1161 p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
\r
1162 mad_qp_flush_send( p_mad_qp, p_mad_wr );
\r
1165 /* Return any posted receive MAD elements to the pool. */
\r
1166 for( p_list_item = cl_qlist_remove_head( &p_mad_qp->recv_queue );
\r
1167 p_list_item != cl_qlist_end( &p_mad_qp->recv_queue );
\r
1168 p_list_item = cl_qlist_remove_head( &p_mad_qp->recv_queue ) )
\r
1170 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t,
\r
1173 status = ib_put_mad( &p_al_mad->element );
\r
1174 CL_ASSERT( status == IB_SUCCESS );
\r
1177 if( p_mad_qp->h_pool )
\r
1180 * Destroy the receive MAD pool. If the client has closed the
\r
1181 * AL instance, the MAD pool should already be destroying. In
\r
1182 * this case, we simply release our reference on the pool to
\r
1183 * allow it to cleanup and deallocate. Otherwise, we initiate
\r
1184 * the destruction of the MAD pool and release our reference.
\r
1186 cl_spinlock_acquire( &p_mad_qp->h_pool->obj.lock );
\r
1187 if( p_mad_qp->h_pool->obj.state == CL_DESTROYING )
\r
1189 cl_spinlock_release( &p_mad_qp->h_pool->obj.lock );
\r
1193 cl_spinlock_release( &p_mad_qp->h_pool->obj.lock );
\r
1194 ib_destroy_mad_pool( p_mad_qp->h_pool );
\r
1196 deref_al_obj( &p_mad_qp->h_pool->obj );
\r
1201 if( h_qp->h_recv_cq )
\r
1202 deref_al_obj( &h_qp->h_recv_cq->obj );
\r
1203 if( h_qp->h_send_cq )
\r
1204 deref_al_obj( &h_qp->h_send_cq->obj );
\r
1212 IN al_obj_t *p_obj )
\r
1214 ib_qp_handle_t h_qp;
\r
1216 CL_ASSERT( p_obj );
\r
1217 h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj );
\r
1219 destroy_al_obj( p_obj );
\r
1227 IN const ib_qp_handle_t h_qp,
\r
1228 OUT ib_qp_attr_t* const p_qp_attr )
\r
1230 return query_qp( h_qp, p_qp_attr, NULL );
\r
1236 IN const ib_qp_handle_t h_qp,
\r
1237 OUT ib_qp_attr_t* const p_qp_attr,
\r
1238 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
1240 ib_api_status_t status;
\r
1242 AL_ENTER( AL_DBG_QP );
\r
1244 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1246 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1247 return IB_INVALID_QP_HANDLE;
\r
1251 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1252 return IB_INVALID_PARAMETER;
\r
1255 status = verbs_query_qp( h_qp, p_qp_attr );
\r
1256 if( status != IB_SUCCESS )
\r
1258 AL_EXIT( AL_DBG_QP );
\r
1262 /* Convert to using AL's handles. */
\r
1263 p_qp_attr->h_pd = PARENT_STRUCT( h_qp->obj.p_parent_obj, ib_pd_t, obj );
\r
1264 p_qp_attr->h_rq_cq = h_qp->h_recv_cq;
\r
1265 p_qp_attr->h_sq_cq = h_qp->h_send_cq;
\r
1266 p_qp_attr->qp_type = h_qp->type;
\r
1268 AL_EXIT( AL_DBG_QP );
\r
1269 return IB_SUCCESS;
\r
1276 IN const ib_qp_handle_t h_qp,
\r
1277 IN const ib_qp_mod_t* const p_qp_mod )
\r
1279 return modify_qp( h_qp, p_qp_mod, NULL );
\r
1286 IN const ib_qp_handle_t h_qp,
\r
1287 IN const ib_qp_mod_t* const p_qp_mod,
\r
1288 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
1290 ib_api_status_t status;
\r
1292 AL_ENTER( AL_DBG_QP );
\r
1294 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1296 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1297 return IB_INVALID_QP_HANDLE;
\r
1301 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1302 return IB_INVALID_PARAMETER;
\r
1305 status = h_qp->pfn_modify_qp( h_qp, p_qp_mod, p_umv_buf );
\r
1307 AL_EXIT( AL_DBG_QP );
\r
1315 IN const ib_qp_handle_t h_qp,
\r
1316 IN const ib_qp_mod_t* const p_qp_mod,
\r
1317 IN OUT ci_umv_buf_t* const p_umv_buf )
\r
1319 ib_api_status_t status;
\r
1320 ib_qp_attr_t qp_attr;
\r
1322 CL_ASSERT( h_qp );
\r
1325 /* Only allow ERROR and RESET state changes during timewait. */
\r
1326 if( (h_qp->type == IB_QPT_RELIABLE_CONN ||
\r
1327 h_qp->type == IB_QPT_UNRELIABLE_CONN) &&
\r
1328 p_qp_mod->req_state != IB_QPS_ERROR &&
\r
1329 p_qp_mod->req_state != IB_QPS_RESET &&
\r
1330 p_qp_mod->req_state != IB_QPS_INIT &&
\r
1331 cl_get_time_stamp() < h_qp->timewait )
\r
1333 return IB_QP_IN_TIMEWAIT;
\r
1335 #endif /* CL_KERNEL */
\r
1337 /* Modify the actual QP attributes. */
\r
1338 status = verbs_modify_qp( h_qp, p_qp_mod, qp_attr );
\r
1340 /* Record the QP state if the modify was successful. */
\r
1341 if( status == IB_SUCCESS )
\r
1342 h_qp->state = p_qp_mod->req_state;
\r
1351 IN const ib_qp_handle_t h_qp,
\r
1352 IN const ib_dgrm_info_t* const p_dgrm_info OPTIONAL )
\r
1354 ib_api_status_t status;
\r
1356 AL_ENTER( AL_DBG_QP );
\r
1358 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1360 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1361 return IB_INVALID_QP_HANDLE;
\r
1364 switch( h_qp->type )
\r
1368 case IB_QPT_RAW_IPV6:
\r
1369 case IB_QPT_RAW_ETHER:
\r
1372 case IB_QPT_UNRELIABLE_DGRM:
\r
1374 if( !p_dgrm_info )
\r
1376 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
1377 ("IB_INVALID_PARAMETER\n") );
\r
1378 return IB_INVALID_PARAMETER;
\r
1383 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1384 return IB_INVALID_PARAMETER;
\r
1387 status = h_qp->pfn_init_dgrm_svc( h_qp, p_dgrm_info );
\r
1389 AL_EXIT( AL_DBG_QP );
\r
1396 * Initialize a datagram QP to send and receive datagrams.
\r
1400 IN const ib_qp_handle_t h_qp,
\r
1401 IN const ib_dgrm_info_t* const p_dgrm_info OPTIONAL )
\r
1403 al_dgrm_qp_t *p_dgrm_qp;
\r
1404 ib_qp_mod_t qp_mod;
\r
1405 ib_api_status_t status;
\r
1407 CL_ASSERT( h_qp );
\r
1409 p_dgrm_qp = (al_dgrm_qp_t*)h_qp;
\r
1411 /* Change to the RESET state. */
\r
1412 cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
\r
1413 qp_mod.req_state = IB_QPS_RESET;
\r
1415 status = ib_modify_qp( h_qp, &qp_mod );
\r
1416 if( status != IB_SUCCESS )
\r
1421 /* Change to the INIT state. */
\r
1422 cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
\r
1423 qp_mod.req_state = IB_QPS_INIT;
\r
1426 qp_mod.state.init.qkey = p_dgrm_info->qkey;
\r
1427 qp_mod.state.init.pkey_index = p_dgrm_info->pkey_index;
\r
1428 status = get_port_num( h_qp->obj.p_ci_ca, p_dgrm_info->port_guid,
\r
1429 &qp_mod.state.init.primary_port );
\r
1433 if( h_qp->type == IB_QPT_QP0 )
\r
1434 qp_mod.state.init.qkey = 0;
\r
1436 qp_mod.state.init.qkey = IB_QP1_WELL_KNOWN_Q_KEY;
\r
1437 status = get_port_num( h_qp->obj.p_ci_ca, h_qp->port_guid,
\r
1438 &qp_mod.state.init.primary_port );
\r
1440 if( status != IB_SUCCESS )
\r
1445 status = ib_modify_qp( h_qp, &qp_mod );
\r
1446 if( status != IB_SUCCESS )
\r
1451 /* Change to the RTR state. */
\r
1452 cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
\r
1453 qp_mod.req_state = IB_QPS_RTR;
\r
1455 status = ib_modify_qp( h_qp, &qp_mod );
\r
1456 if( status != IB_SUCCESS )
\r
1461 /* Change to the RTS state. */
\r
1462 cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
\r
1463 qp_mod.req_state = IB_QPS_RTS;
\r
1464 qp_mod.state.rts.sq_psn = CL_HTON32(cl_get_time_stamp_sec() & 0x00ffffff);
\r
1465 status = ib_modify_qp( h_qp, &qp_mod );
\r
1473 init_mad_dgrm_svc(
\r
1474 IN const ib_qp_handle_t h_qp,
\r
1475 IN const ib_dgrm_info_t* const p_dgrm_info )
\r
1477 al_mad_qp_t *p_mad_qp;
\r
1478 ib_api_status_t status;
\r
1480 CL_ASSERT( h_qp );
\r
1482 p_mad_qp = (al_mad_qp_t*)h_qp;
\r
1483 status = ib_init_dgrm_svc( p_mad_qp->h_dgrm_qp, p_dgrm_info );
\r
1484 if( status != IB_SUCCESS )
\r
1489 /* Post receive buffers. */
\r
1490 status = mad_qp_post_recvs( p_mad_qp );
\r
1491 if (status != IB_SUCCESS)
\r
1496 /* Force a completion callback to rearm the CQs. */
\r
1497 mad_send_comp_cb( p_mad_qp->h_send_cq, p_mad_qp );
\r
1498 mad_recv_comp_cb( p_mad_qp->h_recv_cq, p_mad_qp );
\r
1507 IN const ib_qp_handle_t h_qp,
\r
1508 IN const ib_mad_svc_t* const p_mad_svc,
\r
1509 OUT ib_mad_svc_handle_t* const ph_mad_svc )
\r
1511 ib_api_status_t status;
\r
1513 AL_ENTER( AL_DBG_MAD_SVC );
\r
1515 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1517 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1518 return IB_INVALID_QP_HANDLE;
\r
1521 status = h_qp->pfn_reg_mad_svc( h_qp, p_mad_svc, ph_mad_svc );
\r
1523 /* Release the reference taken in init_al_obj. */
\r
1524 if( status == IB_SUCCESS )
\r
1525 deref_al_obj( &(*ph_mad_svc)->obj );
\r
1527 AL_EXIT( AL_DBG_MAD_SVC );
\r
1534 IN const ib_qp_handle_t h_qp,
\r
1535 IN const ib_mcast_req_t* const p_mcast_req )
\r
1537 ib_api_status_t status;
\r
1539 AL_ENTER( AL_DBG_MCAST );
\r
1541 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1543 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1544 return IB_INVALID_QP_HANDLE;
\r
1546 if( !p_mcast_req )
\r
1548 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1549 return IB_INVALID_PARAMETER;
\r
1552 status = h_qp->pfn_join_mcast( h_qp, p_mcast_req );
\r
1554 AL_EXIT( AL_DBG_MCAST );
\r
1561 * Post a work request to the send queue of the QP.
\r
1565 IN const ib_qp_handle_t h_qp,
\r
1566 IN ib_send_wr_t* const p_send_wr,
\r
1567 OUT ib_send_wr_t **pp_send_failure OPTIONAL )
\r
1569 ib_api_status_t status;
\r
1570 PERF_DECLARE( IbPostSend );
\r
1571 PERF_DECLARE( PostSend );
\r
1573 cl_perf_start( IbPostSend );
\r
1574 AL_ENTER( AL_DBG_QP );
\r
1576 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1578 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1579 return IB_INVALID_QP_HANDLE;
\r
1581 if( !p_send_wr || ( p_send_wr->p_next && !pp_send_failure ) )
\r
1583 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1584 return IB_INVALID_PARAMETER;
\r
1587 cl_perf_start( PostSend );
\r
1589 h_qp->pfn_post_send( h_qp->h_send_qp, p_send_wr, pp_send_failure );
\r
1590 cl_perf_stop( &g_perf, PostSend );
\r
1592 AL_EXIT( AL_DBG_QP );
\r
1593 cl_perf_stop( &g_perf, IbPostSend );
\r
1601 IN const ib_qp_handle_t h_qp,
\r
1602 IN ib_send_wr_t* const p_send_wr,
\r
1603 OUT ib_send_wr_t **pp_send_failure )
\r
1605 ib_api_status_t status;
\r
1606 ib_send_wr_t *p_wr;
\r
1608 CL_ASSERT( h_qp );
\r
1610 /* Convert all AV handles for verb provider usage. */
\r
1611 for( p_wr = p_send_wr; p_wr; p_wr = p_wr->p_next )
\r
1613 CL_ASSERT( p_wr->dgrm.ud.h_av );
\r
1614 p_wr->dgrm.ud.rsvd = p_wr->dgrm.ud.h_av;
\r
1615 p_wr->dgrm.ud.h_av = convert_av_handle( h_qp, p_wr->dgrm.ud.h_av );
\r
1618 status = h_qp->pfn_ud_post_send(
\r
1619 h_qp->h_ud_send_qp, p_send_wr, pp_send_failure );
\r
1621 /* Restore all AV handles. */
\r
1622 for( p_wr = p_send_wr; p_wr; p_wr = p_wr->p_next )
\r
1623 p_wr->dgrm.ud.h_av = (ib_av_handle_t)p_wr->dgrm.ud.rsvd;
\r
1632 * Post a work request to the send queue of a special QP.
\r
1633 * The special QP is owned by the GSA or SMA, so care must be taken to prevent
\r
1634 * overruning the QP by multiple owners.
\r
1637 special_qp_queue_mad(
\r
1638 IN const ib_qp_handle_t h_qp,
\r
1639 IN al_mad_wr_t* const p_mad_wr )
\r
1641 al_special_qp_t* p_special_qp;
\r
1643 CL_ASSERT( h_qp );
\r
1644 CL_ASSERT( p_mad_wr );
\r
1646 p_special_qp = (al_special_qp_t*)h_qp;
\r
1648 /* Queue the send work request. */
\r
1649 cl_spinlock_acquire( &h_qp->obj.lock );
\r
1650 cl_qlist_insert_tail( &p_special_qp->to_send_queue, &p_mad_wr->list_item );
\r
1651 cl_spinlock_release( &h_qp->obj.lock );
\r
1657 special_qp_resume_sends(
\r
1658 IN const ib_qp_handle_t h_qp )
\r
1660 al_special_qp_t* p_special_qp;
\r
1661 cl_list_item_t* p_list_item;
\r
1662 al_mad_wr_t* p_mad_wr;
\r
1663 ib_api_status_t status;
\r
1665 CL_ASSERT( h_qp );
\r
1666 p_special_qp = (al_special_qp_t*)h_qp;
\r
1668 cl_spinlock_acquire( &p_special_qp->qp.obj.lock );
\r
1670 for( p_list_item = cl_qlist_remove_head( &p_special_qp->to_send_queue );
\r
1671 p_list_item != cl_qlist_end( &p_special_qp->to_send_queue );
\r
1672 p_list_item = cl_qlist_remove_head( &p_special_qp->to_send_queue ) )
\r
1674 p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
\r
1676 cl_spinlock_release( &p_special_qp->qp.obj.lock );
\r
1677 status = spl_qp_svc_send( &p_special_qp->qp, &p_mad_wr->send_wr );
\r
1678 cl_spinlock_acquire( &p_special_qp->qp.obj.lock );
\r
1680 if( status != IB_SUCCESS )
\r
1682 cl_qlist_insert_head( &p_special_qp->to_send_queue, p_list_item );
\r
1687 cl_spinlock_release( &p_special_qp->qp.obj.lock );
\r
1689 #endif /* CL_KERNEL */
\r
1694 IN const ib_qp_handle_t h_qp,
\r
1695 IN al_mad_wr_t* const p_mad_wr )
\r
1697 al_mad_qp_t *p_mad_qp;
\r
1699 CL_ASSERT( h_qp );
\r
1700 p_mad_qp = (al_mad_qp_t*)h_qp;
\r
1702 /* Queue the send work request on the to_send_queue. */
\r
1703 cl_spinlock_acquire( &p_mad_qp->qp.obj.lock );
\r
1704 cl_qlist_insert_tail( &p_mad_qp->to_send_queue, &p_mad_wr->list_item );
\r
1705 cl_spinlock_release( &p_mad_qp->qp.obj.lock );
\r
1711 mad_qp_resume_sends(
\r
1712 IN ib_qp_handle_t h_qp )
\r
1714 al_mad_qp_t *p_mad_qp;
\r
1715 cl_list_item_t* p_list_item;
\r
1716 al_mad_wr_t* p_mad_wr;
\r
1717 ib_api_status_t status;
\r
1719 CL_ASSERT( h_qp );
\r
1721 p_mad_qp = (al_mad_qp_t*)h_qp;
\r
1723 cl_spinlock_acquire( &p_mad_qp->qp.obj.lock );
\r
1725 /* Do not post sends if the MAD queue pair is being destroyed. */
\r
1726 if( p_mad_qp->qp.obj.state == CL_DESTROYING )
\r
1728 cl_spinlock_release( &p_mad_qp->qp.obj.lock );
\r
1732 for( p_list_item = cl_qlist_remove_head( &p_mad_qp->to_send_queue );
\r
1733 p_list_item != cl_qlist_end( &p_mad_qp->to_send_queue );
\r
1734 p_list_item = cl_qlist_remove_head( &p_mad_qp->to_send_queue ) )
\r
1736 p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
\r
1738 /* Always generate send completions. */
\r
1739 p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;
\r
1741 status = ib_post_send( p_mad_qp->h_dgrm_qp, &p_mad_wr->send_wr, NULL );
\r
1743 if( status == IB_SUCCESS )
\r
1745 /* Queue the MAD work request on the send tracking queue. */
\r
1746 cl_qlist_insert_tail( &p_mad_qp->send_queue, &p_mad_wr->list_item );
\r
1750 /* Re-queue the send work request on the to_send_queue. */
\r
1751 cl_qlist_insert_head( &p_mad_qp->to_send_queue, p_list_item );
\r
1756 cl_spinlock_release( &p_mad_qp->qp.obj.lock );
\r
1762 mad_qp_flush_send(
\r
1763 IN al_mad_qp_t* p_mad_qp,
\r
1764 IN al_mad_wr_t* const p_mad_wr )
\r
1768 cl_memclr( &wc, sizeof( ib_wc_t ) );
\r
1769 wc.wr_id = p_mad_wr->send_wr.wr_id;
\r
1770 wc.wc_type = IB_WC_SEND;
\r
1771 wc.status = IB_WCS_WR_FLUSHED_ERR;
\r
1773 mad_disp_send_done( p_mad_qp->h_mad_disp, p_mad_wr, &wc );
\r
1780 IN const ib_qp_handle_t h_qp,
\r
1781 IN ib_recv_wr_t* const p_recv_wr,
\r
1782 OUT ib_recv_wr_t **pp_recv_failure OPTIONAL )
\r
1784 ib_api_status_t status;
\r
1786 AL_ENTER( AL_DBG_QP );
\r
1788 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
1790 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
1791 return IB_INVALID_QP_HANDLE;
\r
1793 if( !p_recv_wr || ( p_recv_wr->p_next && !pp_recv_failure ) )
\r
1795 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1796 return IB_INVALID_PARAMETER;
\r
1800 h_qp->pfn_post_recv( h_qp->h_recv_qp, p_recv_wr, pp_recv_failure );
\r
1802 AL_EXIT( AL_DBG_QP );
\r
1809 * Post receive buffers to a MAD QP.
\r
1812 mad_qp_post_recvs(
\r
1813 IN al_mad_qp_t* const p_mad_qp )
\r
1815 ib_mad_element_t* p_mad_element;
\r
1816 al_mad_element_t* p_al_element;
\r
1817 ib_recv_wr_t recv_wr;
\r
1818 ib_api_status_t status = IB_SUCCESS;
\r
1820 CL_ASSERT( p_mad_qp );
\r
1822 /* Attempt to post receive buffers up to the max_rq_depth limit. */
\r
1823 cl_spinlock_acquire( &p_mad_qp->qp.obj.lock );
\r
1824 while( p_mad_qp->cur_rq_depth < (int32_t)p_mad_qp->max_rq_depth )
\r
1826 /* Get a MAD element from the pool. */
\r
1827 status = ib_get_mad( p_mad_qp->pool_key, MAD_BLOCK_SIZE,
\r
1830 if( status != IB_SUCCESS ) break;
\r
1832 p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
\r
1835 /* Build the receive work request. */
\r
1836 recv_wr.p_next = NULL;
\r
1837 recv_wr.wr_id = (uintn_t)p_al_element;
\r
1838 recv_wr.num_ds = 1;
\r
1839 recv_wr.ds_array = &p_al_element->grh_ds;
\r
1841 /* Queue the receive on the service tracking list. */
\r
1842 cl_qlist_insert_tail( &p_mad_qp->recv_queue, &p_al_element->list_item );
\r
1844 /* Post the receive. */
\r
1845 status = ib_post_recv( p_mad_qp->h_dgrm_qp, &recv_wr, NULL );
\r
1847 if( status != IB_SUCCESS )
\r
1849 cl_qlist_remove_item( &p_mad_qp->recv_queue,
\r
1850 &p_al_element->list_item );
\r
1852 ib_put_mad( p_mad_element );
\r
1856 cl_atomic_inc( &p_mad_qp->cur_rq_depth );
\r
1858 cl_spinlock_release( &p_mad_qp->qp.obj.lock );
\r
1867 IN const ib_cq_handle_t h_cq,
\r
1868 IN void *cq_context )
\r
1870 al_mad_qp_t *p_mad_qp;
\r
1872 CL_ASSERT( cq_context );
\r
1873 p_mad_qp = (al_mad_qp_t*)cq_context;
\r
1875 CL_ASSERT( h_cq == p_mad_qp->h_recv_cq );
\r
1876 mad_qp_comp( p_mad_qp, h_cq, IB_WC_RECV );
\r
1883 IN const ib_cq_handle_t h_cq,
\r
1884 IN void *cq_context )
\r
1886 al_mad_qp_t *p_mad_qp;
\r
1888 CL_ASSERT( cq_context );
\r
1889 p_mad_qp = (al_mad_qp_t*)cq_context;
\r
1891 CL_ASSERT( h_cq == p_mad_qp->h_send_cq );
\r
1892 mad_qp_comp( p_mad_qp, h_cq, IB_WC_SEND );
\r
1894 /* Continue processing any queued MADs on the QP. */
\r
1895 mad_qp_resume_sends( &p_mad_qp->qp );
\r
1902 IN al_mad_qp_t* p_mad_qp,
\r
1903 IN const ib_cq_handle_t h_cq,
\r
1904 IN ib_wc_type_t wc_type )
\r
1907 ib_wc_t* p_free_wc = &wc;
\r
1908 ib_wc_t* p_done_wc;
\r
1909 al_mad_wr_t* p_mad_wr;
\r
1910 al_mad_element_t* p_al_mad;
\r
1911 ib_mad_element_t* p_mad_element;
\r
1912 ib_api_status_t status;
\r
1914 CL_ASSERT( p_mad_qp );
\r
1915 CL_ASSERT( h_cq );
\r
1917 /* Rearm the CQ before polling to avoid missing completions. */
\r
1918 status = ib_rearm_cq( h_cq, FALSE );
\r
1919 CL_ASSERT( status == IB_SUCCESS );
\r
1922 /* Process work completions. */
\r
1923 while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
\r
1925 /* Process completions one at a time. */
\r
1928 * Process the work completion. Per IBA specification, the
\r
1929 * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
\r
1930 * Use the wc_type function parameter instead of wc.wc_type.
\r
1935 /* Get a pointer to the MAD work request. */
\r
1936 p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);
\r
1938 /* Remove the MAD work request from the send tracking queue. */
\r
1939 cl_spinlock_acquire( &p_mad_qp->qp.obj.lock );
\r
1940 cl_qlist_remove_item( &p_mad_qp->send_queue, &p_mad_wr->list_item );
\r
1941 cl_spinlock_release( &p_mad_qp->qp.obj.lock );
\r
1943 /* Report the send completion to the dispatcher. */
\r
1944 mad_disp_send_done( p_mad_qp->h_mad_disp, p_mad_wr, &wc );
\r
1948 /* A receive buffer was consumed. */
\r
1949 cl_atomic_dec( &p_mad_qp->cur_rq_depth );
\r
1951 /* Replenish the receive buffer. */
\r
1952 mad_qp_post_recvs( p_mad_qp );
\r
1954 /* Initialize pointers to the MAD element. */
\r
1955 p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
\r
1956 p_mad_element = &p_al_mad->element;
\r
1958 /* Remove the AL MAD element from the receive tracking queue. */
\r
1959 cl_spinlock_acquire( &p_mad_qp->qp.obj.lock );
\r
1960 cl_qlist_remove_item( &p_mad_qp->recv_queue, &p_al_mad->list_item );
\r
1961 cl_spinlock_release( &p_mad_qp->qp.obj.lock );
\r
1963 /* Construct the MAD element from the receive work completion. */
\r
1964 build_mad_recv( p_mad_element, &wc );
\r
1966 /* Process the received MAD. */
\r
1967 status = mad_disp_recv_done( p_mad_qp->h_mad_disp,
\r
1970 /* Discard this MAD on error. */
\r
1971 if( status != IB_SUCCESS )
\r
1973 status = ib_put_mad( p_mad_element );
\r
1974 CL_ASSERT( status == IB_SUCCESS );
\r
1979 CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );
\r
1989 * Process an event on a CQ associated with a MAD QP.
\r
1992 mad_qp_cq_event_cb(
\r
1993 IN ib_async_event_rec_t *p_event_rec )
\r
1995 al_mad_qp_t *p_mad_qp;
\r
1997 CL_ASSERT( p_event_rec );
\r
1998 CL_ASSERT( p_event_rec->context );
\r
2000 if( p_event_rec->code == IB_AE_SQ_DRAINED )
\r
2003 p_mad_qp = (al_mad_qp_t* __ptr64)p_event_rec->context;
\r
2005 /* Nothing to do here. */
\r
2011 * Process an asynchronous event on the QP. Notify the user of the event.
\r
2014 qp_async_event_cb(
\r
2015 IN ib_async_event_rec_t* const p_event_rec )
\r
2017 ib_qp_handle_t h_qp;
\r
2019 CL_ASSERT( p_event_rec );
\r
2020 h_qp = (ib_qp_handle_t)p_event_rec->context;
\r
2022 #if defined(CL_KERNEL)
\r
2023 switch( p_event_rec->code )
\r
2025 case IB_AE_QP_COMM:
\r
2026 al_cep_established( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );
\r
2029 case IB_AE_QP_APM:
\r
2030 al_cep_migrate( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );
\r
2033 case IB_AE_QP_APM_ERROR:
\r
2034 //***TODO: Figure out how to handle these errors.
\r
2042 p_event_rec->context = (void*)h_qp->obj.context;
\r
2043 p_event_rec->handle.h_qp = h_qp;
\r
2045 if( h_qp->pfn_event_cb )
\r
2046 h_qp->pfn_event_cb( p_event_rec );
\r
2053 IN const ib_mw_handle_t h_mw,
\r
2054 IN const ib_qp_handle_t h_qp,
\r
2055 IN ib_bind_wr_t * const p_mw_bind,
\r
2056 OUT net32_t * const p_rkey )
\r
2058 ib_mr_handle_t h_mr;
\r
2059 ib_api_status_t status;
\r
2061 AL_ENTER( AL_DBG_MW );
\r
2063 if( AL_OBJ_INVALID_HANDLE( h_mw, AL_OBJ_TYPE_H_MW ) )
\r
2065 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MW_HANDLE\n") );
\r
2066 return IB_INVALID_MW_HANDLE;
\r
2068 if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
\r
2070 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
\r
2071 return IB_INVALID_QP_HANDLE;
\r
2073 if( !p_mw_bind || !p_rkey )
\r
2075 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
2076 return IB_INVALID_PARAMETER;
\r
2079 /* Convert to the CI handles. */
\r
2080 h_mr = p_mw_bind->h_mr;
\r
2081 p_mw_bind->h_mr = convert_mr_handle( h_mr );
\r
2083 status = verbs_bind_mw(h_mw, h_qp, p_mw_bind, p_rkey);
\r
2085 p_mw_bind->h_mr = h_mr;
\r
2087 AL_EXIT( AL_DBG_MW );
\r