/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <iba/ib_al.h>
#include <complib/cl_timer.h>

#include "ib_common.h"
#include "al_common.h"
#include "al_debug.h"
#include "al_verbs.h"
extern char	node_desc[IB_NODE_DESCRIPTION_SIZE];

#define	SMI_POLL_INTERVAL		20000		/* Milliseconds */
#define	LOCAL_MAD_TIMEOUT		50			/* Milliseconds */
#define	DEFAULT_QP0_DEPTH		256
#define	DEFAULT_QP1_DEPTH		1024

uint32_t		g_smi_poll_interval = SMI_POLL_INTERVAL;
spl_qp_mgr_t*	gp_spl_qp_mgr = NULL;
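
/*
 * Note: g_smi_poll_interval drives the periodic SMI poll timer started when
 * a special QP service is created (see the cl_timer_trim call below); setting
 * it to zero disables polling.  The QP0/QP1 depths above are upper bounds
 * only - the depth actually used is clamped to the CA's reported WR/CQE
 * limits.
 */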

/*
 * Function prototypes.
 */
destroying_spl_qp_mgr(
	IN				al_obj_t*					p_obj );

	IN				al_obj_t*					p_obj );

spl_qp0_agent_pnp_cb(
	IN				ib_pnp_rec_t*				p_pnp_rec );

spl_qp1_agent_pnp_cb(
	IN				ib_pnp_rec_t*				p_pnp_rec );

	IN				ib_pnp_rec_t*				p_pnp_rec,
	IN				ib_qp_type_t				qp_type );

	IN				ib_pnp_port_rec_t*			p_pnp_rec,
	IN		const	ib_qp_type_t				qp_type );

destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj );

	IN				al_obj_t*					p_obj );

spl_qp_svc_lid_change(
	IN				ib_pnp_port_rec_t*			p_pnp_rec );

	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

	IN				cl_async_proc_item_t*		p_item );

spl_qp_send_comp_cb(
	IN		const	ib_cq_handle_t				h_cq,
	IN				void						*cq_context );

spl_qp_recv_comp_cb(
	IN		const	ib_cq_handle_t				h_cq,
	IN				void						*cq_context );

	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN		const	ib_cq_handle_t				h_cq,
	IN				ib_wc_type_t				wc_type );

	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element );

	IN				ib_mad_element_t*			p_mad_element );

route_recv_smp_attr(
	IN				ib_mad_element_t*			p_mad_element );

	IN				ib_mad_element_t*			p_mad_element );

	IN				ib_mad_element_t*			p_mad_element );

route_recv_gmp_attr(
	IN				ib_mad_element_t*			p_mad_element );

	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element );

	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request );

spl_qp_alias_send_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_mad_element );

spl_qp_alias_recv_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_mad_response );

static ib_api_status_t
spl_qp_svc_post_recvs(
	IN				spl_qp_svc_t*	const		p_spl_qp_svc );

spl_qp_svc_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec );

spl_qp_alias_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec );

	IN				spl_qp_svc_t*				p_spl_qp_svc );

spl_qp_svc_reset_cb(
	IN				cl_async_proc_item_t*		p_item );

	IN		const	cl_qmap_t* const			p_svc_map,
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t		*ph_mad_disp );

	IN				void*						context );

	IN				cl_list_item_t* const		p_list_item,
	IN				void*						context );

#if defined( CL_USE_MUTEX )
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item );

spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item );

/*
 * Create the special QP manager.
 */
	IN				al_obj_t* const				p_parent_obj )
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the lists. */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
	if( cl_status != CL_SUCCESS )
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context	= &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context	= &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Pre-destroy the special QP manager.
 */
destroying_spl_qp_mgr(
	IN				al_obj_t*					p_obj )
	ib_api_status_t			status;

	CL_ASSERT( p_obj );
	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
	UNUSED_PARAM( p_obj );

	/* Deregister for port PnP events for QP0. */
	if( gp_spl_qp_mgr->h_qp0_pnp )
		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

	/* Deregister for port PnP events for QP1. */
	if( gp_spl_qp_mgr->h_qp1_pnp )
		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

	/* Destroy the SMI polling timer. */
	cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );

/*
 * Free the special QP manager.
 */
	IN				al_obj_t*					p_obj )
	CL_ASSERT( p_obj );
	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
	UNUSED_PARAM( p_obj );

	destroy_al_obj( &gp_spl_qp_mgr->obj );
	cl_free( gp_spl_qp_mgr );
	gp_spl_qp_mgr = NULL;

/*
 * Special QP0 agent PnP event callback.
 */
spl_qp0_agent_pnp_cb(
	IN				ib_pnp_rec_t*				p_pnp_rec )
	ib_api_status_t	status;
	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );

/*
 * Special QP1 agent PnP event callback.
 */
spl_qp1_agent_pnp_cb(
	IN				ib_pnp_rec_t*				p_pnp_rec )
	ib_api_status_t	status;
	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Special QP agent PnP event callback.
 */
	IN				ib_pnp_rec_t*				p_pnp_rec,
	IN				ib_qp_type_t				qp_type )
	ib_api_status_t			status;
	al_obj_t*				p_obj;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_pnp_rec );
	p_obj = p_pnp_rec->context;

	/* Dispatch based on the PnP event type. */
	switch( p_pnp_rec->pnp_event )
	case IB_PNP_PORT_ADD:
		CL_ASSERT( !p_obj );
		status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );

	case IB_PNP_PORT_REMOVE:
		CL_ASSERT( p_obj );
		ref_al_obj( p_obj );
		p_obj->pfn_destroy( p_obj, NULL );
		status = IB_SUCCESS;

	case IB_PNP_LID_CHANGE:
		CL_ASSERT( p_obj );
		spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );
		status = IB_SUCCESS;

		/* All other events are ignored. */
		status = IB_SUCCESS;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Create a special QP service.
 */
	IN				ib_pnp_port_rec_t*			p_pnp_rec,
	IN		const	ib_qp_type_t				qp_type )
	cl_status_t				cl_status;
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_ca_handle_t			h_ca;
	ib_cq_create_t			cq_create;
	ib_qp_create_t			qp_create;
	ib_qp_attr_t			qp_attr;
	ib_mad_svc_t			mad_svc;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_pnp_rec );

	if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;

	CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );
	CL_ASSERT( p_pnp_rec->p_ca_attr );
	CL_ASSERT( p_pnp_rec->p_port_attr );

	p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );
	if( !p_spl_qp_svc )
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;

	/* Tie the special QP service to the port by setting the port number. */
	p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;
	/* Store the port GUID to allow faster lookups of the dispatchers. */
	p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;

	/* Initialize the send and receive queues. */
	cl_qlist_init( &p_spl_qp_svc->send_queue );
	cl_qlist_init( &p_spl_qp_svc->recv_queue );

#if defined( CL_USE_MUTEX )
	/* Initialize async callbacks and flags for send/receive processing. */
	p_spl_qp_svc->send_async_queued = FALSE;
	p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;
	p_spl_qp_svc->recv_async_queued = FALSE;
	p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;

	/* Initialize the async callback function to process local sends. */
	p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;

	/* Initialize the async callback function to reset the QP on error. */
	p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;

	/* Construct the special QP service object. */
	construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );

	/* Initialize the special QP service object. */
	status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,
		destroying_spl_qp_svc, NULL, free_spl_qp_svc );
	if( status != IB_SUCCESS )
		free_spl_qp_svc( &p_spl_qp_svc->obj );

	/* Attach the special QP service to the parent object. */
	status = attach_al_obj(
		(al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );
	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

	h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );
	if( !h_ca )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR, ("acquire_ca failed.\n") );
		return IB_INVALID_GUID;

	p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;

	/* Determine the maximum queue depth of the QP and CQs. */
	p_spl_qp_svc->max_qp_depth =
		( p_pnp_rec->p_ca_attr->max_wrs <
		p_pnp_rec->p_ca_attr->max_cqes ) ?
		p_pnp_rec->p_ca_attr->max_wrs :
		p_pnp_rec->p_ca_attr->max_cqes;

	/* Compare this maximum to the default special queue depth. */
	if( ( qp_type == IB_QPT_QP0 ) &&
		( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )
		p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;
	if( ( qp_type == IB_QPT_QP1 ) &&
		( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )
		p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;
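
	/*
	 * Sizing note: the effective depth is min( max_wrs, max_cqes ), then
	 * capped at DEFAULT_QP0_DEPTH (256) or DEFAULT_QP1_DEPTH (1024),
	 * presumably so a single special QP does not consume the CA's entire
	 * WR/CQE budget.
	 */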
	/* Create the send CQ. */
	cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
	cq_create.size = p_spl_qp_svc->max_qp_depth;
	cq_create.pfn_comp_cb = spl_qp_send_comp_cb;

	status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,
		p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );

	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );

	/* Reference the special QP service on behalf of ib_create_cq. */
	ref_al_obj( &p_spl_qp_svc->obj );

	/* Check the result of the creation request. */
	if( cq_create.size < p_spl_qp_svc->max_qp_depth )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_create_cq allocated insufficient send CQ size\n") );
		return IB_INSUFFICIENT_RESOURCES;

	/* Create the receive CQ. */
	cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
	cq_create.size = p_spl_qp_svc->max_qp_depth;
	cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;

	status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,
		p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );

	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );

	/* Reference the special QP service on behalf of ib_create_cq. */
	ref_al_obj( &p_spl_qp_svc->obj );

	/* Check the result of the creation request. */
	if( cq_create.size < p_spl_qp_svc->max_qp_depth )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_create_cq allocated insufficient recv CQ size\n") );
		return IB_INSUFFICIENT_RESOURCES;
	/* Create the special QP. */
	cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
	qp_create.qp_type = qp_type;
	qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;
	qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;
	qp_create.sq_sge = 3;	/* Three entries are required for segmentation. */
	qp_create.rq_sge = 1;
	qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;
	qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;
	qp_create.sq_signaled = TRUE;

	status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,
		p_pnp_rec->p_port_attr->port_guid, &qp_create,
		p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );

	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );

	/* Reference the special QP service on behalf of ib_get_spl_qp. */
	ref_al_obj( &p_spl_qp_svc->obj );

	/* Check the result of the creation request. */
	status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );
	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );

	if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||
		( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||
		( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_get_spl_qp allocated attributes are insufficient\n") );
		return IB_INSUFFICIENT_RESOURCES;
	/* Initialize the QP for use. */
	status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );

	/* Post receive buffers. */
	status = spl_qp_svc_post_recvs( p_spl_qp_svc );
	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("spl_qp_svc_post_recvs failed, %s\n",
			ib_get_err_str( status ) ) );

	/* Create the MAD dispatcher. */
	status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,
		&p_spl_qp_svc->h_mad_disp );
	if( status != IB_SUCCESS )
		p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );

	/*
	 * Add this service to the special QP manager lookup lists.
	 * The service must be added to allow the creation of a QP alias.
	 */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
	if( qp_type == IB_QPT_QP0 )
		cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,
			&p_spl_qp_svc->map_item );
	else
		cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,
			&p_spl_qp_svc->map_item );
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );
	/*
	 * If the CA does not support HW agents, create a QP alias and register
	 * a MAD service for sending responses from the local MAD interface.
	 */
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
		/* Create a QP alias. */
		cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
		qp_create.qp_type =
			( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;
		qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;
		qp_create.sq_sge = 1;
		qp_create.sq_signaled = TRUE;

		status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,
			p_pnp_rec->p_port_attr->port_guid, &qp_create,
			p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,
			&p_spl_qp_svc->h_qp_alias );

		if (status != IB_SUCCESS)
			p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
			CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
				("ib_get_spl_qp alias failed, %s\n",
				ib_get_err_str( status ) ) );

		/* Reference the special QP service on behalf of ib_get_spl_qp. */
		ref_al_obj( &p_spl_qp_svc->obj );

		/* Register a MAD service for sends. */
		cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );
		mad_svc.mad_svc_context = p_spl_qp_svc;
		mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;
		mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;

		status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,
			&p_spl_qp_svc->h_mad_svc );

		if( status != IB_SUCCESS )
			p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
			CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
				("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );

	/* Set the context of the PnP event to this child object. */
	p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;

	/* The QP is ready.  Change the state. */
	p_spl_qp_svc->state = SPL_QP_ACTIVE;

	/* Force a completion callback to rearm the CQs. */
	spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );
	spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );

	/* Start the polling thread timer. */
	if( g_smi_poll_interval )
		cl_status =
			cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );

		if( cl_status != CL_SUCCESS )
			p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
			CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
				("cl_timer_start failed, status 0x%x\n", cl_status ) );
			return ib_convert_cl_status( cl_status );

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &p_spl_qp_svc->obj );
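
	/*
	 * By this point the CQs, QP and alias QP each hold their own reference
	 * on the service (taken above on behalf of their create calls), so
	 * releasing the creation reference here cannot free the object
	 * prematurely.
	 */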
	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Return a work completion to the MAD dispatcher for the specified MAD.
 */
__complete_send_mad(
	IN		const	al_mad_disp_handle_t		h_mad_disp,
	IN				al_mad_wr_t* const			p_mad_wr,
	IN		const	ib_wc_status_t				wc_status )
	ib_wc_t			wc;

	/* Construct a send work completion. */
	cl_memclr( &wc, sizeof( ib_wc_t ) );
	wc.wr_id	= p_mad_wr->send_wr.wr_id;
	wc.wc_type	= IB_WC_SEND;
	wc.status	= wc_status;

	/* Set the send size if we were successful with the send. */
	if( wc_status == IB_WCS_SUCCESS )
		wc.length = MAD_BLOCK_SIZE;

	mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );
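
	/*
	 * Only the status and wr_id matter to the dispatcher; the length is
	 * reported as a full MAD_BLOCK_SIZE (256 bytes) on success because
	 * special QP MADs always occupy a single MAD block.
	 */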

/*
 * Pre-destroy a special QP service.
 */
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj )
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_wr_t*			p_mad_wr;

	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Change the state to prevent processing new send requests. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->state = SPL_QP_DESTROYING;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Wait here until the special QP service is no longer in use. */
	while( p_spl_qp_svc->in_use_cnt )
		cl_thread_suspend( 0 );

	/* Destroy the special QP. */
	if( p_spl_qp_svc->h_qp )
		/* If present, remove the special QP service from the tracking map. */
		cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
		if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
			cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
		else
			cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
		cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

		status = ib_destroy_qp( p_spl_qp_svc->h_qp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

		/* Complete any outstanding MAD send operations as "flushed". */
		for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
			 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
			 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
			p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_WR_FLUSHED_ERR );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		/* Receive MAD elements are returned to the pool by the free routine. */

	/* Destroy the special QP alias and CQs. */
	if( p_spl_qp_svc->h_qp_alias )
		status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

	if( p_spl_qp_svc->h_send_cq )
		status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

	if( p_spl_qp_svc->h_recv_cq )
		status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
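
	/*
	 * Passing deref_al_obj as the destroy callback for each QP/CQ releases,
	 * once that destroy completes, the reference taken on this service when
	 * the resource was created.
	 */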
	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Free a special QP service.
 */
	IN				al_obj_t*					p_obj )
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_element_t*		p_al_mad;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Dereference the CA. */
	if( p_spl_qp_svc->obj.p_ci_ca )
		deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );

	/* Return receive MAD elements to the pool. */
	for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
		 p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
		 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
		p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

		status = ib_put_mad( &p_al_mad->element );
		CL_ASSERT( status == IB_SUCCESS );

	CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );

	destroy_al_obj( &p_spl_qp_svc->obj );
	cl_free( p_spl_qp_svc );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Update the base LID of a special QP service.
 */
spl_qp_svc_lid_change(
	IN				al_obj_t*					p_obj,
	IN				ib_pnp_port_rec_t*			p_pnp_rec )
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_obj );
	CL_ASSERT( p_pnp_rec );
	CL_ASSERT( p_pnp_rec->p_port_attr );

	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;
	p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Route a send work request.
 */
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_send_wr_t* const			p_send_wr )
	al_mad_wr_t*			p_mad_wr;
	al_mad_send_t*			p_mad_send;
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	ib_av_handle_t			h_av;
	mad_route_t				route;
	boolean_t				local, loopback, discard;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_send_wr );

	/* Initialize pointers to the MAD work request and the MAD. */
	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Check if the CA has a local MAD interface. */
	local = loopback = discard = FALSE;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
		/*
		 * If the MAD is a locally addressed Subnet Management, Performance
		 * Management, or Baseboard Management datagram, process the work
		 * request locally.
		 */
		h_av = p_send_wr->dgrm.ud.h_av;
		switch( p_mad->mgmt_class )
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			if( ib_smp_is_response( p_smp ) )
				/*
				 * This node is the originator of the response.  Discard
				 * if the hop count or pointer is zero, an intermediate hop,
				 * out of bounds hop, or if the first port of the directed
				 * route return path is not this port.
				 */
				if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt or hop ptr set to 0...discarding\n") );
				else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt != (hop ptr - 1)...discarding\n") );
				else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt > max hops...discarding\n") );
				else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
					( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
					p_spl_qp_svc->port_num ) )
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("return path[hop ptr - 1] != port num...discarding\n") );

				/* The SMP is a request. */
				if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
					( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
				else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
					/* Self Addressed: Sent locally, routed locally. */
					discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
							  ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
					/* End of Path: Sent remotely, routed locally. */
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_ptr == 0 ) )
					/* Beginning of Path: Sent locally, routed remotely. */
					if( p_smp->dr_slid == IB_LID_PERMISSIVE )
						( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
						p_spl_qp_svc->port_num );

					/* Intermediate hop. */

			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

		case IB_MCLASS_SUBN_LID:
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

			/* Fall through to check for a local MAD. */

		case IB_MCLASS_PERF:
		case IB_MCLASS_BM:
				( h_av->av_attr.dlid ==
				( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );

	/* Route vendor specific MADs to the HCA provider. */
	if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
			( h_av->av_attr.dlid ==
			( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );

	route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
		ROUTE_LOCAL : ROUTE_REMOTE;
	if( local ) route = ROUTE_LOCAL;
	if( loopback && local ) route = ROUTE_LOOPBACK;
	if( discard ) route = ROUTE_DISCARD;
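
	/*
	 * Precedence of the final route selection above: the IB_SEND_OPT_LOCAL
	 * hint picks local routing by default, the class-specific checks can
	 * force it, loopback applies only to MADs that are also local, and a
	 * discard decision overrides everything else.
	 */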
	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Send a work request on the special QP.
 */
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_send_wr_t* const			p_send_wr )
	spl_qp_svc_t*			p_spl_qp_svc;
	al_mad_wr_t*			p_mad_wr;
	mad_route_t				route;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( h_qp );
	CL_ASSERT( p_send_wr );

	/* Get the special QP service. */
	p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, p_send_wr );

	/*
	 * Check the QP state and guard against error handling.  Also,
	 * to maintain proper order of work completions, delay processing
	 * a local MAD until any remote MAD work requests have completed,
	 * and delay processing a remote MAD until local MAD work requests
	 * have completed.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
		(is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
		( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
			p_spl_qp_svc->max_qp_depth ) )
		/*
		 * Return busy status.
		 * The special QP will resume sends at this point.
		 */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return IB_RESOURCE_BUSY;

	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

	if( is_local( route ) )
		/* Save the local MAD work request for processing. */
		p_spl_qp_svc->local_mad_wr = p_mad_wr;

		/* Flag the service as in use by the asynchronous processing thread. */
		cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
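
		/*
		 * in_use_cnt is what keeps destroying_spl_qp_svc() from tearing the
		 * service down while the asynchronous local-MAD path still holds a
		 * pointer to it; the destroy path spins until the count reaches zero.
		 */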
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		status = local_mad_send( p_spl_qp_svc, p_mad_wr );
	else
		/* Process a remote MAD send work request. */
		status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 */
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD. */
	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	/* Perform outbound MAD processing. */

	/* Adjust directed route SMPs as required by IBA. */
	if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
		if( ib_smp_is_response( p_smp ) )
			if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
		else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
			/*
			 * Only update the pointer if the hw_agent is not implemented.
			 * Fujitsu implements SMI in hardware, so the following has to
			 * be passed down to the hardware SMI.
			 */
			ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
			if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
			ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );

	/* Always generate send completions. */
	p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;
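
	/*
	 * Every special QP send is posted signaled so that a work completion
	 * always comes back to remove the request from send_queue and report the
	 * result to the MAD dispatcher (see spl_qp_comp / mad_disp_send_done).
	 */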
	/* Queue the MAD work request on the service tracking queue. */
	cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

	status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

	if( status != IB_SUCCESS )
		cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

		/* Reset directed route SMPs as required by IBA. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			if( ib_smp_is_response( p_smp ) )
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
			else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				/* Only update if the hw_agent is not implemented. */
				ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
				if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
				ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 */
static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
	mad_route_t				route;
	ib_api_status_t			status = IB_SUCCESS;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

	/* Check if this MAD should be discarded. */
	if( is_discard( route ) )
		/* Deliver a "work completion" to the dispatcher. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		status = IB_INVALID_SETTING;
	else if( is_loopback( route ) )
		/* Loopback local SM to SM "heartbeat" messages. */
		status = loopback_mad( p_spl_qp_svc, p_mad_wr );

		switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
		case IB_MCLASS_SUBN_DIR:
		case IB_MCLASS_SUBN_LID:
			status = process_subn_mad( p_spl_qp_svc, p_mad_wr );

			status = IB_NOT_DONE;

	if( status == IB_NOT_DONE )
		/* Queue an asynchronous processing item to process the local MAD. */
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
	else
		/*
		 * Clear the local MAD pointer to allow processing of other MADs.
		 * This is done after polling for attribute changes to ensure that
		 * subsequent MADs pick up any changes performed by this one.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		p_spl_qp_svc->local_mad_wr = NULL;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* No longer in use by the asynchronous processing thread. */
		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

		/* Special QP operations will resume by unwinding. */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;

static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr,
		OUT			ib_mad_element_t** const	pp_mad_resp )
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );
	CL_ASSERT( pp_mad_resp );

	/* Get a MAD element from the pool for the response. */
	status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,
		MAD_BLOCK_SIZE, pp_mad_resp );
	if( status != IB_SUCCESS )
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );

	AL_EXIT( AL_DBG_SMI );

static ib_api_status_t
complete_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr,
	IN				ib_mad_element_t* const		p_mad_resp )
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );
	CL_ASSERT( p_mad_resp );

	/* Construct the receive MAD element. */
	p_mad_resp->status		= IB_WCS_SUCCESS;
	p_mad_resp->remote_qp	= p_mad_wr->send_wr.dgrm.ud.remote_qp;
	p_mad_resp->remote_lid	= p_spl_qp_svc->base_lid;
	if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
		p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;
		p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;

	/*
	 * Hand the receive MAD element to the dispatcher before completing
	 * the send.  This guarantees that the send request cannot time out.
	 */
	status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );

	/* Forward the send work completion to the dispatcher. */
	__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );

	AL_EXIT( AL_DBG_SMI );

static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
		/* Initialize a pointer to the outbound MAD. */
		p_mad = get_mad_hdr_from_wr( p_mad_wr );

		/* Simulate a send/receive between local managers. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );

		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );

	AL_EXIT( AL_DBG_SMI );

static ib_api_status_t
process_node_info(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the request MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
		/* Node info is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method |= IB_MAD_METHOD_RESP_MASK;
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		/* TODO: Get some unique identifier for the system */
		p_node_info->sys_guid = p_ca_attr->ca_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

static ib_api_status_t
process_node_desc(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the request MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
		/* Node description is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method |= IB_MAD_METHOD_RESP_MASK;
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data,
			node_desc, sizeof(node_desc) );

		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Process subnet management MADs using cached data if possible.
 */
static ib_api_status_t
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
	ib_api_status_t			status;
	ib_smp_t*				p_smp;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||
		p_smp->mgmt_class == IB_MCLASS_SUBN_LID );

	switch( p_smp->attr_id )
	case IB_MAD_ATTR_NODE_INFO:
		status = process_node_info( p_spl_qp_svc, p_mad_wr );

	case IB_MAD_ATTR_NODE_DESC:
		status = process_node_desc( p_spl_qp_svc, p_mad_wr );

		status = IB_NOT_DONE;
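
		/*
		 * Attributes other than NodeInfo and NodeDescription are not served
		 * from cached data; IB_NOT_DONE routes them through the full local
		 * MAD path (fwd_local_mad / ib_local_mad) instead.
		 */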
	AL_EXIT( AL_DBG_SMI );

/*
 * Process a local MAD send work request.
 */
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	al_mad_send_t*			p_mad_send;
	ib_mad_element_t*		p_mad_response;
	ib_mad_t*				p_mad_response_buf;
	ib_api_status_t			status = IB_SUCCESS;
	boolean_t				smp_is_set;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

	/* Get a MAD element from the pool for the response. */
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
//*** Commented code to work-around ib_local_mad() requiring a response MAD
//*** as input.  Remove comments once the ib_local_mad() implementation allows
//*** for a NULL response MAD, when one is not expected.
//*** Note that an attempt to route an invalid response MAD in this case
//*** will fail harmlessly.
//***	if( p_mad_send->p_send_mad->resp_expected )
		status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
		if( status != IB_SUCCESS )
			CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

		p_mad_response_buf = p_mad_response->p_mad_buf;

//***		p_mad_response_buf = NULL;

	/* Adjust directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
		CL_ASSERT( !ib_smp_is_response( p_smp ) );

		/*
		 * If this was a self addressed, directed route SMP, increment
		 * the hop pointer in the request before delivery as required
		 * by IBA.  Otherwise, adjustment for remote requests occurs
		 * during inbound processing.
		 */
		if( p_smp->hop_count == 0 )

	/* Forward the locally addressed MAD to the CA interface. */
	status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
		p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );

	/* Reset directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
		/*
		 * If this was a self addressed, directed route SMP, decrement
		 * the hop pointer in the response before delivery as required
		 * by IBA.  Otherwise, adjustment for remote responses occurs
		 * during outbound processing.
		 */
		if( p_smp->hop_count == 0 )
			/* Adjust the request SMP. */

			/* Adjust the response SMP. */
			if( p_mad_response_buf )
				p_smp = (ib_smp_t*)p_mad_response_buf;

	if( status != IB_SUCCESS )
		if( p_mad_response )
			ib_put_mad( p_mad_response );

		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

	/* Check the completion status of this simulated send. */
	if( p_mad_response_buf )
		/*
		 * The SMI uses PnP polling to refresh the base_lid and lmc.
		 * Polling takes time, so we update the values here to prevent
		 * the failure of LID routed MADs sent immediately following this
		 * assignment.  Check the response to see if the port info was set.
		 */
		ib_port_info_t*		p_port_info = NULL;

		switch( p_mad_response_buf->mgmt_class )
		case IB_MCLASS_SUBN_DIR:
			if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
				( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )
				p_port_info =
					(ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );

		case IB_MCLASS_SUBN_LID:
			if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
				( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )
				p_port_info =
					(ib_port_info_t*)( p_mad_response_buf + 1 );

		if( p_port_info )
			p_spl_qp_svc->base_lid = p_port_info->base_lid;
			p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
			if (p_port_info->subnet_timeout & 0x80)
				AL_TRACE(AL_DBG_PNP,
					("Client reregister event, setting sm_lid to 0.\n"));
				ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);
				p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->
					p_port_attr->sm_lid = 0;
				ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);
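
				/*
				 * The high bit of SubnetTimeOut is the ClientReregister flag.
				 * Clearing the cached sm_lid here presumably forces AL to
				 * treat the SM as newly discovered and re-register client
				 * subscriptions on the next PnP poll.
				 */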
	status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );

	/* If the SMP was a Get, no need to trigger a PnP poll. */
	if( status == IB_SUCCESS && !smp_is_set )
		status = IB_NOT_DONE;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Asynchronous processing thread callback to send a local MAD.
 */
send_local_mad_cb(
	IN				cl_async_proc_item_t*		p_item )
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );

	/* Process a local MAD send work request. */
	CL_ASSERT( p_spl_qp_svc->local_mad_wr );
	status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );

	/*
	 * If we successfully processed a local MAD, it could have changed
	 * something (e.g. the LID) on the HCA.  Scan for changes.
	 */
	if( status == IB_SUCCESS )

	/*
	 * Clear the local MAD pointer to allow processing of other MADs.
	 * This is done after polling for attribute changes to ensure that
	 * subsequent MADs pick up any changes performed by this one.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->local_mad_wr = NULL;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Continue processing any queued MADs on the QP. */
	special_qp_resume_sends( p_spl_qp_svc->h_qp );

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Special QP send completion callback.
 */
spl_qp_send_comp_cb(
	IN		const	ib_cq_handle_t				h_cq,
	IN				void*						cq_context )
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( cq_context );
	p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

	/* Queue an asynchronous processing item to process sends. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( !p_spl_qp_svc->send_async_queued )
		p_spl_qp_svc->send_async_queued = TRUE;
		ref_al_obj( &p_spl_qp_svc->obj );
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

	/* Invoke the callback directly. */
	CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );
	spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );

	/* Continue processing any queued MADs on the QP. */
	special_qp_resume_sends( p_spl_qp_svc->h_qp );

#endif

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

#if defined( CL_USE_MUTEX )
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item )
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Reset asynchronous queue flag. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );

	/* Continue processing any queued MADs on the QP. */
	status = special_qp_resume_sends( p_spl_qp_svc->h_qp );
	CL_ASSERT( status == IB_SUCCESS );

	deref_al_obj( &p_spl_qp_svc->obj );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Special QP receive completion callback.
 */
spl_qp_recv_comp_cb(
	IN		const	ib_cq_handle_t				h_cq,
	IN				void*						cq_context )
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( cq_context );
	p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

	/* Queue an asynchronous processing item to process receives. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( !p_spl_qp_svc->recv_async_queued )
		p_spl_qp_svc->recv_async_queued = TRUE;
		ref_al_obj( &p_spl_qp_svc->obj );
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

	CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );
	spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );

#endif

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

#if defined( CL_USE_MUTEX )
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item )
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Reset asynchronous queue flag. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );

	deref_al_obj( &p_spl_qp_svc->obj );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );

/*
 * Special QP completion handler.
 */
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN		const	ib_cq_handle_t				h_cq,
	IN				ib_wc_type_t				wc_type )
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc = &wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_mad_element_t*		p_mad_element;
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( h_cq );

	/* Check the QP state and guard against error handling. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Process work completions. */
	while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
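		/*
		 * A single ib_wc_t is supplied on the free list (p_free_wc), so each
		 * ib_poll_cq call returns at most one completion chained on
		 * p_done_wc and the loop drains the CQ one entry at a time.
		 */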
2064 /* Process completions one at a time. */
\r
2065 CL_ASSERT( p_done_wc );
\r
2067 /* Flushed completions are handled elsewhere. */
\r
2068 if( wc.status == IB_WCS_WR_FLUSHED_ERR )
\r
2075 * Process the work completion. Per IBA specification, the
\r
2076 * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
\r
2077 * Use the wc_type parameter.
\r
2082 /* Get a pointer to the MAD work request. */
\r
2083 p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);
\r
2085 /* Remove the MAD work request from the service tracking queue. */
\r
2086 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
\r
2087 cl_qlist_remove_item( &p_spl_qp_svc->send_queue,
\r
2088 &p_mad_wr->list_item );
\r
2089 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2091 /* Reset directed route SMPs as required by IBA. */
\r
2092 p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );
\r
2093 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
\r
2095 if( ib_smp_is_response( p_smp ) )
\r
2101 /* Report the send completion to the dispatcher. */
\r
2102 mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );
\r
2107 /* Initialize pointers to the MAD element. */
\r
2108 p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
\r
2109 p_mad_element = &p_al_mad->element;
\r
2111 /* Remove the AL MAD element from the service tracking list. */
\r
2112 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
\r
2114 cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
\r
2115 &p_al_mad->list_item );
\r
2117 /* Replenish the receive buffer. */
\r
2118 spl_qp_svc_post_recvs( p_spl_qp_svc );
\r
2119 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2121 /* Construct the MAD element from the receive work completion. */
\r
2122 build_mad_recv( p_mad_element, &wc );
\r
2124 /* Process the received MAD. */
\r
2125 status = process_mad_recv( p_spl_qp_svc, p_mad_element );
\r
2127 /* Discard this MAD on error. */
\r
2128 if( status != IB_SUCCESS )
\r
2130 status = ib_put_mad( p_mad_element );
\r
2131 CL_ASSERT( status == IB_SUCCESS );
\r
2136 CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );
\r
2140 if( wc.status != IB_WCS_SUCCESS )
\r
2142 CL_TRACE( CL_DBG_ERROR, g_al_dbg_lvl,
\r
2143 ("special QP completion error: %s!\n",
\r
2144 ib_get_wc_status_str( wc.status )) );
\r
2146 /* Reset the special QP service and return. */
\r
2147 spl_qp_svc_reset( p_spl_qp_svc );
\r
2152 /* Rearm the CQ. */
\r
2153 status = ib_rearm_cq( h_cq, FALSE );
\r
2154 CL_ASSERT( status == IB_SUCCESS );
\r
2156 cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
\r
2157 CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
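/*
 * A minimal sketch (not compiled) of the drain-then-rearm pattern used
 * above.  The helper name example_drain_and_rearm is hypothetical and only
 * illustrative.  Completions that arrive between the final empty poll and
 * ib_rearm_cq() simply trigger a fresh callback once the CQ is re-armed,
 * so nothing is lost.
 */
#if 0
static void
example_drain_and_rearm(
	IN		const	ib_cq_handle_t				h_cq )
{
	ib_wc_t			wc;
	ib_wc_t*		p_free_wc = &wc;
	ib_wc_t*		p_done_wc;

	/* Drain every available completion before re-arming. */
	while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
	{
		/* ...handle *p_done_wc here... */
		p_free_wc = &wc;
	}

	/* Request a callback for the next completion. */
	ib_rearm_cq( h_cq, FALSE );
}
#endif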
\r
/*
 * Process a received MAD.
 */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_smp_t*				p_smp;
	mad_route_t				route;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * if the source LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_slid < p_spl_qp_svc->base_lid ) ||
					( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
					( 1 << p_spl_qp_svc->lmc ) ) )
				{
					route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * if the destination LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;	/* Set path per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_dlid < p_spl_qp_svc->base_lid ) ||
					( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
					( 1 << p_spl_qp_svc->lmc ) ) )
				{
					route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through next case */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			route = ROUTE_LOCAL;
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
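/*
 * The dr_slid/dr_dlid checks above accept any LID this port answers to:
 * base_lid through base_lid + (2^lmc - 1).  For example, with base_lid 0x10
 * and lmc 2 the valid range is 0x10-0x13.  A minimal sketch (not compiled)
 * of that predicate, using a hypothetical helper name and mirroring the
 * comparison performed above:
 */
#if 0
static boolean_t
example_lid_matches_port(
	IN		const	ib_net16_t					lid,
	IN		const	spl_qp_svc_t* const			p_spl_qp_svc )
{
	return( ( lid >= p_spl_qp_svc->base_lid ) &&
		( lid < p_spl_qp_svc->base_lid + ( 1 << p_spl_qp_svc->lmc ) ) );
}
#endif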
\r
/*
 * Route a received SMP.
 */
static mad_route_t
route_recv_smp(
	IN				ib_mad_element_t*			p_mad_element )
{
	mad_route_t				route;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_mad_element );

	/* Process the received SMP. */
	switch( p_mad_element->p_mad_buf->method )
	{
	case IB_MAD_METHOD_GET:
	case IB_MAD_METHOD_SET:
		route = route_recv_smp_attr( p_mad_element );
		break;

	case IB_MAD_METHOD_TRAP:
		/*
		 * Special check to route locally generated traps to the remote SM.
		 * Distinguished from other receives by the p_wc->recv.ud.recv_opt
		 * IB_RECV_OPT_FORWARD flag.
		 *
		 * Note that because forwarded traps use AL MAD services, the upper
		 * 32-bits of the TID are reserved by the access layer.  When matching
		 * a Trap Repress MAD, the SMA must only use the lower 32-bits of the
		 * TID.
		 */
		route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?
			ROUTE_REMOTE : ROUTE_DISPATCHER;
		break;

	case IB_MAD_METHOD_TRAP_REPRESS:
		/*
		 * Note that because forwarded traps use AL MAD services, the upper
		 * 32-bits of the TID are reserved by the access layer.  When matching
		 * a Trap Repress MAD, the SMA must only use the lower 32-bits of the
		 * TID.
		 */
		route = ROUTE_LOCAL;
		break;

	default:
		route = ROUTE_DISPATCHER;
		break;
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return route;
}
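/*
 * Routing note: a Trap received with IB_RECV_OPT_FORWARD set was generated
 * locally (below verbs) and is forwarded to the remote SM through the AL MAD
 * services, so AL owns the upper 32 bits of the TID on the wire.  A Trap
 * Repress is delivered to the local SMA (ROUTE_LOCAL); when matching it
 * against an outstanding forwarded Trap, only the lower 32 bits of the TID
 * are significant.
 */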
\r
/*
 * Route received SMP attributes.
 */
static mad_route_t
route_recv_smp_attr(
	IN				ib_mad_element_t*			p_mad_element )
{
	mad_route_t				route;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_mad_element );

	/* Process the received SMP attributes. */
	switch( p_mad_element->p_mad_buf->attr_id )
	{
	case IB_MAD_ATTR_NODE_DESC:
	case IB_MAD_ATTR_NODE_INFO:
	case IB_MAD_ATTR_GUID_INFO:
	case IB_MAD_ATTR_PORT_INFO:
	case IB_MAD_ATTR_P_KEY_TABLE:
	case IB_MAD_ATTR_SLVL_TABLE:
	case IB_MAD_ATTR_VL_ARBITRATION:
	case IB_MAD_ATTR_VENDOR_DIAG:
	case IB_MAD_ATTR_LED_INFO:
		route = ROUTE_LOCAL;
		break;

	default:
		route = ROUTE_DISPATCHER;
		break;
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return route;
}
\r
/*
 * Route a received GMP.
 */
static mad_route_t
route_recv_gmp(
	IN				ib_mad_element_t*			p_mad_element )
{
	mad_route_t				route;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_mad_element );

	/* Process the received GMP. */
	switch( p_mad_element->p_mad_buf->method )
	{
	case IB_MAD_METHOD_GET:
	case IB_MAD_METHOD_SET:
		/* Route vendor specific MADs to the HCA provider. */
		if( ib_class_is_vendor_specific(
			p_mad_element->p_mad_buf->mgmt_class ) )
		{
			route = ROUTE_LOCAL;
		}
		else
		{
			route = route_recv_gmp_attr( p_mad_element );
		}
		break;

	default:
		route = ROUTE_DISPATCHER;
		break;
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return route;
}
\r
/*
 * Route received GMP attributes.
 */
static mad_route_t
route_recv_gmp_attr(
	IN				ib_mad_element_t*			p_mad_element )
{
	mad_route_t				route;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_mad_element );

	/* Process the received GMP attributes. */
	if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )
		route = ROUTE_LOCAL;
	else
		route = ROUTE_DISPATCHER;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return route;
}
\r
/*
 * Forward a locally generated Subnet Management trap.
 */
static ib_api_status_t
forward_sm_trap(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_av_attr_t			av_attr;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/* Check the SMP class. */
	if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )
	{
		/*
		 * Per IBA Specification Release 1.1 Section 14.2.2.1,
		 * "C14-5: Only a SM shall originate a directed route SMP."
		 * Therefore all traps should be LID routed; drop this one.
		 */
		CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
		return IB_ERROR;
	}

	/* Create an address vector for the SM. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num = p_spl_qp_svc->port_num;
	av_attr.sl = p_mad_element->remote_sl;
	av_attr.dlid = p_mad_element->remote_lid;
	if( p_mad_element->grh_valid )
	{
		cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );
		av_attr.grh.src_gid = p_mad_element->p_grh->dest_gid;
		av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;
		av_attr.grh_valid = TRUE;
	}

	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_element->h_av );

	if( status != IB_SUCCESS )
	{
		CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
		return status;
	}

	/* Complete the initialization of the MAD element. */
	p_mad_element->p_next = NULL;
	p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
	p_mad_element->resp_expected = FALSE;

	/* Clear context1 for proper send completion callback processing. */
	p_mad_element->context1 = NULL;

	/*
	 * Forward the trap.  Note that because forwarded traps use AL MAD
	 * services, the upper 32-bits of the TID are reserved by the access
	 * layer.  When matching a Trap Repress MAD, the SMA must only use
	 * the lower 32-bits of the TID.
	 */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );

	if( status != IB_SUCCESS )
		ib_destroy_av( p_mad_element->h_av );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
	return status;
}
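/*
 * The address vector built above describes the reverse of the path on which
 * the trap arrived: the received source LID and SL become the destination,
 * and when a GRH is present the source and destination GIDs are swapped.
 * The h_av created here appears to be released by the alias send completion
 * callback once the ib_send_mad() request completes.
 */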
\r
/*
 * Process a locally routed MAD received from the special QP.
 */
static ib_api_status_t
recv_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request )
{
	ib_mad_t*				p_mad_hdr;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_request );

	/* Initialize the MAD element. */
	p_mad_hdr = ib_get_mad_buf( p_mad_request );
	p_mad_request->context1 = p_mad_request;

	/* Save the TID. */
	p_mad_request->context2 =
		(void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );

/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )

	/*
	 * We need to get a response from the local HCA to this MAD only if this
	 * MAD is not itself a response.
	 */
	p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
		( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
	p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
	p_mad_request->send_opt = IB_SEND_OPT_LOCAL;

	/* Send the locally addressed MAD request to the CA for processing. */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
	return status;
}
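/*
 * TID handling: the AL-owned portion of the transaction ID is stashed in
 * context2 and cleared before the request is handed to the local HCA, so
 * the HCA's agent sees the TID exactly as the remote requester sent it.
 * spl_qp_alias_recv_cb() restores the saved value into the response before
 * it is sent back, so the response TID matches on the remote side.
 */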
\r
/*
 * Special QP alias send completion callback.
 */
void
spl_qp_alias_send_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void*						mad_svc_context,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	UNUSED_PARAM( h_mad_svc );
	UNUSED_PARAM( mad_svc_context );
	CL_ASSERT( p_mad_element );

	if( p_mad_element->h_av )
	{
		status = ib_destroy_av( p_mad_element->h_av );
		CL_ASSERT( status == IB_SUCCESS );
	}

	status = ib_put_mad( p_mad_element );
	CL_ASSERT( status == IB_SUCCESS );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
\r
/*
 * Special QP alias receive completion callback.
 */
void
spl_qp_alias_recv_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void*						mad_svc_context,
	IN				ib_mad_element_t*			p_mad_response )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_mad_element_t*		p_mad_request;
	ib_mad_t*				p_mad_hdr;
	ib_av_attr_t			av_attr;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( mad_svc_context );
	CL_ASSERT( p_mad_response );
	CL_ASSERT( p_mad_response->send_context1 );

	/* Initialize pointers. */
	p_spl_qp_svc = mad_svc_context;
	p_mad_request = p_mad_response->send_context1;
	p_mad_hdr = ib_get_mad_buf( p_mad_response );

	/* Restore the TID, so it will match on the remote side. */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id,
		(uint32_t)(uintn_t)p_mad_response->send_context2 );
#pragma warning( pop )

	/* Set the remote QP. */
	p_mad_response->remote_qp = p_mad_request->remote_qp;
	p_mad_response->remote_qkey = p_mad_request->remote_qkey;

	/* Prepare to create an address vector. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num = p_spl_qp_svc->port_num;
	av_attr.sl = p_mad_request->remote_sl;
	av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
	av_attr.path_bits = p_mad_request->path_bits;
	if( p_mad_request->grh_valid )
	{
		cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );
		av_attr.grh.src_gid = p_mad_request->p_grh->dest_gid;
		av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;
		av_attr.grh_valid = TRUE;
	}
	if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&
		( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )
	{
		av_attr.dlid = IB_LID_PERMISSIVE;
	}
	else
	{
		av_attr.dlid = p_mad_request->remote_lid;
	}

	/* Create an address vector. */
	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_response->h_av );

	if( status != IB_SUCCESS )
	{
		ib_put_mad( p_mad_response );

		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return;
	}

	/* Send the response. */
	status = ib_send_mad( h_mad_svc, p_mad_response, NULL );

	if( status != IB_SUCCESS )
	{
		ib_destroy_av( p_mad_response->h_av );
		ib_put_mad( p_mad_response );
	}

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
\r
/*
 * Post receive buffers to a special QP.
 */
static ib_api_status_t
spl_qp_svc_post_recvs(
	IN				spl_qp_svc_t*	const		p_spl_qp_svc )
{
	ib_mad_element_t*		p_mad_element;
	al_mad_element_t*		p_al_element;
	ib_recv_wr_t			recv_wr;
	ib_api_status_t			status = IB_SUCCESS;

	/* Attempt to post receive buffers up to the max_qp_depth limit. */
	while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <
		(int32_t)p_spl_qp_svc->max_qp_depth )
	{
		/* Get a MAD element from the pool. */
		status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,
			MAD_BLOCK_SIZE, &p_mad_element );

		if( status != IB_SUCCESS ) break;

		p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
			element );

		/* Build the receive work request. */
		recv_wr.p_next = NULL;
		recv_wr.wr_id = (uintn_t)p_al_element;
		recv_wr.num_ds = 1;
		recv_wr.ds_array = &p_al_element->grh_ds;

		/* Queue the receive on the service tracking list. */
		cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,
			&p_al_element->list_item );

		/* Post the receive. */
		status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );

		if( status != IB_SUCCESS )
		{
			AL_TRACE( AL_DBG_ERROR,
				("Failed to post receive %p\n", p_al_element) );
			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_element->list_item );

			ib_put_mad( p_mad_element );
			break;
		}
	}

	return status;
}
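/*
 * The recv_queue length is used as the count of receives currently posted to
 * the QP: elements are added here just before ib_post_recv() and removed in
 * the receive completion path, so each call simply tops the QP back up to
 * max_qp_depth outstanding receive buffers.
 */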
\r
/*
 * Special QP service asynchronous event callback.
 */
void
spl_qp_svc_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec )
{
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_event_rec );
	CL_ASSERT( p_event_rec->context );

	if( p_event_rec->code == IB_AE_SQ_DRAINED )
	{
		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return;
	}

	p_spl_qp_svc = p_event_rec->context;

	spl_qp_svc_reset( p_spl_qp_svc );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
\r
/*
 * Special QP service reset.
 */
void
spl_qp_svc_reset(
	IN				spl_qp_svc_t*				p_spl_qp_svc )
{
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}

	/* Change the special QP service to the error state. */
	p_spl_qp_svc->state = SPL_QP_ERROR;

	/* Flag the service as in use by the asynchronous processing thread. */
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Queue an asynchronous processing item to reset the special QP. */
	cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );
}
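/*
 * The actual QP reset is deferred to the asynchronous processing thread
 * (spl_qp_svc_reset_cb).  Marking the state SPL_QP_ERROR here makes the
 * completion and receive-posting paths back off immediately; the in_use_cnt
 * increment is matched by a decrement at the end of the reset callback, which
 * also waits for the count to drop to one before touching the QP.
 */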
\r
/*
 * Asynchronous processing thread callback to reset the special QP service.
 */
void
spl_qp_svc_reset_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_qp_mod_t				qp_mod;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );

	/* Wait here until the special QP service is only in use by this thread. */
	while( p_spl_qp_svc->in_use_cnt != 1 )
	{
		cl_thread_suspend( 0 );
	}

	/* Change the QP to the RESET state. */
	cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
	qp_mod.req_state = IB_QPS_RESET;

	status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );
	CL_ASSERT( status == IB_SUCCESS );

	/* Return receive MAD elements to the pool. */
	for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
		p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
		p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
	{
		p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

		status = ib_put_mad( &p_al_mad->element );
		CL_ASSERT( status == IB_SUCCESS );
	}

	/* Re-initialize the QP. */
	status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
	CL_ASSERT( status == IB_SUCCESS );

	/* Poll to remove any remaining send completions from the CQ. */
	do
	{
		cl_memclr( &wc, sizeof( ib_wc_t ) );
		p_free_wc = &wc;
		status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );

	} while( status == IB_SUCCESS );

	/* Post receive buffers. */
	spl_qp_svc_post_recvs( p_spl_qp_svc );

	/*
	 * Re-queue any outstanding MAD send operations.
	 * Work from tail to head to maintain the request order.
	 */
	for( p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue );
		p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
		p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue ) )
	{
		p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
		special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );
	}

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state == SPL_QP_ERROR )
	{
		/* The QP is ready.  Change the state. */
		p_spl_qp_svc->state = SPL_QP_ACTIVE;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* Re-arm the CQs. */
		status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );
		status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );

		/* Resume send processing. */
		special_qp_resume_sends( p_spl_qp_svc->h_qp );
	}
	else
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
\r
/*
 * Special QP alias asynchronous event callback.
 */
void
spl_qp_alias_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec )
{
	UNUSED_PARAM( p_event_rec );
}
\r
/*
 * Acquire the SMI dispatcher for the given port.
 */
ib_api_status_t
acquire_smi_disp(
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t* const	ph_mad_disp )
{
	CL_ASSERT( gp_spl_qp_mgr );
	return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );
}
\r
/*
 * Acquire the GSI dispatcher for the given port.
 */
ib_api_status_t
acquire_gsi_disp(
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t* const	ph_mad_disp )
{
	CL_ASSERT( gp_spl_qp_mgr );
	return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );
}
\r
/*
 * Acquire the service dispatcher for the given port.
 */
static ib_api_status_t
acquire_svc_disp(
	IN		const	cl_qmap_t* const			p_svc_map,
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t		*ph_mad_disp )
{
	cl_map_item_t*			p_svc_item;
	spl_qp_svc_t*			p_spl_qp_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_svc_map );
	CL_ASSERT( gp_spl_qp_mgr );

	/* Search for the SMI or GSI service for the given port. */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
	p_svc_item = cl_qmap_get( p_svc_map, port_guid );
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );
	if( p_svc_item == cl_qmap_end( p_svc_map ) )
	{
		/* The port does not have an active agent. */
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_GUID;
	}

	p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );

	/* Found a match.  Get the MAD dispatcher handle. */
	*ph_mad_disp = p_spl_qp_svc->h_mad_disp;

	/* Reference the MAD dispatcher on behalf of the client. */
	ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
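/*
 * On success the caller holds a reference on the returned MAD dispatcher
 * (taken via ref_al_obj above) and is expected to release it with
 * deref_al_obj() when the handle is no longer needed.
 */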
\r
/*
 * Force a poll for CA attribute changes.
 */
void
force_smi_poll(
	void )
{
	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	/*
	 * Stop the poll timer.  Just invoke the timer callback directly to
	 * save the thread context switching.
	 */
	smi_poll_timer_cb( gp_spl_qp_mgr );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
\r
/*
 * Poll for CA port attribute changes.
 */
void
smi_poll_timer_cb(
	IN				void*						context )
{
	cl_status_t				cl_status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( context );
	CL_ASSERT( gp_spl_qp_mgr == context );
	UNUSED_PARAM( context );

	/*
	 * Scan for changes on the local HCAs.  Since the PnP manager has its
	 * own thread for processing changes, we kick off that thread in parallel
	 * with reposting receive buffers to the SQP agents.
	 */

	/*
	 * To handle the case where force_smi_poll is called at the same time
	 * the timer expires, check if the asynchronous processing item is in
	 * use.  If it is already in use, it means that we're about to poll
	 * anyway, so just ignore this call.
	 */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );

	/* Perform port processing on the special QP agents. */
	cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,
		gp_spl_qp_mgr );

	/* Determine if there are any special QP agents to poll. */
	if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )
	{
		/* Restart the polling timer. */
		cl_status =
			cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );
		CL_ASSERT( cl_status == CL_SUCCESS );
	}
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
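/*
 * The timer is re-armed only while at least one special QP agent exists and
 * g_smi_poll_interval is non-zero, so setting g_smi_poll_interval to zero
 * stops periodic polling after the current pass; force_smi_poll() can still
 * drive a poll on demand by invoking this callback directly.
 */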
\r
/*
 * Post receive buffers to a special QP.
 */
void
smi_post_recvs(
	IN				cl_list_item_t*	const		p_list_item,
	IN				void*						context )
{
	al_obj_t*				p_obj;
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_list_item );
	UNUSED_PARAM( context );

	p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return;
	}

	spl_qp_svc_post_recvs( p_spl_qp_svc );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
\r