/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <iba/ib_al.h>
#include <complib/cl_timer.h>

#include "ib_common.h"
#include "al_common.h"
#include "al_debug.h"

#if defined(EVENT_TRACING)
#include "al_smi.tmh"
#endif

#include "al_verbs.h"
extern char node_desc[IB_NODE_DESCRIPTION_SIZE];

#define SMI_POLL_INTERVAL       20000       /* Milliseconds */
#define LOCAL_MAD_TIMEOUT       50          /* Milliseconds */
#define DEFAULT_QP0_DEPTH       256
#define DEFAULT_QP1_DEPTH       1024

uint32_t        g_smi_poll_interval = SMI_POLL_INTERVAL;
spl_qp_mgr_t*   gp_spl_qp_mgr = NULL;
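/*
 * A zero poll interval disables periodic SMI polling: create_spl_qp_svc
 * only arms the poll timer when g_smi_poll_interval is non-zero.
 */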
/*
 * Function prototypes.
 */
destroying_spl_qp_mgr(
    IN al_obj_t* p_obj );

    IN al_obj_t* p_obj );

spl_qp0_agent_pnp_cb(
    IN ib_pnp_rec_t* p_pnp_rec );

spl_qp1_agent_pnp_cb(
    IN ib_pnp_rec_t* p_pnp_rec );

    IN ib_pnp_rec_t* p_pnp_rec,
    IN ib_qp_type_t qp_type );

    IN ib_pnp_port_rec_t* p_pnp_rec,
    IN const ib_qp_type_t qp_type );

destroying_spl_qp_svc(
    IN al_obj_t* p_obj );

    IN al_obj_t* p_obj );

spl_qp_svc_lid_change(
    IN al_obj_t* p_obj,
    IN ib_pnp_port_rec_t* p_pnp_rec );

    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr );

static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr );

static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr );

static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr );

static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr );

    IN cl_async_proc_item_t* p_item );

spl_qp_send_comp_cb(
    IN const ib_cq_handle_t h_cq,
    IN void *cq_context );

spl_qp_recv_comp_cb(
    IN const ib_cq_handle_t h_cq,
    IN void *cq_context );

    IN spl_qp_svc_t* p_spl_qp_svc,
    IN const ib_cq_handle_t h_cq,
    IN ib_wc_type_t wc_type );

    IN spl_qp_svc_t* p_spl_qp_svc,
    IN ib_mad_element_t* p_mad_element );

    IN ib_mad_element_t* p_mad_element );

route_recv_smp_attr(
    IN ib_mad_element_t* p_mad_element );

    IN ib_mad_element_t* p_mad_element );

    IN ib_mad_element_t* p_mad_element );

route_recv_gmp_attr(
    IN ib_mad_element_t* p_mad_element );

    IN spl_qp_svc_t* p_spl_qp_svc,
    IN ib_mad_element_t* p_mad_element );

    IN spl_qp_svc_t* p_spl_qp_svc,
    IN ib_mad_element_t* p_mad_request );

spl_qp_alias_send_cb(
    IN ib_mad_svc_handle_t h_mad_svc,
    IN void *mad_svc_context,
    IN ib_mad_element_t *p_mad_element );

spl_qp_alias_recv_cb(
    IN ib_mad_svc_handle_t h_mad_svc,
    IN void *mad_svc_context,
    IN ib_mad_element_t *p_mad_response );

static ib_api_status_t
spl_qp_svc_post_recvs(
    IN spl_qp_svc_t* const p_spl_qp_svc );

spl_qp_svc_event_cb(
    IN ib_async_event_rec_t *p_event_rec );

spl_qp_alias_event_cb(
    IN ib_async_event_rec_t *p_event_rec );

    IN spl_qp_svc_t* p_spl_qp_svc );

spl_qp_svc_reset_cb(
    IN cl_async_proc_item_t* p_item );

    IN const cl_qmap_t* const p_svc_map,
    IN const ib_net64_t port_guid,
    OUT al_mad_disp_handle_t *ph_mad_disp );

    IN void* context );

    IN cl_list_item_t* const p_list_item,
    IN void* context );

#if defined( CL_USE_MUTEX )
spl_qp_send_async_cb(
    IN cl_async_proc_item_t* p_item );

spl_qp_recv_async_cb(
    IN cl_async_proc_item_t* p_item );
#endif
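/*
 * When CL_USE_MUTEX is defined, the spl_qp_send_async_cb/spl_qp_recv_async_cb
 * items above let the CQ completion callbacks defer completion processing to
 * the asynchronous processing thread; otherwise the callbacks invoke
 * spl_qp_comp() directly (see spl_qp_send_comp_cb and spl_qp_recv_comp_cb).
 */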
/*
 * Create the special QP manager.
 */
    IN al_obj_t* const p_parent_obj )

    ib_pnp_req_t pnp_req;
    ib_api_status_t status;
    cl_status_t cl_status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_parent_obj );
    CL_ASSERT( !gp_spl_qp_mgr );

    gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
    if( !gp_spl_qp_mgr )
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("IB_INSUFFICIENT_MEMORY\n") );
        return IB_INSUFFICIENT_MEMORY;

    /* Construct the special QP manager. */
    construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
    cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

    /* Initialize the lists. */
    cl_qmap_init( &gp_spl_qp_mgr->smi_map );
    cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

    /* Initialize the global SMI/GSI manager object. */
    status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
        destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
    if( status != IB_SUCCESS )
        free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );

    /* Attach the special QP manager to the parent object. */
    status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
    if( status != IB_SUCCESS )
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

    /* Initialize the SMI polling timer. */
    cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
    if( cl_status != CL_SUCCESS )
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("cl_timer_init failed, status 0x%x\n", cl_status ) );
        return ib_convert_cl_status( cl_status );
    /*
     * Note: PnP registrations for port events must be done
     * when the special QP manager is created.  This ensures that
     * the registrations are listed sequentially and the reporting
     * of PnP events occurs in the proper order.
     */

    /*
     * Separate context is needed for each special QP.  Therefore, a
     * separate PnP event registration is performed for QP0 and QP1.
     */

    /* Register for port PnP events for QP0. */
    cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
    pnp_req.pnp_class = IB_PNP_PORT;
    pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
    pnp_req.pfn_pnp_cb = spl_qp0_agent_pnp_cb;

    status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

    if( status != IB_SUCCESS )
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );

    /* Reference the special QP manager on behalf of the ib_reg_pnp call. */
    ref_al_obj( &gp_spl_qp_mgr->obj );

    /* Register for port PnP events for QP1. */
    cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
    pnp_req.pnp_class = IB_PNP_PORT;
    pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
    pnp_req.pfn_pnp_cb = spl_qp1_agent_pnp_cb;

    status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

    if( status != IB_SUCCESS )
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );

    /*
     * Note that we don't release the reference taken in init_al_obj
     * because we need one on behalf of the ib_reg_pnp call.
     */

    AL_EXIT( AL_DBG_SMI );
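/*
 * Because QP0 and QP1 each have their own ib_reg_pnp registration with a
 * distinct callback, every port PnP event is delivered twice to
 * spl_qp_agent_pnp(), once per special QP type, so each port ends up with
 * one QP0 service and one QP1 service.
 */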
/*
 * Pre-destroy the special QP manager.
 */
destroying_spl_qp_mgr(
    IN al_obj_t* p_obj )

    ib_api_status_t status;

    CL_ASSERT( p_obj );
    CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
    UNUSED_PARAM( p_obj );

    /* Deregister for port PnP events for QP0. */
    if( gp_spl_qp_mgr->h_qp0_pnp )
        status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

    /* Deregister for port PnP events for QP1. */
    if( gp_spl_qp_mgr->h_qp1_pnp )
        status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

    /* Destroy the SMI polling timer. */
    cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );
/*
 * Free the special QP manager.
 */
    IN al_obj_t* p_obj )

    CL_ASSERT( p_obj );
    CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
    UNUSED_PARAM( p_obj );

    destroy_al_obj( &gp_spl_qp_mgr->obj );
    cl_free( gp_spl_qp_mgr );
    gp_spl_qp_mgr = NULL;
/*
 * Special QP0 agent PnP event callback.
 */
spl_qp0_agent_pnp_cb(
    IN ib_pnp_rec_t* p_pnp_rec )

    ib_api_status_t status;
    AL_ENTER( AL_DBG_SMI_CB );

    status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );

    AL_EXIT( AL_DBG_SMI_CB );

/*
 * Special QP1 agent PnP event callback.
 */
spl_qp1_agent_pnp_cb(
    IN ib_pnp_rec_t* p_pnp_rec )

    ib_api_status_t status;
    AL_ENTER( AL_DBG_SMI_CB );

    status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );

    AL_EXIT( AL_DBG_SMI );
/*
 * Special QP agent PnP event callback.
 */
    IN ib_pnp_rec_t* p_pnp_rec,
    IN ib_qp_type_t qp_type )

    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI_CB );

    CL_ASSERT( p_pnp_rec );
    p_obj = p_pnp_rec->context;

    /* Dispatch based on the PnP event type. */
    switch( p_pnp_rec->pnp_event )
    case IB_PNP_PORT_ADD:
        CL_ASSERT( !p_obj );
        status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );

    case IB_PNP_PORT_REMOVE:
        CL_ASSERT( p_obj );
        ref_al_obj( p_obj );
        p_obj->pfn_destroy( p_obj, NULL );
        status = IB_SUCCESS;

    case IB_PNP_LID_CHANGE:
        CL_ASSERT( p_obj );
        spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );
        status = IB_SUCCESS;

        /* All other events are ignored. */
        status = IB_SUCCESS;

    AL_EXIT( AL_DBG_SMI );
/*
 * Create a special QP service.
 */
    IN ib_pnp_port_rec_t* p_pnp_rec,
    IN const ib_qp_type_t qp_type )

    cl_status_t cl_status;
    spl_qp_svc_t* p_spl_qp_svc;
    ib_ca_handle_t h_ca;
    ib_cq_create_t cq_create;
    ib_qp_create_t qp_create;
    ib_qp_attr_t qp_attr;
    ib_mad_svc_t mad_svc;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_pnp_rec );

    if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
        return IB_INVALID_PARAMETER;

    CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );
    CL_ASSERT( p_pnp_rec->p_ca_attr );
    CL_ASSERT( p_pnp_rec->p_port_attr );

    p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );
    if( !p_spl_qp_svc )
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("IB_INSUFFICIENT_MEMORY\n") );
        return IB_INSUFFICIENT_MEMORY;

    /* Tie the special QP service to the port by setting the port number. */
    p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;
    /* Store the port GUID to allow faster lookups of the dispatchers. */
    p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;

    /* Initialize the send and receive queues. */
    cl_qlist_init( &p_spl_qp_svc->send_queue );
    cl_qlist_init( &p_spl_qp_svc->recv_queue );
#if defined( CL_USE_MUTEX )
    /* Initialize async callbacks and flags for send/receive processing. */
    p_spl_qp_svc->send_async_queued = FALSE;
    p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;
    p_spl_qp_svc->recv_async_queued = FALSE;
    p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;
#endif

    /* Initialize the async callback function to process local sends. */
    p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;

    /* Initialize the async callback function to reset the QP on error. */
    p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;

    /* Construct the special QP service object. */
    construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );

    /* Initialize the special QP service object. */
    status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,
        destroying_spl_qp_svc, NULL, free_spl_qp_svc );
    if( status != IB_SUCCESS )
        free_spl_qp_svc( &p_spl_qp_svc->obj );

    /* Attach the special QP service to the parent object. */
    status = attach_al_obj(
        (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );
    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

    h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );
    if( !h_ca )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );
        return IB_INVALID_GUID;

    p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;

    /* Determine the maximum queue depth of the QP and CQs. */
    p_spl_qp_svc->max_qp_depth =
        ( p_pnp_rec->p_ca_attr->max_wrs <
        p_pnp_rec->p_ca_attr->max_cqes ) ?
        p_pnp_rec->p_ca_attr->max_wrs :
        p_pnp_rec->p_ca_attr->max_cqes;

    /* Compare this maximum to the default special queue depth. */
    if( ( qp_type == IB_QPT_QP0 ) &&
        ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )
        p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;
    if( ( qp_type == IB_QPT_QP1 ) &&
        ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )
        p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;
    /* Create the send CQ. */
    cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
    cq_create.size = p_spl_qp_svc->max_qp_depth;
    cq_create.pfn_comp_cb = spl_qp_send_comp_cb;

    status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,
        p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );

    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );

    /* Reference the special QP service on behalf of ib_create_cq. */
    ref_al_obj( &p_spl_qp_svc->obj );

    /* Check the result of the creation request. */
    if( cq_create.size < p_spl_qp_svc->max_qp_depth )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq allocated insufficient send CQ size\n") );
        return IB_INSUFFICIENT_RESOURCES;

    /* Create the receive CQ. */
    cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
    cq_create.size = p_spl_qp_svc->max_qp_depth;
    cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;

    status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,
        p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );

    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );

    /* Reference the special QP service on behalf of ib_create_cq. */
    ref_al_obj( &p_spl_qp_svc->obj );

    /* Check the result of the creation request. */
    if( cq_create.size < p_spl_qp_svc->max_qp_depth )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq allocated insufficient recv CQ size\n") );
        return IB_INSUFFICIENT_RESOURCES;
    /* Create the special QP. */
    cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
    qp_create.qp_type = qp_type;
    qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;
    qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;
    qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */
    qp_create.rq_sge = 1;
    qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;
    qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;
    qp_create.sq_signaled = TRUE;

    status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,
        p_pnp_rec->p_port_attr->port_guid, &qp_create,
        p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );

    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );

    /* Reference the special QP service on behalf of ib_get_spl_qp. */
    ref_al_obj( &p_spl_qp_svc->obj );

    /* Check the result of the creation request. */
    status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );
    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );

    if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||
        ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||
        ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_get_spl_qp allocated attributes are insufficient\n") );
        return IB_INSUFFICIENT_RESOURCES;

    /* Initialize the QP for use. */
    status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );
    /* Post receive buffers. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    status = spl_qp_svc_post_recvs( p_spl_qp_svc );
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );
    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("spl_qp_svc_post_recvs failed, %s\n",
            ib_get_err_str( status ) ) );

    /* Create the MAD dispatcher. */
    status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,
        &p_spl_qp_svc->h_mad_disp );
    if( status != IB_SUCCESS )
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );

    /*
     * Add this service to the special QP manager lookup lists.
     * The service must be added to allow the creation of a QP alias.
     */
    cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
    if( qp_type == IB_QPT_QP0 )
        cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,
            &p_spl_qp_svc->map_item );
    else
        cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,
            &p_spl_qp_svc->map_item );
    cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );
    /*
     * If the CA does not support HW agents, create a QP alias and register
     * a MAD service for sending responses from the local MAD interface.
     */
    if( check_local_mad( p_spl_qp_svc->h_qp ) )
        /* Create a QP alias. */
        cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
        qp_create.qp_type =
            ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;
        qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;
        qp_create.sq_sge = 1;
        qp_create.sq_signaled = TRUE;

        status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,
            p_pnp_rec->p_port_attr->port_guid, &qp_create,
            p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,
            &p_spl_qp_svc->h_qp_alias );

        if (status != IB_SUCCESS)
            p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
            AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("ib_get_spl_qp alias failed, %s\n",
                ib_get_err_str( status ) ) );

        /* Reference the special QP service on behalf of ib_get_spl_qp. */
        ref_al_obj( &p_spl_qp_svc->obj );

        /* Register a MAD service for sends. */
        cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );
        mad_svc.mad_svc_context = p_spl_qp_svc;
        mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;
        mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;

        status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,
            &p_spl_qp_svc->h_mad_svc );

        if( status != IB_SUCCESS )
            p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
            AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );
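    /*
     * Note: check_local_mad() returning TRUE indicates the HCA has no
     * hardware SMA/GSA, so AL itself must generate MAD responses; the
     * *_ALIAS QP and MAD service registered above appear to exist solely so
     * those locally generated responses can be sent from the local MAD
     * interface.
     */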
    /* Set the context of the PnP event to this child object. */
    p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;

    /* The QP is ready.  Change the state. */
    p_spl_qp_svc->state = SPL_QP_ACTIVE;

    /* Force a completion callback to rearm the CQs. */
    spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );
    spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );

    /* Start the polling thread timer. */
    if( g_smi_poll_interval )
        cl_status =
            cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );

        if( cl_status != CL_SUCCESS )
            p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
            AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("cl_timer_start failed, status 0x%x\n", cl_status ) );
            return ib_convert_cl_status( cl_status );

    /* Release the reference taken in init_al_obj. */
    deref_al_obj( &p_spl_qp_svc->obj );

    AL_EXIT( AL_DBG_SMI );
/*
 * Return a work completion to the MAD dispatcher for the specified MAD.
 */
__complete_send_mad(
    IN const al_mad_disp_handle_t h_mad_disp,
    IN al_mad_wr_t* const p_mad_wr,
    IN const ib_wc_status_t wc_status )

    /* Construct a send work completion. */
    cl_memclr( &wc, sizeof( ib_wc_t ) );
    wc.wr_id = p_mad_wr->send_wr.wr_id;
    wc.wc_type = IB_WC_SEND;
    wc.status = wc_status;

    /* Set the send size if we were successful with the send. */
    if( wc_status == IB_WCS_SUCCESS )
        wc.length = MAD_BLOCK_SIZE;

    mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );
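/*
 * The synthesized work completion above lets MADs that never reach the
 * hardware (discarded, looped back, or answered by the local MAD code)
 * complete through the same mad_disp_send_done() path as real HCA send
 * completions.
 */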
/*
 * Pre-destroy a special QP service.
 */
destroying_spl_qp_svc(
    IN al_obj_t* p_obj )

    spl_qp_svc_t* p_spl_qp_svc;
    cl_list_item_t* p_list_item;
    al_mad_wr_t* p_mad_wr;

    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_obj );
    p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

    /* Change the state to prevent processing new send requests. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->state = SPL_QP_DESTROYING;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    /* Wait here until the special QP service is no longer in use. */
    while( p_spl_qp_svc->in_use_cnt )
        cl_thread_suspend( 0 );

    /* Destroy the special QP. */
    if( p_spl_qp_svc->h_qp )
        /* If present, remove the special QP service from the tracking map. */
        cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
        if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
            cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
        else
            cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
        cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

        status = ib_destroy_qp( p_spl_qp_svc->h_qp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

        cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

        /* Complete any outstanding MAD send operations as "flushed". */
        for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
            p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
            p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
            cl_spinlock_release( &p_spl_qp_svc->obj.lock );
            p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
            __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
                IB_WCS_WR_FLUSHED_ERR );
            cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

        cl_spinlock_release( &p_spl_qp_svc->obj.lock );
        /* Receive MAD elements are returned to the pool by the free routine. */
    /* Destroy the special QP alias and CQs. */
    if( p_spl_qp_svc->h_qp_alias )
        status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

    if( p_spl_qp_svc->h_send_cq )
        status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

    if( p_spl_qp_svc->h_recv_cq )
        status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

    AL_EXIT( AL_DBG_SMI );
/*
 * Free a special QP service.
 */
    IN al_obj_t* p_obj )

    spl_qp_svc_t* p_spl_qp_svc;
    cl_list_item_t* p_list_item;
    al_mad_element_t* p_al_mad;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_obj );
    p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

    /* Dereference the CA. */
    if( p_spl_qp_svc->obj.p_ci_ca )
        deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );

    /* Return receive MAD elements to the pool. */
    for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
        p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
        p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
        p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

        status = ib_put_mad( &p_al_mad->element );
        CL_ASSERT( status == IB_SUCCESS );

    CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );

    destroy_al_obj( &p_spl_qp_svc->obj );
    cl_free( p_spl_qp_svc );

    AL_EXIT( AL_DBG_SMI );
/*
 * Update the base LID of a special QP service.
 */
spl_qp_svc_lid_change(
    IN al_obj_t* p_obj,
    IN ib_pnp_port_rec_t* p_pnp_rec )

    spl_qp_svc_t* p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_obj );
    CL_ASSERT( p_pnp_rec );
    CL_ASSERT( p_pnp_rec->p_port_attr );

    p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

    p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;
    p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;

    AL_EXIT( AL_DBG_SMI );
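/*
 * Caching base_lid and lmc here lets the directed route SMP checks in
 * process_mad_recv() validate dr_slid/dr_dlid against the port's LID range
 * (base_lid through base_lid + 2^lmc - 1) without querying the CA on every
 * receive.
 */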
/*
 * Route a send work request.
 */
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN ib_send_wr_t* const p_send_wr )

    al_mad_wr_t* p_mad_wr;
    al_mad_send_t* p_mad_send;

    ib_av_handle_t h_av;
    mad_route_t route;
    boolean_t local, loopback, discard;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_send_wr );

    /* Initialize pointers to the MAD work request and the MAD. */
    p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
    p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    p_smp = (ib_smp_t*)p_mad;

    /* Check if the CA has a local MAD interface. */
    local = loopback = discard = FALSE;
    if( check_local_mad( p_spl_qp_svc->h_qp ) )
        /*
         * If the MAD is a locally addressed Subnet Management, Performance
         * Management, or Connection Management datagram, process the work
         * request locally.
         */
        h_av = p_send_wr->dgrm.ud.h_av;
        switch( p_mad->mgmt_class )
        case IB_MCLASS_SUBN_DIR:
            /* Perform special checks on directed route SMPs. */
            if( ib_smp_is_response( p_smp ) )
                /*
                 * This node is the originator of the response.  Discard
                 * if the hop count or pointer is zero, an intermediate hop,
                 * out of bounds hop, or if the first port of the directed
                 * route return path is not this port.
                 */
                if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("hop cnt or hop ptr set to 0...discarding\n") );
                else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("hop cnt != (hop ptr - 1)...discarding\n") );
                else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("hop cnt > max hops...discarding\n") );
                else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
                    ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
                    p_spl_qp_svc->port_num ) )
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("return path[hop ptr - 1] != port num...discarding\n") );
            /* The SMP is a request. */
            if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
                ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
            else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
                /* Self Addressed: Sent locally, routed locally. */
                discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
                    ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
            else if( ( p_smp->hop_count != 0 ) &&
                ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
                /* End of Path: Sent remotely, routed locally. */
            else if( ( p_smp->hop_count != 0 ) &&
                ( p_smp->hop_ptr == 0 ) )
                /* Beginning of Path: Sent locally, routed remotely. */
                if( p_smp->dr_slid == IB_LID_PERMISSIVE )
                    ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
                    p_spl_qp_svc->port_num );

                /* Intermediate hop. */

            /* Loopback locally addressed SM to SM "heartbeat" messages. */
            loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

        case IB_MCLASS_SUBN_LID:
            /* Loopback locally addressed SM to SM "heartbeat" messages. */
            loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

            /* Fall through to check for a local MAD. */

        case IB_MCLASS_PERF:
        case IB_MCLASS_BM:
            ( h_av->av_attr.dlid ==
            ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );

    /* Route vendor specific MADs to the HCA provider. */
    if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
        ( h_av->av_attr.dlid ==
        ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );

    route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
        ROUTE_LOCAL : ROUTE_REMOTE;
    if( local ) route = ROUTE_LOCAL;
    if( loopback && local ) route = ROUTE_LOOPBACK;
    if( discard ) route = ROUTE_DISCARD;

    AL_EXIT( AL_DBG_SMI );
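/*
 * Directed route SMP background for the checks above: hop_ptr and hop_count
 * index the initial_path[]/return_path[] arrays, and dr_slid/dr_dlid are
 * IB_LID_PERMISSIVE for purely directed route portions of the path, per the
 * IBA directed route rules.
 */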
/*
 * Send a work request on the special QP.
 */
    IN const ib_qp_handle_t h_qp,
    IN ib_send_wr_t* const p_send_wr )

    spl_qp_svc_t* p_spl_qp_svc;
    al_mad_wr_t* p_mad_wr;
    mad_route_t route;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( h_qp );
    CL_ASSERT( p_send_wr );

    /* Get the special QP service. */
    p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

    /* Determine how to route the MAD. */
    route = route_mad_send( p_spl_qp_svc, p_send_wr );

    /*
     * Check the QP state and guard against error handling.  Also,
     * to maintain proper order of work completions, delay processing
     * a local MAD until any remote MAD work requests have completed,
     * and delay processing a remote MAD until local MAD work requests
     * have completed.
     */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
        (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
        ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
        p_spl_qp_svc->max_qp_depth ) )
        /*
         * Return busy status.
         * The special QP will resume sends at this point.
         */
        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        AL_EXIT( AL_DBG_SMI );
        return IB_RESOURCE_BUSY;

    p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

    if( is_local( route ) )
        /* Save the local MAD work request for processing. */
        p_spl_qp_svc->local_mad_wr = p_mad_wr;

        /* Flag the service as in use by the asynchronous processing thread. */
        cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        status = local_mad_send( p_spl_qp_svc, p_mad_wr );
    else
        /* Process a remote MAD send work request. */
        status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    AL_EXIT( AL_DBG_SMI );
/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 */
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr )

    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize pointers to the MAD work request and outbound MAD. */
    p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

    /* Perform outbound MAD processing. */

    /* Adjust directed route SMPs as required by IBA. */
    if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
        if( ib_smp_is_response( p_smp ) )
            if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
        else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
            /*
             * Only update the pointer if the hw_agent is not implemented.
             * Fujitsu implements SMI in hardware, so the following has to
             * be passed down to the hardware SMI.
             */
            ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
            if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
            ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );

    /* Always generate send completions. */
    p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;

    /* Queue the MAD work request on the service tracking queue. */
    cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

    status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

    if( status != IB_SUCCESS )
        cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

        /* Reset directed route SMPs as required by IBA. */
        if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
            if( ib_smp_is_response( p_smp ) )
                if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
            else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
                /* Only update if the hw_agent is not implemented. */
                ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
                if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
                ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );

    AL_EXIT( AL_DBG_SMI );
/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 */
static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr )

    mad_route_t route;
    ib_api_status_t status = IB_SUCCESS;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Determine how to route the MAD. */
    route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

    /* Check if this MAD should be discarded. */
    if( is_discard( route ) )
        /* Deliver a "work completion" to the dispatcher. */
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        status = IB_INVALID_SETTING;
    else if( is_loopback( route ) )
        /* Loopback local SM to SM "heartbeat" messages. */
        status = loopback_mad( p_spl_qp_svc, p_mad_wr );
    else
        switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
        case IB_MCLASS_SUBN_DIR:
        case IB_MCLASS_SUBN_LID:
            status = process_subn_mad( p_spl_qp_svc, p_mad_wr );

            status = IB_NOT_DONE;

    if( status == IB_NOT_DONE )
        /* Queue an asynchronous processing item to process the local MAD. */
        cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
    else
        /*
         * Clear the local MAD pointer to allow processing of other MADs.
         * This is done after polling for attribute changes to ensure that
         * subsequent MADs pick up any changes performed by this one.
         */
        cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
        p_spl_qp_svc->local_mad_wr = NULL;
        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        /* No longer in use by the asynchronous processing thread. */
        cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

        /* Special QP operations will resume by unwinding. */

    AL_EXIT( AL_DBG_SMI );
    return IB_SUCCESS;
static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr,
    OUT ib_mad_element_t** const pp_mad_resp )

    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );
    CL_ASSERT( pp_mad_resp );

    /* Get a MAD element from the pool for the response. */
    status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,
        MAD_BLOCK_SIZE, pp_mad_resp );
    if( status != IB_SUCCESS )
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );

    AL_EXIT( AL_DBG_SMI );
static ib_api_status_t
complete_local_mad(
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr,
    IN ib_mad_element_t* const p_mad_resp )

    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );
    CL_ASSERT( p_mad_resp );

    /* Construct the receive MAD element. */
    p_mad_resp->status = IB_WCS_SUCCESS;
    p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;
    p_mad_resp->remote_lid = p_spl_qp_svc->base_lid;
    if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
        p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;
        p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;

    /*
     * Hand the receive MAD element to the dispatcher before completing
     * the send.  This guarantees that the send request cannot time out.
     */
    status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );

    /* Forward the send work completion to the dispatcher. */
    __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );

    AL_EXIT( AL_DBG_SMI );
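/*
 * For these simulated receives, remote_lid is the port's own base LID and
 * remote_qp comes from the original send, so the response looks to the
 * dispatcher as if it had arrived from the local port over the fabric.
 */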
static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr )

    ib_mad_element_t *p_mad_resp;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Get a MAD element from the pool for the response. */
    status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
    if( status == IB_SUCCESS )
        /* Initialize pointers to the MAD work request and outbound MAD. */
        p_mad = get_mad_hdr_from_wr( p_mad_wr );

        /* Simulate a send/receive between local managers. */
        cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );

        status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );

    AL_EXIT( AL_DBG_SMI );
static ib_api_status_t
process_node_info(
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr )

    ib_mad_element_t *p_mad_resp;

    ib_node_info_t *p_node_info;
    ib_ca_attr_t *p_ca_attr;
    ib_port_attr_t *p_port_attr;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize pointers to the MAD work request and outbound MAD. */
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    if( p_mad->method != IB_MAD_METHOD_GET )
        /* Node info is a GET-only attribute. */
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        AL_EXIT( AL_DBG_SMI );
        return IB_INVALID_SETTING;

    /* Get a MAD element from the pool for the response. */
    status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
    if( status == IB_SUCCESS )
        p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
        cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
        p_smp->method |= IB_MAD_METHOD_RESP_MASK;
        if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
            p_smp->status = IB_SMP_DIRECTION;
        else
            p_smp->status = 0;

        p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

        /*
         * Fill in the node info, protecting against the
         * attributes being changed by PnP.
         */
        cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

        p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
        p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

        p_node_info->base_version = 1;
        p_node_info->class_version = 1;
        p_node_info->node_type = IB_NODE_TYPE_CA;
        p_node_info->num_ports = p_ca_attr->num_ports;
        /* TODO: Get some unique identifier for the system */
        p_node_info->sys_guid = p_ca_attr->ca_guid;
        p_node_info->node_guid = p_ca_attr->ca_guid;
        p_node_info->port_guid = p_port_attr->port_guid;
        p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
        p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
        p_node_info->revision = cl_hton32( p_ca_attr->revision );
        p_node_info->port_num_vendor_id =
            cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
        cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

        status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );

    AL_EXIT( AL_DBG_SMI );
static ib_api_status_t
process_node_desc(
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr )

    ib_mad_element_t *p_mad_resp;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize pointers to the MAD work request and outbound MAD. */
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    if( p_mad->method != IB_MAD_METHOD_GET )
        /* Node description is a GET-only attribute. */
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        AL_EXIT( AL_DBG_SMI );
        return IB_INVALID_SETTING;

    /* Get a MAD element from the pool for the response. */
    status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
    if( status == IB_SUCCESS )
        cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
        p_mad_resp->p_mad_buf->method |= IB_MAD_METHOD_RESP_MASK;
        if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
            p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
        else
            p_mad_resp->p_mad_buf->status = 0;

        /* Set the node description to the machine name. */
        cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data,
            node_desc, sizeof(node_desc) );

        status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );

    AL_EXIT( AL_DBG_SMI );
/*
 * Process subnet management MADs using cached data if possible.
 */
static ib_api_status_t
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr )

    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

    CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||
        p_smp->mgmt_class == IB_MCLASS_SUBN_LID );

    switch( p_smp->attr_id )
    case IB_MAD_ATTR_NODE_INFO:
        status = process_node_info( p_spl_qp_svc, p_mad_wr );

    case IB_MAD_ATTR_NODE_DESC:
        status = process_node_desc( p_spl_qp_svc, p_mad_wr );

        status = IB_NOT_DONE;

    AL_EXIT( AL_DBG_SMI );
/*
 * Process a local MAD send work request.
 */
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN al_mad_wr_t* const p_mad_wr )

    al_mad_send_t* p_mad_send;
    ib_mad_element_t* p_mad_response;
    ib_mad_t* p_mad_response_buf;
    ib_api_status_t status = IB_SUCCESS;
    boolean_t smp_is_set;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize pointers to the MAD work request and outbound MAD. */
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    p_smp = (ib_smp_t*)p_mad;

    smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

    /* Get a MAD element from the pool for the response. */
    p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
//*** Commented code to work-around ib_local_mad() requiring a response MAD
//*** as input.  Remove comments once the ib_local_mad() implementation allows
//*** for a NULL response MAD, when one is not expected.
//*** Note that an attempt to route an invalid response MAD in this case
//*** will fail harmlessly.
//*** if( p_mad_send->p_send_mad->resp_expected )

    status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
    if( status != IB_SUCCESS )
        AL_EXIT( AL_DBG_SMI );

    p_mad_response_buf = p_mad_response->p_mad_buf;

//*** p_mad_response_buf = NULL;
    /* Adjust directed route SMPs as required by IBA. */
    if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
        CL_ASSERT( !ib_smp_is_response( p_smp ) );

        /*
         * If this was a self addressed, directed route SMP, increment
         * the hop pointer in the request before delivery as required
         * by IBA.  Otherwise, adjustment for remote requests occurs
         * during inbound processing.
         */
        if( p_smp->hop_count == 0 )

    /* Forward the locally addressed MAD to the CA interface. */
    status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
        p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );

    /* Reset directed route SMPs as required by IBA. */
    if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
        /*
         * If this was a self addressed, directed route SMP, decrement
         * the hop pointer in the response before delivery as required
         * by IBA.  Otherwise, adjustment for remote responses occurs
         * during outbound processing.
         */
        if( p_smp->hop_count == 0 )
            /* Adjust the request SMP. */

            /* Adjust the response SMP. */
            if( p_mad_response_buf )
                p_smp = (ib_smp_t*)p_mad_response_buf;

    if( status != IB_SUCCESS )
        if( p_mad_response )
            ib_put_mad( p_mad_response );

        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        AL_EXIT( AL_DBG_SMI );
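/*
 * For self addressed directed route SMPs (hop_count == 0), the hop pointer
 * is presumably incremented before the MAD is handed to ib_local_mad() and
 * decremented again on the request and response afterwards, mirroring the
 * adjustment a real directed route traversal would perform.
 */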
    /* Check the completion status of this simulated send. */
    if( p_mad_response_buf )
        /*
         * The SMI uses PnP polling to refresh the base_lid and lmc.
         * Polling takes time, so we update the values here to prevent
         * the failure of LID routed MADs sent immediately following this
         * assignment.  Check the response to see if the port info was set.
         */
        ib_port_info_t* p_port_info = NULL;

        switch( p_mad_response_buf->mgmt_class )
        case IB_MCLASS_SUBN_DIR:
            if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
                ( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )
                p_port_info =
                    (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );

        case IB_MCLASS_SUBN_LID:
            if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
                ( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )
                p_port_info =
                    (ib_port_info_t*)( p_mad_response_buf + 1 );

        p_spl_qp_svc->base_lid = p_port_info->base_lid;
        p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
        if (p_port_info->subnet_timeout & 0x80)
            AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
                ("Client reregister event, setting sm_lid to 0.\n"));
            ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);
            p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->
                p_port_attr->sm_lid = 0;
            ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);

    status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );

    /* If the SMP was a Get, no need to trigger a PnP poll. */
    if( status == IB_SUCCESS && !smp_is_set )
        status = IB_NOT_DONE;

    AL_EXIT( AL_DBG_SMI );
/*
 * Asynchronous processing thread callback to send a local MAD.
 */
send_local_mad_cb(
    IN cl_async_proc_item_t* p_item )

    spl_qp_svc_t* p_spl_qp_svc;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI_CB );

    CL_ASSERT( p_item );
    p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );

    /* Process a local MAD send work request. */
    CL_ASSERT( p_spl_qp_svc->local_mad_wr );
    status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );

    /*
     * If we successfully processed a local MAD, it could have changed
     * something (e.g. the LID) on the HCA.  Scan for changes.
     */
    if( status == IB_SUCCESS )

    /*
     * Clear the local MAD pointer to allow processing of other MADs.
     * This is done after polling for attribute changes to ensure that
     * subsequent MADs pick up any changes performed by this one.
     */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->local_mad_wr = NULL;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    /* Continue processing any queued MADs on the QP. */
    special_qp_resume_sends( p_spl_qp_svc->h_qp );

    /* No longer in use by the asynchronous processing thread. */
    cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

    AL_EXIT( AL_DBG_SMI );
/*
 * Special QP send completion callback.
 */
spl_qp_send_comp_cb(
    IN const ib_cq_handle_t h_cq,
    IN void* cq_context )

    spl_qp_svc_t* p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI_CB );

    CL_ASSERT( cq_context );
    p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

    /* Queue an asynchronous processing item to process sends. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    if( !p_spl_qp_svc->send_async_queued )
        p_spl_qp_svc->send_async_queued = TRUE;
        ref_al_obj( &p_spl_qp_svc->obj );
        cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

    /* Invoke the callback directly. */
    CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );
    spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );

    /* Continue processing any queued MADs on the QP. */
    special_qp_resume_sends( p_spl_qp_svc->h_qp );

#endif

    AL_EXIT( AL_DBG_SMI );
#if defined( CL_USE_MUTEX )
spl_qp_send_async_cb(
    IN cl_async_proc_item_t* p_item )

    spl_qp_svc_t* p_spl_qp_svc;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI_CB );

    CL_ASSERT( p_item );
    p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

    /* Reset asynchronous queue flag. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->send_async_queued = FALSE;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );

    /* Continue processing any queued MADs on the QP. */
    status = special_qp_resume_sends( p_spl_qp_svc->h_qp );
    CL_ASSERT( status == IB_SUCCESS );

    deref_al_obj( &p_spl_qp_svc->obj );

    AL_EXIT( AL_DBG_SMI );
#endif
/*
 * Special QP receive completion callback.
 */
spl_qp_recv_comp_cb(
    IN const ib_cq_handle_t h_cq,
    IN void* cq_context )

    spl_qp_svc_t* p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( cq_context );
    p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

    /* Queue an asynchronous processing item to process receives. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    if( !p_spl_qp_svc->recv_async_queued )
        p_spl_qp_svc->recv_async_queued = TRUE;
        ref_al_obj( &p_spl_qp_svc->obj );
        cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

    CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );
    spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );

#endif

    AL_EXIT( AL_DBG_SMI );
#if defined( CL_USE_MUTEX )
spl_qp_recv_async_cb(
    IN cl_async_proc_item_t* p_item )

    spl_qp_svc_t* p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_item );
    p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

    /* Reset asynchronous queue flag. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->recv_async_queued = FALSE;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );

    deref_al_obj( &p_spl_qp_svc->obj );

    AL_EXIT( AL_DBG_SMI );
#endif
/*
 * Special QP completion handler.
 */
    IN spl_qp_svc_t* p_spl_qp_svc,
    IN const ib_cq_handle_t h_cq,
    IN ib_wc_type_t wc_type )

    ib_wc_t* p_free_wc = &wc;
    ib_wc_t* p_done_wc;
    al_mad_wr_t* p_mad_wr;
    al_mad_element_t* p_al_mad;
    ib_mad_element_t* p_mad_element;

    ib_api_status_t status;

    AL_ENTER( AL_DBG_SMI_CB );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( h_cq );

    /* Check the QP state and guard against error handling. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    /* Process work completions. */
    while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
        /* Process completions one at a time. */
        CL_ASSERT( p_done_wc );

        /* Flushed completions are handled elsewhere. */
        if( wc.status == IB_WCS_WR_FLUSHED_ERR )

        /*
         * Process the work completion.  Per IBA specification, the
         * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
         * Use the wc_type parameter.
         */
        /* Get a pointer to the MAD work request. */
        p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);

        /* Remove the MAD work request from the service tracking queue. */
        cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
        cl_qlist_remove_item( &p_spl_qp_svc->send_queue,
            &p_mad_wr->list_item );
        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        /* Reset directed route SMPs as required by IBA. */
        p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );
        if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
            if( ib_smp_is_response( p_smp ) )

        /* Report the send completion to the dispatcher. */
        mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );

        /* Initialize pointers to the MAD element. */
        p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
        p_mad_element = &p_al_mad->element;

        /* Remove the AL MAD element from the service tracking list. */
        cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

        cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
            &p_al_mad->list_item );

        /* Replenish the receive buffer. */
        spl_qp_svc_post_recvs( p_spl_qp_svc );
        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        /* Construct the MAD element from the receive work completion. */
        build_mad_recv( p_mad_element, &wc );

        /* Process the received MAD. */
        status = process_mad_recv( p_spl_qp_svc, p_mad_element );

        /* Discard this MAD on error. */
        if( status != IB_SUCCESS )
            status = ib_put_mad( p_mad_element );
            CL_ASSERT( status == IB_SUCCESS );

        CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );

        if( wc.status != IB_WCS_SUCCESS )
            AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("special QP completion error: %s! internal syndrome 0x%I64x\n",
                ib_get_wc_status_str( wc.status ), wc.vendor_specific) );

            /* Reset the special QP service and return. */
            spl_qp_svc_reset( p_spl_qp_svc );

    /* Rearm the CQ. */
    status = ib_rearm_cq( h_cq, FALSE );
    CL_ASSERT( status == IB_SUCCESS );

    cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
    AL_EXIT( AL_DBG_SMI_CB );
2173 * Process a received MAD.
\r
2177 IN spl_qp_svc_t* p_spl_qp_svc,
\r
2178 IN ib_mad_element_t* p_mad_element )
\r
ib_smp_t* p_smp;
mad_route_t route;
\r
2182 ib_api_status_t status;
\r
2184 AL_ENTER( AL_DBG_SMI );
\r
2186 CL_ASSERT( p_spl_qp_svc );
\r
2187 CL_ASSERT( p_mad_element );
\r
/*
 * If the CA has a HW agent then this MAD should have been
 * consumed below verbs.  The fact that it was received here
 * indicates that it should be forwarded to the dispatcher
 * for delivery to a class manager.  Otherwise, determine how
 * the MAD should be routed.
 */
\r
2196 route = ROUTE_DISPATCHER;
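/*
 * Editorial note: ROUTE_DISPATCHER is the default.  The class switch
 * below runs only when check_local_mad() indicates that this QP needs
 * local (software) MAD processing, i.e. no hardware agent consumed the
 * MAD below verbs.
 */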
\r
2197 if( check_local_mad( p_spl_qp_svc->h_qp ) )
\r
/*
 * SMP and GMP processing is branched here to handle overlaps
 * between class methods and attributes.
 */
\r
2203 switch( p_mad_element->p_mad_buf->mgmt_class )
\r
2205 case IB_MCLASS_SUBN_DIR:
\r
2206 /* Perform special checks on directed route SMPs. */
\r
2207 p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;
\r
2209 if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
\r
2210 ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
\r
2212 route = ROUTE_DISCARD;
\r
2214 else if( ib_smp_is_response( p_smp ) )
\r
/*
 * This node is the destination of the response.  Discard it
 * if the source LID or hop pointer is incorrect.
 */
\r
2220 if( p_smp->dr_slid == IB_LID_PERMISSIVE )
\r
2222 if( p_smp->hop_ptr == 1 )
\r
2224 p_smp->hop_ptr--; /* Adjust ptr per IBA spec. */
\r
2228 route = ROUTE_DISCARD;
\r
2231 else if( ( p_smp->dr_slid < p_spl_qp_svc->base_lid ) ||
\r
2232 ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
\r
2233 ( 1 << p_spl_qp_svc->lmc ) ) )
\r
2235 route = ROUTE_DISCARD;
\r
/*
 * This node is the destination of the request.  Discard it
 * if the destination LID or hop pointer is incorrect.
 */
\r
2244 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
\r
2246 if( p_smp->hop_count == p_smp->hop_ptr )
\r
2248 p_smp->return_path[ p_smp->hop_ptr++ ] =
\r
2249 p_spl_qp_svc->port_num; /* Set path per IBA spec. */
\r
2253 route = ROUTE_DISCARD;
\r
2256 else if( ( p_smp->dr_dlid < p_spl_qp_svc->base_lid ) ||
\r
2257 ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
\r
2258 ( 1 << p_spl_qp_svc->lmc ) ) )
\r
2260 route = ROUTE_DISCARD;
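/*
 * Editorial note: a port with LID Mask Control (LMC) responds to the
 * LID range [ base_lid, base_lid + 2^lmc ).  For example (illustrative
 * values only), base_lid 0x10 with lmc 2 covers LIDs 0x10 through 0x13,
 * which is what the dr_slid/dr_dlid range checks above implement.
 */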
\r
2264 if( route == ROUTE_DISCARD ) break;
\r
/* else fall through to the next case */
\r
2267 case IB_MCLASS_SUBN_LID:
\r
2268 route = route_recv_smp( p_mad_element );
\r
2271 case IB_MCLASS_PERF:
\r
2272 route = ROUTE_LOCAL;
\r
2275 case IB_MCLASS_BM:
\r
2276 route = route_recv_gmp( p_mad_element );
\r
2280 /* Route vendor specific MADs to the HCA provider. */
\r
2281 if( ib_class_is_vendor_specific(
\r
2282 p_mad_element->p_mad_buf->mgmt_class ) )
\r
2284 route = route_recv_gmp( p_mad_element );
\r
2290 /* Route the MAD. */
\r
2291 if ( is_discard( route ) )
\r
2292 status = IB_ERROR;
\r
2293 else if( is_dispatcher( route ) )
\r
2294 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
\r
2295 else if( is_remote( route ) )
\r
2296 status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
\r
2298 status = recv_local_mad( p_spl_qp_svc, p_mad_element );
\r
2300 AL_EXIT( AL_DBG_SMI );
\r
2307 * Route a received SMP.
\r
2311 IN ib_mad_element_t* p_mad_element )
\r
2313 mad_route_t route;
\r
2315 AL_ENTER( AL_DBG_SMI );
\r
2317 CL_ASSERT( p_mad_element );
\r
2319 /* Process the received SMP. */
\r
2320 switch( p_mad_element->p_mad_buf->method )
\r
2322 case IB_MAD_METHOD_GET:
\r
2323 case IB_MAD_METHOD_SET:
\r
2324 route = route_recv_smp_attr( p_mad_element );
\r
2327 case IB_MAD_METHOD_TRAP:
\r
/*
 * Special check to route locally generated traps to the remote SM.
 * Distinguished from other receives by the p_wc->recv.ud.recv_opt
 * IB_RECV_OPT_FORWARD flag.
 *
 * Note that because forwarded traps use AL MAD services, the upper
 * 32-bits of the TID are reserved by the access layer.  When matching
 * a Trap Repress MAD, the SMA must only use the lower 32-bits of the
 * TID.
 */
\r
2338 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?
\r
2339 ROUTE_REMOTE : ROUTE_DISPATCHER;
\r
2342 case IB_MAD_METHOD_TRAP_REPRESS:
\r
/*
 * Note that because forwarded traps use AL MAD services, the upper
 * 32-bits of the TID are reserved by the access layer.  When matching
 * a Trap Repress MAD, the SMA must only use the lower 32-bits of the
 * TID.
 */
\r
2349 route = ROUTE_LOCAL;
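/*
 * Illustrative sketch (editorial, not part of the original source):
 * since the access layer owns the upper 32 bits of the TID for
 * forwarded traps, a Trap Repress is matched to its Trap using only
 * the client half of the TID, conceptually:
 *
 *     match = ( lower_32_bits( repress_tid ) == lower_32_bits( trap_tid ) );
 *
 * lower_32_bits() and the variable names here are hypothetical.
 */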
\r
2353 route = ROUTE_DISPATCHER;
\r
2357 AL_EXIT( AL_DBG_SMI );
\r
2364 * Route received SMP attributes.
\r
2367 route_recv_smp_attr(
\r
2368 IN ib_mad_element_t* p_mad_element )
\r
2370 mad_route_t route;
\r
2372 AL_ENTER( AL_DBG_SMI );
\r
2374 CL_ASSERT( p_mad_element );
\r
2376 /* Process the received SMP attributes. */
\r
2377 switch( p_mad_element->p_mad_buf->attr_id )
\r
2379 case IB_MAD_ATTR_NODE_DESC:
\r
2380 case IB_MAD_ATTR_NODE_INFO:
\r
2381 case IB_MAD_ATTR_GUID_INFO:
\r
2382 case IB_MAD_ATTR_PORT_INFO:
\r
2383 case IB_MAD_ATTR_P_KEY_TABLE:
\r
2384 case IB_MAD_ATTR_SLVL_TABLE:
\r
2385 case IB_MAD_ATTR_VL_ARBITRATION:
\r
2386 case IB_MAD_ATTR_VENDOR_DIAG:
\r
2387 case IB_MAD_ATTR_LED_INFO:
\r
2388 route = ROUTE_LOCAL;
\r
2392 route = ROUTE_DISPATCHER;
\r
2396 AL_EXIT( AL_DBG_SMI );
\r
2402 * Route a received GMP.
\r
2406 IN ib_mad_element_t* p_mad_element )
\r
2408 mad_route_t route;
\r
2410 AL_ENTER( AL_DBG_SMI );
\r
2412 CL_ASSERT( p_mad_element );
\r
2414 /* Process the received GMP. */
\r
2415 switch( p_mad_element->p_mad_buf->method )
\r
2417 case IB_MAD_METHOD_GET:
\r
2418 case IB_MAD_METHOD_SET:
\r
2419 /* Route vendor specific MADs to the HCA provider. */
\r
2420 if( ib_class_is_vendor_specific(
\r
2421 p_mad_element->p_mad_buf->mgmt_class ) )
\r
2423 route = ROUTE_LOCAL;
\r
2427 route = route_recv_gmp_attr( p_mad_element );
\r
2432 route = ROUTE_DISPATCHER;
\r
2436 AL_EXIT( AL_DBG_SMI );
\r
2443 * Route received GMP attributes.
\r
2446 route_recv_gmp_attr(
\r
2447 IN ib_mad_element_t* p_mad_element )
\r
2449 mad_route_t route;
\r
2451 AL_ENTER( AL_DBG_SMI );
\r
2453 CL_ASSERT( p_mad_element );
\r
2455 /* Process the received GMP attributes. */
\r
2456 if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )
\r
2457 route = ROUTE_LOCAL;
\r
2459 route = ROUTE_DISPATCHER;
\r
2461 AL_EXIT( AL_DBG_SMI );
\r
2468 * Forward a locally generated Subnet Management trap.
\r
2472 IN spl_qp_svc_t* p_spl_qp_svc,
\r
2473 IN ib_mad_element_t* p_mad_element )
\r
2475 ib_av_attr_t av_attr;
\r
2476 ib_api_status_t status;
\r
2478 AL_ENTER( AL_DBG_SMI_CB );
\r
2480 CL_ASSERT( p_spl_qp_svc );
\r
2481 CL_ASSERT( p_mad_element );
\r
2483 /* Check the SMP class. */
\r
2484 if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )
\r
/*
 * Per IBA Specification Release 1.1 Section 14.2.2.1,
 * "C14-5: Only a SM shall originate a directed route SMP."
 * Therefore all traps should be LID routed; drop this one.
 */
\r
2491 AL_EXIT( AL_DBG_SMI_CB );
\r
2495 /* Create an address vector for the SM. */
\r
2496 cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
\r
2497 av_attr.port_num = p_spl_qp_svc->port_num;
\r
2498 av_attr.sl = p_mad_element->remote_sl;
\r
2499 av_attr.dlid = p_mad_element->remote_lid;
\r
2500 if( p_mad_element->grh_valid )
\r
2502 cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );
\r
2503 av_attr.grh.src_gid = p_mad_element->p_grh->dest_gid;
\r
2504 av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;
\r
2505 av_attr.grh_valid = TRUE;
\r
2508 status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
\r
2509 &av_attr, &p_mad_element->h_av );
\r
2511 if( status != IB_SUCCESS )
\r
2513 AL_EXIT( AL_DBG_SMI_CB );
\r
2517 /* Complete the initialization of the MAD element. */
\r
2518 p_mad_element->p_next = NULL;
\r
2519 p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
\r
2520 p_mad_element->resp_expected = FALSE;
\r
2522 /* Clear context1 for proper send completion callback processing. */
\r
2523 p_mad_element->context1 = NULL;
\r
/*
 * Forward the trap.  Note that because forwarded traps use AL MAD
 * services, the upper 32-bits of the TID are reserved by the access
 * layer.  When matching a Trap Repress MAD, the SMA must only use
 * the lower 32-bits of the TID.
 */
\r
2531 status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );
\r
2533 if( status != IB_SUCCESS )
\r
2534 ib_destroy_av( p_mad_element->h_av );
\r
2536 AL_EXIT( AL_DBG_SMI_CB );
\r
2542 * Process a locally routed MAD received from the special QP.
\r
2546 IN spl_qp_svc_t* p_spl_qp_svc,
\r
2547 IN ib_mad_element_t* p_mad_request )
\r
2549 ib_mad_t* p_mad_hdr;
\r
2550 ib_api_status_t status;
\r
2552 AL_ENTER( AL_DBG_SMI_CB );
\r
2554 CL_ASSERT( p_spl_qp_svc );
\r
2555 CL_ASSERT( p_mad_request );
\r
2557 /* Initialize the MAD element. */
\r
2558 p_mad_hdr = ib_get_mad_buf( p_mad_request );
\r
2559 p_mad_request->context1 = p_mad_request;
\r
2561 /* Save the TID. */
\r
2562 p_mad_request->context2 =
\r
2563 (void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );
\r
/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
\r
2569 #pragma warning( push, 3 )
\r
2570 al_set_al_tid( &p_mad_hdr->trans_id, 0 );
\r
2571 #pragma warning( pop )
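/*
 * Editorial summary of the TID round trip (all steps are visible in
 * this file): the AL portion of the TID is stashed in context2 above,
 * cleared before the MAD is sent to the local CA, and restored from
 * send_context2 in spl_qp_alias_recv_cb() so the response matches on
 * the remote side:
 *
 *     save:    context2 = al_get_al_tid( trans_id )
 *     clear:   al_set_al_tid( &trans_id, 0 )
 *     restore: al_set_al_tid( &trans_id, (uint32_t)(uintn_t)send_context2 )
 */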
\r
/*
 * We need to get a response from the local HCA to this MAD only if this
 * MAD is not itself a response.
 */
\r
2577 p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
\r
2578 ( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
\r
2579 p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
\r
2580 p_mad_request->send_opt = IB_SEND_OPT_LOCAL;
\r
2582 /* Send the locally addressed MAD request to the CA for processing. */
\r
2583 status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );
\r
2585 AL_EXIT( AL_DBG_SMI_CB );
\r
2592 * Special QP alias send completion callback.
\r
2595 spl_qp_alias_send_cb(
\r
2596 IN ib_mad_svc_handle_t h_mad_svc,
\r
2597 IN void* mad_svc_context,
\r
2598 IN ib_mad_element_t* p_mad_element )
\r
2600 ib_api_status_t status;
\r
2602 AL_ENTER( AL_DBG_SMI_CB );
\r
2604 UNUSED_PARAM( h_mad_svc );
\r
2605 UNUSED_PARAM( mad_svc_context );
\r
2606 CL_ASSERT( p_mad_element );
\r
2608 if( p_mad_element->h_av )
\r
2610 status = ib_destroy_av( p_mad_element->h_av );
\r
2611 CL_ASSERT( status == IB_SUCCESS );
\r
2614 status = ib_put_mad( p_mad_element );
\r
2615 CL_ASSERT( status == IB_SUCCESS );
\r
2617 AL_EXIT( AL_DBG_SMI_CB );
\r
2623 * Special QP alias receive completion callback.
\r
2626 spl_qp_alias_recv_cb(
\r
2627 IN ib_mad_svc_handle_t h_mad_svc,
\r
2628 IN void* mad_svc_context,
\r
2629 IN ib_mad_element_t* p_mad_response )
\r
2631 spl_qp_svc_t* p_spl_qp_svc;
\r
2632 ib_mad_element_t* p_mad_request;
\r
2633 ib_mad_t* p_mad_hdr;
\r
2634 ib_av_attr_t av_attr;
\r
2635 ib_api_status_t status;
\r
2637 AL_ENTER( AL_DBG_SMI_CB );
\r
2639 CL_ASSERT( mad_svc_context );
\r
2640 CL_ASSERT( p_mad_response );
\r
2641 CL_ASSERT( p_mad_response->send_context1 );
\r
2643 /* Initialize pointers. */
\r
2644 p_spl_qp_svc = mad_svc_context;
\r
2645 p_mad_request = p_mad_response->send_context1;
\r
2646 p_mad_hdr = ib_get_mad_buf( p_mad_response );
\r
2648 /* Restore the TID, so it will match on the remote side. */
\r
2649 #pragma warning( push, 3 )
\r
2650 al_set_al_tid( &p_mad_hdr->trans_id,
\r
2651 (uint32_t)(uintn_t)p_mad_response->send_context2 );
\r
2652 #pragma warning( pop )
\r
2654 /* Set the remote QP. */
\r
2655 p_mad_response->remote_qp = p_mad_request->remote_qp;
\r
2656 p_mad_response->remote_qkey = p_mad_request->remote_qkey;
\r
2658 /* Prepare to create an address vector. */
\r
2659 cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
\r
2660 av_attr.port_num = p_spl_qp_svc->port_num;
\r
2661 av_attr.sl = p_mad_request->remote_sl;
\r
2662 av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
\r
2663 av_attr.path_bits = p_mad_request->path_bits;
\r
2664 if( p_mad_request->grh_valid )
\r
2666 cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );
\r
2667 av_attr.grh.src_gid = p_mad_request->p_grh->dest_gid;
\r
2668 av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;
\r
2669 av_attr.grh_valid = TRUE;
\r
2671 if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&
\r
2672 ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )
\r
2673 av_attr.dlid = IB_LID_PERMISSIVE;
\r
2675 av_attr.dlid = p_mad_request->remote_lid;
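/*
 * Editorial note: per the branch above, a directed-route response to a
 * request that arrived with a permissive dr_dlid is returned using the
 * permissive LID; otherwise the requester's LID from the original
 * receive is used as the destination of the response AV.
 */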
\r
2677 /* Create an address vector. */
\r
2678 status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
\r
2679 &av_attr, &p_mad_response->h_av );
\r
2681 if( status != IB_SUCCESS )
\r
2683 ib_put_mad( p_mad_response );
\r
2685 AL_EXIT( AL_DBG_SMI );
\r
2689 /* Send the response. */
\r
2690 status = ib_send_mad( h_mad_svc, p_mad_response, NULL );
\r
2692 if( status != IB_SUCCESS )
\r
2694 ib_destroy_av( p_mad_response->h_av );
\r
2695 ib_put_mad( p_mad_response );
\r
2698 AL_EXIT( AL_DBG_SMI_CB );
\r
2704 * Post receive buffers to a special QP.
\r
2706 static ib_api_status_t
\r
2707 spl_qp_svc_post_recvs(
\r
2708 IN spl_qp_svc_t* const p_spl_qp_svc )
\r
2710 ib_mad_element_t* p_mad_element;
\r
2711 al_mad_element_t* p_al_element;
\r
2712 ib_recv_wr_t recv_wr;
\r
2713 ib_api_status_t status = IB_SUCCESS;
\r
2715 /* Attempt to post receive buffers up to the max_qp_depth limit. */
\r
2716 while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <
\r
2717 (int32_t)p_spl_qp_svc->max_qp_depth )
\r
2719 /* Get a MAD element from the pool. */
\r
2720 status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,
\r
2721 MAD_BLOCK_SIZE, &p_mad_element );
\r
2723 if( status != IB_SUCCESS ) break;
\r
p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
	element );
\r
2728 /* Build the receive work request. */
\r
2729 recv_wr.p_next = NULL;
\r
2730 recv_wr.wr_id = (uintn_t)p_al_element;
\r
2731 recv_wr.num_ds = 1;
\r
2732 recv_wr.ds_array = &p_al_element->grh_ds;
\r
2734 /* Queue the receive on the service tracking list. */
\r
2735 cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,
\r
2736 &p_al_element->list_item );
\r
2738 /* Post the receive. */
\r
2739 status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );
\r
2741 if( status != IB_SUCCESS )
\r
2743 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
2744 ("Failed to post receive %016I64x\n",
\r
2745 (LONG_PTR)p_al_element) );
\r
2746 cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
\r
2747 &p_al_element->list_item );
\r
2749 ib_put_mad( p_mad_element );
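/*
 * Editorial usage note: callers invoke spl_qp_svc_post_recvs() while
 * holding the service object lock (see spl_qp_comp and smi_post_recvs),
 * e.g.:
 *
 *     cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
 *     spl_qp_svc_post_recvs( p_spl_qp_svc );
 *     cl_spinlock_release( &p_spl_qp_svc->obj.lock );
 */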
\r
2760 * Special QP service asynchronous event callback.
\r
2763 spl_qp_svc_event_cb(
\r
2764 IN ib_async_event_rec_t *p_event_rec )
\r
2766 spl_qp_svc_t* p_spl_qp_svc;
\r
2768 AL_ENTER( AL_DBG_SMI_CB );
\r
2770 CL_ASSERT( p_event_rec );
\r
2771 CL_ASSERT( p_event_rec->context );
\r
2773 if( p_event_rec->code == IB_AE_SQ_DRAINED )
\r
2775 AL_EXIT( AL_DBG_SMI );
\r
2779 p_spl_qp_svc = p_event_rec->context;
\r
2781 spl_qp_svc_reset( p_spl_qp_svc );
\r
2783 AL_EXIT( AL_DBG_SMI_CB );
\r
2789 * Special QP service reset.
\r
2793 IN spl_qp_svc_t* p_spl_qp_svc )
\r
2795 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
\r
2797 if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
\r
2799 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2803 /* Change the special QP service to the error state. */
\r
2804 p_spl_qp_svc->state = SPL_QP_ERROR;
\r
2806 /* Flag the service as in use by the asynchronous processing thread. */
\r
2807 cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
\r
2809 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2811 /* Queue an asynchronous processing item to reset the special QP. */
\r
2812 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );
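/*
 * Editorial note: the in_use_cnt reference taken above pairs with the
 * wait loop in spl_qp_svc_reset_cb(), which spins until this thread is
 * the only remaining user before resetting the QP:
 *
 *     while( p_spl_qp_svc->in_use_cnt != 1 )
 *         cl_thread_suspend( 0 );
 */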
\r
2818 * Asynchronous processing thread callback to reset the special QP service.
\r
2821 spl_qp_svc_reset_cb(
\r
2822 IN cl_async_proc_item_t* p_item )
\r
2824 spl_qp_svc_t* p_spl_qp_svc;
\r
2825 cl_list_item_t* p_list_item;
\r
ib_wc_t wc;
ib_wc_t* p_free_wc;
\r
2828 ib_wc_t* p_done_wc;
\r
2829 al_mad_wr_t* p_mad_wr;
\r
2830 al_mad_element_t* p_al_mad;
\r
2831 ib_qp_mod_t qp_mod;
\r
2832 ib_api_status_t status;
\r
2833 cl_qlist_t mad_wr_list;
\r
2835 AL_ENTER( AL_DBG_SMI_CB );
\r
2837 CL_ASSERT( p_item );
\r
2838 p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );
\r
2840 /* Wait here until the special QP service is only in use by this thread. */
\r
2841 while( p_spl_qp_svc->in_use_cnt != 1 )
\r
2843 cl_thread_suspend( 0 );
\r
2846 /* Change the QP to the RESET state. */
\r
2847 cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
\r
2848 qp_mod.req_state = IB_QPS_RESET;
\r
2850 status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );
\r
2851 CL_ASSERT( status == IB_SUCCESS );
\r
2853 /* Return receive MAD elements to the pool. */
\r
2854 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
\r
2855 for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
\r
2856 p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
\r
2857 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
\r
2859 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );
\r
2861 status = ib_put_mad( &p_al_mad->element );
\r
2862 CL_ASSERT( status == IB_SUCCESS );
\r
2864 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2866 /* Re-initialize the QP. */
\r
2867 status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
\r
2868 CL_ASSERT( status == IB_SUCCESS );
\r
2870 /* Poll to remove any remaining send completions from the CQ. */
\r
do
{
	cl_memclr( &wc, sizeof( ib_wc_t ) );
	p_free_wc = &wc;
	status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );
\r
2877 } while( status == IB_SUCCESS );
\r
2879 /* Post receive buffers. */
\r
2880 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
\r
2881 spl_qp_svc_post_recvs( p_spl_qp_svc );
\r
2883 /* Re-queue any outstanding MAD send operations. */
\r
2884 cl_qlist_init( &mad_wr_list );
\r
2885 cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue );
\r
2886 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2888 for( p_list_item = cl_qlist_remove_head( &mad_wr_list );
\r
2889 p_list_item != cl_qlist_end( &mad_wr_list );
\r
2890 p_list_item = cl_qlist_remove_head( &mad_wr_list ) )
\r
2892 p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
\r
2893 special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );
\r
2896 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
\r
2897 if( p_spl_qp_svc->state == SPL_QP_ERROR )
\r
2899 /* The QP is ready. Change the state. */
\r
2900 p_spl_qp_svc->state = SPL_QP_ACTIVE;
\r
2901 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2903 /* Re-arm the CQs. */
\r
2904 status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );
\r
2905 CL_ASSERT( status == IB_SUCCESS );
\r
2906 status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );
\r
2907 CL_ASSERT( status == IB_SUCCESS );
\r
2909 /* Resume send processing. */
\r
2910 special_qp_resume_sends( p_spl_qp_svc->h_qp );
\r
2914 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
2917 /* No longer in use by the asynchronous processing thread. */
\r
2918 cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
\r
2920 AL_EXIT( AL_DBG_SMI_CB );
\r
2926 * Special QP alias asynchronous event callback.
\r
2929 spl_qp_alias_event_cb(
\r
2930 IN ib_async_event_rec_t *p_event_rec )
\r
2932 UNUSED_PARAM( p_event_rec );
\r
2938 * Acquire the SMI dispatcher for the given port.
\r
2942 IN const ib_net64_t port_guid,
\r
2943 OUT al_mad_disp_handle_t* const ph_mad_disp )
\r
2945 CL_ASSERT( gp_spl_qp_mgr );
\r
2946 return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );
\r
2952 * Acquire the GSI dispatcher for the given port.
\r
2956 IN const ib_net64_t port_guid,
\r
2957 OUT al_mad_disp_handle_t* const ph_mad_disp )
\r
2959 CL_ASSERT( gp_spl_qp_mgr );
\r
2960 return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );
\r
2966 * Acquire the service dispatcher for the given port.
\r
2970 IN const cl_qmap_t* const p_svc_map,
\r
2971 IN const ib_net64_t port_guid,
\r
2972 OUT al_mad_disp_handle_t *ph_mad_disp )
\r
2974 cl_map_item_t* p_svc_item;
\r
2975 spl_qp_svc_t* p_spl_qp_svc;
\r
2977 AL_ENTER( AL_DBG_SMI );
\r
2979 CL_ASSERT( p_svc_map );
\r
2980 CL_ASSERT( gp_spl_qp_mgr );
\r
2982 /* Search for the SMI or GSI service for the given port. */
\r
2983 cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
\r
2984 p_svc_item = cl_qmap_get( p_svc_map, port_guid );
\r
2985 cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );
\r
2986 if( p_svc_item == cl_qmap_end( p_svc_map ) )
\r
2988 /* The port does not have an active agent. */
\r
2989 AL_EXIT( AL_DBG_SMI );
\r
2990 return IB_INVALID_GUID;
\r
2993 p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );
\r
2995 /* Found a match. Get MAD dispatcher handle. */
\r
2996 *ph_mad_disp = p_spl_qp_svc->h_mad_disp;
\r
2998 /* Reference the MAD dispatcher on behalf of the client. */
\r
2999 ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );
\r
3001 AL_EXIT( AL_DBG_SMI );
\r
3002 return IB_SUCCESS;
\r
3008 * Force a poll for CA attribute changes.
\r
3014 AL_ENTER( AL_DBG_SMI_CB );
\r
/*
 * Stop the poll timer.  Just invoke the timer callback directly
 * to avoid an extra thread context switch.
 */
\r
3020 smi_poll_timer_cb( gp_spl_qp_mgr );
\r
3022 AL_EXIT( AL_DBG_SMI_CB );
\r
3028 * Poll for CA port attribute changes.
\r
3031 smi_poll_timer_cb(
\r
3032 IN void* context )
\r
3034 cl_status_t cl_status;
\r
3036 AL_ENTER( AL_DBG_SMI_CB );
\r
3038 CL_ASSERT( context );
\r
3039 CL_ASSERT( gp_spl_qp_mgr == context );
\r
3040 UNUSED_PARAM( context );
\r
/*
 * Scan for changes on the local HCAs.  Since the PnP manager has its
 * own thread for processing changes, we kick off that thread in
 * parallel with reposting receive buffers to the SQP agents.
 */
\r
/*
 * To handle the case where force_smi_poll is called at the same time
 * the timer expires, check if the asynchronous processing item is in
 * use.  If it is already in use, it means that we're about to poll
 * anyway, so just ignore this call.
 */
\r
3055 cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
\r
3057 /* Perform port processing on the special QP agents. */
\r
3058 cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,
\r
3061 /* Determine if there are any special QP agents to poll. */
\r
3062 if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )
\r
3064 /* Restart the polling timer. */
\r
cl_status = cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );
\r
3067 CL_ASSERT( cl_status == CL_SUCCESS );
\r
3069 cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );
\r
3071 AL_EXIT( AL_DBG_SMI_CB );
\r
3077 * Post receive buffers to a special QP.
\r
3081 IN cl_list_item_t* const p_list_item,
\r
3082 IN void* context )
\r
al_obj_t* p_obj;
spl_qp_svc_t* p_spl_qp_svc;
\r
3087 AL_ENTER( AL_DBG_SMI_CB );
\r
3089 CL_ASSERT( p_list_item );
\r
3090 UNUSED_PARAM( context );
\r
3092 p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
\r
3093 p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );
\r
3095 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
\r
3096 if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
\r
3098 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
3102 spl_qp_svc_post_recvs( p_spl_qp_svc );
\r
3103 cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
3105 AL_EXIT( AL_DBG_SMI );
\r