/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <iba/ib_al.h>
#include <complib/cl_timer.h>

#include "ib_common.h"
#include "al_common.h"
#include "al_debug.h"

#if defined(EVENT_TRACING)
#include "al_smi.tmh"
#endif

#include "al_verbs.h"
extern char                 node_desc[IB_NODE_DESCRIPTION_SIZE];

#define SMI_POLL_INTERVAL       20000       /* Milliseconds */
#define LOCAL_MAD_TIMEOUT       50          /* Milliseconds */
#define DEFAULT_QP0_DEPTH       256
#define DEFAULT_QP1_DEPTH       1024

uint32_t            g_smi_poll_interval = SMI_POLL_INTERVAL;
spl_qp_mgr_t*       gp_spl_qp_mgr = NULL;
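/*
 * The SMI poll timer periodically forces completion processing on the
 * special QP services (see the g_smi_poll_interval check and cl_timer_trim
 * call in create_spl_qp_svc).  Setting g_smi_poll_interval to zero leaves
 * the timer unarmed and disables polling.
 */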
\r
/*
 * Function prototypes.
 */
static void
destroying_spl_qp_mgr(
    IN              al_obj_t*                   p_obj );

static void
free_spl_qp_mgr(
    IN              al_obj_t*                   p_obj );

static ib_api_status_t
spl_qp0_agent_pnp_cb(
    IN              ib_pnp_rec_t*               p_pnp_rec );

static ib_api_status_t
spl_qp1_agent_pnp_cb(
    IN              ib_pnp_rec_t*               p_pnp_rec );

static ib_api_status_t
spl_qp_agent_pnp(
    IN              ib_pnp_rec_t*               p_pnp_rec,
    IN              ib_qp_type_t                qp_type );

static ib_api_status_t
create_spl_qp_svc(
    IN              ib_pnp_port_rec_t*          p_pnp_rec,
    IN      const   ib_qp_type_t                qp_type );

static void
destroying_spl_qp_svc(
    IN              al_obj_t*                   p_obj );

static void
free_spl_qp_svc(
    IN              al_obj_t*                   p_obj );

static void
spl_qp_svc_lid_change(
    IN              al_obj_t*                   p_obj,
    IN              ib_pnp_port_rec_t*          p_pnp_rec );

static ib_api_status_t
remote_mad_send(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr );

static ib_api_status_t
local_mad_send(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr );

static ib_api_status_t
loopback_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr );

static ib_api_status_t
process_subn_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr );

static ib_api_status_t
fwd_local_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr );

static void
send_local_mad_cb(
    IN              cl_async_proc_item_t*       p_item );

static void
spl_qp_send_comp_cb(
    IN      const   ib_cq_handle_t              h_cq,
    IN              void                        *cq_context );

static void
spl_qp_recv_comp_cb(
    IN      const   ib_cq_handle_t              h_cq,
    IN              void                        *cq_context );

static void
spl_qp_comp(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN      const   ib_cq_handle_t              h_cq,
    IN              ib_wc_type_t                wc_type );

static ib_api_status_t
process_mad_recv(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              ib_mad_element_t*           p_mad_element );

static mad_route_t
route_recv_smp(
    IN              ib_mad_element_t*           p_mad_element );

static mad_route_t
route_recv_smp_attr(
    IN              ib_mad_element_t*           p_mad_element );

static mad_route_t
route_recv_dm_mad(
    IN              ib_mad_element_t*           p_mad_element );

static mad_route_t
route_recv_gmp(
    IN              ib_mad_element_t*           p_mad_element );

static mad_route_t
route_recv_gmp_attr(
    IN              ib_mad_element_t*           p_mad_element );

static ib_api_status_t
forward_sm_trap(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              ib_mad_element_t*           p_mad_element );

static ib_api_status_t
recv_local_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              ib_mad_element_t*           p_mad_request );

static void
spl_qp_alias_send_cb(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              void                        *mad_svc_context,
    IN              ib_mad_element_t            *p_mad_element );

static void
spl_qp_alias_recv_cb(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              void                        *mad_svc_context,
    IN              ib_mad_element_t            *p_mad_response );

static ib_api_status_t
spl_qp_svc_post_recvs(
    IN              spl_qp_svc_t*   const       p_spl_qp_svc );

static void
spl_qp_svc_event_cb(
    IN              ib_async_event_rec_t        *p_event_rec );

static void
spl_qp_alias_event_cb(
    IN              ib_async_event_rec_t        *p_event_rec );

static void
spl_qp_svc_reset(
    IN              spl_qp_svc_t*               p_spl_qp_svc );

static void
spl_qp_svc_reset_cb(
    IN              cl_async_proc_item_t*       p_item );

static ib_api_status_t
acquire_svc_disp(
    IN      const   cl_qmap_t* const            p_svc_map,
    IN      const   ib_net64_t                  port_guid,
        OUT         al_mad_disp_handle_t        *ph_mad_disp );

static void
smi_poll_timer_cb(
    IN              void*                       context );

    IN              cl_list_item_t* const       p_list_item,
    IN              void*                       context );

#if defined( CL_USE_MUTEX )
static void
spl_qp_send_async_cb(
    IN              cl_async_proc_item_t*       p_item );

static void
spl_qp_recv_async_cb(
    IN              cl_async_proc_item_t*       p_item );
#endif
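/*
 * Note: when CL_USE_MUTEX is defined, the send and receive completion
 * callbacks do not process completions inline.  They instead queue
 * spl_qp_send_async_cb/spl_qp_recv_async_cb on the global asynchronous
 * processing manager, and the completion work runs from that thread
 * context (see spl_qp_send_comp_cb and spl_qp_recv_comp_cb below).
 */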
\r
/*
 * Create the special QP manager.
 */
ib_api_status_t
create_spl_qp_mgr(
    IN              al_obj_t*   const           p_parent_obj )
{
    ib_pnp_req_t            pnp_req;
    ib_api_status_t         status;
    cl_status_t             cl_status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_parent_obj );
    CL_ASSERT( !gp_spl_qp_mgr );

    gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
    if( !gp_spl_qp_mgr )
    {
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("IB_INSUFFICIENT_MEMORY\n") );
        return IB_INSUFFICIENT_MEMORY;
    }

    /* Construct the special QP manager. */
    construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
    cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

    /* Initialize the lists. */
    cl_qmap_init( &gp_spl_qp_mgr->smi_map );
    cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

    /* Initialize the global SMI/GSI manager object. */
    status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
        destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
    if( status != IB_SUCCESS )
    {
        free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /* Attach the special QP manager to the parent object. */
    status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
    if( status != IB_SUCCESS )
    {
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
        return status;
    }

    /* Initialize the SMI polling timer. */
    cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
        gp_spl_qp_mgr );
    if( cl_status != CL_SUCCESS )
    {
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("cl_timer_init failed, status 0x%x\n", cl_status ) );
        return ib_convert_cl_status( cl_status );
    }

    /*
     * Note: PnP registrations for port events must be done
     * when the special QP manager is created.  This ensures that
     * the registrations are listed sequentially and the reporting
     * of PnP events occurs in the proper order.
     */

    /*
     * Separate context is needed for each special QP.  Therefore, a
     * separate PnP event registration is performed for QP0 and QP1.
     */

    /* Register for port PnP events for QP0. */
    cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
    pnp_req.pnp_class = IB_PNP_PORT;
    pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
    pnp_req.pfn_pnp_cb = spl_qp0_agent_pnp_cb;

    status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

    if( status != IB_SUCCESS )
    {
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /* Reference the special QP manager on behalf of the ib_reg_pnp call. */
    ref_al_obj( &gp_spl_qp_mgr->obj );

    /* Register for port PnP events for QP1. */
    cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
    pnp_req.pnp_class = IB_PNP_PORT;
    pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
    pnp_req.pfn_pnp_cb = spl_qp1_agent_pnp_cb;

    status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

    if( status != IB_SUCCESS )
    {
        gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /*
     * Note that we don't release the reference taken in init_al_obj
     * because we need one on behalf of the ib_reg_pnp call.
     */

    AL_EXIT( AL_DBG_SMI );
    return IB_SUCCESS;
}
/*
 * Pre-destroy the special QP manager.
 */
static void
destroying_spl_qp_mgr(
    IN              al_obj_t*                   p_obj )
{
    ib_api_status_t         status;

    CL_ASSERT( p_obj );
    CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
    UNUSED_PARAM( p_obj );

    /* Deregister for port PnP events for QP0. */
    if( gp_spl_qp_mgr->h_qp0_pnp )
    {
        status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );
    }

    /* Deregister for port PnP events for QP1. */
    if( gp_spl_qp_mgr->h_qp1_pnp )
    {
        status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );
    }

    /* Destroy the SMI polling timer. */
    cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );
}
/*
 * Free the special QP manager.
 */
static void
free_spl_qp_mgr(
    IN              al_obj_t*                   p_obj )
{
    CL_ASSERT( p_obj );
    CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
    UNUSED_PARAM( p_obj );

    destroy_al_obj( &gp_spl_qp_mgr->obj );
    cl_free( gp_spl_qp_mgr );
    gp_spl_qp_mgr = NULL;
}
/*
 * Special QP0 agent PnP event callback.
 */
static ib_api_status_t
spl_qp0_agent_pnp_cb(
    IN              ib_pnp_rec_t*               p_pnp_rec )
{
    ib_api_status_t status;
    AL_ENTER( AL_DBG_SMI );

    status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Special QP1 agent PnP event callback.
 */
static ib_api_status_t
spl_qp1_agent_pnp_cb(
    IN              ib_pnp_rec_t*               p_pnp_rec )
{
    ib_api_status_t status;
    AL_ENTER( AL_DBG_SMI );

    status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Special QP agent PnP event callback.
 */
static ib_api_status_t
spl_qp_agent_pnp(
    IN              ib_pnp_rec_t*               p_pnp_rec,
    IN              ib_qp_type_t                qp_type )
{
    ib_api_status_t         status;
    al_obj_t*               p_obj;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_pnp_rec );
    p_obj = p_pnp_rec->context;

    AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI,
        ("p_pnp_rec->pnp_event = 0x%x (%s)\n",
        p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );

    /* Dispatch based on the PnP event type. */
    switch( p_pnp_rec->pnp_event )
    {
    case IB_PNP_PORT_ADD:
        CL_ASSERT( !p_obj );
        status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );
        break;

    case IB_PNP_PORT_REMOVE:
        CL_ASSERT( p_obj );
        ref_al_obj( p_obj );
        p_obj->pfn_destroy( p_obj, NULL );
        status = IB_SUCCESS;
        break;

    case IB_PNP_LID_CHANGE:
        CL_ASSERT( p_obj );
        spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );
        status = IB_SUCCESS;
        break;

    default:
        /* All other events are ignored. */
        status = IB_SUCCESS;
        break;
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Create a special QP service.
 */
static ib_api_status_t
create_spl_qp_svc(
    IN              ib_pnp_port_rec_t*          p_pnp_rec,
    IN      const   ib_qp_type_t                qp_type )
{
    cl_status_t             cl_status;
    spl_qp_svc_t*           p_spl_qp_svc;
    ib_ca_handle_t          h_ca;
    ib_cq_create_t          cq_create;
    ib_qp_create_t          qp_create;
    ib_qp_attr_t            qp_attr;
    ib_mad_svc_t            mad_svc;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_pnp_rec );

    if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )
    {
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
        return IB_INVALID_PARAMETER;
    }

    CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );
    CL_ASSERT( p_pnp_rec->p_ca_attr );
    CL_ASSERT( p_pnp_rec->p_port_attr );

    p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );
    if( !p_spl_qp_svc )
    {
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("IB_INSUFFICIENT_MEMORY\n") );
        return IB_INSUFFICIENT_MEMORY;
    }

    /* Tie the special QP service to the port by setting the port number. */
    p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;
    /* Store the port GUID to allow faster lookups of the dispatchers. */
    p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;

    /* Initialize the send and receive queues. */
    cl_qlist_init( &p_spl_qp_svc->send_queue );
    cl_qlist_init( &p_spl_qp_svc->recv_queue );

#if defined( CL_USE_MUTEX )
    /* Initialize async callbacks and flags for send/receive processing. */
    p_spl_qp_svc->send_async_queued = FALSE;
    p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;
    p_spl_qp_svc->recv_async_queued = FALSE;
    p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;
#endif

    /* Initialize the async callback function to process local sends. */
    p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;

    /* Initialize the async callback function to reset the QP on error. */
    p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;

    /* Construct the special QP service object. */
    construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );

    /* Initialize the special QP service object. */
    status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,
        destroying_spl_qp_svc, NULL, free_spl_qp_svc );
    if( status != IB_SUCCESS )
    {
        free_spl_qp_svc( &p_spl_qp_svc->obj );
        return status;
    }

    /* Attach the special QP service to the parent object. */
    status = attach_al_obj(
        (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );
    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
        return status;
    }

    h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );
    if( !h_ca )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );
        return IB_INVALID_GUID;
    }

    p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;

    /* Determine the maximum queue depth of the QP and CQs. */
    p_spl_qp_svc->max_qp_depth =
        ( p_pnp_rec->p_ca_attr->max_wrs <
        p_pnp_rec->p_ca_attr->max_cqes ) ?
        p_pnp_rec->p_ca_attr->max_wrs :
        p_pnp_rec->p_ca_attr->max_cqes;

    /* Compare this maximum to the default special queue depth. */
    if( ( qp_type == IB_QPT_QP0 ) &&
        ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )
        p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;
    if( ( qp_type == IB_QPT_QP1 ) &&
        ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )
        p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;
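    /*
     * At this point max_qp_depth is the smaller of the CA's maximum
     * outstanding work requests and maximum CQ entries, capped at
     * DEFAULT_QP0_DEPTH (256) for QP0 or DEFAULT_QP1_DEPTH (1024) for QP1.
     * The same depth is used below for the send CQ, the receive CQ, and
     * both QP work queues.
     */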
\r
    /* Create the send CQ. */
    cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
    cq_create.size = p_spl_qp_svc->max_qp_depth;
    cq_create.pfn_comp_cb = spl_qp_send_comp_cb;

    status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,
        p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );

    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /* Reference the special QP service on behalf of ib_create_cq. */
    ref_al_obj( &p_spl_qp_svc->obj );

    /* Check the result of the creation request. */
    if( cq_create.size < p_spl_qp_svc->max_qp_depth )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq allocated insufficient send CQ size\n") );
        return IB_INSUFFICIENT_RESOURCES;
    }

    /* Create the receive CQ. */
    cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
    cq_create.size = p_spl_qp_svc->max_qp_depth;
    cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;

    status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,
        p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );

    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /* Reference the special QP service on behalf of ib_create_cq. */
    ref_al_obj( &p_spl_qp_svc->obj );

    /* Check the result of the creation request. */
    if( cq_create.size < p_spl_qp_svc->max_qp_depth )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_create_cq allocated insufficient recv CQ size\n") );
        return IB_INSUFFICIENT_RESOURCES;
    }

    /* Create the special QP. */
    cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
    qp_create.qp_type = qp_type;
    qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;
    qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;
    qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */
    qp_create.rq_sge = 1;
    qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;
    qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;
    qp_create.sq_signaled = TRUE;

    status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,
        p_pnp_rec->p_port_attr->port_guid, &qp_create,
        p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );

    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /* Reference the special QP service on behalf of ib_get_spl_qp. */
    ref_al_obj( &p_spl_qp_svc->obj );

    /* Check the result of the creation request. */
    status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );
    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||
        ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||
        ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_get_spl_qp allocated attributes are insufficient\n") );
        return IB_INSUFFICIENT_RESOURCES;
    }

    /* Initialize the QP for use. */
    status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /* Post receive buffers. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    status = spl_qp_svc_post_recvs( p_spl_qp_svc );
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );
    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("spl_qp_svc_post_recvs failed, %s\n",
            ib_get_err_str( status ) ) );
        return status;
    }

    /* Create the MAD dispatcher. */
    status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,
        &p_spl_qp_svc->h_mad_disp );
    if( status != IB_SUCCESS )
    {
        p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );
        return status;
    }

    /*
     * Add this service to the special QP manager lookup lists.
     * The service must be added to allow the creation of a QP alias.
     */
    cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
    if( qp_type == IB_QPT_QP0 )
    {
        cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,
            &p_spl_qp_svc->map_item );
    }
    else
    {
        cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,
            &p_spl_qp_svc->map_item );
    }
    cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

    /*
     * If the CA does not support HW agents, create a QP alias and register
     * a MAD service for sending responses from the local MAD interface.
     */
    if( check_local_mad( p_spl_qp_svc->h_qp ) )
    {
        /* Create a QP alias. */
        cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
        qp_create.qp_type =
            ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;
        qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;
        qp_create.sq_sge = 1;
        qp_create.sq_signaled = TRUE;

        status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,
            p_pnp_rec->p_port_attr->port_guid, &qp_create,
            p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,
            &p_spl_qp_svc->h_qp_alias );

        if (status != IB_SUCCESS)
        {
            p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
            AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("ib_get_spl_qp alias failed, %s\n",
                ib_get_err_str( status ) ) );
            return status;
        }

        /* Reference the special QP service on behalf of ib_get_spl_qp. */
        ref_al_obj( &p_spl_qp_svc->obj );

        /* Register a MAD service for sends. */
        cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );
        mad_svc.mad_svc_context = p_spl_qp_svc;
        mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;
        mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;

        status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,
            &p_spl_qp_svc->h_mad_svc );

        if( status != IB_SUCCESS )
        {
            p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
            AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );
            return status;
        }
    }

    /* Set the context of the PnP event to this child object. */
    p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;

    /* The QP is ready.  Change the state. */
    p_spl_qp_svc->state = SPL_QP_ACTIVE;

    /* Force a completion callback to rearm the CQs. */
    spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );
    spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );

    /* Start the polling thread timer. */
    if( g_smi_poll_interval )
    {
        cl_status =
            cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );

        if( cl_status != CL_SUCCESS )
        {
            p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );
            AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("cl_timer_start failed, status 0x%x\n", cl_status ) );
            return ib_convert_cl_status( cl_status );
        }
    }

    /* Release the reference taken in init_al_obj. */
    deref_al_obj( &p_spl_qp_svc->obj );

    AL_EXIT( AL_DBG_SMI );
    return IB_SUCCESS;
}
/*
 * Return a work completion to the MAD dispatcher for the specified MAD.
 */
static void
__complete_send_mad(
    IN      const   al_mad_disp_handle_t        h_mad_disp,
    IN              al_mad_wr_t* const          p_mad_wr,
    IN      const   ib_wc_status_t              wc_status )
{
    ib_wc_t         wc;

    /* Construct a send work completion. */
    cl_memclr( &wc, sizeof( ib_wc_t ) );
    wc.wr_id = p_mad_wr->send_wr.wr_id;
    wc.wc_type = IB_WC_SEND;
    wc.status = wc_status;

    /* Set the send size if we were successful with the send. */
    if( wc_status == IB_WCS_SUCCESS )
        wc.length = MAD_BLOCK_SIZE;

    mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );
}
/*
 * Pre-destroy a special QP service.
 */
static void
destroying_spl_qp_svc(
    IN              al_obj_t*                   p_obj )
{
    spl_qp_svc_t*           p_spl_qp_svc;
    cl_list_item_t*         p_list_item;
    al_mad_wr_t*            p_mad_wr;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_obj );
    p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

    /* Change the state to prevent processing new send requests. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->state = SPL_QP_DESTROYING;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    /* Wait here until the special QP service is no longer in use. */
    while( p_spl_qp_svc->in_use_cnt )
    {
        cl_thread_suspend( 0 );
    }

    /* Destroy the special QP. */
    if( p_spl_qp_svc->h_qp )
    {
        /* If present, remove the special QP service from the tracking map. */
        cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
        if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
        {
            cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
        }
        else
        {
            cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
        }
        cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

        status = ib_destroy_qp( p_spl_qp_svc->h_qp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

        cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

        /* Complete any outstanding MAD send operations as "flushed". */
        for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
             p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
             p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
        {
            cl_spinlock_release( &p_spl_qp_svc->obj.lock );
            p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
            __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
                IB_WCS_WR_FLUSHED_ERR );
            cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
        }

        cl_spinlock_release( &p_spl_qp_svc->obj.lock );
        /* Receive MAD elements are returned to the pool by the free routine. */
    }

    /* Destroy the special QP alias and CQs. */
    if( p_spl_qp_svc->h_qp_alias )
    {
        status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );
    }
    if( p_spl_qp_svc->h_send_cq )
    {
        status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );
    }
    if( p_spl_qp_svc->h_recv_cq )
    {
        status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );
    }

    AL_EXIT( AL_DBG_SMI );
}
/*
 * Free a special QP service.
 */
static void
free_spl_qp_svc(
    IN              al_obj_t*                   p_obj )
{
    spl_qp_svc_t*           p_spl_qp_svc;
    cl_list_item_t*         p_list_item;
    al_mad_element_t*       p_al_mad;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_obj );
    p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

    /* Dereference the CA. */
    if( p_spl_qp_svc->obj.p_ci_ca )
        deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );

    /* Return receive MAD elements to the pool. */
    for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
         p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
         p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
    {
        p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

        status = ib_put_mad( &p_al_mad->element );
        CL_ASSERT( status == IB_SUCCESS );
    }

    CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );

    destroy_al_obj( &p_spl_qp_svc->obj );
    cl_free( p_spl_qp_svc );

    AL_EXIT( AL_DBG_SMI );
}
/*
 * Update the base LID of a special QP service.
 */
static void
spl_qp_svc_lid_change(
    IN              al_obj_t*                   p_obj,
    IN              ib_pnp_port_rec_t*          p_pnp_rec )
{
    spl_qp_svc_t*           p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_obj );
    CL_ASSERT( p_pnp_rec );
    CL_ASSERT( p_pnp_rec->p_port_attr );

    p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

    p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;
    p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;

    AL_EXIT( AL_DBG_SMI );
}
/*
 * Route a send work request.
 */
static mad_route_t
route_mad_send(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              ib_send_wr_t* const         p_send_wr )
{
    al_mad_wr_t*            p_mad_wr;
    al_mad_send_t*          p_mad_send;
    ib_mad_t*               p_mad;
    ib_smp_t*               p_smp;
    ib_av_handle_t          h_av;
    mad_route_t             route;
    boolean_t               local, loopback, discard;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_send_wr );

    /* Initialize pointers to the MAD work request and the MAD. */
    p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
    p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    p_smp = (ib_smp_t*)p_mad;

    /* Check if the CA has a local MAD interface. */
    local = loopback = discard = FALSE;
    if( check_local_mad( p_spl_qp_svc->h_qp ) )
    {
        /*
         * If the MAD is a locally addressed Subnet Management, Performance
         * Management, or Connection Management datagram, process the work
         * request locally.
         */
        h_av = p_send_wr->dgrm.ud.h_av;
        switch( p_mad->mgmt_class )
        {
        case IB_MCLASS_SUBN_DIR:
            /* Perform special checks on directed route SMPs. */
            if( ib_smp_is_response( p_smp ) )
            {
                /*
                 * This node is the originator of the response.  Discard
                 * if the hop count or pointer is zero, an intermediate hop,
                 * out of bounds hop, or if the first port of the directed
                 * route return path is not this port.
                 */
                if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
                {
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("hop cnt or hop ptr set to 0...discarding\n") );
                    discard = TRUE;
                }
                else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
                {
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("hop cnt != (hop ptr - 1)...discarding\n") );
                    discard = TRUE;
                }
                else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
                {
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("hop cnt > max hops...discarding\n") );
                    discard = TRUE;
                }
                else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
                    ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
                    p_spl_qp_svc->port_num ) )
                {
                    AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                        ("return path[hop ptr - 1] != port num...discarding\n") );
                    discard = TRUE;
                }
            }
            else
            {
                /* The SMP is a request. */
                if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
                    ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
                {
                    discard = TRUE;
                }
                else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
                {
                    /* Self Addressed: Sent locally, routed locally. */
                    local = TRUE;
                    discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
                        ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
                }
                else if( ( p_smp->hop_count != 0 ) &&
                    ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
                {
                    /* End of Path: Sent remotely, routed locally. */
                    local = TRUE;
                }
                else if( ( p_smp->hop_count != 0 ) &&
                    ( p_smp->hop_ptr == 0 ) )
                {
                    /* Beginning of Path: Sent locally, routed remotely. */
                    if( p_smp->dr_slid == IB_LID_PERMISSIVE )
                    {
                        discard =
                            ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
                            p_spl_qp_svc->port_num );
                    }
                }
                else
                {
                    /* Intermediate hop. */
                    discard = TRUE;
                }
            }

            /* Loopback locally addressed SM to SM "heartbeat" messages. */
            loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);
            break;

        case IB_MCLASS_SUBN_LID:
            /* Loopback locally addressed SM to SM "heartbeat" messages. */
            loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

            /* Fall through to check for a local MAD. */

        case IB_MCLASS_PERF:
        case IB_MCLASS_BM:
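            /*
             * A LID-routed management datagram is handled locally when its
             * destination LID is one of this port's LIDs.  With LMC, a port
             * responds to LIDs base_lid through base_lid + 2^LMC - 1, so the
             * check below treats the MAD as local when the destination LID
             * in the address vector equals base_lid OR'd with the path bits.
             */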
\r
            local = ( h_av &&
                ( h_av->av_attr.dlid ==
                ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
            break;

        default:
            /* Route vendor specific MADs to the HCA provider. */
            if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
            {
                local = ( h_av &&
                    ( h_av->av_attr.dlid ==
                    ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
            }
            break;
        }
    }

    route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
        ROUTE_LOCAL : ROUTE_REMOTE;
    if( local ) route = ROUTE_LOCAL;
    if( loopback && local ) route = ROUTE_LOOPBACK;
    if( discard ) route = ROUTE_DISCARD;

    AL_EXIT( AL_DBG_SMI );
    return route;
}
/*
 * Send a work request on the special QP.
 */
ib_api_status_t
spl_qp_svc_send(
    IN      const   ib_qp_handle_t              h_qp,
    IN              ib_send_wr_t* const         p_send_wr )
{
    spl_qp_svc_t*           p_spl_qp_svc;
    al_mad_wr_t*            p_mad_wr;
    mad_route_t             route;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( h_qp );
    CL_ASSERT( p_send_wr );

    /* Get the special QP service. */
    p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

    /* Determine how to route the MAD. */
    route = route_mad_send( p_spl_qp_svc, p_send_wr );

    /*
     * Check the QP state and guard against error handling.  Also,
     * to maintain proper order of work completions, delay processing
     * a local MAD until any remote MAD work requests have completed,
     * and delay processing a remote MAD until local MAD work requests
     * have completed.
     */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
        (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
        ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
            p_spl_qp_svc->max_qp_depth ) )
    {
        /*
         * Return busy status.
         * The special QP will resume sends at this point.
         */
        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        AL_EXIT( AL_DBG_SMI );
        return IB_RESOURCE_BUSY;
    }

    p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

    if( is_local( route ) )
    {
        /* Save the local MAD work request for processing. */
        p_spl_qp_svc->local_mad_wr = p_mad_wr;

        /* Flag the service as in use by the asynchronous processing thread. */
        cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        status = local_mad_send( p_spl_qp_svc, p_mad_wr );
    }
    else
    {
        /* Process a remote MAD send work request. */
        status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

        cl_spinlock_release( &p_spl_qp_svc->obj.lock );
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 */
static ib_api_status_t
remote_mad_send(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_smp_t*               p_smp;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize a pointer to the outbound MAD. */
    p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

    /* Perform outbound MAD processing. */

    /* Adjust directed route SMPs as required by IBA. */
    if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
    {
        if( ib_smp_is_response( p_smp ) )
        {
            if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
                p_smp->hop_ptr--;
        }
        else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
        {
            /*
             * Only update the pointer if the hw_agent is not implemented.
             * Fujitsu implements SMI in hardware, so the following has to
             * be passed down to the hardware SMI.
             */
            ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
            if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
                p_smp->hop_ptr++;
            ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
        }
    }

    /* Always generate send completions. */
    p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;
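    /*
     * Signaled completions are required here: each remote send is queued on
     * send_queue below and is only removed when its work completion is
     * processed in spl_qp_comp, so every send work request must generate a
     * completion.
     */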
\r
    /* Queue the MAD work request on the service tracking queue. */
    cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

    status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

    if( status != IB_SUCCESS )
    {
        cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

        /* Reset directed route SMPs as required by IBA. */
        if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
        {
            if( ib_smp_is_response( p_smp ) )
            {
                if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
                    p_smp->hop_ptr++;
            }
            else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
            {
                /* Only update if the hw_agent is not implemented. */
                ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
                if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
                    p_smp->hop_ptr--;
                ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
            }
        }
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 */
static ib_api_status_t
local_mad_send(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    mad_route_t             route;
    ib_api_status_t         status = IB_SUCCESS;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Determine how to route the MAD. */
    route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

    /* Check if this MAD should be discarded. */
    if( is_discard( route ) )
    {
        /* Deliver a "work completion" to the dispatcher. */
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        status = IB_INVALID_SETTING;
    }
    else if( is_loopback( route ) )
    {
        /* Loopback local SM to SM "heartbeat" messages. */
        status = loopback_mad( p_spl_qp_svc, p_mad_wr );
    }
    else
    {
        switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
        {
        case IB_MCLASS_SUBN_DIR:
        case IB_MCLASS_SUBN_LID:
            // Do not use the cache, in order to force an M_Key check.
            //status = process_subn_mad( p_spl_qp_svc, p_mad_wr );
            status = IB_NOT_DONE;
            break;

        default:
            status = IB_NOT_DONE;
            break;
        }
    }

    if( status == IB_NOT_DONE )
    {
        /* Queue an asynchronous processing item to process the local MAD. */
        cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
    }
    else
    {
        /*
         * Clear the local MAD pointer to allow processing of other MADs.
         * This is done after polling for attribute changes to ensure that
         * subsequent MADs pick up any changes performed by this one.
         */
        cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
        p_spl_qp_svc->local_mad_wr = NULL;
        cl_spinlock_release( &p_spl_qp_svc->obj.lock );

        /* No longer in use by the asynchronous processing thread. */
        cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

        /* Special QP operations will resume by unwinding. */
    }

    AL_EXIT( AL_DBG_SMI );
    return IB_SUCCESS;
}
static ib_api_status_t
get_resp_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr,
        OUT         ib_mad_element_t** const    pp_mad_resp )
{
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );
    CL_ASSERT( pp_mad_resp );

    /* Get a MAD element from the pool for the response. */
    status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,
        MAD_BLOCK_SIZE, pp_mad_resp );
    if( status != IB_SUCCESS )
    {
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
static ib_api_status_t
complete_local_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr,
    IN              ib_mad_element_t* const     p_mad_resp )
{
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );
    CL_ASSERT( p_mad_resp );

    /* Construct the receive MAD element. */
    p_mad_resp->status = IB_WCS_SUCCESS;
    p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;
    p_mad_resp->remote_lid = p_spl_qp_svc->base_lid;
    if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
    {
        p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;
        p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;
    }

    /*
     * Hand the receive MAD element to the dispatcher before completing
     * the send.  This guarantees that the send request cannot time out.
     */
    status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );

    /* Forward the send work completion to the dispatcher. */
    __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );

    AL_EXIT( AL_DBG_SMI );
    return status;
}
static ib_api_status_t
loopback_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_mad_t                *p_mad;
    ib_mad_element_t        *p_mad_resp;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Get a MAD element from the pool for the response. */
    status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
    if( status == IB_SUCCESS )
    {
        /* Initialize a pointer to the outbound MAD. */
        p_mad = get_mad_hdr_from_wr( p_mad_wr );

        /* Simulate a send/receive between local managers. */
        cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );

        /* Construct the receive MAD element. */
        p_mad_resp->status = IB_WCS_SUCCESS;
        p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;
        p_mad_resp->remote_lid = p_spl_qp_svc->base_lid;
        if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
        {
            p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;
            p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;
        }

        /*
         * Hand the receive MAD element to the dispatcher before completing
         * the send.  This guarantees that the send request cannot time out.
         */
        status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );

        /* Forward the send work completion to the dispatcher. */
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
static ib_api_status_t
process_node_info(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_mad_t                *p_mad;
    ib_mad_element_t        *p_mad_resp;
    ib_smp_t                *p_smp;
    ib_node_info_t          *p_node_info;
    ib_ca_attr_t            *p_ca_attr;
    ib_port_attr_t          *p_port_attr;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize a pointer to the outbound MAD. */
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    if( p_mad->method != IB_MAD_METHOD_GET )
    {
        /* Node info is a GET-only attribute. */
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        AL_EXIT( AL_DBG_SMI );
        return IB_INVALID_SETTING;
    }

    /* Get a MAD element from the pool for the response. */
    status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
    if( status == IB_SUCCESS )
    {
        p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
        cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
        p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
        if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
            p_smp->status = IB_SMP_DIRECTION;
        else
            p_smp->status = 0;

        p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

        /*
         * Fill in the node info, protecting against the
         * attributes being changed by PnP.
         */
        cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

        p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
        p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

        p_node_info->base_version = 1;
        p_node_info->class_version = 1;
        p_node_info->node_type = IB_NODE_TYPE_CA;
        p_node_info->num_ports = p_ca_attr->num_ports;
        /* TODO: Get some unique identifier for the system */
        p_node_info->sys_guid = p_ca_attr->ca_guid;
        p_node_info->node_guid = p_ca_attr->ca_guid;
        p_node_info->port_guid = p_port_attr->port_guid;
        p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
        p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
        p_node_info->revision = cl_hton32( p_ca_attr->revision );
        p_node_info->port_num_vendor_id =
            cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
        cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

        status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
static ib_api_status_t
process_node_desc(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_mad_t                *p_mad;
    ib_mad_element_t        *p_mad_resp;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize a pointer to the outbound MAD. */
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    if( p_mad->method != IB_MAD_METHOD_GET )
    {
        /* Node description is a GET-only attribute. */
        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        AL_EXIT( AL_DBG_SMI );
        return IB_INVALID_SETTING;
    }

    /* Get a MAD element from the pool for the response. */
    status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
    if( status == IB_SUCCESS )
    {
        cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
        p_mad_resp->p_mad_buf->method =
            (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
        if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
            p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
        else
            p_mad_resp->p_mad_buf->status = 0;
        /* Set the node description to the machine name. */
        cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data,
            node_desc, sizeof(node_desc) );

        status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Process subnet management MADs using cached data if possible.
 */
static ib_api_status_t
process_subn_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_api_status_t         status;
    ib_smp_t                *p_smp;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

    CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||
        p_smp->mgmt_class == IB_MCLASS_SUBN_LID );

    switch( p_smp->attr_id )
    {
    case IB_MAD_ATTR_NODE_INFO:
        status = process_node_info( p_spl_qp_svc, p_mad_wr );
        break;

    case IB_MAD_ATTR_NODE_DESC:
        status = process_node_desc( p_spl_qp_svc, p_mad_wr );
        break;

    default:
        status = IB_NOT_DONE;
        break;
    }

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Process a local MAD send work request.
 */
static ib_api_status_t
fwd_local_mad(
    IN              spl_qp_svc_t*               p_spl_qp_svc,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_mad_t*               p_mad;
    ib_smp_t*               p_smp;
    al_mad_send_t*          p_mad_send;
    ib_mad_element_t*       p_mad_response = NULL;
    ib_mad_t*               p_mad_response_buf;
    ib_api_status_t         status = IB_SUCCESS;
    boolean_t               smp_is_set;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_spl_qp_svc );
    CL_ASSERT( p_mad_wr );

    /* Initialize pointers to the MAD work request and outbound MAD. */
    p_mad = get_mad_hdr_from_wr( p_mad_wr );
    p_smp = (ib_smp_t*)p_mad;

    smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

    /* Get a MAD element from the pool for the response. */
    p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
    if( p_mad_send->p_send_mad->resp_expected )
    {
        status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
        if( status != IB_SUCCESS )
        {
            __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
                IB_WCS_LOCAL_OP_ERR );
            AL_EXIT( AL_DBG_SMI );
            return status;
        }
        p_mad_response_buf = p_mad_response->p_mad_buf;
    }
    else
    {
        p_mad_response_buf = NULL;
    }

    /* Adjust directed route SMPs as required by IBA. */
    if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
    {
        CL_ASSERT( !ib_smp_is_response( p_smp ) );

        /*
         * If this was a self addressed, directed route SMP, increment
         * the hop pointer in the request before delivery as required
         * by IBA.  Otherwise, adjustment for remote requests occurs
         * during inbound processing.
         */
        if( p_smp->hop_count == 0 )
            p_smp->hop_ptr++;
    }

    /* Forward the locally addressed MAD to the CA interface. */
    status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
        p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf );

    /* Reset directed route SMPs as required by IBA. */
    if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
    {
        /*
         * If this was a self addressed, directed route SMP, decrement
         * the hop pointer in the response before delivery as required
         * by IBA.  Otherwise, adjustment for remote responses occurs
         * during outbound processing.
         */
        if( p_smp->hop_count == 0 )
        {
            /* Adjust the request SMP. */
            p_smp->hop_ptr--;

            /* Adjust the response SMP. */
            if( p_mad_response_buf )
            {
                p_smp = (ib_smp_t*)p_mad_response_buf;
                p_smp->hop_ptr--;
            }
        }
    }

    if( status != IB_SUCCESS )
    {
        if( p_mad_response )
            ib_put_mad( p_mad_response );

        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
            IB_WCS_LOCAL_OP_ERR );
        AL_EXIT( AL_DBG_SMI );
        return status;
    }

    /* Check the completion status of this simulated send. */
    if( p_mad_send->p_send_mad->resp_expected )
    {
        /*
         * The SMI uses PnP polling to refresh the base_lid and lmc.
         * Polling takes time, so we update the values here to prevent
         * the failure of LID routed MADs sent immediately following this
         * assignment.  Check the response to see if the port info was set.
         */
        {
            ib_port_info_t*     p_port_info = NULL;

            switch( p_mad_response_buf->mgmt_class )
            {
            case IB_MCLASS_SUBN_DIR:
                if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
                    ( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )
                {
                    p_port_info =
                        (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );
                }
                break;

            case IB_MCLASS_SUBN_LID:
                if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
                    ( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )
                {
                    p_port_info =
                        (ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf);
                }
                break;

            default:
                break;
            }

            if( p_port_info )
            {
                p_spl_qp_svc->base_lid = p_port_info->base_lid;
                p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
                p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid;
                p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info );

                if (p_port_info->subnet_timeout & 0x80)
                {
                    AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
                        ("Client reregister event, setting sm_lid to 0.\n"));
                    ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);
                    p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->
                        p_port_attr->sm_lid= 0;
                    ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);
                }
            }
        }

        /* Construct the receive MAD element. */
        p_mad_response->status = IB_WCS_SUCCESS;
        p_mad_response->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;
        p_mad_response->remote_lid = p_spl_qp_svc->base_lid;
        if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
        {
            p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data;
            p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE;
        }

        /*
         * Hand the receive MAD element to the dispatcher before completing
         * the send.  This guarantees that the send request cannot time out.
         */
        status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response );

        __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );
    }

    /* If the SMP was a Get, no need to trigger a PnP poll. */
    if( status == IB_SUCCESS && !smp_is_set )
        status = IB_NOT_DONE;

    AL_EXIT( AL_DBG_SMI );
    return status;
}
/*
 * Asynchronous processing thread callback to send a local MAD.
 */
static void
send_local_mad_cb(
    IN              cl_async_proc_item_t*       p_item )
{
    spl_qp_svc_t*           p_spl_qp_svc;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_item );
    p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );

    /* Process a local MAD send work request. */
    CL_ASSERT( p_spl_qp_svc->local_mad_wr );
    status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );

    /*
     * If we successfully processed a local MAD, it could have changed
     * something (e.g. the LID) on the HCA.  Scan for changes.
     */
    if( status == IB_SUCCESS )
        pnp_poll();

    /*
     * Clear the local MAD pointer to allow processing of other MADs.
     * This is done after polling for attribute changes to ensure that
     * subsequent MADs pick up any changes performed by this one.
     */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->local_mad_wr = NULL;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    /* Continue processing any queued MADs on the QP. */
    special_qp_resume_sends( p_spl_qp_svc->h_qp );

    /* No longer in use by the asynchronous processing thread. */
    cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

    AL_EXIT( AL_DBG_SMI );
}
/*
 * Special QP send completion callback.
 */
static void
spl_qp_send_comp_cb(
    IN      const   ib_cq_handle_t              h_cq,
    IN              void*                       cq_context )
{
    spl_qp_svc_t*           p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( cq_context );
    p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

    /* Queue an asynchronous processing item to process sends. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    if( !p_spl_qp_svc->send_async_queued )
    {
        p_spl_qp_svc->send_async_queued = TRUE;
        ref_al_obj( &p_spl_qp_svc->obj );
        cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );
    }
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

    /* Invoke the callback directly. */
    CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );
    spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );

    /* Continue processing any queued MADs on the QP. */
    special_qp_resume_sends( p_spl_qp_svc->h_qp );

#endif

    AL_EXIT( AL_DBG_SMI );
}
#if defined( CL_USE_MUTEX )
static void
spl_qp_send_async_cb(
    IN              cl_async_proc_item_t*       p_item )
{
    spl_qp_svc_t*           p_spl_qp_svc;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_item );
    p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

    /* Reset asynchronous queue flag. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->send_async_queued = FALSE;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );

    /* Continue processing any queued MADs on the QP. */
    status = special_qp_resume_sends( p_spl_qp_svc->h_qp );
    CL_ASSERT( status == IB_SUCCESS );

    deref_al_obj( &p_spl_qp_svc->obj );

    AL_EXIT( AL_DBG_SMI );
}
#endif
/*
 * Special QP receive completion callback.
 */
static void
spl_qp_recv_comp_cb(
    IN      const   ib_cq_handle_t              h_cq,
    IN              void*                       cq_context )
{
    spl_qp_svc_t*           p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( cq_context );
    p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

    /* Queue an asynchronous processing item to process receives. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    if( !p_spl_qp_svc->recv_async_queued )
    {
        p_spl_qp_svc->recv_async_queued = TRUE;
        ref_al_obj( &p_spl_qp_svc->obj );
        cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );
    }
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

    CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );
    spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );

#endif

    AL_EXIT( AL_DBG_SMI );
}
#if defined( CL_USE_MUTEX )
static void
spl_qp_recv_async_cb(
    IN              cl_async_proc_item_t*       p_item )
{
    spl_qp_svc_t*           p_spl_qp_svc;

    AL_ENTER( AL_DBG_SMI );

    CL_ASSERT( p_item );
    p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

    /* Reset asynchronous queue flag. */
    cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
    p_spl_qp_svc->recv_async_queued = FALSE;
    cl_spinlock_release( &p_spl_qp_svc->obj.lock );

    spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );

    deref_al_obj( &p_spl_qp_svc->obj );

    AL_EXIT( AL_DBG_SMI );
}
#endif
/*
 * Special QP completion handler.
 */
void
spl_qp_comp(
	IN spl_qp_svc_t* p_spl_qp_svc,
	IN const ib_cq_handle_t h_cq,
	IN ib_wc_type_t wc_type )
{
	ib_wc_t wc;
	ib_wc_t* p_free_wc = &wc;
	ib_wc_t* p_done_wc;
	al_mad_wr_t* p_mad_wr;
	al_mad_element_t* p_al_mad;
	ib_mad_element_t* p_mad_element;
	ib_smp_t* p_smp;
	ib_api_status_t status;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( h_cq );

	/* Check the QP state and guard against error handling. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );
\r
	/* Process work completions. */
	while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
	{
		/* Process completions one at a time. */
		CL_ASSERT( p_done_wc );

		/* Flushed completions are handled elsewhere. */
		if( wc.status == IB_WCS_WR_FLUSHED_ERR )
		{
			p_free_wc = &wc;
			continue;
		}

		/*
		 * Process the work completion.  Per IBA specification, the
		 * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
		 * Use the wc_type parameter.
		 */
		switch( wc_type )
		{
		case IB_WC_SEND:
			/* Get a pointer to the MAD work request. */
			p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);

			/* Remove the MAD work request from the service tracking queue. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
			cl_qlist_remove_item( &p_spl_qp_svc->send_queue,
				&p_mad_wr->list_item );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Reset directed route SMPs as required by IBA. */
			p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );
			if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			{
				if( ib_smp_is_response( p_smp ) )
					p_smp->hop_ptr++;
				else
					p_smp->hop_ptr--;
			}

			/* Report the send completion to the dispatcher. */
			mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );
\r
			break;

		case IB_WC_RECV:
			/* Initialize pointers to the MAD element. */
			p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
			p_mad_element = &p_al_mad->element;

			/* Remove the AL MAD element from the service tracking list. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_mad->list_item );

			/* Replenish the receive buffer. */
			spl_qp_svc_post_recvs( p_spl_qp_svc );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Construct the MAD element from the receive work completion. */
			build_mad_recv( p_mad_element, &wc );

			/* Process the received MAD. */
			status = process_mad_recv( p_spl_qp_svc, p_mad_element );

			/* Discard this MAD on error. */
			if( status != IB_SUCCESS )
			{
				status = ib_put_mad( p_mad_element );
				CL_ASSERT( status == IB_SUCCESS );
			}
			break;

		default:
			CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );
			break;
		}
\r
		if( wc.status != IB_WCS_SUCCESS )
		{
			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("special QP completion error: %s! internal syndrome 0x%I64x\n",
				ib_get_wc_status_str( wc.status ), wc.vendor_specific) );

			/* Reset the special QP service and return. */
			spl_qp_svc_reset( p_spl_qp_svc );
		}
		p_free_wc = &wc;
	}

	/* Rearm the CQ. */
	status = ib_rearm_cq( h_cq, FALSE );
	CL_ASSERT( status == IB_SUCCESS );

	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
	AL_EXIT( AL_DBG_SMI_CB );
}
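#if 0
/*
 * Minimal sketch (not part of the original source) of the ib_poll_cq
 * free/done list contract used by spl_qp_comp above: a free work completion
 * structure is supplied on input and completed entries are returned through
 * the done pointer, so a single stack ib_wc_t can be recycled by resetting
 * the free pointer on every pass.  The function name is illustrative only.
 */
static void
poll_cq_example(
	IN const ib_cq_handle_t h_cq )
{
	ib_wc_t wc;
	ib_wc_t* p_free = &wc;
	ib_wc_t* p_done;

	while( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
	{
		/* Consume p_done here. */
		p_free = &wc;	/* Re-supply the free element for the next poll. */
	}
}
#endif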
\r
/*
 * Process a received MAD.
 */
ib_api_status_t
process_mad_recv(
	IN spl_qp_svc_t* p_spl_qp_svc,
	IN ib_mad_element_t* p_mad_element )
{
	ib_smp_t* p_smp;
	mad_route_t route;
	ib_api_status_t status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * if the source LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_slid < p_spl_qp_svc->base_lid ) ||
					( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
					( 1 << p_spl_qp_svc->lmc ) ) )
				{
					route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * if the destination LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;	/* Set path per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_dlid < p_spl_qp_svc->base_lid ) ||
					( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
					( 1 << p_spl_qp_svc->lmc ) ) )
				{
					route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through next case */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			/* Process the received GMP. */
			switch( p_mad_element->p_mad_buf->method )
			{
			case IB_MAD_METHOD_GET:
			case IB_MAD_METHOD_SET:
				route = ROUTE_LOCAL;
				break;

			default:
				break;
			}
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		case IB_MCLASS_SUBN_ADM:
		case IB_MCLASS_DEV_MGMT:
		case IB_MCLASS_COMM_MGMT:
		case IB_MCLASS_SNMP:
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
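/*
 * Note: the routing outcomes above map as follows - ROUTE_DISCARD returns
 * IB_ERROR so the caller releases the MAD element, ROUTE_DISPATCHER delivers
 * the MAD to registered class managers, ROUTE_REMOTE forwards a locally
 * generated trap to the subnet manager via forward_sm_trap(), and
 * ROUTE_LOCAL loops the MAD back to the local HCA through recv_local_mad().
 */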
\r
/*
 * Route a received SMP.
 */
mad_route_t
route_recv_smp(
	IN ib_mad_element_t* p_mad_element )
{
	mad_route_t route;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_mad_element );

	/* Process the received SMP. */
	switch( p_mad_element->p_mad_buf->method )
	{
	case IB_MAD_METHOD_GET:
	case IB_MAD_METHOD_SET:
		route = route_recv_smp_attr( p_mad_element );
		break;

	case IB_MAD_METHOD_TRAP:
		/*
		 * Special check to route locally generated traps to the remote SM.
		 * Distinguished from other receives by the p_wc->recv.ud.recv_opt
		 * IB_RECV_OPT_FORWARD flag.
		 *
		 * Note that because forwarded traps use AL MAD services, the upper
		 * 32-bits of the TID are reserved by the access layer.  When matching
		 * a Trap Repress MAD, the SMA must only use the lower 32-bits of the
		 * TID.
		 */
		route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?
			ROUTE_REMOTE : ROUTE_DISPATCHER;
		break;

	case IB_MAD_METHOD_TRAP_REPRESS:
		/*
		 * Note that because forwarded traps use AL MAD services, the upper
		 * 32-bits of the TID are reserved by the access layer.  When matching
		 * a Trap Repress MAD, the SMA must only use the lower 32-bits of the
		 * TID.
		 */
		route = ROUTE_LOCAL;
		break;

	default:
		route = ROUTE_DISPATCHER;
		break;
	}

	AL_EXIT( AL_DBG_SMI );
	return route;
}
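/*
 * Note: a Trap carrying IB_RECV_OPT_FORWARD was generated by the local SMA
 * and is routed ROUTE_REMOTE so forward_sm_trap() can resend it to the
 * subnet manager, while the matching Trap Repress is routed ROUTE_LOCAL so
 * the local SMA can stop resending the trap.
 */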
\r
/*
 * Route received SMP attributes.
 */
mad_route_t
route_recv_smp_attr(
	IN ib_mad_element_t* p_mad_element )
{
	mad_route_t route;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_mad_element );

	/* Process the received SMP attributes. */
	switch( p_mad_element->p_mad_buf->attr_id )
	{
	case IB_MAD_ATTR_NODE_DESC:
	case IB_MAD_ATTR_NODE_INFO:
	case IB_MAD_ATTR_GUID_INFO:
	case IB_MAD_ATTR_PORT_INFO:
	case IB_MAD_ATTR_P_KEY_TABLE:
	case IB_MAD_ATTR_SLVL_TABLE:
	case IB_MAD_ATTR_VL_ARBITRATION:
	case IB_MAD_ATTR_VENDOR_DIAG:
	case IB_MAD_ATTR_LED_INFO:
	case IB_MAD_ATTR_SWITCH_INFO:
		route = ROUTE_LOCAL;
		break;

	default:
		route = ROUTE_DISPATCHER;
		break;
	}

	AL_EXIT( AL_DBG_SMI );
	return route;
}
\r
/*
 * Route a received GMP.
 */
mad_route_t
route_recv_gmp(
	IN ib_mad_element_t* p_mad_element )
{
	mad_route_t route;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_mad_element );

	/* Process the received GMP. */
	switch( p_mad_element->p_mad_buf->method )
	{
	case IB_MAD_METHOD_GET:
	case IB_MAD_METHOD_SET:
		/* Route vendor specific MADs to the HCA provider. */
		if( ib_class_is_vendor_specific(
			p_mad_element->p_mad_buf->mgmt_class ) )
		{
			route = ROUTE_LOCAL;
		}
		else
		{
			route = route_recv_gmp_attr( p_mad_element );
		}
		break;

	default:
		route = ROUTE_DISPATCHER;
		break;
	}

	AL_EXIT( AL_DBG_SMI );
	return route;
}
\r
/*
 * Route received GMP attributes.
 */
mad_route_t
route_recv_gmp_attr(
	IN ib_mad_element_t* p_mad_element )
{
	mad_route_t route;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_mad_element );

	/* Process the received GMP attributes. */
	if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )
		route = ROUTE_LOCAL;
	else
		route = ROUTE_DISPATCHER;

	AL_EXIT( AL_DBG_SMI );
	return route;
}
\r
/*
 * Forward a locally generated Subnet Management trap.
 */
ib_api_status_t
forward_sm_trap(
	IN spl_qp_svc_t* p_spl_qp_svc,
	IN ib_mad_element_t* p_mad_element )
{
	ib_av_attr_t av_attr;
	ib_api_status_t status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/* Check the SMP class. */
	if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )
	{
		/*
		 * Per IBA Specification Release 1.1 Section 14.2.2.1,
		 * "C14-5: Only a SM shall originate a directed route SMP."
		 * Therefore all traps should be LID routed; drop this one.
		 */
		AL_EXIT( AL_DBG_SMI );
		return IB_ERROR;
	}

	/* Create an address vector for the SM. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num = p_spl_qp_svc->port_num;
	av_attr.sl = p_spl_qp_svc->sm_sl;
	av_attr.dlid = p_spl_qp_svc->sm_lid;
	av_attr.grh_valid = FALSE;

	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_element->h_av );

	if( status != IB_SUCCESS )
	{
		AL_EXIT( AL_DBG_SMI );
		return status;
	}

	/* Complete the initialization of the MAD element. */
	p_mad_element->p_next = NULL;
	p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
	p_mad_element->resp_expected = FALSE;

	/* Clear context1 for proper send completion callback processing. */
	p_mad_element->context1 = NULL;

	/*
	 * Forward the trap.  Note that because forwarded traps use AL MAD
	 * services, the upper 32-bits of the TID are reserved by the access
	 * layer.  When matching a Trap Repress MAD, the SMA must only use
	 * the lower 32-bits of the TID.
	 */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );

	if( status != IB_SUCCESS )
		ib_destroy_av( p_mad_element->h_av );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
\r
/*
 * Process a locally routed MAD received from the special QP.
 */
ib_api_status_t
recv_local_mad(
	IN spl_qp_svc_t* p_spl_qp_svc,
	IN ib_mad_element_t* p_mad_request )
{
	ib_mad_t* p_mad_hdr;
	ib_api_status_t status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_request );

	/* Initialize the MAD element. */
	p_mad_hdr = ib_get_mad_buf( p_mad_request );
	p_mad_request->context1 = p_mad_request;

	/* Save the TID. */
	p_mad_request->context2 =
		(void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );
/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )

	/*
	 * We need to get a response from the local HCA to this MAD only if this
	 * MAD is not itself a response.
	 */
	p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
		( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
	p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
	p_mad_request->send_opt = IB_SEND_OPT_LOCAL;

	/* Send the locally addressed MAD request to the CA for processing. */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
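/*
 * Note: the AL portion of the TID saved in context2 above is restored by
 * spl_qp_alias_recv_cb() before the locally generated response is relayed,
 * so the original requester sees its own transaction ID on the wire.
 */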
\r
/*
 * Special QP alias send completion callback.
 */
void
spl_qp_alias_send_cb(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN void* mad_svc_context,
	IN ib_mad_element_t* p_mad_element )
{
	ib_api_status_t status;

	AL_ENTER( AL_DBG_SMI );

	UNUSED_PARAM( h_mad_svc );
	UNUSED_PARAM( mad_svc_context );
	CL_ASSERT( p_mad_element );

	if( p_mad_element->h_av )
	{
		status = ib_destroy_av( p_mad_element->h_av );
		CL_ASSERT( status == IB_SUCCESS );
	}

	status = ib_put_mad( p_mad_element );
	CL_ASSERT( status == IB_SUCCESS );

	AL_EXIT( AL_DBG_SMI );
}
\r
/*
 * Special QP alias receive completion callback.
 */
void
spl_qp_alias_recv_cb(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN void* mad_svc_context,
	IN ib_mad_element_t* p_mad_response )
{
	spl_qp_svc_t* p_spl_qp_svc;
	ib_mad_element_t* p_mad_request;
	ib_mad_t* p_mad_hdr;
	ib_av_attr_t av_attr;
	ib_api_status_t status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( mad_svc_context );
	CL_ASSERT( p_mad_response );
	CL_ASSERT( p_mad_response->send_context1 );

	/* Initialize pointers. */
	p_spl_qp_svc = mad_svc_context;
	p_mad_request = p_mad_response->send_context1;
	p_mad_hdr = ib_get_mad_buf( p_mad_response );

	/* Restore the TID, so it will match on the remote side. */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id,
		(uint32_t)(uintn_t)p_mad_response->send_context2 );
#pragma warning( pop )

	/* Set the remote QP. */
	p_mad_response->remote_qp = p_mad_request->remote_qp;
	p_mad_response->remote_qkey = p_mad_request->remote_qkey;

	/* Prepare to create an address vector. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num = p_spl_qp_svc->port_num;
	av_attr.sl = p_mad_request->remote_sl;
	av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
	av_attr.path_bits = p_mad_request->path_bits;
	if( p_mad_request->grh_valid )
	{
		cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );
		av_attr.grh.src_gid = p_mad_request->p_grh->dest_gid;
		av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;
		av_attr.grh_valid = TRUE;
	}
	if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&
		( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )
		av_attr.dlid = IB_LID_PERMISSIVE;
	else
		av_attr.dlid = p_mad_request->remote_lid;

	/* Create an address vector. */
	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_response->h_av );

	if( status != IB_SUCCESS )
	{
		ib_put_mad( p_mad_response );

		AL_EXIT( AL_DBG_SMI );
		return;
	}

	/* Send the response. */
	status = ib_send_mad( h_mad_svc, p_mad_response, NULL );
	if( status != IB_SUCCESS )
	{
		ib_destroy_av( p_mad_response->h_av );
		ib_put_mad( p_mad_response );
	}

	AL_EXIT( AL_DBG_SMI );
}
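/*
 * Note: the GRH source and destination GIDs from the request are swapped
 * when building the response address vector above, since the reply travels
 * the reverse of the path on which the request arrived.
 */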
\r
/*
 * Post receive buffers to a special QP.
 */
static ib_api_status_t
spl_qp_svc_post_recvs(
	IN spl_qp_svc_t* const p_spl_qp_svc )
{
	ib_mad_element_t* p_mad_element;
	al_mad_element_t* p_al_element;
	ib_recv_wr_t recv_wr;
	ib_api_status_t status = IB_SUCCESS;

	/* Attempt to post receive buffers up to the max_qp_depth limit. */
	while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <
		(int32_t)p_spl_qp_svc->max_qp_depth )
	{
		/* Get a MAD element from the pool. */
		status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,
			MAD_BLOCK_SIZE, &p_mad_element );

		if( status != IB_SUCCESS ) break;

		p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
			element );

		/* Build the receive work request. */
		recv_wr.p_next = NULL;
		recv_wr.wr_id = (uintn_t)p_al_element;
		recv_wr.num_ds = 1;
		recv_wr.ds_array = &p_al_element->grh_ds;

		/* Queue the receive on the service tracking list. */
		cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,
			&p_al_element->list_item );

		/* Post the receive. */
		status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );

		if( status != IB_SUCCESS )
		{
			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("Failed to post receive %016I64x\n",
				(LONG_PTR)p_al_element) );
			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_element->list_item );

			ib_put_mad( p_mad_element );
			break;
		}
	}

	return status;
}
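/*
 * Note: each element is linked onto recv_queue before ib_post_recv() is
 * issued, presumably so a completion that arrives immediately can find the
 * element on the tracking list; on a post failure the element is unlinked
 * and returned to the MAD pool.
 */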
\r
/*
 * Special QP service asynchronous event callback.
 */
void
spl_qp_svc_event_cb(
	IN ib_async_event_rec_t *p_event_rec )
{
	spl_qp_svc_t* p_spl_qp_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_event_rec );
	CL_ASSERT( p_event_rec->context );

	if( p_event_rec->code == IB_AE_SQ_DRAINED )
	{
		AL_EXIT( AL_DBG_SMI );
		return;
	}

	p_spl_qp_svc = p_event_rec->context;

	spl_qp_svc_reset( p_spl_qp_svc );

	AL_EXIT( AL_DBG_SMI );
}
\r
/*
 * Special QP service reset.
 */
void
spl_qp_svc_reset(
	IN spl_qp_svc_t* p_spl_qp_svc )
{
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}

	/* Change the special QP service to the error state. */
	p_spl_qp_svc->state = SPL_QP_ERROR;

	/* Flag the service as in use by the asynchronous processing thread. */
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Queue an asynchronous processing item to reset the special QP. */
	cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );
}
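/*
 * Note: the actual teardown is deferred to spl_qp_svc_reset_cb() on the
 * asynchronous processing thread, which may block while waiting for
 * in_use_cnt to drain; this routine only marks the service as being in the
 * error state and queues that work.
 */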
\r
/*
 * Asynchronous processing thread callback to reset the special QP service.
 */
void
spl_qp_svc_reset_cb(
	IN cl_async_proc_item_t* p_item )
{
	spl_qp_svc_t* p_spl_qp_svc;
	cl_list_item_t* p_list_item;
	ib_wc_t wc;
	ib_wc_t* p_free_wc;
	ib_wc_t* p_done_wc;
	al_mad_wr_t* p_mad_wr;
	al_mad_element_t* p_al_mad;
	ib_qp_mod_t qp_mod;
	ib_api_status_t status;
	cl_qlist_t mad_wr_list;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );

	/* Wait here until the special QP service is only in use by this thread. */
	while( p_spl_qp_svc->in_use_cnt != 1 )
	{
		cl_thread_suspend( 0 );
	}

	/* Change the QP to the RESET state. */
	cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
	qp_mod.req_state = IB_QPS_RESET;

	status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );
	CL_ASSERT( status == IB_SUCCESS );

	/* Return receive MAD elements to the pool. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
		p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
		p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
	{
		p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

		status = ib_put_mad( &p_al_mad->element );
		CL_ASSERT( status == IB_SUCCESS );
	}
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Re-initialize the QP. */
	status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
	CL_ASSERT( status == IB_SUCCESS );

	/* Poll to remove any remaining send completions from the CQ. */
	do
	{
		cl_memclr( &wc, sizeof( ib_wc_t ) );
		p_free_wc = &wc;
		status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );

	} while( status == IB_SUCCESS );

	/* Post receive buffers. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	spl_qp_svc_post_recvs( p_spl_qp_svc );

	/* Re-queue any outstanding MAD send operations. */
	cl_qlist_init( &mad_wr_list );
	cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	for( p_list_item = cl_qlist_remove_head( &mad_wr_list );
		p_list_item != cl_qlist_end( &mad_wr_list );
		p_list_item = cl_qlist_remove_head( &mad_wr_list ) )
	{
		p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
		special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );
	}

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state == SPL_QP_ERROR )
	{
		/* The QP is ready.  Change the state. */
		p_spl_qp_svc->state = SPL_QP_ACTIVE;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* Re-arm the CQs. */
		status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );
		status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );

		/* Resume send processing. */
		special_qp_resume_sends( p_spl_qp_svc->h_qp );
	}
	else
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	AL_EXIT( AL_DBG_SMI );
}
\r
/*
 * Special QP alias asynchronous event callback.
 */
void
spl_qp_alias_event_cb(
	IN ib_async_event_rec_t *p_event_rec )
{
	UNUSED_PARAM( p_event_rec );
}
\r
/*
 * Acquire the SMI dispatcher for the given port.
 */
ib_api_status_t
acquire_smi_disp(
	IN const ib_net64_t port_guid,
	OUT al_mad_disp_handle_t* const ph_mad_disp )
{
	CL_ASSERT( gp_spl_qp_mgr );
	return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );
}


/*
 * Acquire the GSI dispatcher for the given port.
 */
ib_api_status_t
acquire_gsi_disp(
	IN const ib_net64_t port_guid,
	OUT al_mad_disp_handle_t* const ph_mad_disp )
{
	CL_ASSERT( gp_spl_qp_mgr );
	return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );
}
\r
/*
 * Acquire the service dispatcher for the given port.
 */
static ib_api_status_t
acquire_svc_disp(
	IN const cl_qmap_t* const p_svc_map,
	IN const ib_net64_t port_guid,
	OUT al_mad_disp_handle_t *ph_mad_disp )
{
	cl_map_item_t* p_svc_item;
	spl_qp_svc_t* p_spl_qp_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_svc_map );
	CL_ASSERT( gp_spl_qp_mgr );

	/* Search for the SMI or GSI service for the given port. */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
	p_svc_item = cl_qmap_get( p_svc_map, port_guid );
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );
	if( p_svc_item == cl_qmap_end( p_svc_map ) )
	{
		/* The port does not have an active agent. */
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_GUID;
	}

	p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );

	/* Found a match.  Get the MAD dispatcher handle. */
	*ph_mad_disp = p_spl_qp_svc->h_mad_disp;

	/* Reference the MAD dispatcher on behalf of the client. */
	ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
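/*
 * Note: the dispatcher reference taken on behalf of the client above is
 * expected to be released by the caller, via deref_al_obj() on the
 * dispatcher object, once the handle is no longer needed.
 */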
\r
/*
 * Force a poll for CA attribute changes.
 */
void
force_smi_poll(
	void )
{
	AL_ENTER( AL_DBG_SMI );

	/*
	 * Stop the poll timer.  Just invoke the timer callback directly to
	 * save a thread context switch.
	 */
	smi_poll_timer_cb( gp_spl_qp_mgr );

	AL_EXIT( AL_DBG_SMI );
}
\r
/*
 * Poll for CA port attribute changes.
 */
void
smi_poll_timer_cb(
	IN void* context )
{
	cl_status_t cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( context );
	CL_ASSERT( gp_spl_qp_mgr == context );
	UNUSED_PARAM( context );

	/*
	 * Scan for changes on the local HCAs.  Since the PnP manager has its
	 * own thread for processing changes, we kick off that thread in parallel
	 * with reposting receive buffers to the SQP agents.
	 */
	pnp_poll();

	/*
	 * To handle the case where force_smi_poll is called at the same time
	 * the timer expires, check if the asynchronous processing item is in
	 * use.  If it is already in use, it means that we're about to poll
	 * anyway, so just ignore this call.
	 */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );

	/* Perform port processing on the special QP agents. */
	cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,
		gp_spl_qp_mgr );

	/* Determine if there are any special QP agents to poll. */
	if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )
	{
		/* Restart the polling timer. */
		cl_status =
			cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );
		CL_ASSERT( cl_status == CL_SUCCESS );
	}
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

	AL_EXIT( AL_DBG_SMI );
}
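/*
 * Note: the polling timer is restarted only while special QP agents exist
 * and g_smi_poll_interval is non-zero, so setting the interval to zero stops
 * periodic attribute polling after the next expiration.
 */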
\r
/*
 * Post receive buffers to a special QP.
 */
void
smi_post_recvs(
	IN cl_list_item_t* const p_list_item,
	IN void* context )
{
	al_obj_t* p_obj;
	spl_qp_svc_t* p_spl_qp_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_list_item );
	UNUSED_PARAM( context );

	p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		AL_EXIT( AL_DBG_SMI );
		return;
	}

	spl_qp_svc_post_recvs( p_spl_qp_svc );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	AL_EXIT( AL_DBG_SMI );
}
\r