[MTHCA\MT23108\IBAL] change to support TRAP and TRAP_REPRESS
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  * Copyright (c) 2006 Voltaire Corporation.  All rights reserved.\r
5  *\r
6  * This software is available to you under the OpenIB.org BSD license\r
7  * below:\r
8  *\r
9  *     Redistribution and use in source and binary forms, with or\r
10  *     without modification, are permitted provided that the following\r
11  *     conditions are met:\r
12  *\r
13  *      - Redistributions of source code must retain the above\r
14  *        copyright notice, this list of conditions and the following\r
15  *        disclaimer.\r
16  *\r
17  *      - Redistributions in binary form must reproduce the above\r
18  *        copyright notice, this list of conditions and the following\r
19  *        disclaimer in the documentation and/or other materials\r
20  *        provided with the distribution.\r
21  *\r
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
29  * SOFTWARE.\r
30  *\r
31  * $Id$\r
32  */\r
33 \r
34 \r
35 #include <iba/ib_al.h>\r
36 #include <complib/cl_timer.h>\r
37 \r
38 #include "ib_common.h"\r
39 #include "al_common.h"\r
40 #include "al_debug.h"\r
41 #if defined(EVENT_TRACING)\r
42 #ifdef offsetof\r
43 #undef offsetof\r
44 #endif\r
45 #include "al_smi.tmh"\r
46 #endif\r
47 #include "al_verbs.h"\r
48 #include "al_mgr.h"\r
49 #include "al_pnp.h"\r
50 #include "al_qp.h"\r
51 #include "al_smi.h"\r
52 #include "al_av.h"\r
53 \r
54 \r
/* Node description string, defined elsewhere in AL. */
extern char						node_desc[IB_NODE_DESCRIPTION_SIZE];

#define SMI_POLL_INTERVAL			20000		/* Milliseconds */
#define LOCAL_MAD_TIMEOUT			50			/* Milliseconds */
#define DEFAULT_QP0_DEPTH			256
#define DEFAULT_QP1_DEPTH			1024

/* Interval of the SMI polling timer, in milliseconds (see smi_poll_timer_cb). */
uint32_t						g_smi_poll_interval =	SMI_POLL_INTERVAL;
/* Global singleton special QP manager; allocated in create_spl_qp_mgr,
 * cleared in free_spl_qp_mgr. */
spl_qp_mgr_t*					gp_spl_qp_mgr = NULL;
64 \r
65 \r
/*
 * Function prototypes.
 *
 * Forward declarations for the global special QP (SMI/GSI) manager and
 * the per-port special QP services implemented later in this file.
 */

/* AL object teardown callbacks for the global special QP manager. */
void
destroying_spl_qp_mgr(
	IN				al_obj_t*					p_obj );

void
free_spl_qp_mgr(
	IN				al_obj_t*					p_obj );

/* Port PnP event callbacks — one registration per special QP type. */
ib_api_status_t
spl_qp0_agent_pnp_cb(
	IN				ib_pnp_rec_t*				p_pnp_rec );

ib_api_status_t
spl_qp1_agent_pnp_cb(
	IN				ib_pnp_rec_t*				p_pnp_rec );

/* Common PnP dispatch shared by the QP0 and QP1 callbacks. */
ib_api_status_t
spl_qp_agent_pnp(
	IN				ib_pnp_rec_t*				p_pnp_rec,
	IN				ib_qp_type_t				qp_type );

/* Per-port special QP service lifecycle. */
ib_api_status_t
create_spl_qp_svc(
	IN				ib_pnp_port_rec_t*			p_pnp_rec,
	IN		const	ib_qp_type_t				qp_type );

void
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj );

void
free_spl_qp_svc(
	IN				al_obj_t*					p_obj );

void
spl_qp_svc_lid_change(
	IN				al_obj_t*					p_obj,
	IN				ib_pnp_port_rec_t*			p_pnp_rec );

/* MAD send paths (remote wire send, local processing, loopback). */
ib_api_status_t
remote_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
local_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
loopback_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
process_subn_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

static ib_api_status_t
fwd_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr );

void
send_local_mad_cb(
	IN				cl_async_proc_item_t*		p_item );

/* CQ completion callbacks and common completion processing. */
void
spl_qp_send_comp_cb(
	IN		const	ib_cq_handle_t				h_cq,
	IN				void						*cq_context );

void
spl_qp_recv_comp_cb(
	IN		const	ib_cq_handle_t				h_cq,
	IN				void						*cq_context );

void
spl_qp_comp(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN		const	ib_cq_handle_t				h_cq,
	IN				ib_wc_type_t				wc_type );

/* Received MAD processing and routing decision helpers. */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element );

mad_route_t
route_recv_smp(
	IN				ib_mad_element_t*			p_mad_element );

mad_route_t
route_recv_smp_attr(
	IN				ib_mad_element_t*			p_mad_element );

mad_route_t
route_recv_dm_mad(
	IN				ib_mad_element_t*			p_mad_element );

mad_route_t
route_recv_gmp(
	IN				ib_mad_element_t*			p_mad_element );

mad_route_t
route_recv_gmp_attr(
	IN				ib_mad_element_t*			p_mad_element );

ib_api_status_t
forward_sm_trap(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element );

ib_api_status_t
recv_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request );

/* QP alias MAD service callbacks (local MAD interface responses). */
void
spl_qp_alias_send_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_mad_element );

void
spl_qp_alias_recv_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_mad_response );

static ib_api_status_t
spl_qp_svc_post_recvs(
	IN				spl_qp_svc_t*	const		p_spl_qp_svc );

/* Asynchronous event callbacks and QP error recovery. */
void
spl_qp_svc_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec );

void
spl_qp_alias_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec );

void
spl_qp_svc_reset(
	IN				spl_qp_svc_t*				p_spl_qp_svc );

void
spl_qp_svc_reset_cb(
	IN				cl_async_proc_item_t*		p_item );

/* Look up a MAD dispatcher by port GUID in the given service map. */
ib_api_status_t
acquire_svc_disp(
	IN		const	cl_qmap_t* const			p_svc_map,
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t		*ph_mad_disp );

/* SMI polling timer support. */
void
smi_poll_timer_cb(
	IN				void*						context );

void
smi_post_recvs(
	IN				cl_list_item_t* const		p_list_item,
	IN				void*						context );

#if defined( CL_USE_MUTEX )
/* Deferred send/receive completion processing when built with mutexes. */
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item );

void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item );
#endif
244 \r
/*
 * Create the special QP manager.
 *
 * Allocates and initializes the global SMI/GSI manager singleton
 * (gp_spl_qp_mgr), attaches it to the parent AL object, sets up the
 * SMI polling timer, and registers for port PnP events separately for
 * QP0 and QP1 so each special QP gets its own callback context.
 *
 * Returns IB_SUCCESS on success.  On failure, the partially built
 * object is torn down (directly or via the AL object destroy path,
 * depending on how far initialization got) and the failing status
 * is returned.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	/* Zero-allocate so partially-initialized cleanup is always safe. */
	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the per-port service lookup maps (SMI = QP0, GSI = QP1). */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* init_al_obj failed, so pfn_destroy is not usable yet;
		 * free the object directly. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		/* From here on, cleanup goes through the AL object destroy path. */
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
364 \r
365 \r
366 \r
367 /*\r
368  * Pre-destroy the special QP manager.\r
369  */\r
370 void\r
371 destroying_spl_qp_mgr(\r
372         IN                              al_obj_t*                                       p_obj )\r
373 {\r
374         ib_api_status_t                 status;\r
375 \r
376         CL_ASSERT( p_obj );\r
377         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
378         UNUSED_PARAM( p_obj );\r
379 \r
380         /* Deregister for port PnP events for QP0. */\r
381         if( gp_spl_qp_mgr->h_qp0_pnp )\r
382         {\r
383                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
384                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
385                 CL_ASSERT( status == IB_SUCCESS );\r
386         }\r
387 \r
388         /* Deregister for port PnP events for QP1. */\r
389         if( gp_spl_qp_mgr->h_qp1_pnp )\r
390         {\r
391                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
392                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
393                 CL_ASSERT( status == IB_SUCCESS );\r
394         }\r
395 \r
396         /* Destroy the SMI polling timer. */\r
397         cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
398 }\r
399 \r
400 \r
401 \r
402 /*\r
403  * Free the special QP manager.\r
404  */\r
405 void\r
406 free_spl_qp_mgr(\r
407         IN                              al_obj_t*                                       p_obj )\r
408 {\r
409         CL_ASSERT( p_obj );\r
410         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
411         UNUSED_PARAM( p_obj );\r
412 \r
413         destroy_al_obj( &gp_spl_qp_mgr->obj );\r
414         cl_free( gp_spl_qp_mgr );\r
415         gp_spl_qp_mgr = NULL;\r
416 }\r
417 \r
418 \r
419 \r
420 /*\r
421  * Special QP0 agent PnP event callback.\r
422  */\r
423 ib_api_status_t\r
424 spl_qp0_agent_pnp_cb(\r
425         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
426 {\r
427         ib_api_status_t status;\r
428         AL_ENTER( AL_DBG_SMI );\r
429 \r
430         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
431 \r
432         AL_EXIT( AL_DBG_SMI );\r
433         return status;\r
434 }\r
435 \r
436 \r
437 \r
438 /*\r
439  * Special QP1 agent PnP event callback.\r
440  */\r
441 ib_api_status_t\r
442 spl_qp1_agent_pnp_cb(\r
443         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
444 {\r
445         ib_api_status_t status;\r
446         AL_ENTER( AL_DBG_SMI );\r
447 \r
448         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
449 \r
450         AL_EXIT( AL_DBG_SMI );\r
451         return status;\r
452 }\r
453 \r
454 \r
455 \r
456 /*\r
457  * Special QP agent PnP event callback.\r
458  */\r
459 ib_api_status_t\r
460 spl_qp_agent_pnp(\r
461         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
462         IN                              ib_qp_type_t                            qp_type )\r
463 {\r
464         ib_api_status_t                 status;\r
465         al_obj_t*                               p_obj;\r
466 \r
467         AL_ENTER( AL_DBG_SMI );\r
468 \r
469         CL_ASSERT( p_pnp_rec );\r
470         p_obj = p_pnp_rec->context;\r
471 \r
472         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI,\r
473                 ("p_pnp_rec->pnp_event = 0x%x (%s)\n",\r
474                 p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );\r
475         /* Dispatch based on the PnP event type. */\r
476         switch( p_pnp_rec->pnp_event )\r
477         {\r
478         case IB_PNP_PORT_ADD:\r
479                 CL_ASSERT( !p_obj );\r
480                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
481                 break;\r
482 \r
483         case IB_PNP_PORT_REMOVE:\r
484                 CL_ASSERT( p_obj );\r
485                 ref_al_obj( p_obj );\r
486                 p_obj->pfn_destroy( p_obj, NULL );\r
487                 status = IB_SUCCESS;\r
488                 break;\r
489 \r
490         case IB_PNP_LID_CHANGE:\r
491                 CL_ASSERT( p_obj );\r
492                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
493                 status = IB_SUCCESS;\r
494                 break;\r
495 \r
496         default:\r
497                 /* All other events are ignored. */\r
498                 status = IB_SUCCESS;\r
499                 break;\r
500         }\r
501 \r
502         AL_EXIT( AL_DBG_SMI );\r
503         return status;\r
504 }\r
505 \r
506 \r
507 \r
508 /*\r
509  * Create a special QP service.\r
510  */\r
511 ib_api_status_t\r
512 create_spl_qp_svc(\r
513         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
514         IN              const   ib_qp_type_t                            qp_type )\r
515 {\r
516         cl_status_t                             cl_status;\r
517         spl_qp_svc_t*                   p_spl_qp_svc;\r
518         ib_ca_handle_t                  h_ca;\r
519         ib_cq_create_t                  cq_create;\r
520         ib_qp_create_t                  qp_create;\r
521         ib_qp_attr_t                    qp_attr;\r
522         ib_mad_svc_t                    mad_svc;\r
523         ib_api_status_t                 status;\r
524 \r
525         AL_ENTER( AL_DBG_SMI );\r
526 \r
527         CL_ASSERT( p_pnp_rec );\r
528 \r
529         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
530         {\r
531                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
532                 return IB_INVALID_PARAMETER;\r
533         }\r
534 \r
535         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
536         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
537         CL_ASSERT( p_pnp_rec->p_port_attr );\r
538 \r
539         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
540         if( !p_spl_qp_svc )\r
541         {\r
542                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
543                         ("IB_INSUFFICIENT_MEMORY\n") );\r
544                 return IB_INSUFFICIENT_MEMORY;\r
545         }\r
546 \r
547         /* Tie the special QP service to the port by setting the port number. */\r
548         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
549         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
550         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
551 \r
552         /* Initialize the send and receive queues. */\r
553         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
554         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
555 \r
556 #if defined( CL_USE_MUTEX )\r
557         /* Initialize async callbacks and flags for send/receive processing. */\r
558         p_spl_qp_svc->send_async_queued = FALSE;\r
559         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
560         p_spl_qp_svc->recv_async_queued = FALSE;\r
561         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
562 #endif\r
563 \r
564         /* Initialize the async callback function to process local sends. */\r
565         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
566 \r
567         /* Initialize the async callback function to reset the QP on error. */\r
568         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
569 \r
570         /* Construct the special QP service object. */\r
571         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
572 \r
573         /* Initialize the special QP service object. */\r
574         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
575                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
576         if( status != IB_SUCCESS )\r
577         {\r
578                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
579                 return status;\r
580         }\r
581 \r
582         /* Attach the special QP service to the parent object. */\r
583         status = attach_al_obj(\r
584                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
585         if( status != IB_SUCCESS )\r
586         {\r
587                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
588                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
589                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
590                 return status;\r
591         }\r
592 \r
593         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
594         CL_ASSERT( h_ca );\r
595         if( !h_ca )\r
596         {\r
597                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
598                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
599                 return IB_INVALID_GUID;\r
600         }\r
601 \r
602         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
603 \r
604         /* Determine the maximum queue depth of the QP and CQs. */\r
605         p_spl_qp_svc->max_qp_depth =\r
606                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
607                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
608                 p_pnp_rec->p_ca_attr->max_wrs :\r
609                 p_pnp_rec->p_ca_attr->max_cqes;\r
610 \r
611         /* Compare this maximum to the default special queue depth. */\r
612         if( ( qp_type == IB_QPT_QP0 ) &&\r
613                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
614                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
615         if( ( qp_type == IB_QPT_QP1 ) &&\r
616                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
617                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
618 \r
619         /* Create the send CQ. */\r
620         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
621         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
622         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
623 \r
624         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
625                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
626 \r
627         if( status != IB_SUCCESS )\r
628         {\r
629                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
630                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
631                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
632                 return status;\r
633         }\r
634 \r
635         /* Reference the special QP service on behalf of ib_create_cq. */\r
636         ref_al_obj( &p_spl_qp_svc->obj );\r
637 \r
638         /* Check the result of the creation request. */\r
639         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
640         {\r
641                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
642                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
643                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
644                 return IB_INSUFFICIENT_RESOURCES;\r
645         }\r
646 \r
647         /* Create the receive CQ. */\r
648         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
649         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
650         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
651 \r
652         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
653                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
654 \r
655         if( status != IB_SUCCESS )\r
656         {\r
657                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
658                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
659                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
660                 return status;\r
661         }\r
662 \r
663         /* Reference the special QP service on behalf of ib_create_cq. */\r
664         ref_al_obj( &p_spl_qp_svc->obj );\r
665 \r
666         /* Check the result of the creation request. */\r
667         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
668         {\r
669                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
670                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
671                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
672                 return IB_INSUFFICIENT_RESOURCES;\r
673         }\r
674 \r
675         /* Create the special QP. */\r
676         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
677         qp_create.qp_type = qp_type;\r
678         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
679         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
680         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
681         qp_create.rq_sge = 1;\r
682         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
683         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
684         qp_create.sq_signaled = TRUE;\r
685 \r
686         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
687                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
688                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
689 \r
690         if( status != IB_SUCCESS )\r
691         {\r
692                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
693                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
694                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
695                 return status;\r
696         }\r
697 \r
698         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
699         ref_al_obj( &p_spl_qp_svc->obj );\r
700 \r
701         /* Check the result of the creation request. */\r
702         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
703         if( status != IB_SUCCESS )\r
704         {\r
705                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
706                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
707                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
708                 return status;\r
709         }\r
710 \r
711         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
712                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
713                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
714         {\r
715                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
716                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
717                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
718                 return IB_INSUFFICIENT_RESOURCES;\r
719         }\r
720 \r
721         /* Initialize the QP for use. */\r
722         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
723         if( status != IB_SUCCESS )\r
724         {\r
725                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
726                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
727                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
728                 return status;\r
729         }\r
730 \r
731         /* Post receive buffers. */\r
732         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
733         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
734         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
735         if( status != IB_SUCCESS )\r
736         {\r
737                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
738                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
739                         ("spl_qp_svc_post_recvs failed, %s\n",\r
740                         ib_get_err_str( status ) ) );\r
741                 return status;\r
742         }\r
743 \r
744         /* Create the MAD dispatcher. */\r
745         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
746                 &p_spl_qp_svc->h_mad_disp );\r
747         if( status != IB_SUCCESS )\r
748         {\r
749                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
750                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
751                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
752                 return status;\r
753         }\r
754 \r
755         /*\r
756          * Add this service to the special QP manager lookup lists.\r
757          * The service must be added to allow the creation of a QP alias.\r
758          */\r
759         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
760         if( qp_type == IB_QPT_QP0 )\r
761         {\r
762                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
763                         &p_spl_qp_svc->map_item );\r
764         }\r
765         else\r
766         {\r
767                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
768                         &p_spl_qp_svc->map_item );\r
769         }\r
770         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
771 \r
772         /*\r
773          * If the CA does not support HW agents, create a QP alias and register\r
774          * a MAD service for sending responses from the local MAD interface.\r
775          */\r
776         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
777         {\r
778                 /* Create a QP alias. */\r
779                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
780                 qp_create.qp_type =\r
781                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
782                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
783                 qp_create.sq_sge                = 1;\r
784                 qp_create.sq_signaled   = TRUE;\r
785 \r
786                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
787                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
788                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
789                         &p_spl_qp_svc->h_qp_alias );\r
790 \r
791                 if (status != IB_SUCCESS)\r
792                 {\r
793                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
794                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
795                                 ("ib_get_spl_qp alias failed, %s\n",\r
796                                 ib_get_err_str( status ) ) );\r
797                         return status;\r
798                 }\r
799 \r
800                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
801                 ref_al_obj( &p_spl_qp_svc->obj );\r
802 \r
803                 /* Register a MAD service for sends. */\r
804                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
805                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
806                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
807                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
808 \r
809                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
810                         &p_spl_qp_svc->h_mad_svc );\r
811 \r
812                 if( status != IB_SUCCESS )\r
813                 {\r
814                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
815                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
816                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
817                         return status;\r
818                 }\r
819         }\r
820 \r
821         /* Set the context of the PnP event to this child object. */\r
822         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
823 \r
824         /* The QP is ready.  Change the state. */\r
825         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
826 \r
827         /* Force a completion callback to rearm the CQs. */\r
828         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
829         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
830 \r
831         /* Start the polling thread timer. */\r
832         if( g_smi_poll_interval )\r
833         {\r
834                 cl_status =\r
835                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
836 \r
837                 if( cl_status != CL_SUCCESS )\r
838                 {\r
839                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
840                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
841                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
842                         return ib_convert_cl_status( cl_status );\r
843                 }\r
844         }\r
845 \r
846         /* Release the reference taken in init_al_obj. */\r
847         deref_al_obj( &p_spl_qp_svc->obj );\r
848 \r
849         AL_EXIT( AL_DBG_SMI );\r
850         return IB_SUCCESS;\r
851 }\r
852 \r
853 \r
854 \r
855 /*\r
856  * Return a work completion to the MAD dispatcher for the specified MAD.\r
857  */\r
858 static void\r
859 __complete_send_mad(\r
860         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
861         IN                              al_mad_wr_t* const                      p_mad_wr,\r
862         IN              const   ib_wc_status_t                          wc_status )\r
863 {\r
864         ib_wc_t                 wc;\r
865 \r
866         /* Construct a send work completion. */\r
867         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
868         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
869         wc.wc_type      = IB_WC_SEND;\r
870         wc.status       = wc_status;\r
871 \r
872         /* Set the send size if we were successful with the send. */\r
873         if( wc_status == IB_WCS_SUCCESS )\r
874                 wc.length = MAD_BLOCK_SIZE;\r
875 \r
876         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
877 }\r
878 \r
879 \r
880 \r
881 /*\r
882  * Pre-destroy a special QP service.\r
883  */\r
884 void\r
885 destroying_spl_qp_svc(\r
886         IN                              al_obj_t*                                       p_obj )\r
887 {\r
888         spl_qp_svc_t*                   p_spl_qp_svc;\r
889         cl_list_item_t*                 p_list_item;\r
890         al_mad_wr_t*                    p_mad_wr;\r
891 \r
892         ib_api_status_t                 status;\r
893 \r
894         AL_ENTER( AL_DBG_SMI );\r
895 \r
896         CL_ASSERT( p_obj );\r
897         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
898 \r
899         /* Change the state to prevent processing new send requests. */\r
900         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
901         p_spl_qp_svc->state = SPL_QP_DESTROYING;\r
902         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
903 \r
904         /* Wait here until the special QP service is no longer in use. */\r
905         while( p_spl_qp_svc->in_use_cnt )\r
906         {\r
907                 cl_thread_suspend( 0 );\r
908         }\r
909 \r
910         /* Destroy the special QP. */\r
911         if( p_spl_qp_svc->h_qp )\r
912         {\r
913                 /* If present, remove the special QP service from the tracking map. */\r
914                 cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
915                 if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )\r
916                 {\r
917                         cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );\r
918                 }\r
919                 else\r
920                 {\r
921                         cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );\r
922                 }\r
923                 cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
924 \r
925                 status = ib_destroy_qp( p_spl_qp_svc->h_qp,\r
926                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
927                 CL_ASSERT( status == IB_SUCCESS );\r
928 \r
929                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
930 \r
931                 /* Complete any outstanding MAD sends operations as "flushed". */\r
932                 for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );\r
933                          p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );\r
934                          p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )\r
935                 {\r
936                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
937                         p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
938                         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
939                                 IB_WCS_WR_FLUSHED_ERR );\r
940                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
941                 }\r
942 \r
943                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
944                 /* Receive MAD elements are returned to the pool by the free routine. */\r
945         }\r
946 \r
947         /* Destroy the special QP alias and CQs. */\r
948         if( p_spl_qp_svc->h_qp_alias )\r
949         {\r
950                 status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,\r
951                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
952                 CL_ASSERT( status == IB_SUCCESS );\r
953         }\r
954         if( p_spl_qp_svc->h_send_cq )\r
955         {\r
956                 status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,\r
957                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
958                 CL_ASSERT( status == IB_SUCCESS );\r
959         }\r
960         if( p_spl_qp_svc->h_recv_cq )\r
961         {\r
962                 status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,\r
963                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
964                 CL_ASSERT( status == IB_SUCCESS );\r
965         }\r
966 \r
967         AL_EXIT( AL_DBG_SMI );\r
968 }\r
969 \r
970 \r
971 \r
972 /*\r
973  * Free a special QP service.\r
974  */\r
975 void\r
976 free_spl_qp_svc(\r
977         IN                              al_obj_t*                                       p_obj )\r
978 {\r
979         spl_qp_svc_t*                   p_spl_qp_svc;\r
980         cl_list_item_t*                 p_list_item;\r
981         al_mad_element_t*               p_al_mad;\r
982         ib_api_status_t                 status;\r
983 \r
984         AL_ENTER( AL_DBG_SMI );\r
985 \r
986         CL_ASSERT( p_obj );\r
987         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
988 \r
989         /* Dereference the CA. */\r
990         if( p_spl_qp_svc->obj.p_ci_ca )\r
991                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
992 \r
993         /* Return receive MAD elements to the pool. */\r
994         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
995                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
996                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
997         {\r
998                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
999 \r
1000                 status = ib_put_mad( &p_al_mad->element );\r
1001                 CL_ASSERT( status == IB_SUCCESS );\r
1002         }\r
1003 \r
1004         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
1005 \r
1006         destroy_al_obj( &p_spl_qp_svc->obj );\r
1007         cl_free( p_spl_qp_svc );\r
1008 \r
1009         AL_EXIT( AL_DBG_SMI );\r
1010 }\r
1011 \r
1012 \r
1013 \r
1014 /*\r
1015  * Update the base LID of a special QP service.\r
1016  */\r
1017 void\r
1018 spl_qp_svc_lid_change(\r
1019         IN                              al_obj_t*                                       p_obj,\r
1020         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1021 {\r
1022         spl_qp_svc_t*                   p_spl_qp_svc;\r
1023 \r
1024         AL_ENTER( AL_DBG_SMI );\r
1025 \r
1026         CL_ASSERT( p_obj );\r
1027         CL_ASSERT( p_pnp_rec );\r
1028         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1029 \r
1030         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1031 \r
1032         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1033         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1034 \r
1035         AL_EXIT( AL_DBG_SMI );\r
1036 }\r
1037 \r
1038 \r
1039 \r
1040 /*\r
1041  * Route a send work request.\r
1042  */\r
1043 mad_route_t\r
1044 route_mad_send(\r
1045         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1046         IN                              ib_send_wr_t* const                     p_send_wr )\r
1047 {\r
1048         al_mad_wr_t*                    p_mad_wr;\r
1049         al_mad_send_t*                  p_mad_send;\r
1050         ib_mad_t*                               p_mad;\r
1051         ib_smp_t*                               p_smp;\r
1052         ib_av_handle_t                  h_av;\r
1053         mad_route_t                             route;\r
1054         boolean_t                               local, loopback, discard;\r
1055 \r
1056         AL_ENTER( AL_DBG_SMI );\r
1057 \r
1058         CL_ASSERT( p_spl_qp_svc );\r
1059         CL_ASSERT( p_send_wr );\r
1060 \r
1061         /* Initialize a pointers to the MAD work request and the MAD. */\r
1062         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1063         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1064         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1065         p_smp = (ib_smp_t*)p_mad;\r
1066 \r
1067         /* Check if the CA has a local MAD interface. */\r
1068         local = loopback = discard = FALSE;\r
1069         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
1070         {\r
1071                 /*\r
1072                  * If the MAD is a locally addressed Subnet Management, Performance\r
1073                  * Management, or Connection Management datagram, process the work\r
1074                  * request locally.\r
1075                  */\r
1076                 h_av = p_send_wr->dgrm.ud.h_av;\r
1077                 switch( p_mad->mgmt_class )\r
1078                 {\r
1079                 case IB_MCLASS_SUBN_DIR:\r
1080                         /* Perform special checks on directed route SMPs. */\r
1081                         if( ib_smp_is_response( p_smp ) )\r
1082                         {\r
1083                                 /*\r
1084                                  * This node is the originator of the response.  Discard\r
1085                                  * if the hop count or pointer is zero, an intermediate hop,\r
1086                                  * out of bounds hop, or if the first port of the directed\r
1087                                  * route retrun path is not this port.\r
1088                                  */\r
1089                                 if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )\r
1090                                 {\r
1091                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1092                                                 ("hop cnt or hop ptr set to 0...discarding\n") );\r
1093                                         discard = TRUE;\r
1094                                 }\r
1095                                 else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )\r
1096                                 {\r
1097                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1098                                                 ("hop cnt != (hop ptr - 1)...discarding\n") );\r
1099                                         discard = TRUE;\r
1100                                 }\r
1101                                 else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )\r
1102                                 {\r
1103                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1104                                                 ("hop cnt > max hops...discarding\n") );\r
1105                                         discard = TRUE;\r
1106                                 }\r
1107                                 else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&\r
1108                                                  ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=\r
1109                                                         p_spl_qp_svc->port_num ) )\r
1110                                 {\r
1111                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1112                                                 ("return path[hop ptr - 1] != port num...discarding\n") );\r
1113                                         discard = TRUE;\r
1114                                 }\r
1115                         }\r
1116                         else\r
1117                         {\r
1118                                 /* The SMP is a request. */\r
1119                                 if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
1120                                         ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
1121                                 {\r
1122                                         discard = TRUE;\r
1123                                 }\r
1124                                 else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )\r
1125                                 {\r
1126                                         /* Self Addressed: Sent locally, routed locally. */\r
1127                                         local = TRUE;\r
1128                                         discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||\r
1129                                                           ( p_smp->dr_dlid != IB_LID_PERMISSIVE );\r
1130                                 }\r
1131                                 else if( ( p_smp->hop_count != 0 ) &&\r
1132                                                  ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )\r
1133                                 {\r
1134                                         /* End of Path: Sent remotely, routed locally. */\r
1135                                         local = TRUE;\r
1136                                 }\r
1137                                 else if( ( p_smp->hop_count != 0 ) &&\r
1138                                                  ( p_smp->hop_ptr       == 0 ) )\r
1139                                 {\r
1140                                         /* Beginning of Path: Sent locally, routed remotely. */\r
1141                                         if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1142                                         {\r
1143                                                 discard =\r
1144                                                         ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=\r
1145                                                           p_spl_qp_svc->port_num );\r
1146                                         }\r
1147                                 }\r
1148                                 else\r
1149                                 {\r
1150                                         /* Intermediate hop. */\r
1151                                         discard = TRUE;\r
1152                                 }\r
1153                         }\r
1154                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1155                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1156                         break;\r
1157 \r
1158                 case IB_MCLASS_SUBN_LID:\r
1159                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1160                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1161 \r
1162                         /* Fall through to check for a local MAD. */\r
1163 \r
1164                 case IB_MCLASS_PERF:\r
1165                 case IB_MCLASS_BM:\r
1166                         local = ( h_av &&\r
1167                                 ( h_av->av_attr.dlid ==\r
1168                                 ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
1169                         break;\r
1170 \r
1171                 default:\r
1172                         /* Route vendor specific MADs to the HCA provider. */\r
1173                         if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )\r
1174                         {\r
1175                                 local = ( h_av &&\r
1176                                         ( h_av->av_attr.dlid ==\r
1177                                         ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
1178                         }\r
1179                         break;\r
1180                 }\r
1181         }\r
1182 \r
1183         route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?\r
1184                 ROUTE_LOCAL : ROUTE_REMOTE;\r
1185         if( local ) route = ROUTE_LOCAL;\r
1186         if( loopback && local ) route = ROUTE_LOOPBACK;\r
1187         if( discard ) route = ROUTE_DISCARD;\r
1188 \r
1189         AL_EXIT( AL_DBG_SMI );\r
1190         return route;\r
1191 }\r
1192 \r
1193 \r
1194 \r
1195 /*\r
1196  * Send a work request on the special QP.\r
1197  */\r
1198 ib_api_status_t\r
1199 spl_qp_svc_send(\r
1200         IN              const   ib_qp_handle_t                          h_qp,\r
1201         IN                              ib_send_wr_t* const                     p_send_wr )\r
1202 {\r
1203         spl_qp_svc_t*                   p_spl_qp_svc;\r
1204         al_mad_wr_t*                    p_mad_wr;\r
1205         mad_route_t                             route;\r
1206         ib_api_status_t                 status;\r
1207 \r
1208         AL_ENTER( AL_DBG_SMI );\r
1209 \r
1210         CL_ASSERT( h_qp );\r
1211         CL_ASSERT( p_send_wr );\r
1212 \r
1213         /* Get the special QP service. */\r
1214         p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;\r
1215         CL_ASSERT( p_spl_qp_svc );\r
1216         CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );\r
1217 \r
1218         /* Determine how to route the MAD. */\r
1219         route = route_mad_send( p_spl_qp_svc, p_send_wr );\r
1220 \r
1221         /*\r
1222          * Check the QP state and guard against error handling.  Also,\r
1223          * to maintain proper order of work completions, delay processing\r
1224          * a local MAD until any remote MAD work requests have completed,\r
1225          * and delay processing a remote MAD until local MAD work requests\r
1226          * have completed.\r
1227          */\r
1228         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1229         if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||\r
1230                 (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||\r
1231                 ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=\r
1232                         p_spl_qp_svc->max_qp_depth ) )\r
1233         {\r
1234                 /*\r
1235                  * Return busy status.\r
1236                  * The special QP will resume sends at this point.\r
1237                  */\r
1238                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1239 \r
1240                 AL_EXIT( AL_DBG_SMI );\r
1241                 return IB_RESOURCE_BUSY;\r
1242         }\r
1243 \r
1244         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1245 \r
1246         if( is_local( route ) )\r
1247         {\r
1248                 /* Save the local MAD work request for processing. */\r
1249                 p_spl_qp_svc->local_mad_wr = p_mad_wr;\r
1250 \r
1251                 /* Flag the service as in use by the asynchronous processing thread. */\r
1252                 cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
1253 \r
1254                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1255 \r
1256                 status = local_mad_send( p_spl_qp_svc, p_mad_wr );\r
1257         }\r
1258         else\r
1259         {\r
1260                 /* Process a remote MAD send work request. */\r
1261                 status = remote_mad_send( p_spl_qp_svc, p_mad_wr );\r
1262 \r
1263                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1264         }\r
1265 \r
1266         AL_EXIT( AL_DBG_SMI );\r
1267         return status;\r
1268 }\r
1269 \r
1270 \r
1271 \r
1272 /*\r
1273  * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.\r
1274  */\r
1275 ib_api_status_t\r
1276 remote_mad_send(\r
1277         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1278         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1279 {\r
1280         ib_smp_t*                               p_smp;\r
1281         ib_api_status_t                 status;\r
1282 \r
1283         AL_ENTER( AL_DBG_SMI );\r
1284 \r
1285         CL_ASSERT( p_spl_qp_svc );\r
1286         CL_ASSERT( p_mad_wr );\r
1287 \r
1288         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1289         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1290 \r
1291         /* Perform outbound MAD processing. */\r
1292 \r
1293         /* Adjust directed route SMPs as required by IBA. */\r
1294         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1295         {\r
1296                 if( ib_smp_is_response( p_smp ) )\r
1297                 {\r
1298                         if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1299                                 p_smp->hop_ptr--;\r
1300                 }\r
1301                 else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1302                 {\r
1303                         /*\r
1304                          * Only update the pointer if the hw_agent is not implemented.\r
1305                          * Fujitsu implements SMI in hardware, so the following has to\r
1306                          * be passed down to the hardware SMI.\r
1307                          */\r
1308                         ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1309                         if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )\r
1310                                 p_smp->hop_ptr++;\r
1311                         ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1312                 }\r
1313         }\r
1314 \r
1315         /* Always generate send completions. */\r
1316         p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;\r
1317 \r
1318         /* Queue the MAD work request on the service tracking queue. */\r
1319         cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1320 \r
1321         status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );\r
1322 \r
1323         if( status != IB_SUCCESS )\r
1324         {\r
1325                 cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1326 \r
1327                 /* Reset directed route SMPs as required by IBA. */\r
1328                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1329                 {\r
1330                         if( ib_smp_is_response( p_smp ) )\r
1331                         {\r
1332                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1333                                         p_smp->hop_ptr++;\r
1334                         }\r
1335                         else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1336                         {\r
1337                                 /* Only update if the hw_agent is not implemented. */\r
1338                                 ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1339                                 if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )\r
1340                                         p_smp->hop_ptr--;\r
1341                                 ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1342                         }\r
1343                 }\r
1344         }\r
1345 \r
1346         AL_EXIT( AL_DBG_SMI );\r
1347         return status;\r
1348 }\r
1349 \r
1350 \r
1351 /*\r
1352  * Handle a MAD destined for the local CA, using cached data\r
1353  * as much as possible.\r
1354  */\r
1355 static ib_api_status_t\r
1356 local_mad_send(\r
1357         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1358         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1359 {\r
1360         mad_route_t                             route;\r
1361         ib_api_status_t                 status = IB_SUCCESS;\r
1362 \r
1363         AL_ENTER( AL_DBG_SMI );\r
1364 \r
1365         CL_ASSERT( p_spl_qp_svc );\r
1366         CL_ASSERT( p_mad_wr );\r
1367 \r
1368         /* Determine how to route the MAD. */\r
1369         route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );\r
1370 \r
1371         /* Check if this MAD should be discarded. */\r
1372         if( is_discard( route ) )\r
1373         {\r
1374                 /* Deliver a "work completion" to the dispatcher. */\r
1375                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1376                         IB_WCS_LOCAL_OP_ERR );\r
1377                 status = IB_INVALID_SETTING;\r
1378         }\r
1379         else if( is_loopback( route ) )\r
1380         {\r
1381                 /* Loopback local SM to SM "heartbeat" messages. */\r
1382                 status = loopback_mad( p_spl_qp_svc, p_mad_wr );\r
1383         }\r
1384         else\r
1385         {\r
1386                 switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )\r
1387                 {\r
1388                 case IB_MCLASS_SUBN_DIR:\r
1389                 case IB_MCLASS_SUBN_LID:\r
1390                         //DO not use the cache in order to force Mkey  check\r
1391                         //status = process_subn_mad( p_spl_qp_svc, p_mad_wr );\r
1392                         status = IB_NOT_DONE;\r
1393                         break;\r
1394 \r
1395                 default:\r
1396                         status = IB_NOT_DONE;\r
1397                 }\r
1398         }\r
1399 \r
1400         if( status == IB_NOT_DONE )\r
1401         {\r
1402                 /* Queue an asynchronous processing item to process the local MAD. */\r
1403                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );\r
1404         }\r
1405         else\r
1406         {\r
1407                 /*\r
1408                  * Clear the local MAD pointer to allow processing of other MADs.\r
1409                  * This is done after polling for attribute changes to ensure that\r
1410                  * subsequent MADs pick up any changes performed by this one.\r
1411                  */\r
1412                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1413                 p_spl_qp_svc->local_mad_wr = NULL;\r
1414                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1415 \r
1416                 /* No longer in use by the asynchronous processing thread. */\r
1417                 cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1418 \r
1419                 /* Special QP operations will resume by unwinding. */\r
1420         }\r
1421 \r
1422         AL_EXIT( AL_DBG_SMI );\r
1423         return IB_SUCCESS;\r
1424 }\r
1425 \r
1426 \r
1427 static ib_api_status_t\r
1428 get_resp_mad(\r
1429         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1430         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1431                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1432 {\r
1433         ib_api_status_t                 status;\r
1434 \r
1435         AL_ENTER( AL_DBG_SMI );\r
1436 \r
1437         CL_ASSERT( p_spl_qp_svc );\r
1438         CL_ASSERT( p_mad_wr );\r
1439         CL_ASSERT( pp_mad_resp );\r
1440 \r
1441         /* Get a MAD element from the pool for the response. */\r
1442         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1443                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1444         if( status != IB_SUCCESS )\r
1445         {\r
1446                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1447                         IB_WCS_LOCAL_OP_ERR );\r
1448         }\r
1449 \r
1450         AL_EXIT( AL_DBG_SMI );\r
1451         return status;\r
1452 }\r
1453 \r
1454 \r
1455 static ib_api_status_t\r
1456 complete_local_mad(\r
1457         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1458         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1459         IN                              ib_mad_element_t* const         p_mad_resp )\r
1460 {\r
1461         ib_api_status_t                 status;\r
1462 \r
1463         AL_ENTER( AL_DBG_SMI );\r
1464 \r
1465         CL_ASSERT( p_spl_qp_svc );\r
1466         CL_ASSERT( p_mad_wr );\r
1467         CL_ASSERT( p_mad_resp );\r
1468 \r
1469         /* Construct the receive MAD element. */\r
1470         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1471         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1472         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1473         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1474         {\r
1475                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1476                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1477         }\r
1478 \r
1479         /*\r
1480          * Hand the receive MAD element to the dispatcher before completing\r
1481          * the send.  This guarantees that the send request cannot time out.\r
1482          */\r
1483         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1484 \r
1485         /* Forward the send work completion to the dispatcher. */\r
1486         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1487 \r
1488         AL_EXIT( AL_DBG_SMI );\r
1489         return status;\r
1490 }\r
1491 \r
1492 \r
1493 static ib_api_status_t\r
1494 loopback_mad(\r
1495         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1496         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1497 {\r
1498         ib_mad_t                                *p_mad;\r
1499         ib_mad_element_t                *p_mad_resp;\r
1500         ib_api_status_t                 status;\r
1501 \r
1502         AL_ENTER( AL_DBG_SMI );\r
1503 \r
1504         CL_ASSERT( p_spl_qp_svc );\r
1505         CL_ASSERT( p_mad_wr );\r
1506 \r
1507         /* Get a MAD element from the pool for the response. */\r
1508         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1509         if( status == IB_SUCCESS )\r
1510         {\r
1511                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1512                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1513 \r
1514                 /* Simulate a send/receive between local managers. */\r
1515                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1516 \r
1517                 /* Construct the receive MAD element. */\r
1518                 p_mad_resp->status              = IB_WCS_SUCCESS;\r
1519                 p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1520                 p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1521                 if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1522                 {\r
1523                         p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1524                         p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1525                 }\r
1526 \r
1527                 /*\r
1528                  * Hand the receive MAD element to the dispatcher before completing\r
1529                  * the send.  This guarantees that the send request cannot time out.\r
1530                  */\r
1531                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1532 \r
1533                 /* Forward the send work completion to the dispatcher. */\r
1534                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1535 \r
1536         }\r
1537 \r
1538         AL_EXIT( AL_DBG_SMI );\r
1539         return status;\r
1540 }\r
1541 \r
1542 \r
/*
 * Build a NodeInfo (SubnGet) response locally from the cached CA and
 * port attributes, then complete the request as a simulated
 * send/receive.  Only the GET method is supported.
 */
static ib_api_status_t
process_node_info(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD of the work request. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeInfo is a GET-only attribute; fail any other method. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Start the response as a copy of the request, then flip it into
		 * a GetResp with the appropriate status. */
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		/* TODO: Get some unique identifier for the system */
		p_node_info->sys_guid = p_ca_attr->ca_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		/* Low 24 bits hold the vendor ID; the port number is OR'd into
		 * the low byte of the host-order value before byte swapping. */
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		/* Deliver the response as a receive and complete the send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1616 \r
1617 \r
/*
 * Build a NodeDescription (SubnGet) response locally using the machine
 * name, then complete the request as a simulated send/receive.  Only
 * the GET method is supported.
 */
static ib_api_status_t
process_node_desc(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD of the work request. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeDescription is a GET-only attribute; fail other methods. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Start the response as a copy of the request, then flip it into
		 * a GetResp with the appropriate status. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method =
			(IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, 
			node_desc, sizeof(node_desc) );

		/* Deliver the response as a receive and complete the send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1664 \r
1665 \r
1666 /*\r
1667  * Process subnet administration MADs using cached data if possible.\r
1668  */\r
1669 static ib_api_status_t\r
1670 process_subn_mad(\r
1671         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1672         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1673 {\r
1674         ib_api_status_t         status;\r
1675         ib_smp_t                        *p_smp;\r
1676 \r
1677         AL_ENTER( AL_DBG_SMI );\r
1678 \r
1679         CL_ASSERT( p_spl_qp_svc );\r
1680         CL_ASSERT( p_mad_wr );\r
1681 \r
1682         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1683 \r
1684         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1685                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
1686 \r
1687         switch( p_smp->attr_id )\r
1688         {\r
1689         case IB_MAD_ATTR_NODE_INFO:\r
1690                 status = process_node_info( p_spl_qp_svc, p_mad_wr );\r
1691                 break;\r
1692 \r
1693         case IB_MAD_ATTR_NODE_DESC:\r
1694                 status = process_node_desc( p_spl_qp_svc, p_mad_wr );\r
1695                 break;\r
1696 \r
1697         default:\r
1698                 status = IB_NOT_DONE;\r
1699                 break;\r
1700         }\r
1701 \r
1702         AL_EXIT( AL_DBG_SMI );\r
1703         return status;\r
1704 }\r
1705 \r
1706 \r
1707 /*\r
1708  * Process a local MAD send work request.\r
1709  */\r
1710 ib_api_status_t\r
1711 fwd_local_mad(\r
1712         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1713         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1714 {\r
1715         ib_mad_t*                               p_mad;\r
1716         ib_smp_t*                               p_smp;\r
1717         al_mad_send_t*                  p_mad_send;\r
1718         ib_mad_element_t*               p_mad_response = NULL;\r
1719         ib_mad_t*                               p_mad_response_buf;\r
1720         ib_api_status_t                 status = IB_SUCCESS;\r
1721         boolean_t                               smp_is_set;\r
1722 \r
1723         AL_ENTER( AL_DBG_SMI );\r
1724 \r
1725         CL_ASSERT( p_spl_qp_svc );\r
1726         CL_ASSERT( p_mad_wr );\r
1727 \r
1728         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1729         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1730         p_smp = (ib_smp_t*)p_mad;\r
1731 \r
1732         smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);\r
1733 \r
1734         /* Get a MAD element from the pool for the response. */\r
1735         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1736         if( p_mad_send->p_send_mad->resp_expected )\r
1737         {\r
1738                 status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );\r
1739                 if( status != IB_SUCCESS )\r
1740                 {\r
1741                         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1742                                 IB_WCS_LOCAL_OP_ERR );\r
1743                         AL_EXIT( AL_DBG_SMI );\r
1744                         return status;\r
1745                 }\r
1746                 p_mad_response_buf = p_mad_response->p_mad_buf;\r
1747         }\r
1748         else\r
1749         {\r
1750                         p_mad_response_buf = NULL;\r
1751         }\r
1752 \r
1753         /* Adjust directed route SMPs as required by IBA. */\r
1754         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1755         {\r
1756                 CL_ASSERT( !ib_smp_is_response( p_smp ) );\r
1757 \r
1758                 /*\r
1759                  * If this was a self addressed, directed route SMP, increment\r
1760                  * the hop pointer in the request before delivery as required\r
1761                  * by IBA.  Otherwise, adjustment for remote requests occurs\r
1762                  * during inbound processing.\r
1763                  */\r
1764                 if( p_smp->hop_count == 0 )\r
1765                         p_smp->hop_ptr++;\r
1766         }\r
1767 \r
1768         /* Forward the locally addressed MAD to the CA interface. */\r
1769         status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
1770                 p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf );\r
1771 \r
1772         /* Reset directed route SMPs as required by IBA. */\r
1773         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1774         {\r
1775                 /*\r
1776                  * If this was a self addressed, directed route SMP, decrement\r
1777                  * the hop pointer in the response before delivery as required\r
1778                  * by IBA.  Otherwise, adjustment for remote responses occurs\r
1779                  * during outbound processing.\r
1780                  */\r
1781                 if( p_smp->hop_count == 0 )\r
1782                 {\r
1783                         /* Adjust the request SMP. */\r
1784                         p_smp->hop_ptr--;\r
1785 \r
1786                         /* Adjust the response SMP. */\r
1787                         if( p_mad_response_buf )\r
1788                         {\r
1789                                 p_smp = (ib_smp_t*)p_mad_response_buf;\r
1790                                 p_smp->hop_ptr--;\r
1791                         }\r
1792                 }\r
1793         }\r
1794 \r
1795         if( status != IB_SUCCESS )\r
1796         {\r
1797                 if( p_mad_response )\r
1798                         ib_put_mad( p_mad_response );\r
1799 \r
1800                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1801                         IB_WCS_LOCAL_OP_ERR );\r
1802                 AL_EXIT( AL_DBG_SMI );\r
1803                 return status;\r
1804         }\r
1805 \r
1806         /* Check the completion status of this simulated send. */\r
1807         if( p_mad_send->p_send_mad->resp_expected )\r
1808         {\r
1809                 /*\r
1810                  * The SMI is uses PnP polling to refresh the base_lid and lmc.\r
1811                  * Polling takes time, so we update the values here to prevent\r
1812                  * the failure of LID routed MADs sent immediately following this\r
1813                  * assignment.  Check the response to see if the port info was set.\r
1814                  */\r
1815                 if( smp_is_set )\r
1816                 {\r
1817                         ib_port_info_t*         p_port_info = NULL;\r
1818 \r
1819                         switch( p_mad_response_buf->mgmt_class )\r
1820                         {\r
1821                         case IB_MCLASS_SUBN_DIR:\r
1822                                 if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1823                                         ( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )\r
1824                                 {\r
1825                                         p_port_info =\r
1826                                                 (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1827                                 }\r
1828                                 break;\r
1829 \r
1830                         case IB_MCLASS_SUBN_LID:\r
1831                                 if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1832                                         ( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )\r
1833                                 {\r
1834                                         p_port_info =\r
1835                                                 (ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf);\r
1836                                 }\r
1837                                 break;\r
1838 \r
1839                         default:\r
1840                                 break;\r
1841                         }\r
1842 \r
1843                         if( p_port_info )\r
1844                         { \r
1845                                 p_spl_qp_svc->base_lid = p_port_info->base_lid;\r
1846                                 p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );\r
1847                                 p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid;\r
1848                                 p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info );\r
1849 \r
1850                                 if (p_port_info->subnet_timeout & 0x80)\r
1851                                 {\r
1852                                         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
1853                                                 ("Client reregister event, setting sm_lid to 0.\n"));\r
1854                                         ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1855                                         p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->\r
1856                                                 p_port_attr->sm_lid= 0;\r
1857                                         ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1858                                 }\r
1859                         }\r
1860                 }\r
1861                 \r
1862 \r
1863                 /* Construct the receive MAD element. */\r
1864                 p_mad_response->status          = IB_WCS_SUCCESS;\r
1865                 p_mad_response->remote_qp       = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1866                 p_mad_response->remote_lid      = p_spl_qp_svc->base_lid;\r
1867                 if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1868                 {\r
1869                         p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1870                         p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1871                 }\r
1872 \r
1873                 /*\r
1874                  * Hand the receive MAD element to the dispatcher before completing\r
1875                  * the send.  This guarantees that the send request cannot time out.\r
1876                  */\r
1877                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response );\r
1878         }\r
1879         \r
1880         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,IB_WCS_SUCCESS);\r
1881 \r
1882         \r
1883         \r
1884         /* If the SMP was a Get, no need to trigger a PnP poll. */\r
1885         if( status == IB_SUCCESS && !smp_is_set )\r
1886                 status = IB_NOT_DONE;\r
1887 \r
1888         AL_EXIT( AL_DBG_SMI );\r
1889         return status;\r
1890 }\r
1891 \r
1892 \r
1893 \r
1894 /*\r
1895  * Asynchronous processing thread callback to send a local MAD.\r
1896  */\r
1897 void\r
1898 send_local_mad_cb(\r
1899         IN                              cl_async_proc_item_t*           p_item )\r
1900 {\r
1901         spl_qp_svc_t*                   p_spl_qp_svc;\r
1902         ib_api_status_t                 status;\r
1903 \r
1904         AL_ENTER( AL_DBG_SMI );\r
1905 \r
1906         CL_ASSERT( p_item );\r
1907         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
1908 \r
1909         /* Process a local MAD send work request. */\r
1910         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
1911         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
1912 \r
1913         /*\r
1914          * If we successfully processed a local MAD, which could have changed\r
1915          * something (e.g. the LID) on the HCA.  Scan for changes.\r
1916          */\r
1917         if( status == IB_SUCCESS )\r
1918                 pnp_poll();\r
1919 \r
1920         /*\r
1921          * Clear the local MAD pointer to allow processing of other MADs.\r
1922          * This is done after polling for attribute changes to ensure that\r
1923          * subsequent MADs pick up any changes performed by this one.\r
1924          */\r
1925         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1926         p_spl_qp_svc->local_mad_wr = NULL;\r
1927         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1928 \r
1929         /* Continue processing any queued MADs on the QP. */\r
1930         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1931 \r
1932         /* No longer in use by the asynchronous processing thread. */\r
1933         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1934 \r
1935         AL_EXIT( AL_DBG_SMI );\r
1936 }\r
1937 \r
1938 \r
1939 \r
1940 /*\r
1941  * Special QP send completion callback.\r
1942  */\r
1943 void\r
1944 spl_qp_send_comp_cb(\r
1945         IN              const   ib_cq_handle_t                          h_cq,\r
1946         IN                              void*                                           cq_context )\r
1947 {\r
1948         spl_qp_svc_t*                   p_spl_qp_svc;\r
1949 \r
1950         AL_ENTER( AL_DBG_SMI );\r
1951 \r
1952         CL_ASSERT( cq_context );\r
1953         p_spl_qp_svc = cq_context;\r
1954 \r
1955 #if defined( CL_USE_MUTEX )\r
1956 \r
1957         /* Queue an asynchronous processing item to process sends. */\r
1958         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1959         if( !p_spl_qp_svc->send_async_queued )\r
1960         {\r
1961                 p_spl_qp_svc->send_async_queued = TRUE;\r
1962                 ref_al_obj( &p_spl_qp_svc->obj );\r
1963                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
1964         }\r
1965         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1966 \r
1967 #else\r
1968 \r
1969         /* Invoke the callback directly. */\r
1970         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
1971         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
1972 \r
1973         /* Continue processing any queued MADs on the QP. */\r
1974         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1975 \r
1976 #endif\r
1977 \r
1978         AL_EXIT( AL_DBG_SMI );\r
1979 }\r
1980 \r
1981 \r
1982 \r
#if defined( CL_USE_MUTEX )
/* Asynchronous work item: drain send completions for the special QP. */
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;
	ib_api_status_t			resume_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Clear the queued flag so a new work item may be scheduled. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	spl_qp_comp( p_svc, p_svc->h_send_cq, IB_WC_SEND );

	/* Restart any MADs queued on the QP. */
	resume_status = special_qp_resume_sends( p_svc->h_qp );
	CL_ASSERT( resume_status == IB_SUCCESS );

	/* Drop the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2012 \r
2013 \r
2014 \r
2015 /*\r
2016  * Special QP receive completion callback.\r
2017  */\r
2018 void\r
2019 spl_qp_recv_comp_cb(\r
2020         IN              const   ib_cq_handle_t                          h_cq,\r
2021         IN                              void*                                           cq_context )\r
2022 {\r
2023         spl_qp_svc_t*                   p_spl_qp_svc;\r
2024 \r
2025         AL_ENTER( AL_DBG_SMI );\r
2026 \r
2027         CL_ASSERT( cq_context );\r
2028         p_spl_qp_svc = cq_context;\r
2029 \r
2030 #if defined( CL_USE_MUTEX )\r
2031 \r
2032         /* Queue an asynchronous processing item to process receives. */\r
2033         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2034         if( !p_spl_qp_svc->recv_async_queued )\r
2035         {\r
2036                 p_spl_qp_svc->recv_async_queued = TRUE;\r
2037                 ref_al_obj( &p_spl_qp_svc->obj );\r
2038                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
2039         }\r
2040         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2041 \r
2042 #else\r
2043 \r
2044         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
2045         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
2046 \r
2047 #endif\r
2048 \r
2049         AL_EXIT( AL_DBG_SMI );\r
2050 }\r
2051 \r
2052 \r
2053 \r
#if defined( CL_USE_MUTEX )
/* Asynchronous work item: drain receive completions for the special QP. */
void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Clear the queued flag so a new work item may be scheduled. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	spl_qp_comp( p_svc, p_svc->h_recv_cq, IB_WC_RECV );

	/* Drop the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2078 \r
2079 \r
2080 \r
2081 /*\r
2082  * Special QP completion handler.\r
2083  */\r
2084 void\r
2085 spl_qp_comp(\r
2086         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2087         IN              const   ib_cq_handle_t                          h_cq,\r
2088         IN                              ib_wc_type_t                            wc_type )\r
2089 {\r
2090         ib_wc_t                                 wc;\r
2091         ib_wc_t*                                p_free_wc = &wc;\r
2092         ib_wc_t*                                p_done_wc;\r
2093         al_mad_wr_t*                    p_mad_wr;\r
2094         al_mad_element_t*               p_al_mad;\r
2095         ib_mad_element_t*               p_mad_element;\r
2096         ib_smp_t*                               p_smp;\r
2097         ib_api_status_t                 status;\r
2098 \r
2099         AL_ENTER( AL_DBG_SMI_CB );\r
2100 \r
2101         CL_ASSERT( p_spl_qp_svc );\r
2102         CL_ASSERT( h_cq );\r
2103 \r
2104         /* Check the QP state and guard against error handling. */\r
2105         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2106         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2107         {\r
2108                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2109                 return;\r
2110         }\r
2111         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2112         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2113 \r
2114         wc.p_next = NULL;\r
2115         /* Process work completions. */\r
2116         while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )\r
2117         {\r
2118                 /* Process completions one at a time. */\r
2119                 CL_ASSERT( p_done_wc );\r
2120 \r
2121                 /* Flushed completions are handled elsewhere. */\r
2122                 if( wc.status == IB_WCS_WR_FLUSHED_ERR )\r
2123                 {\r
2124                         p_free_wc = &wc;\r
2125                         continue;\r
2126                 }\r
2127 \r
2128                 /*\r
2129                  * Process the work completion.  Per IBA specification, the\r
2130                  * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.\r
2131                  * Use the wc_type parameter.\r
2132                  */\r
2133                 switch( wc_type )\r
2134                 {\r
2135                 case IB_WC_SEND:\r
2136                         /* Get a pointer to the MAD work request. */\r
2137                         p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);\r
2138 \r
2139                         /* Remove the MAD work request from the service tracking queue. */\r
2140                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2141                         cl_qlist_remove_item( &p_spl_qp_svc->send_queue,\r
2142                                 &p_mad_wr->list_item );\r
2143                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2144 \r
2145                         /* Reset directed route SMPs as required by IBA. */\r
2146                         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
2147                         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2148                         {\r
2149                                 if( ib_smp_is_response( p_smp ) )\r
2150                                         p_smp->hop_ptr++;\r
2151                                 else\r
2152                                         p_smp->hop_ptr--;\r
2153                         }\r
2154 \r
2155                         /* Report the send completion to the dispatcher. */\r
2156                         mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );\r
2157                         break;\r
2158 \r
2159                 case IB_WC_RECV:\r
2160 \r
2161                         /* Initialize pointers to the MAD element. */\r
2162                         p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);\r
2163                         p_mad_element = &p_al_mad->element;\r
2164 \r
2165                         /* Remove the AL MAD element from the service tracking list. */\r
2166                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2167 \r
2168                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2169                                 &p_al_mad->list_item );\r
2170 \r
2171                         /* Replenish the receive buffer. */\r
2172                         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2173                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2174 \r
2175                         /* Construct the MAD element from the receive work completion. */\r
2176                         build_mad_recv( p_mad_element, &wc );\r
2177 \r
2178                         /* Process the received MAD. */\r
2179                         status = process_mad_recv( p_spl_qp_svc, p_mad_element );\r
2180 \r
2181                         /* Discard this MAD on error. */\r
2182                         if( status != IB_SUCCESS )\r
2183                         {\r
2184                                 status = ib_put_mad( p_mad_element );\r
2185                                 CL_ASSERT( status == IB_SUCCESS );\r
2186                         }\r
2187                         break;\r
2188 \r
2189                 default:\r
2190                         CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );\r
2191                         break;\r
2192                 }\r
2193 \r
2194                 if( wc.status != IB_WCS_SUCCESS )\r
2195                 {\r
2196                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2197                                 ("special QP completion error: %s! internal syndrome 0x%I64x\n",\r
2198                                 ib_get_wc_status_str( wc.status ), wc.vendor_specific) );\r
2199 \r
2200                         /* Reset the special QP service and return. */\r
2201                         spl_qp_svc_reset( p_spl_qp_svc );\r
2202                 }\r
2203                 p_free_wc = &wc;\r
2204         }\r
2205 \r
2206         /* Rearm the CQ. */\r
2207         status = ib_rearm_cq( h_cq, FALSE );\r
2208         CL_ASSERT( status == IB_SUCCESS );\r
2209 \r
2210         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2211         AL_EXIT( AL_DBG_SMI_CB );\r
2212 }\r
2213 \r
2214 \r
2215 \r
/*
 * Process a received MAD.
 *
 * Determines how a MAD arriving on the special QP should be routed:
 * discarded, delivered to the MAD dispatcher, forwarded to the remote
 * SM, or handed to the local CA for processing.  Returns the status of
 * the routing operation; on error the caller discards the MAD element.
 */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_smp_t*				p_smp;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			/* Discard SMPs with an out-of-range hop count or hop pointer. */
			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * it if the source LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * it if the destination LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;	/* Set path per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
					route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through to the next case for method-based routing */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			/* Process the received GMP.  Only Gets and Sets are local. */
			switch( p_mad_element->p_mad_buf->method )
			{
			case IB_MAD_METHOD_GET:
			case IB_MAD_METHOD_SET:
				route = ROUTE_LOCAL;
				break;
			default:
				break;
			}
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		case IB_MCLASS_SUBN_ADM:
		case IB_MCLASS_DEV_MGMT:
		case IB_MCLASS_COMM_MGMT:
		case IB_MCLASS_SNMP:
			/* These classes are always delivered to the dispatcher. */
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2362 \r
2363 \r
2364 \r
2365 /*\r
2366  * Route a received SMP.\r
2367  */\r
2368 mad_route_t\r
2369 route_recv_smp(\r
2370         IN                              ib_mad_element_t*                       p_mad_element )\r
2371 {\r
2372         mad_route_t                             route;\r
2373 \r
2374         AL_ENTER( AL_DBG_SMI );\r
2375 \r
2376         CL_ASSERT( p_mad_element );\r
2377 \r
2378         /* Process the received SMP. */\r
2379         switch( p_mad_element->p_mad_buf->method )\r
2380         {\r
2381         case IB_MAD_METHOD_GET:\r
2382         case IB_MAD_METHOD_SET:\r
2383                 route = route_recv_smp_attr( p_mad_element );\r
2384                 break;\r
2385 \r
2386         case IB_MAD_METHOD_TRAP:\r
2387                 /*\r
2388                  * Special check to route locally generated traps to the remote SM.\r
2389                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2390                  * IB_RECV_OPT_FORWARD flag.\r
2391                  *\r
2392                  * Note that because forwarded traps use AL MAD services, the upper\r
2393                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2394                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2395                  * TID.\r
2396                  */\r
2397                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2398                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2399                 break;\r
2400 \r
2401         case IB_MAD_METHOD_TRAP_REPRESS:\r
2402                 /*\r
2403                  * Note that because forwarded traps use AL MAD services, the upper\r
2404                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2405                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2406                  * TID.\r
2407                  */\r
2408                 route = ROUTE_LOCAL;\r
2409                 break;\r
2410 \r
2411         default:\r
2412                 route = ROUTE_DISPATCHER;\r
2413                 break;\r
2414         }\r
2415 \r
2416         AL_EXIT( AL_DBG_SMI );\r
2417         return route;\r
2418 }\r
2419 \r
2420 \r
2421 \r
2422 /*\r
2423  * Route received SMP attributes.\r
2424  */\r
2425 mad_route_t\r
2426 route_recv_smp_attr(\r
2427         IN                              ib_mad_element_t*                       p_mad_element )\r
2428 {\r
2429         mad_route_t                             route;\r
2430 \r
2431         AL_ENTER( AL_DBG_SMI );\r
2432 \r
2433         CL_ASSERT( p_mad_element );\r
2434 \r
2435         /* Process the received SMP attributes. */\r
2436         switch( p_mad_element->p_mad_buf->attr_id )\r
2437         {\r
2438         case IB_MAD_ATTR_NODE_DESC:\r
2439         case IB_MAD_ATTR_NODE_INFO:\r
2440         case IB_MAD_ATTR_GUID_INFO:\r
2441         case IB_MAD_ATTR_PORT_INFO:\r
2442         case IB_MAD_ATTR_P_KEY_TABLE:\r
2443         case IB_MAD_ATTR_SLVL_TABLE:\r
2444         case IB_MAD_ATTR_VL_ARBITRATION:\r
2445         case IB_MAD_ATTR_VENDOR_DIAG:\r
2446         case IB_MAD_ATTR_LED_INFO:\r
2447         case IB_MAD_ATTR_SWITCH_INFO:\r
2448                 route = ROUTE_LOCAL;\r
2449                 break;\r
2450 \r
2451         default:\r
2452                 route = ROUTE_DISPATCHER;\r
2453                 break;\r
2454         }\r
2455 \r
2456         AL_EXIT( AL_DBG_SMI );\r
2457         return route;\r
2458 }\r
2459 \r
2460 \r
2461 /*\r
2462  * Route a received GMP.\r
2463  */\r
2464 mad_route_t\r
2465 route_recv_gmp(\r
2466         IN                              ib_mad_element_t*                       p_mad_element )\r
2467 {\r
2468         mad_route_t                             route;\r
2469 \r
2470         AL_ENTER( AL_DBG_SMI );\r
2471 \r
2472         CL_ASSERT( p_mad_element );\r
2473 \r
2474         /* Process the received GMP. */\r
2475         switch( p_mad_element->p_mad_buf->method )\r
2476         {\r
2477         case IB_MAD_METHOD_GET:\r
2478         case IB_MAD_METHOD_SET:\r
2479                 /* Route vendor specific MADs to the HCA provider. */\r
2480                 if( ib_class_is_vendor_specific(\r
2481                         p_mad_element->p_mad_buf->mgmt_class ) )\r
2482                 {\r
2483                         route = ROUTE_LOCAL;\r
2484                 }\r
2485                 else\r
2486                 {\r
2487                         route = route_recv_gmp_attr( p_mad_element );\r
2488                 }\r
2489                 break;\r
2490 \r
2491         default:\r
2492                 route = ROUTE_DISPATCHER;\r
2493                 break;\r
2494         }\r
2495 \r
2496         AL_EXIT( AL_DBG_SMI );\r
2497         return route;\r
2498 }\r
2499 \r
2500 \r
2501 \r
2502 /*\r
2503  * Route received GMP attributes.\r
2504  */\r
2505 mad_route_t\r
2506 route_recv_gmp_attr(\r
2507         IN                              ib_mad_element_t*                       p_mad_element )\r
2508 {\r
2509         mad_route_t                             route;\r
2510 \r
2511         AL_ENTER( AL_DBG_SMI );\r
2512 \r
2513         CL_ASSERT( p_mad_element );\r
2514 \r
2515         /* Process the received GMP attributes. */\r
2516         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
2517                 route = ROUTE_LOCAL;\r
2518         else\r
2519                 route = ROUTE_DISPATCHER;\r
2520 \r
2521         AL_EXIT( AL_DBG_SMI );\r
2522         return route;\r
2523 }\r
2524 \r
2525 \r
2526 \r
/*
 * Forward a locally generated Subnet Management trap to the remote SM.
 *
 * The trap must be a LID routed SMP (per C14-5, only a SM may originate
 * a directed route SMP).  An address vector targeting the SM is created
 * and attached to the MAD element, which is then sent through the AL MAD
 * service.  On failure the address vector is destroyed and an error is
 * returned; the caller (process_mad_recv) then discards the MAD element.
 */
ib_api_status_t
forward_sm_trap(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_av_attr_t			av_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/* Check the SMP class. */
	if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )
	{
		/*
		 * Per IBA Specification Release 1.1 Section 14.2.2.1,
		 * "C14-5: Only a SM shall originate a directed route SMP."
		 * Therefore all traps should be LID routed; drop this one.
		 */
		AL_EXIT( AL_DBG_SMI );
		return IB_ERROR;
	}

	/* Create an address vector for the SM from the cached SM LID and SL. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num = p_spl_qp_svc->port_num;
	av_attr.sl = p_spl_qp_svc->sm_sl;
	av_attr.dlid = p_spl_qp_svc->sm_lid;
	av_attr.grh_valid = FALSE;

	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_element->h_av );

	if( status != IB_SUCCESS )
	{
		AL_EXIT( AL_DBG_SMI );
		return status;
	}

	/* Complete the initialization of the MAD element. */
	p_mad_element->p_next = NULL;
	p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
	p_mad_element->resp_expected = FALSE;

	/* Clear context1 for proper send completion callback processing. */
	p_mad_element->context1 = NULL;

	/*
	 * Forward the trap.  Note that because forwarded traps use AL MAD
	 * services, the upper 32-bits of the TID are reserved by the access
	 * layer.  When matching a Trap Repress MAD, the SMA must only use
	 * the lower 32-bits of the TID.
	 */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );

	/* The AV is only needed while the send is outstanding. */
	if( status != IB_SUCCESS )
		ib_destroy_av( p_mad_element->h_av );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2593 \r
2594 \r
/*
 * Process a locally routed MAD received from the special QP.
 *
 * Resubmits the request through the AL MAD service with
 * IB_SEND_OPT_LOCAL so the local CA processes it.  The originator's TID
 * is saved in context2 and zeroed in the MAD header so the access layer
 * can assign its own transaction ID; spl_qp_alias_recv_cb restores the
 * saved TID when the local response arrives.
 */
ib_api_status_t
recv_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request )
{
	ib_mad_t*				p_mad_hdr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_request );

	/* Initialize the MAD element. */
	p_mad_hdr = ib_get_mad_buf( p_mad_request );
	p_mad_request->context1 = p_mad_request;

	/* Save the TID so the eventual response can carry the original one. */
	p_mad_request->context2 =
		(void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );
/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )

	/*
	 * We need to get a response from the local HCA to this MAD only if this
	 * MAD is not itself a response.  Trap Repress MADs never elicit a
	 * response either.
	 */
	p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
		( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
	p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
	p_mad_request->send_opt = IB_SEND_OPT_LOCAL;

	/* Send the locally addressed MAD request to the CA for processing. */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2642 \r
2643 \r
2644 \r
2645 /*\r
2646  * Special QP alias send completion callback.\r
2647  */\r
2648 void\r
2649 spl_qp_alias_send_cb(\r
2650         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2651         IN                              void*                                           mad_svc_context,\r
2652         IN                              ib_mad_element_t*                       p_mad_element )\r
2653 {\r
2654         ib_api_status_t                 status;\r
2655 \r
2656         AL_ENTER( AL_DBG_SMI );\r
2657 \r
2658         UNUSED_PARAM( h_mad_svc );\r
2659         UNUSED_PARAM( mad_svc_context );\r
2660         CL_ASSERT( p_mad_element );\r
2661 \r
2662         if( p_mad_element->h_av )\r
2663         {\r
2664                 status = ib_destroy_av( p_mad_element->h_av );\r
2665                 CL_ASSERT( status == IB_SUCCESS );\r
2666         }\r
2667 \r
2668         status = ib_put_mad( p_mad_element );\r
2669         CL_ASSERT( status == IB_SUCCESS );\r
2670 \r
2671         AL_EXIT( AL_DBG_SMI );\r
2672 }\r
2673 \r
2674 \r
2675 \r
/*
 * Special QP alias receive completion callback.
 *
 * Invoked when the local CA completes processing of a MAD submitted by
 * recv_local_mad.  Restores the originator's TID, rebuilds the return
 * addressing from the original request (reversing the GRH when
 * present), and sends the response back to the requester.  On failure
 * the response element is returned to the pool.
 */
void
spl_qp_alias_recv_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void*						mad_svc_context,
	IN				ib_mad_element_t*			p_mad_response )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_mad_element_t*		p_mad_request;
	ib_mad_t*				p_mad_hdr;
	ib_av_attr_t			av_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( mad_svc_context );
	CL_ASSERT( p_mad_response );
	CL_ASSERT( p_mad_response->send_context1 );

	/* Initialize pointers. */
	p_spl_qp_svc = mad_svc_context;
	p_mad_request = p_mad_response->send_context1;
	p_mad_hdr = ib_get_mad_buf( p_mad_response );

	/* Restore the TID saved by recv_local_mad, so it will match on the
	 * remote side. */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id,
		(uint32_t)(uintn_t)p_mad_response->send_context2 );
#pragma warning( pop )

	/* Set the remote QP. */
	p_mad_response->remote_qp	= p_mad_request->remote_qp;
	p_mad_response->remote_qkey = p_mad_request->remote_qkey;

	/* Prepare to create an address vector back to the requester. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num	= p_spl_qp_svc->port_num;
	av_attr.sl			= p_mad_request->remote_sl;
	/* NOTE(review): responses use a fixed 10 Gb/s static rate — confirm
	 * this is appropriate for all link speeds. */
	av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
	av_attr.path_bits	= p_mad_request->path_bits;
	if( p_mad_request->grh_valid )
	{
		/* Reverse the GRH by swapping the source and destination GIDs. */
		cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );
		av_attr.grh.src_gid	 = p_mad_request->p_grh->dest_gid;
		av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;
		av_attr.grh_valid = TRUE;
	}
	/* Directed route requests to the permissive LID are answered to the
	 * permissive LID. */
	if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&
		( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )
		av_attr.dlid = IB_LID_PERMISSIVE;
	else
		av_attr.dlid = p_mad_request->remote_lid;

	/* Create an address vector. */
	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_response->h_av );

	if( status != IB_SUCCESS )
	{
		ib_put_mad( p_mad_response );

		AL_EXIT( AL_DBG_SMI );
		return;
	}

	/* Send the response.  The AV is destroyed in the send completion
	 * callback on success. */
	status = ib_send_mad( h_mad_svc, p_mad_response, NULL );

	if( status != IB_SUCCESS )
	{
		ib_destroy_av( p_mad_response->h_av );
		ib_put_mad( p_mad_response );
	}

	AL_EXIT( AL_DBG_SMI );
}
2754 \r
2755 \r
2756 \r
/*
 * Post receive buffers to a special QP.
 *
 * Called with p_spl_qp_svc->obj.lock held by all callers.  Replenishes
 * the QP's receive queue up to max_qp_depth, drawing MAD elements from
 * the CI CA pool and tracking each posted buffer on recv_queue.  Stops
 * at the first pool-get or post failure and returns that status;
 * returns IB_SUCCESS when the queue is full.
 */
static ib_api_status_t
spl_qp_svc_post_recvs(
	IN				spl_qp_svc_t*	const		p_spl_qp_svc )
{
	ib_mad_element_t*		p_mad_element;
	al_mad_element_t*		p_al_element;
	ib_recv_wr_t			recv_wr;
	ib_api_status_t			status = IB_SUCCESS;

	/* Attempt to post receive buffers up to the max_qp_depth limit. */
	while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <
		(int32_t)p_spl_qp_svc->max_qp_depth )
	{
		/* Get a MAD element from the pool. */
		status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,
			MAD_BLOCK_SIZE, &p_mad_element );

		if( status != IB_SUCCESS ) break;

		p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
			element );

		/* Build the receive work request.  A single data segment covers
		 * the GRH plus the MAD buffer. */
		recv_wr.p_next	 = NULL;
		recv_wr.wr_id	 = (uintn_t)p_al_element;
		recv_wr.num_ds = 1;
		recv_wr.ds_array = &p_al_element->grh_ds;

		/* Queue the receive on the service tracking list. */
		cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,
			&p_al_element->list_item );

		/* Post the receive. */
		status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );

		if( status != IB_SUCCESS )
		{
			/* Undo the tracking and return the element on failure. */
			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("Failed to post receive %016I64x\n",
				(LONG_PTR)p_al_element) );
			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_element->list_item );

			ib_put_mad( p_mad_element );
			break;
		}
	}

	return status;
}
2810 \r
2811 \r
2812 \r
2813 /*\r
2814  * Special QP service asynchronous event callback.\r
2815  */\r
2816 void\r
2817 spl_qp_svc_event_cb(\r
2818         IN                              ib_async_event_rec_t            *p_event_rec )\r
2819 {\r
2820         spl_qp_svc_t*                   p_spl_qp_svc;\r
2821 \r
2822         AL_ENTER( AL_DBG_SMI );\r
2823 \r
2824         CL_ASSERT( p_event_rec );\r
2825         CL_ASSERT( p_event_rec->context );\r
2826 \r
2827         if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
2828         {\r
2829                 AL_EXIT( AL_DBG_SMI );\r
2830                 return;\r
2831         }\r
2832 \r
2833         p_spl_qp_svc = p_event_rec->context;\r
2834 \r
2835         spl_qp_svc_reset( p_spl_qp_svc );\r
2836 \r
2837         AL_EXIT( AL_DBG_SMI );\r
2838 }\r
2839 \r
2840 \r
2841 \r
/*
 * Special QP service reset.
 *
 * Transitions the service from SPL_QP_ACTIVE to SPL_QP_ERROR and queues
 * spl_qp_svc_reset_cb on the global asynchronous processing thread to
 * rebuild the QP.  A no-op when the service is not active, so concurrent
 * reset requests collapse into a single reset.
 */
void
spl_qp_svc_reset(
	IN				spl_qp_svc_t*				p_spl_qp_svc )
{
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

	/* Only an active service can be reset; otherwise a reset is already
	 * pending or the service is going away. */
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}

	/* Change the special QP service to the error state. */
	p_spl_qp_svc->state = SPL_QP_ERROR;

	/* Flag the service as in use by the asynchronous processing thread. */
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Queue an asynchronous processing item to reset the special QP. */
	cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );
}
2868 \r
2869 \r
2870 \r
2871 /*\r
2872  * Asynchronous processing thread callback to reset the special QP service.\r
2873  */\r
2874 void\r
2875 spl_qp_svc_reset_cb(\r
2876         IN                              cl_async_proc_item_t*           p_item )\r
2877 {\r
2878         spl_qp_svc_t*                   p_spl_qp_svc;\r
2879         cl_list_item_t*                 p_list_item;\r
2880         ib_wc_t                                 wc;\r
2881         ib_wc_t*                                p_free_wc;\r
2882         ib_wc_t*                                p_done_wc;\r
2883         al_mad_wr_t*                    p_mad_wr;\r
2884         al_mad_element_t*               p_al_mad;\r
2885         ib_qp_mod_t                             qp_mod;\r
2886         ib_api_status_t                 status;\r
2887         cl_qlist_t                              mad_wr_list;\r
2888 \r
2889         AL_ENTER( AL_DBG_SMI );\r
2890 \r
2891         CL_ASSERT( p_item );\r
2892         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );\r
2893 \r
2894         /* Wait here until the special QP service is only in use by this thread. */\r
2895         while( p_spl_qp_svc->in_use_cnt != 1 )\r
2896         {\r
2897                 cl_thread_suspend( 0 );\r
2898         }\r
2899 \r
2900         /* Change the QP to the RESET state. */\r
2901         cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );\r
2902         qp_mod.req_state = IB_QPS_RESET;\r
2903 \r
2904         status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );\r
2905         CL_ASSERT( status == IB_SUCCESS );\r
2906 \r
2907         /* Return receive MAD elements to the pool. */\r
2908         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2909         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
2910                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
2911                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
2912         {\r
2913                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
2914 \r
2915                 status = ib_put_mad( &p_al_mad->element );\r
2916                 CL_ASSERT( status == IB_SUCCESS );\r
2917         }\r
2918         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2919 \r
2920         /* Re-initialize the QP. */\r
2921         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
2922         CL_ASSERT( status == IB_SUCCESS );\r
2923 \r
2924         /* Poll to remove any remaining send completions from the CQ. */\r
2925         do\r
2926         {\r
2927                 cl_memclr( &wc, sizeof( ib_wc_t ) );\r
2928                 p_free_wc = &wc;\r
2929                 status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );\r
2930 \r
2931         } while( status == IB_SUCCESS );\r
2932 \r
2933         /* Post receive buffers. */\r
2934         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2935         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2936 \r
2937         /* Re-queue any outstanding MAD send operations. */\r
2938         cl_qlist_init( &mad_wr_list );\r
2939         cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue );\r
2940         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2941 \r
2942         for( p_list_item = cl_qlist_remove_head( &mad_wr_list );\r
2943                  p_list_item != cl_qlist_end( &mad_wr_list );\r
2944                  p_list_item = cl_qlist_remove_head( &mad_wr_list ) )\r
2945         {\r
2946                 p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
2947                 special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );\r
2948         }\r
2949 \r
2950         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2951         if( p_spl_qp_svc->state == SPL_QP_ERROR )\r
2952         {\r
2953                 /* The QP is ready.  Change the state. */\r
2954                 p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
2955                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2956 \r
2957                 /* Re-arm the CQs. */\r
2958                 status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );\r
2959                 CL_ASSERT( status == IB_SUCCESS );\r
2960                 status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );\r
2961                 CL_ASSERT( status == IB_SUCCESS );\r
2962 \r
2963                 /* Resume send processing. */\r
2964                 special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2965         }\r
2966         else\r
2967         {\r
2968                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2969         }\r
2970 \r
2971         /* No longer in use by the asynchronous processing thread. */\r
2972         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2973 \r
2974         AL_EXIT( AL_DBG_SMI );\r
2975 }\r
2976 \r
2977 \r
2978 \r
2979 /*\r
2980  * Special QP alias asynchronous event callback.\r
2981  */\r
2982 void\r
2983 spl_qp_alias_event_cb(\r
2984         IN                              ib_async_event_rec_t            *p_event_rec )\r
2985 {\r
2986         UNUSED_PARAM( p_event_rec );\r
2987 }\r
2988 \r
2989 \r
2990 \r
2991 /*\r
2992  * Acquire the SMI dispatcher for the given port.\r
2993  */\r
2994 ib_api_status_t\r
2995 acquire_smi_disp(\r
2996         IN              const   ib_net64_t                                      port_guid,\r
2997                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2998 {\r
2999         CL_ASSERT( gp_spl_qp_mgr );\r
3000         return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );\r
3001 }\r
3002 \r
3003 \r
3004 \r
3005 /*\r
3006  * Acquire the GSI dispatcher for the given port.\r
3007  */\r
3008 ib_api_status_t\r
3009 acquire_gsi_disp(\r
3010         IN              const   ib_net64_t                                      port_guid,\r
3011                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
3012 {\r
3013         CL_ASSERT( gp_spl_qp_mgr );\r
3014         return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );\r
3015 }\r
3016 \r
3017 \r
3018 \r
3019 /*\r
3020  * Acquire the service dispatcher for the given port.\r
3021  */\r
3022 ib_api_status_t\r
3023 acquire_svc_disp(\r
3024         IN              const   cl_qmap_t* const                        p_svc_map,\r
3025         IN              const   ib_net64_t                                      port_guid,\r
3026                 OUT                     al_mad_disp_handle_t            *ph_mad_disp )\r
3027 {\r
3028         cl_map_item_t*                  p_svc_item;\r
3029         spl_qp_svc_t*                   p_spl_qp_svc;\r
3030 \r
3031         AL_ENTER( AL_DBG_SMI );\r
3032 \r
3033         CL_ASSERT( p_svc_map );\r
3034         CL_ASSERT( gp_spl_qp_mgr );\r
3035 \r
3036         /* Search for the SMI or GSI service for the given port. */\r
3037         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
3038         p_svc_item = cl_qmap_get( p_svc_map, port_guid );\r
3039         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
3040         if( p_svc_item == cl_qmap_end( p_svc_map ) )\r
3041         {\r
3042                 /* The port does not have an active agent. */\r
3043                 AL_EXIT( AL_DBG_SMI );\r
3044                 return IB_INVALID_GUID;\r
3045         }\r
3046 \r
3047         p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );\r
3048 \r
3049         /* Found a match.  Get MAD dispatcher handle. */\r
3050         *ph_mad_disp = p_spl_qp_svc->h_mad_disp;\r
3051 \r
3052         /* Reference the MAD dispatcher on behalf of the client. */\r
3053         ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );\r
3054 \r
3055         AL_EXIT( AL_DBG_SMI );\r
3056         return IB_SUCCESS;\r
3057 }\r
3058 \r
3059 \r
3060 \r
3061 /*\r
3062  * Force a poll for CA attribute changes.\r
3063  */\r
3064 void\r
3065 force_smi_poll(\r
3066         void )\r
3067 {\r
3068         AL_ENTER( AL_DBG_SMI );\r
3069 \r
3070         /*\r
3071          * Stop the poll timer.  Just invoke the timer callback directly to\r
3072          * save the thread context switching.\r
3073          */\r
3074         smi_poll_timer_cb( gp_spl_qp_mgr );\r
3075 \r
3076         AL_EXIT( AL_DBG_SMI );\r
3077 }\r
3078 \r
3079 \r
3080 \r
3081 /*\r
3082  * Poll for CA port attribute changes.\r
3083  */\r
3084 void\r
3085 smi_poll_timer_cb(\r
3086         IN                              void*                                           context )\r
3087 {\r
3088         cl_status_t                     cl_status;\r
3089 \r
3090         AL_ENTER( AL_DBG_SMI );\r
3091 \r
3092         CL_ASSERT( context );\r
3093         CL_ASSERT( gp_spl_qp_mgr == context );\r
3094         UNUSED_PARAM( context );\r
3095 \r
3096         /*\r
3097          * Scan for changes on the local HCAs.  Since the PnP manager has its\r
3098          * own thread for processing changes, we kick off that thread in parallel\r
3099          * reposting receive buffers to the SQP agents.\r
3100          */\r
3101         pnp_poll();\r
3102 \r
3103         /*\r
3104          * To handle the case where force_smi_poll is called at the same time\r
3105          * the timer expires, check if the asynchronous processing item is in\r
3106          * use.  If it is already in use, it means that we're about to poll\r
3107          * anyway, so just ignore this call.\r
3108          */\r
3109         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
3110 \r
3111         /* Perform port processing on the special QP agents. */\r
3112         cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,\r
3113                 gp_spl_qp_mgr );\r
3114 \r
3115         /* Determine if there are any special QP agents to poll. */\r
3116         if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )\r
3117         {\r
3118                 /* Restart the polling timer. */\r
3119                 cl_status =\r
3120                         cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
3121                 CL_ASSERT( cl_status == CL_SUCCESS );\r
3122         }\r
3123         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
3124 \r
3125         AL_EXIT( AL_DBG_SMI );\r
3126 }\r
3127 \r
3128 \r
3129 \r
3130 /*\r
3131  * Post receive buffers to a special QP.\r
3132  */\r
3133 void\r
3134 smi_post_recvs(\r
3135         IN                              cl_list_item_t* const           p_list_item,\r
3136         IN                              void*                                           context )\r
3137 {\r
3138         al_obj_t*                               p_obj;\r
3139         spl_qp_svc_t*                   p_spl_qp_svc;\r
3140 \r
3141         AL_ENTER( AL_DBG_SMI );\r
3142 \r
3143         CL_ASSERT( p_list_item );\r
3144         UNUSED_PARAM( context );\r
3145 \r
3146         p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
3147         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
3148 \r
3149         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
3150         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
3151         {\r
3152                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3153                 return;\r
3154         }\r
3155 \r
3156         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
3157         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3158 \r
3159         AL_EXIT( AL_DBG_SMI );\r
3160 }\r