7f5abee90ba9ed3c778e57032dd5e531ffa993ab
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  * Copyright (c) 2006 Voltaire Corporation.  All rights reserved.\r
5  * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
6  *\r
7  * This software is available to you under the OpenIB.org BSD license\r
8  * below:\r
9  *\r
10  *     Redistribution and use in source and binary forms, with or\r
11  *     without modification, are permitted provided that the following\r
12  *     conditions are met:\r
13  *\r
14  *      - Redistributions of source code must retain the above\r
15  *        copyright notice, this list of conditions and the following\r
16  *        disclaimer.\r
17  *\r
18  *      - Redistributions in binary form must reproduce the above\r
19  *        copyright notice, this list of conditions and the following\r
20  *        disclaimer in the documentation and/or other materials\r
21  *        provided with the distribution.\r
22  *\r
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
30  * SOFTWARE.\r
31  *\r
32  * $Id$\r
33  */\r
34 \r
35 \r
36 #include <iba/ib_al.h>\r
37 #include <complib/cl_timer.h>\r
38 \r
39 #include "ib_common.h"\r
40 #include "al_common.h"\r
41 #include "al_debug.h"\r
42 #if defined(EVENT_TRACING)\r
43 #ifdef offsetof\r
44 #undef offsetof\r
45 #endif\r
46 #include "al_smi.tmh"\r
47 #endif\r
48 #include "al_verbs.h"\r
49 #include "al_mgr.h"\r
50 #include "al_pnp.h"\r
51 #include "al_qp.h"\r
52 #include "al_smi.h"\r
53 #include "al_av.h"\r
54 \r
55 \r
/* Node description buffer exported by AL.
 * NOTE(review): defined elsewhere in the AL library -- confirm which module. */
extern char						node_desc[IB_NODE_DESCRIPTION_SIZE];

#define SMI_POLL_INTERVAL			20000		/* Milliseconds */
#define LOCAL_MAD_TIMEOUT			50			/* Milliseconds */
#define DEFAULT_QP0_DEPTH			256			/* Cap on QP0 queue/CQ depth. */
#define DEFAULT_QP1_DEPTH			1024		/* Cap on QP1 queue/CQ depth. */

/* Current SMI poll interval; starts at SMI_POLL_INTERVAL (may be tuned). */
uint32_t				g_smi_poll_interval =	SMI_POLL_INTERVAL;
/* The single global special QP manager, created by create_spl_qp_mgr. */
spl_qp_mgr_t*			gp_spl_qp_mgr = NULL;
65 \r
66 \r
67 /*\r
68  * Function prototypes.\r
69  */\r
70 void\r
71 destroying_spl_qp_mgr(\r
72         IN                              al_obj_t*                                       p_obj );\r
73 \r
74 void\r
75 free_spl_qp_mgr(\r
76         IN                              al_obj_t*                                       p_obj );\r
77 \r
78 ib_api_status_t\r
79 spl_qp0_agent_pnp_cb(\r
80         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
81 \r
82 ib_api_status_t\r
83 spl_qp1_agent_pnp_cb(\r
84         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
85 \r
86 ib_api_status_t\r
87 spl_qp_agent_pnp(\r
88         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
89         IN                              ib_qp_type_t                            qp_type );\r
90 \r
91 ib_api_status_t\r
92 create_spl_qp_svc(\r
93         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
94         IN              const   ib_qp_type_t                            qp_type );\r
95 \r
96 void\r
97 destroying_spl_qp_svc(\r
98         IN                              al_obj_t*                                       p_obj );\r
99 \r
100 void\r
101 free_spl_qp_svc(\r
102         IN                              al_obj_t*                                       p_obj );\r
103 \r
104 void\r
105 spl_qp_svc_lid_change(\r
106         IN                              al_obj_t*                                       p_obj,\r
107         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
108 \r
109 ib_api_status_t\r
110 remote_mad_send(\r
111         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
112         IN                              al_mad_wr_t* const                      p_mad_wr );\r
113 \r
114 static ib_api_status_t\r
115 local_mad_send(\r
116         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
117         IN                              al_mad_wr_t* const                      p_mad_wr );\r
118 \r
119 static ib_api_status_t\r
120 loopback_mad(\r
121         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
122         IN                              al_mad_wr_t* const                      p_mad_wr );\r
123 \r
124 static ib_api_status_t\r
125 __process_subn_mad(\r
126         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
127         IN                              al_mad_wr_t* const                      p_mad_wr );\r
128 \r
129 static ib_api_status_t\r
130 fwd_local_mad(\r
131         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
132         IN                              al_mad_wr_t* const                      p_mad_wr );\r
133 \r
134 void\r
135 send_local_mad_cb(\r
136         IN                              cl_async_proc_item_t*           p_item );\r
137 \r
138 void\r
139 spl_qp_send_comp_cb(\r
140         IN              const   ib_cq_handle_t                          h_cq,\r
141         IN                              void                                            *cq_context );\r
142 \r
143 void\r
144 spl_qp_send_dpc_cb(\r
145     IN              KDPC                        *p_dpc,\r
146     IN              void                        *context,\r
147     IN              void                        *arg1,\r
148     IN              void                        *arg2\r
149     );\r
150 \r
151 void\r
152 spl_qp_recv_dpc_cb(\r
153     IN              KDPC                        *p_dpc,\r
154     IN              void                        *context,\r
155     IN              void                        *arg1,\r
156     IN              void                        *arg2\r
157     );\r
158 \r
159 void\r
160 spl_qp_recv_comp_cb(\r
161         IN              const   ib_cq_handle_t                          h_cq,\r
162         IN                              void                                            *cq_context );\r
163 \r
164 void\r
165 spl_qp_comp(\r
166         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
167         IN              const   ib_cq_handle_t                          h_cq,\r
168         IN                              ib_wc_type_t                            wc_type );\r
169 \r
170 ib_api_status_t\r
171 process_mad_recv(\r
172         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
173         IN                              ib_mad_element_t*                       p_mad_element );\r
174 \r
175 static mad_route_t\r
176 route_recv_smp(\r
177         IN                              ib_mad_element_t*                       p_mad_element );\r
178 \r
179 static mad_route_t\r
180 route_recv_smp_attr(\r
181         IN                              ib_mad_element_t*                       p_mad_element );\r
182 \r
183 mad_route_t\r
184 route_recv_dm_mad(\r
185         IN                              ib_mad_element_t*                       p_mad_element );\r
186 \r
187 static mad_route_t\r
188 route_recv_bm(\r
189         IN                              ib_mad_element_t*                       p_mad_element );\r
190 \r
191 static mad_route_t\r
192 route_recv_perf(\r
193         IN                              ib_mad_element_t*                       p_mad_element );\r
194 \r
195 ib_api_status_t\r
196 forward_sm_trap(\r
197         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
198         IN                              ib_mad_element_t*                       p_mad_element );\r
199 \r
200 ib_api_status_t\r
201 recv_local_mad(\r
202         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
203         IN                              ib_mad_element_t*                       p_mad_request );\r
204 \r
205 void\r
206 spl_qp_alias_send_cb(\r
207         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
208         IN                              void                                            *mad_svc_context,\r
209         IN                              ib_mad_element_t                        *p_mad_element );\r
210 \r
211 void\r
212 spl_qp_alias_recv_cb(\r
213         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
214         IN                              void                                            *mad_svc_context,\r
215         IN                              ib_mad_element_t                        *p_mad_response );\r
216 \r
217 static ib_api_status_t\r
218 spl_qp_svc_post_recvs(\r
219         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
220 \r
221 void\r
222 spl_qp_svc_event_cb(\r
223         IN                              ib_async_event_rec_t            *p_event_rec );\r
224 \r
225 void\r
226 spl_qp_alias_event_cb(\r
227         IN                              ib_async_event_rec_t            *p_event_rec );\r
228 \r
229 void\r
230 spl_qp_svc_reset(\r
231         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
232 \r
233 void\r
234 spl_qp_svc_reset_cb(\r
235         IN                              cl_async_proc_item_t*           p_item );\r
236 \r
237 ib_api_status_t\r
238 acquire_svc_disp(\r
239         IN              const   cl_qmap_t* const                        p_svc_map,\r
240         IN              const   ib_net64_t                                      port_guid,\r
241                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
242 \r
243 void\r
244 smi_poll_timer_cb(\r
245         IN                              void*                                           context );\r
246 \r
247 void\r
248 smi_post_recvs(\r
249         IN                              cl_list_item_t* const           p_list_item,\r
250         IN                              void*                                           context );\r
251 \r
252 #if defined( CL_USE_MUTEX )\r
253 void\r
254 spl_qp_send_async_cb(\r
255         IN                              cl_async_proc_item_t*           p_item );\r
256 \r
257 void\r
258 spl_qp_recv_async_cb(\r
259         IN                              cl_async_proc_item_t*           p_item );\r
260 #endif\r
261 \r
262 /*\r
263  * Create the special QP manager.\r
264  */\r
/*
 * Create the special QP manager.
 *
 * Allocates and initializes the single global SMI/GSI manager
 * (gp_spl_qp_mgr), attaches it to p_parent_obj, initializes the SMI
 * poll timer, and registers for port PnP events twice -- once for QP0
 * and once for QP1 -- so each special QP type gets its own PnP context.
 *
 * Error handling is order-sensitive: before init_al_obj succeeds the
 * manager is torn down with free_spl_qp_mgr directly; afterwards it is
 * torn down through the object's pfn_destroy handler.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY, or the failing call's status.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the SMI and GSI service lookup maps (keyed by port GUID). */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* Object never became usable; free it directly, not via pfn_destroy. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
381 \r
382 \r
383 \r
384 /*\r
385  * Pre-destroy the special QP manager.\r
386  */\r
387 void\r
388 destroying_spl_qp_mgr(\r
389         IN                              al_obj_t*                                       p_obj )\r
390 {\r
391         ib_api_status_t                 status;\r
392 \r
393         CL_ASSERT( p_obj );\r
394         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
395         UNUSED_PARAM( p_obj );\r
396 \r
397         /* Deregister for port PnP events for QP0. */\r
398         if( gp_spl_qp_mgr->h_qp0_pnp )\r
399         {\r
400                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
401                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
402                 CL_ASSERT( status == IB_SUCCESS );\r
403         }\r
404 \r
405         /* Deregister for port PnP events for QP1. */\r
406         if( gp_spl_qp_mgr->h_qp1_pnp )\r
407         {\r
408                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
409                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
410                 CL_ASSERT( status == IB_SUCCESS );\r
411         }\r
412 \r
413         /* Destroy the SMI polling timer. */\r
414         cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
415 }\r
416 \r
417 \r
418 \r
419 /*\r
420  * Free the special QP manager.\r
421  */\r
422 void\r
423 free_spl_qp_mgr(\r
424         IN                              al_obj_t*                                       p_obj )\r
425 {\r
426         CL_ASSERT( p_obj );\r
427         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
428         UNUSED_PARAM( p_obj );\r
429 \r
430         destroy_al_obj( &gp_spl_qp_mgr->obj );\r
431         cl_free( gp_spl_qp_mgr );\r
432         gp_spl_qp_mgr = NULL;\r
433 }\r
434 \r
435 \r
436 \r
437 /*\r
438  * Special QP0 agent PnP event callback.\r
439  */\r
440 ib_api_status_t\r
441 spl_qp0_agent_pnp_cb(\r
442         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
443 {\r
444         ib_api_status_t status;\r
445         AL_ENTER( AL_DBG_SMI );\r
446 \r
447         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
448 \r
449         AL_EXIT( AL_DBG_SMI );\r
450         return status;\r
451 }\r
452 \r
453 \r
454 \r
455 /*\r
456  * Special QP1 agent PnP event callback.\r
457  */\r
458 ib_api_status_t\r
459 spl_qp1_agent_pnp_cb(\r
460         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
461 {\r
462         ib_api_status_t status;\r
463         AL_ENTER( AL_DBG_SMI );\r
464 \r
465         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
466 \r
467         AL_EXIT( AL_DBG_SMI );\r
468         return status;\r
469 }\r
470 \r
471 \r
472 \r
473 /*\r
474  * Special QP agent PnP event callback.\r
475  */\r
476 ib_api_status_t\r
477 spl_qp_agent_pnp(\r
478         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
479         IN                              ib_qp_type_t                            qp_type )\r
480 {\r
481         ib_api_status_t                 status;\r
482         al_obj_t*                               p_obj;\r
483 \r
484         AL_ENTER( AL_DBG_SMI );\r
485 \r
486         CL_ASSERT( p_pnp_rec );\r
487         p_obj = p_pnp_rec->context;\r
488 \r
489         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI,\r
490                 ("p_pnp_rec->pnp_event = 0x%x (%s)\n",\r
491                 p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );\r
492         /* Dispatch based on the PnP event type. */\r
493         switch( p_pnp_rec->pnp_event )\r
494         {\r
495         case IB_PNP_PORT_ADD:\r
496                 CL_ASSERT( !p_obj );\r
497                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
498                 break;\r
499 \r
500         case IB_PNP_PORT_REMOVE:\r
501                 CL_ASSERT( p_obj );\r
502                 ref_al_obj( p_obj );\r
503                 p_obj->pfn_destroy( p_obj, NULL );\r
504                 status = IB_SUCCESS;\r
505                 break;\r
506 \r
507         case IB_PNP_LID_CHANGE:\r
508                 CL_ASSERT( p_obj );\r
509                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
510                 status = IB_SUCCESS;\r
511                 break;\r
512 \r
513         default:\r
514                 /* All other events are ignored. */\r
515                 status = IB_SUCCESS;\r
516                 break;\r
517         }\r
518 \r
519         AL_EXIT( AL_DBG_SMI );\r
520         return status;\r
521 }\r
522 \r
523 \r
524 \r
525 /*\r
526  * Create a special QP service.\r
527  */\r
528 ib_api_status_t\r
529 create_spl_qp_svc(\r
530         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
531         IN              const   ib_qp_type_t                            qp_type )\r
532 {\r
533         cl_status_t                             cl_status;\r
534         spl_qp_svc_t*                   p_spl_qp_svc;\r
535         ib_ca_handle_t                  h_ca;\r
536         ib_cq_create_t                  cq_create;\r
537         ib_qp_create_t                  qp_create;\r
538         ib_qp_attr_t                    qp_attr;\r
539         ib_mad_svc_t                    mad_svc;\r
540         ib_api_status_t                 status;\r
541 \r
542         AL_ENTER( AL_DBG_SMI );\r
543 \r
544         CL_ASSERT( p_pnp_rec );\r
545 \r
546         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
547         {\r
548                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
549                 return IB_INVALID_PARAMETER;\r
550         }\r
551 \r
552         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
553         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
554         CL_ASSERT( p_pnp_rec->p_port_attr );\r
555 \r
556         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
557         if( !p_spl_qp_svc )\r
558         {\r
559                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
560                         ("IB_INSUFFICIENT_MEMORY\n") );\r
561                 return IB_INSUFFICIENT_MEMORY;\r
562         }\r
563 \r
564         /* Tie the special QP service to the port by setting the port number. */\r
565         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
566         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
567         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
568 \r
569         /* Initialize the send and receive queues. */\r
570         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
571         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
572         cl_spinlock_init(&p_spl_qp_svc->cache_lock);\r
573 \r
574     /* Initialize the DPCs. */\r
575     KeInitializeDpc( &p_spl_qp_svc->send_dpc, spl_qp_send_dpc_cb, p_spl_qp_svc );\r
576     KeInitializeDpc( &p_spl_qp_svc->recv_dpc, spl_qp_recv_dpc_cb, p_spl_qp_svc );\r
577 \r
578     if( qp_type == IB_QPT_QP0 )\r
579     {\r
580         KeSetImportanceDpc( &p_spl_qp_svc->send_dpc, HighImportance );\r
581         KeSetImportanceDpc( &p_spl_qp_svc->recv_dpc, HighImportance );\r
582     }\r
583 \r
584 #if defined( CL_USE_MUTEX )\r
585         /* Initialize async callbacks and flags for send/receive processing. */\r
586         p_spl_qp_svc->send_async_queued = FALSE;\r
587         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
588         p_spl_qp_svc->recv_async_queued = FALSE;\r
589         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
590 #endif\r
591 \r
592         /* Initialize the async callback function to process local sends. */\r
593         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
594 \r
595         /* Initialize the async callback function to reset the QP on error. */\r
596         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
597 \r
598         /* Construct the special QP service object. */\r
599         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
600 \r
601         /* Initialize the special QP service object. */\r
602         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
603                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
604         if( status != IB_SUCCESS )\r
605         {\r
606                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
607                 return status;\r
608         }\r
609 \r
610         /* Attach the special QP service to the parent object. */\r
611         status = attach_al_obj(\r
612                 (al_obj_t*)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
613         if( status != IB_SUCCESS )\r
614         {\r
615                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
616                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
617                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
618                 return status;\r
619         }\r
620 \r
621         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
622         CL_ASSERT( h_ca );\r
623         if( !h_ca )\r
624         {\r
625                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
626                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
627                 return IB_INVALID_GUID;\r
628         }\r
629 \r
630         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
631 \r
632         /* Determine the maximum queue depth of the QP and CQs. */\r
633         p_spl_qp_svc->max_qp_depth =\r
634                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
635                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
636                 p_pnp_rec->p_ca_attr->max_wrs :\r
637                 p_pnp_rec->p_ca_attr->max_cqes;\r
638 \r
639         /* Compare this maximum to the default special queue depth. */\r
640         if( ( qp_type == IB_QPT_QP0 ) &&\r
641                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
642                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
643         if( ( qp_type == IB_QPT_QP1 ) &&\r
644                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
645                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
646 \r
647         /* Create the send CQ. */\r
648         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
649         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
650         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
651 \r
652         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
653                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
654 \r
655         if( status != IB_SUCCESS )\r
656         {\r
657                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
658                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
659                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
660                 return status;\r
661         }\r
662 \r
663         /* Reference the special QP service on behalf of ib_create_cq. */\r
664         ref_al_obj( &p_spl_qp_svc->obj );\r
665 \r
666         /* Check the result of the creation request. */\r
667         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
668         {\r
669                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
670                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
671                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
672                 return IB_INSUFFICIENT_RESOURCES;\r
673         }\r
674 \r
675         /* Create the receive CQ. */\r
676         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
677         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
678         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
679 \r
680         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
681                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
682 \r
683         if( status != IB_SUCCESS )\r
684         {\r
685                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
686                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
687                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
688                 return status;\r
689         }\r
690 \r
691         /* Reference the special QP service on behalf of ib_create_cq. */\r
692         ref_al_obj( &p_spl_qp_svc->obj );\r
693 \r
694         /* Check the result of the creation request. */\r
695         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
696         {\r
697                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
698                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
699                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
700                 return IB_INSUFFICIENT_RESOURCES;\r
701         }\r
702 \r
703         /* Create the special QP. */\r
704         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
705         qp_create.qp_type = qp_type;\r
706         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
707         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
708         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
709         qp_create.rq_sge = 1;\r
710         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
711         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
712         qp_create.sq_signaled = TRUE;\r
713 \r
714         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
715                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
716                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
717 \r
718         if( status != IB_SUCCESS )\r
719         {\r
720                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
721                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
722                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
723                 return status;\r
724         }\r
725 \r
726         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
727         ref_al_obj( &p_spl_qp_svc->obj );\r
728 \r
729         /* Check the result of the creation request. */\r
730         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
731         if( status != IB_SUCCESS )\r
732         {\r
733                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
734                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
735                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
736                 return status;\r
737         }\r
738 \r
739         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
740                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
741                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
742         {\r
743                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
744                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
745                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
746                 return IB_INSUFFICIENT_RESOURCES;\r
747         }\r
748 \r
749         /* Initialize the QP for use. */\r
750         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
751         if( status != IB_SUCCESS )\r
752         {\r
753                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
754                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
755                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
756                 return status;\r
757         }\r
758 \r
759         /* Post receive buffers. */\r
760         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
761         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
762         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
763         if( status != IB_SUCCESS )\r
764         {\r
765                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
766                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
767                         ("spl_qp_svc_post_recvs failed, %s\n",\r
768                         ib_get_err_str( status ) ) );\r
769                 return status;\r
770         }\r
771 \r
772         /* Create the MAD dispatcher. */\r
773         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
774                 &p_spl_qp_svc->h_mad_disp );\r
775         if( status != IB_SUCCESS )\r
776         {\r
777                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
778                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
779                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
780                 return status;\r
781         }\r
782 \r
783         /*\r
784          * Add this service to the special QP manager lookup lists.\r
785          * The service must be added to allow the creation of a QP alias.\r
786          */\r
787         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
788         if( qp_type == IB_QPT_QP0 )\r
789         {\r
790                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
791                         &p_spl_qp_svc->map_item );\r
792         }\r
793         else\r
794         {\r
795                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
796                         &p_spl_qp_svc->map_item );\r
797         }\r
798         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
799 \r
800         /*\r
801          * If the CA does not support HW agents, create a QP alias and register\r
802          * a MAD service for sending responses from the local MAD interface.\r
803          */\r
804         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
805         {\r
806                 /* Create a QP alias. */\r
807                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
808                 qp_create.qp_type =\r
809                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
810                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
811                 qp_create.sq_sge                = 1;\r
812                 qp_create.sq_signaled   = TRUE;\r
813 \r
814                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
815                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
816                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
817                         &p_spl_qp_svc->h_qp_alias );\r
818 \r
819                 if (status != IB_SUCCESS)\r
820                 {\r
821                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
822                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
823                                 ("ib_get_spl_qp alias failed, %s\n",\r
824                                 ib_get_err_str( status ) ) );\r
825                         return status;\r
826                 }\r
827 \r
828                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
829                 ref_al_obj( &p_spl_qp_svc->obj );\r
830 \r
831                 /* Register a MAD service for sends. */\r
832                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
833                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
834                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
835                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
836 \r
837                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
838                         &p_spl_qp_svc->h_mad_svc );\r
839 \r
840                 if( status != IB_SUCCESS )\r
841                 {\r
842                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
843                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
844                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
845                         return status;\r
846                 }\r
847         }\r
848 \r
849         /* Set the context of the PnP event to this child object. */\r
850         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
851 \r
852         /* The QP is ready.  Change the state. */\r
853         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
854 \r
855         /* Force a completion callback to rearm the CQs. */\r
856         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
857         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
858 \r
859         /* Start the polling thread timer. */\r
860         if( g_smi_poll_interval )\r
861         {\r
862                 cl_status =\r
863                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
864 \r
865                 if( cl_status != CL_SUCCESS )\r
866                 {\r
867                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
868                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
869                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
870                         return ib_convert_cl_status( cl_status );\r
871                 }\r
872         }\r
873 \r
874         /* Release the reference taken in init_al_obj. */\r
875         deref_al_obj( &p_spl_qp_svc->obj );\r
876 \r
877         AL_EXIT( AL_DBG_SMI );\r
878         return IB_SUCCESS;\r
879 }\r
880 \r
881 \r
882 \r
883 /*\r
884  * Return a work completion to the MAD dispatcher for the specified MAD.\r
885  */\r
886 static void\r
887 __complete_send_mad(\r
888         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
889         IN                              al_mad_wr_t* const                      p_mad_wr,\r
890         IN              const   ib_wc_status_t                          wc_status )\r
891 {\r
892         ib_wc_t                 wc;\r
893 \r
894         /* Construct a send work completion. */\r
895         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
896         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
897         wc.wc_type      = IB_WC_SEND;\r
898         wc.status       = wc_status;\r
899 \r
900         /* Set the send size if we were successful with the send. */\r
901         if( wc_status == IB_WCS_SUCCESS )\r
902                 wc.length = MAD_BLOCK_SIZE;\r
903 \r
904         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
905 }\r
906 \r
907 \r
908 \r
/*
 * Pre-destroy a special QP service.
 *
 * Runs as the al_obj "destroying" callback: moves the service to the
 * destroying state so no new sends are accepted, waits for asynchronous
 * users to drain, tears down the special QP (flushing queued sends back
 * to the dispatcher), then destroys the QP alias and both CQs.
 */
void
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_wr_t*			p_mad_wr;

	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Change the state to prevent processing new send requests. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->state = SPL_QP_DESTROYING;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/*
	 * Wait here until the special QP service is no longer in use.
	 * Busy-wait, yielding the processor, until in_use_cnt drains to zero.
	 */
	while( p_spl_qp_svc->in_use_cnt )
	{
		cl_thread_suspend( 0 );
	}

	/* Destroy the special QP. */
	if( p_spl_qp_svc->h_qp )
	{
		/* If present, remove the special QP service from the tracking map. */
		cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
		if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
		{
			cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
		}
		else
		{
			cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
		}
		cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

		/* deref_al_obj releases the reference taken on QP creation. */
		status = ib_destroy_qp( p_spl_qp_svc->h_qp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

		/*
		 * Complete any outstanding MAD sends operations as "flushed".
		 * The service lock is dropped around each completion so the
		 * dispatcher callback is never invoked while holding it.
		 */
		for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
			 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
			 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
		{
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );
			p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_WR_FLUSHED_ERR );
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		}

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		/* Receive MAD elements are returned to the pool by the free routine. */
	}

	/* Destroy the special QP alias and CQs. */
	if( p_spl_qp_svc->h_qp_alias )
	{
		status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_send_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_recv_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	AL_EXIT( AL_DBG_SMI );
}
997 \r
998 \r
999 \r
1000 /*\r
1001  * Free a special QP service.\r
1002  */\r
1003 void\r
1004 free_spl_qp_svc(\r
1005         IN                              al_obj_t*                                       p_obj )\r
1006 {\r
1007         spl_qp_svc_t*                   p_spl_qp_svc;\r
1008         cl_list_item_t*                 p_list_item;\r
1009         al_mad_element_t*               p_al_mad;\r
1010         ib_api_status_t                 status;\r
1011 \r
1012         AL_ENTER( AL_DBG_SMI );\r
1013 \r
1014         CL_ASSERT( p_obj );\r
1015         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1016 \r
1017         /* Dereference the CA. */\r
1018         if( p_spl_qp_svc->obj.p_ci_ca )\r
1019                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
1020 \r
1021         /* Return receive MAD elements to the pool. */\r
1022         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
1023                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
1024                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
1025         {\r
1026                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
1027 \r
1028                 status = ib_put_mad( &p_al_mad->element );\r
1029                 CL_ASSERT( status == IB_SUCCESS );\r
1030         }\r
1031 \r
1032         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
1033 \r
1034         destroy_al_obj( &p_spl_qp_svc->obj );\r
1035         cl_free( p_spl_qp_svc );\r
1036 \r
1037         AL_EXIT( AL_DBG_SMI );\r
1038 }\r
1039 \r
1040 \r
1041 \r
1042 /*\r
1043  * Update the base LID of a special QP service.\r
1044  */\r
1045 void\r
1046 spl_qp_svc_lid_change(\r
1047         IN                              al_obj_t*                                       p_obj,\r
1048         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1049 {\r
1050         spl_qp_svc_t*                   p_spl_qp_svc;\r
1051 \r
1052         AL_ENTER( AL_DBG_SMI );\r
1053 \r
1054         CL_ASSERT( p_obj );\r
1055         CL_ASSERT( p_pnp_rec );\r
1056         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1057 \r
1058         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1059 \r
1060         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1061         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1062 \r
1063         AL_EXIT( AL_DBG_SMI );\r
1064 }\r
1065 \r
1066 \r
1067 \r
/*
 * Route a send work request.
 *
 * Decides whether an outbound MAD should be sent on the wire
 * (ROUTE_REMOTE), handled by the local MAD interface (ROUTE_LOCAL),
 * looped back (ROUTE_LOOPBACK), or dropped (ROUTE_DISCARD).  Directed
 * route SMPs are validated per the IBA directed-route rules.
 */
mad_route_t
route_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_send_wr_t* const			p_send_wr )
{
	al_mad_wr_t*			p_mad_wr;
	al_mad_send_t*			p_mad_send;
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	ib_av_handle_t			h_av;
	mad_route_t				route;
	boolean_t				local, loopback, discard;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_send_wr );

	/* Initialize pointers to the MAD work request and the MAD. */
	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Check if the CA has a local MAD interface. */
	local = loopback = discard = FALSE;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * If the MAD is a locally addressed Subnet Management, Performance
		 * Management, or Connection Management datagram, process the work
		 * request locally.
		 */
		h_av = p_send_wr->dgrm.ud.h_av;
		switch( p_mad->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the originator of the response.  Discard
				 * if the hop count or pointer is zero, an intermediate hop,
				 * out of bounds hop, or if the first port of the directed
				 * route return path is not this port.
				 */
				if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt or hop ptr set to 0...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt != (hop ptr - 1)...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt > max hops...discarding\n") );
					discard = TRUE;
				}
				else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
						 ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
							p_spl_qp_svc->port_num ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("return path[hop ptr - 1] != port num...discarding\n") );
					discard = TRUE;
				}
			}
			else
			{
				/* The SMP is a request.  Classify by hop count/pointer. */
				if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
					( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
				{
					/* Out-of-range path fields: drop. */
					discard = TRUE;
				}
				else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
				{
					/* Self Addressed: Sent locally, routed locally. */
					local = TRUE;
					/* Both DR LIDs must be permissive for self-addressed. */
					discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
							  ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
				{
					/* End of Path: Sent remotely, routed locally. */
					local = TRUE;
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_ptr	== 0 ) )
				{
					/* Beginning of Path: Sent locally, routed remotely. */
					if( p_smp->dr_slid == IB_LID_PERMISSIVE )
					{
						/* First outbound port must be this port. */
						discard =
							( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
							  p_spl_qp_svc->port_num );
					}
				}
				else
				{
					/* Intermediate hop. */
					discard = TRUE;
				}
			}
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);
			break;

		case IB_MCLASS_SUBN_LID:
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

			/* Fall through to check for a local MAD. */

		case IB_MCLASS_PERF:
		case IB_MCLASS_BM:
			/* Local if the destination LID matches this port's LID. */
			local = ( h_av &&
				( h_av->av_attr.dlid ==
				( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
			{
				local = ( h_av &&
					( h_av->av_attr.dlid ==
					( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			}
			break;
		}
	}

	/*
	 * Resolve the final route.  Precedence (lowest to highest):
	 * caller request, local detection, loopback, discard.
	 */
	route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
		ROUTE_LOCAL : ROUTE_REMOTE;
	if( local ) route = ROUTE_LOCAL;
	if( loopback && local ) route = ROUTE_LOOPBACK;
	if( discard ) route = ROUTE_DISCARD;

	AL_EXIT( AL_DBG_SMI );
	return route;
}
1220 \r
1221 \r
1222 \r
/*
 * Send a work request on the special QP.
 *
 * Routes the MAD (local, loopback, remote, or discard) and dispatches
 * it accordingly.  Returns IB_RESOURCE_BUSY when the service cannot
 * accept the request yet; the caller is expected to retry later.
 */
ib_api_status_t
spl_qp_svc_send(
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_send_wr_t* const			p_send_wr )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	al_mad_wr_t*			p_mad_wr;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( h_qp );
	CL_ASSERT( p_send_wr );

	/* Get the special QP service. */
	p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, p_send_wr );

	/*
	 * Check the QP state and guard against error handling.  Also,
	 * to maintain proper order of work completions, delay processing
	 * a local MAD until any remote MAD work requests have completed,
	 * and delay processing a remote MAD until local MAD work requests
	 * have completed.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
		(is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
		( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
			p_spl_qp_svc->max_qp_depth ) )
	{
		/*
		 * Return busy status.
		 * The special QP will resume sends at this point.
		 */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		AL_EXIT( AL_DBG_SMI );
		return IB_RESOURCE_BUSY;
	}

	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

	if( is_local( route ) )
	{
		/* Save the local MAD work request for processing. */
		p_spl_qp_svc->local_mad_wr = p_mad_wr;

		/* Flag the service as in use by the asynchronous processing thread. */
		cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

		/* Drop the lock before potentially lengthy local processing. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		status = local_mad_send( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/*
		 * Process a remote MAD send work request.
		 * Note: remote_mad_send is called holding the service lock.
		 */
		status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1297 \r
1298 \r
1299 \r
/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 *
 * Performs outbound directed-route hop pointer adjustment, queues the
 * work request on the service tracking queue, and posts it to the QP.
 * On post failure the adjustment and queuing are rolled back so the MAD
 * is left exactly as the caller supplied it.
 */
ib_api_status_t
remote_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD (viewed as an SMP). */
	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	/* Perform outbound MAD processing. */

	/* Adjust directed route SMPs as required by IBA. */
	if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		if( ib_smp_is_response( p_smp ) )
		{
			/* Responses step the hop pointer back toward the requester. */
			if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				p_smp->hop_ptr--;
		}
		else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
		{
			/*
			 * Only update the pointer if the hw_agent is not implemented.
			 * Fujitsu implements SMI in hardware, so the following has to
			 * be passed down to the hardware SMI.
			 */
			ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
			if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
				p_smp->hop_ptr++;
			ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
		}
	}

	/* Always generate send completions. */
	p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;

	/* Queue the MAD work request on the service tracking queue. */
	cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

	status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

	if( status != IB_SUCCESS )
	{
		/* Undo the tracking-queue insertion. */
		cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

		/* Reset directed route SMPs as required by IBA. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
		{
			if( ib_smp_is_response( p_smp ) )
			{
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
					p_smp->hop_ptr++;
			}
			else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
			{
				/* Only update if the hw_agent is not implemented. */
				ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
				if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
					p_smp->hop_ptr--;
				ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
			}
		}
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1377 \r
1378 \r
/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 *
 * Discarded MADs are completed back to the dispatcher with a local
 * operation error; loopback and subnet-management MADs are processed
 * inline; anything else is deferred to the asynchronous processing
 * thread via send_async.
 */
static ib_api_status_t
local_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	mad_route_t				route;
	ib_api_status_t			status = IB_SUCCESS;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

	/* Check if this MAD should be discarded. */
	if( is_discard( route ) )
	{
		/* Deliver a "work completion" to the dispatcher. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		status = IB_INVALID_SETTING;
	}
	else if( is_loopback( route ) )
	{
		/* Loopback local SM to SM "heartbeat" messages. */
		status = loopback_mad( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
		case IB_MCLASS_SUBN_LID:
			//DO not use the cache in order to force Mkey  check
			status = __process_subn_mad( p_spl_qp_svc, p_mad_wr );
			//status = IB_NOT_DONE;
			break;

		default:
			/* Non-SM classes are handed to the asynchronous thread. */
			status = IB_NOT_DONE;
		}
	}

	if( status == IB_NOT_DONE )
	{
		/* Queue an asynchronous processing item to process the local MAD. */
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
	}
	else
	{
		/*
		 * Clear the local MAD pointer to allow processing of other MADs.
		 * This is done after polling for attribute changes to ensure that
		 * subsequent MADs pick up any changes performed by this one.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		p_spl_qp_svc->local_mad_wr = NULL;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* No longer in use by the asynchronous processing thread. */
		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

		/* Special QP operations will resume by unwinding. */
	}

	/*
	 * NOTE(review): the local status is intentionally not returned —
	 * failures on the local path are reported to the dispatcher via
	 * the synthesized completions above.  Confirm callers rely on this.
	 */
	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
1453 \r
1454 \r
1455 static ib_api_status_t\r
1456 get_resp_mad(\r
1457         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1458         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1459                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1460 {\r
1461         ib_api_status_t                 status;\r
1462 \r
1463         AL_ENTER( AL_DBG_SMI );\r
1464 \r
1465         CL_ASSERT( p_spl_qp_svc );\r
1466         CL_ASSERT( p_mad_wr );\r
1467         CL_ASSERT( pp_mad_resp );\r
1468 \r
1469         /* Get a MAD element from the pool for the response. */\r
1470         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1471                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1472         if( status != IB_SUCCESS )\r
1473         {\r
1474                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1475                         IB_WCS_LOCAL_OP_ERR );\r
1476         }\r
1477 \r
1478         AL_EXIT( AL_DBG_SMI );\r
1479         return status;\r
1480 }\r
1481 \r
1482 \r
1483 static ib_api_status_t\r
1484 complete_local_mad(\r
1485         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1486         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1487         IN                              ib_mad_element_t* const         p_mad_resp )\r
1488 {\r
1489         ib_api_status_t                 status;\r
1490 \r
1491         AL_ENTER( AL_DBG_SMI );\r
1492 \r
1493         CL_ASSERT( p_spl_qp_svc );\r
1494         CL_ASSERT( p_mad_wr );\r
1495         CL_ASSERT( p_mad_resp );\r
1496 \r
1497         /* Construct the receive MAD element. */\r
1498         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1499         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1500         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1501         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1502         {\r
1503                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1504                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1505         }\r
1506 \r
1507         /*\r
1508          * Hand the receive MAD element to the dispatcher before completing\r
1509          * the send.  This guarantees that the send request cannot time out.\r
1510          */\r
1511         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1512 \r
1513         /* Forward the send work completion to the dispatcher. */\r
1514         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1515 \r
1516         AL_EXIT( AL_DBG_SMI );\r
1517         return status;\r
1518 }\r
1519 \r
1520 \r
1521 static ib_api_status_t\r
1522 loopback_mad(\r
1523         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1524         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1525 {\r
1526         ib_mad_t                                *p_mad;\r
1527         ib_mad_element_t                *p_mad_resp;\r
1528         ib_api_status_t                 status;\r
1529 \r
1530         AL_ENTER( AL_DBG_SMI );\r
1531 \r
1532         CL_ASSERT( p_spl_qp_svc );\r
1533         CL_ASSERT( p_mad_wr );\r
1534 \r
1535         /* Get a MAD element from the pool for the response. */\r
1536         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1537         if( status == IB_SUCCESS )\r
1538         {\r
1539                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1540                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1541 \r
1542                 /* Simulate a send/receive between local managers. */\r
1543                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1544 \r
1545                 /* Construct the receive MAD element. */\r
1546                 p_mad_resp->status              = IB_WCS_SUCCESS;\r
1547                 p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1548                 p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1549                 if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1550                 {\r
1551                         p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1552                         p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1553                 }\r
1554 \r
1555                 /*\r
1556                  * Hand the receive MAD element to the dispatcher before completing\r
1557                  * the send.  This guarantees that the send request cannot time out.\r
1558                  */\r
1559                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1560 \r
1561                 /* Forward the send work completion to the dispatcher. */\r
1562                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1563 \r
1564         }\r
1565 \r
1566         AL_EXIT( AL_DBG_SMI );\r
1567         return status;\r
1568 }\r
1569 \r
1570 \r
1571 static void\r
1572 __update_guid_info(\r
1573         IN                              spl_qp_cache_t* const                   p_cache,\r
1574         IN              const   ib_smp_t* const                         p_mad )\r
1575 {\r
1576         uint32_t                        idx;\r
1577 \r
1578         /* Get the table selector from the attribute */\r
1579         idx = cl_ntoh32( p_mad->attr_mod );\r
1580 \r
1581         /*\r
1582          * We only get successful MADs here, so invalid settings\r
1583          * shouldn't happen.\r
1584          */\r
1585         CL_ASSERT( idx <= 31 );\r
1586 \r
1587         cl_memcpy( &p_cache->guid_block[idx].tbl,\r
1588                 ib_smp_get_payload_ptr( p_mad ),\r
1589                 sizeof(ib_guid_info_t) );\r
1590         p_cache->guid_block[idx].valid = TRUE;\r
1591 }\r
1592 \r
1593 \r
1594 static  void\r
1595 __update_pkey_table(\r
1596         IN                              spl_qp_cache_t* const                   p_cache,\r
1597         IN              const   ib_smp_t* const                         p_mad )\r
1598 {\r
1599         uint16_t                        idx;\r
1600 \r
1601         /* Get the table selector from the attribute */\r
1602         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1603 \r
1604         CL_ASSERT( idx <= 2047 );\r
1605 \r
1606         cl_memcpy( &p_cache->pkey_tbl[idx].tbl,\r
1607                 ib_smp_get_payload_ptr( p_mad ),\r
1608                 sizeof(ib_pkey_table_t) );\r
1609         p_cache->pkey_tbl[idx].valid = TRUE;\r
1610 }\r
1611 \r
1612 \r
1613 static void\r
1614 __update_sl_vl_table(\r
1615         IN                              spl_qp_cache_t* const                   p_cache,\r
1616         IN              const   ib_smp_t* const                         p_mad )\r
1617 {\r
1618         cl_memcpy( &p_cache->sl_vl.tbl,\r
1619                 ib_smp_get_payload_ptr( p_mad ),\r
1620                 sizeof(ib_slvl_table_t) );\r
1621         p_cache->sl_vl.valid = TRUE;\r
1622 }\r
1623 \r
1624 \r
1625 static void\r
1626 __update_vl_arb_table(\r
1627         IN                              spl_qp_cache_t* const                   p_cache,\r
1628         IN              const   ib_smp_t* const                         p_mad )\r
1629 {\r
1630         uint16_t                        idx;\r
1631 \r
1632         /* Get the table selector from the attribute */\r
1633         idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
1634 \r
1635         CL_ASSERT( idx <= 3 );\r
1636 \r
1637         cl_memcpy( &p_cache->vl_arb[idx].tbl,\r
1638                 ib_smp_get_payload_ptr( p_mad ),\r
1639                 sizeof(ib_vl_arb_table_t) );\r
1640         p_cache->vl_arb[idx].valid = TRUE;\r
1641 }\r
1642 \r
1643 \r
1644 \r
1645 void\r
1646 spl_qp_svc_update_cache(\r
1647         IN                              spl_qp_svc_t                            *p_spl_qp_svc,\r
1648         IN                              ib_smp_t                                        *p_mad )\r
1649 {\r
1650 \r
1651 \r
1652 \r
1653         CL_ASSERT( p_spl_qp_svc );\r
1654         CL_ASSERT( p_mad );\r
1655         CL_ASSERT( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1656                                  p_mad->mgmt_class == IB_MCLASS_SUBN_LID);\r
1657         CL_ASSERT(!p_mad->status);\r
1658 \r
1659         cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);\r
1660         \r
1661         switch( p_mad->attr_id )\r
1662         {\r
1663         case IB_MAD_ATTR_GUID_INFO:\r
1664                 __update_guid_info(\r
1665                         &p_spl_qp_svc->cache, p_mad );\r
1666                 break;\r
1667 \r
1668         case IB_MAD_ATTR_P_KEY_TABLE:\r
1669                 __update_pkey_table(\r
1670                         &p_spl_qp_svc->cache, p_mad );\r
1671                 break;\r
1672 \r
1673         case IB_MAD_ATTR_SLVL_TABLE:\r
1674                 __update_sl_vl_table(\r
1675                         &p_spl_qp_svc->cache, p_mad );\r
1676                 break;\r
1677 \r
1678         case IB_MAD_ATTR_VL_ARBITRATION:\r
1679                 __update_vl_arb_table(\r
1680                         &p_spl_qp_svc->cache, p_mad );\r
1681                 break;\r
1682 \r
1683         default:\r
1684                 break;\r
1685         }\r
1686         \r
1687         cl_spinlock_release(&p_spl_qp_svc->cache_lock);\r
1688 }\r
1689 \r
1690 \r
1691 \r
/*
 * Answer a NodeInfo (IB_MAD_ATTR_NODE_INFO) query locally from the
 * cached CA/port attributes instead of forwarding it to the HCA.
 * Returns IB_INVALID_SETTING for non-GET methods, otherwise the status
 * of building and delivering the response.
 */
static ib_api_status_t
__process_node_info(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeInfo is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request header, then mark it as a GetResp. */
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		p_node_info->sys_guid = p_ca_attr->system_image_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		/* Low 24 bits: vendor ID; top byte: the local port number. */
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1764 \r
1765 \r
/*
 * Answer a NodeDescription (IB_MAD_ATTR_NODE_DESC) query locally by
 * returning the machine's node description string.  Returns
 * IB_INVALID_SETTING for non-GET methods, otherwise the status of
 * building and delivering the response.
 */
static ib_api_status_t
__process_node_desc(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeDescription is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request header, then mark it as a GetResp. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method =
			(IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, 
			node_desc, sizeof(node_desc) );

		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1812 \r
1813 static ib_api_status_t\r
1814 __process_guid_info(\r
1815         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1816         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1817 {\r
1818         \r
1819         ib_mad_t                                *p_mad;\r
1820         ib_mad_element_t                *p_mad_resp;\r
1821         ib_smp_t                                *p_smp;\r
1822         ib_guid_info_t                  *p_guid_info;\r
1823         uint16_t                                idx;\r
1824         ib_api_status_t         status;\r
1825 \r
1826 \r
1827         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1828         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1829 \r
1830         /* Get the table selector from the attribute */\r
1831         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1832         \r
1833         /*\r
1834          * TODO : Setup the response to fail the MAD instead of sending\r
1835          * it down to the HCA.\r
1836          */\r
1837         if( idx > 31 )\r
1838         {\r
1839                 AL_EXIT( AL_DBG_SMI );\r
1840                 return IB_NOT_DONE;\r
1841         }\r
1842         if( !p_spl_qp_svc->cache.guid_block[idx].valid )\r
1843         {\r
1844                 AL_EXIT( AL_DBG_SMI );\r
1845                 return IB_NOT_DONE;\r
1846         }\r
1847 \r
1848         /*\r
1849          * If a SET, see if the set is identical to the cache,\r
1850          * in which case it's a no-op.\r
1851          */\r
1852         if( p_mad->method == IB_MAD_METHOD_SET )\r
1853         {\r
1854                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
1855                         &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) ) )\r
1856                 {\r
1857                         /* The set is requesting a change. */\r
1858                         return IB_NOT_DONE;\r
1859                 }\r
1860         }\r
1861         \r
1862         /* Get a MAD element from the pool for the response. */\r
1863         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1864         if( status == IB_SUCCESS )\r
1865         {\r
1866                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1867 \r
1868                 /* Setup the response mad. */\r
1869                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1870                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
1871                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1872                         p_smp->status = IB_SMP_DIRECTION;\r
1873                 else\r
1874                         p_smp->status = 0;\r
1875 \r
1876                 p_guid_info = (ib_guid_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1877 \r
1878                 // TODO: do we need lock on the cache ?????\r
1879 \r
1880                 \r
1881                 /* Copy the cached data. */\r
1882                 cl_memcpy( p_guid_info,\r
1883                         &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) );\r
1884 \r
1885                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1886         }\r
1887 \r
1888         AL_EXIT( AL_DBG_SMI );\r
1889         return status;\r
1890 }\r
1891 \r
1892 \r
1893 static ib_api_status_t\r
1894 __process_pkey_table(\r
1895         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1896         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1897 {\r
1898 \r
1899         ib_mad_t                                *p_mad;\r
1900         ib_mad_element_t                *p_mad_resp;\r
1901         ib_smp_t                                *p_smp;\r
1902         ib_pkey_table_t         *p_pkey_table;\r
1903         uint16_t                                idx;\r
1904         ib_api_status_t         status;\r
1905 \r
1906         AL_ENTER( AL_DBG_SMI );\r
1907 \r
1908         CL_ASSERT( p_spl_qp_svc );\r
1909         CL_ASSERT( p_mad_wr );\r
1910 \r
1911         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1912         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1913 \r
1914         /* Get the table selector from the attribute */\r
1915         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1916         \r
1917         /*\r
1918          * TODO : Setup the response to fail the MAD instead of sending\r
1919          * it down to the HCA.\r
1920          */\r
1921         if( idx > 2047 )\r
1922         {\r
1923                 AL_EXIT( AL_DBG_SMI );\r
1924                 return IB_NOT_DONE;\r
1925         }\r
1926 \r
1927 \r
1928         if( !p_spl_qp_svc->cache.pkey_tbl[idx].valid )\r
1929         {\r
1930                 AL_EXIT( AL_DBG_SMI );\r
1931                 return IB_NOT_DONE;\r
1932         }\r
1933 \r
1934         /*\r
1935          * If a SET, see if the set is identical to the cache,\r
1936          * in which case it's a no-op.\r
1937          */\r
1938         if( p_mad->method == IB_MAD_METHOD_SET )\r
1939         {\r
1940                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
1941                         &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ) )\r
1942                 {\r
1943                         /* The set is requesting a change. */\r
1944                         AL_EXIT( AL_DBG_SMI );\r
1945                         return IB_NOT_DONE;\r
1946                 }\r
1947         }\r
1948         \r
1949         /* Get a MAD element from the pool for the response. */\r
1950         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1951         if( status == IB_SUCCESS )\r
1952         {\r
1953                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1954 \r
1955                 /* Setup the response mad. */\r
1956                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1957                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
1958                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1959                         p_smp->status = IB_SMP_DIRECTION;\r
1960                 else\r
1961                         p_smp->status = 0;\r
1962 \r
1963                 p_pkey_table = (ib_pkey_table_t*)ib_smp_get_payload_ptr( p_smp );\r
1964 \r
1965                 // TODO: do we need lock on the cache ?????\r
1966 \r
1967                 \r
1968                 /* Copy the cached data. */\r
1969                 cl_memcpy( p_pkey_table,\r
1970                         &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) );\r
1971 \r
1972                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1973         }\r
1974 \r
1975         AL_EXIT( AL_DBG_SMI );\r
1976         return status;\r
1977 }\r
1978 \r
1979 \r
1980 static ib_api_status_t\r
1981 __process_slvl_table(\r
1982         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1983         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1984 {\r
1985 \r
1986 \r
1987         ib_mad_t                                *p_mad;\r
1988         ib_mad_element_t                *p_mad_resp;\r
1989         ib_smp_t                                *p_smp;\r
1990         ib_slvl_table_t                 *p_slvl_table;\r
1991         ib_api_status_t         status;\r
1992 \r
1993         AL_ENTER( AL_DBG_SMI );\r
1994 \r
1995         CL_ASSERT( p_spl_qp_svc );\r
1996         CL_ASSERT( p_mad_wr );\r
1997 \r
1998         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1999         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
2000 \r
2001         if( !p_spl_qp_svc->cache.sl_vl.valid )\r
2002         {\r
2003                 AL_EXIT( AL_DBG_SMI );\r
2004                 return IB_NOT_DONE;\r
2005         }\r
2006 \r
2007         /*\r
2008          * If a SET, see if the set is identical to the cache,\r
2009          * in which case it's a no-op.\r
2010          */\r
2011         if( p_mad->method == IB_MAD_METHOD_SET )\r
2012         {\r
2013                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
2014                         &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) ) )\r
2015                 {\r
2016                         /* The set is requesting a change. */\r
2017                         AL_EXIT( AL_DBG_SMI );\r
2018                         return IB_NOT_DONE;\r
2019                 }\r
2020         }\r
2021         \r
2022         /* Get a MAD element from the pool for the response. */\r
2023         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
2024         if( status == IB_SUCCESS )\r
2025         {\r
2026                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
2027 \r
2028                 /* Setup the response mad. */\r
2029                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
2030                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
2031                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2032                         p_smp->status = IB_SMP_DIRECTION;\r
2033                 else\r
2034                         p_smp->status = 0;\r
2035 \r
2036                 p_slvl_table = (ib_slvl_table_t*)ib_smp_get_payload_ptr( p_smp );\r
2037 \r
2038                 // TODO: do we need lock on the cache ?????\r
2039 \r
2040                 \r
2041                 /* Copy the cached data. */\r
2042                 cl_memcpy( p_slvl_table,\r
2043                         &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) );\r
2044 \r
2045                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
2046         }\r
2047 \r
2048         AL_EXIT( AL_DBG_SMI );\r
2049         return status;\r
2050 }\r
2051 \r
2052 \r
2053 \r
2054 static ib_api_status_t\r
2055 __process_vl_arb_table(\r
2056         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2057         IN                              al_mad_wr_t* const                      p_mad_wr )\r
2058 {\r
2059 \r
2060         ib_mad_t                                *p_mad;\r
2061         ib_mad_element_t                *p_mad_resp;\r
2062         ib_smp_t                                *p_smp;\r
2063         ib_vl_arb_table_t               *p_vl_arb_table;\r
2064         uint16_t                                idx;\r
2065         ib_api_status_t         status;\r
2066 \r
2067         AL_ENTER( AL_DBG_SMI );\r
2068 \r
2069         CL_ASSERT( p_spl_qp_svc );\r
2070         CL_ASSERT( p_mad_wr );\r
2071 \r
2072         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
2073         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
2074 \r
2075         /* Get the table selector from the attribute */\r
2076         idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
2077         \r
2078         /*\r
2079          * TODO : Setup the response to fail the MAD instead of sending\r
2080          * it down to the HCA.\r
2081          */\r
2082         if( idx > 3 )\r
2083         {\r
2084                 AL_EXIT( AL_DBG_SMI );\r
2085                 return IB_NOT_DONE;\r
2086         }\r
2087 \r
2088 \r
2089         if( !p_spl_qp_svc->cache.vl_arb[idx].valid )\r
2090         {\r
2091                 AL_EXIT( AL_DBG_SMI );\r
2092                 return IB_NOT_DONE;\r
2093         }\r
2094 \r
2095         /*\r
2096          * If a SET, see if the set is identical to the cache,\r
2097          * in which case it's a no-op.\r
2098          */\r
2099         if( p_mad->method == IB_MAD_METHOD_SET )\r
2100         {\r
2101                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
2102                         &p_spl_qp_svc->cache.vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) )\r
2103                 {\r
2104                         /* The set is requesting a change. */\r
2105                         AL_EXIT( AL_DBG_SMI );\r
2106                         return IB_NOT_DONE;\r
2107                 }\r
2108         }\r
2109         \r
2110         /* Get a MAD element from the pool for the response. */\r
2111         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
2112         if( status == IB_SUCCESS )\r
2113         {\r
2114                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
2115 \r
2116                 /* Setup the response mad. */\r
2117                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
2118                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
2119                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2120                         p_smp->status = IB_SMP_DIRECTION;\r
2121                 else\r
2122                         p_smp->status = 0;\r
2123 \r
2124                 p_vl_arb_table = (ib_vl_arb_table_t*)ib_smp_get_payload_ptr( p_smp );\r
2125 \r
2126                 // TODO: do we need lock on the cache ?????\r
2127 \r
2128                 \r
2129                 /* Copy the cached data. */\r
2130                 cl_memcpy( p_vl_arb_table,\r
2131                         &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_vl_arb_table_t) );\r
2132 \r
2133                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
2134         }\r
2135 \r
2136         AL_EXIT( AL_DBG_SMI );\r
2137         return status;\r
2138 }\r
2139 \r
2140 \r
2141 \r
2142 \r
2143 /*\r
2144  * Process subnet administration MADs using cached data if possible.\r
2145  */\r
2146 static ib_api_status_t\r
2147 __process_subn_mad(\r
2148         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2149         IN                              al_mad_wr_t* const                      p_mad_wr )\r
2150 {\r
2151         ib_api_status_t         status;\r
2152         ib_smp_t                        *p_smp;\r
2153 \r
2154         AL_ENTER( AL_DBG_SMI );\r
2155 \r
2156         CL_ASSERT( p_spl_qp_svc );\r
2157         CL_ASSERT( p_mad_wr );\r
2158 \r
2159         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
2160 \r
2161         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
2162                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
2163 \r
2164         /* simple m-key check */\r
2165         if( p_spl_qp_svc->m_key && p_smp->m_key == p_spl_qp_svc->m_key )\r
2166         {\r
2167                 if(!p_spl_qp_svc->cache_en )\r
2168                 {\r
2169                         p_spl_qp_svc->cache_en = TRUE;\r
2170                         AL_EXIT( AL_DBG_SMI );\r
2171                         return IB_NOT_DONE;\r
2172                 }\r
2173         }\r
2174         else\r
2175         {\r
2176                 AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check failed \n"));\r
2177                 AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check SMP= 0x%08x:%08x  SVC = 0x%08x:%08x \n",\r
2178                                                                         ((uint32_t*)&p_smp->m_key)[0],((uint32_t*)&p_smp->m_key)[1],\r
2179                                                                         ((uint32_t*)&p_spl_qp_svc->m_key)[0],((uint32_t*)&p_spl_qp_svc->m_key)[1]));\r
2180 \r
2181                 p_spl_qp_svc->cache_en = FALSE;\r
2182                 AL_EXIT( AL_DBG_SMI );\r
2183                 return IB_NOT_DONE;\r
2184         }\r
2185 \r
2186         cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);\r
2187         \r
2188         switch( p_smp->attr_id )\r
2189         {\r
2190         case IB_MAD_ATTR_NODE_INFO:\r
2191                 status = __process_node_info( p_spl_qp_svc, p_mad_wr );\r
2192                 break;\r
2193 \r
2194         case IB_MAD_ATTR_NODE_DESC:\r
2195                 status = __process_node_desc( p_spl_qp_svc, p_mad_wr );\r
2196                 break;\r
2197 \r
2198         case IB_MAD_ATTR_GUID_INFO:\r
2199                 status = __process_guid_info( p_spl_qp_svc, p_mad_wr );\r
2200                 break;\r
2201 \r
2202         case IB_MAD_ATTR_P_KEY_TABLE:\r
2203                 status = __process_pkey_table( p_spl_qp_svc, p_mad_wr );\r
2204                 break;\r
2205                 \r
2206         case IB_MAD_ATTR_SLVL_TABLE:\r
2207                 status = __process_slvl_table( p_spl_qp_svc, p_mad_wr );\r
2208                 break;\r
2209                 \r
2210         case IB_MAD_ATTR_VL_ARBITRATION:\r
2211                 status = __process_vl_arb_table( p_spl_qp_svc, p_mad_wr );\r
2212                 break;\r
2213                 \r
2214         default:\r
2215                 status = IB_NOT_DONE;\r
2216                 break;\r
2217         }\r
2218 \r
2219         cl_spinlock_release(&p_spl_qp_svc->cache_lock);\r
2220 \r
2221         AL_EXIT( AL_DBG_SMI );\r
2222         return status;\r
2223 }\r
2224 \r
2225 \r
/*
 * Process a local MAD send work request.
 *
 * Delivers a self-addressed MAD directly to the CA interface rather than
 * posting it to the wire, then simulates the send completion and, when the
 * sender expects a reply, builds and dispatches a simulated receive.
 *
 * Returns IB_SUCCESS if the MAD was a Set that completed (caller triggers a
 * PnP poll), IB_NOT_DONE for a successfully processed Get (no poll needed),
 * or an error status on failure.
 */
static ib_api_status_t
fwd_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	al_mad_send_t*			p_mad_send;
	ib_mad_element_t*		p_send_mad;
	ib_mad_element_t*		p_mad_response = NULL;
	ib_mad_t*				p_mad_response_buf;
	ib_api_status_t			status = IB_SUCCESS;
	boolean_t				smp_is_set;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Capture the method now; p_smp is repointed at the response later. */
	smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

	/* Get a MAD element from the pool for the response. */
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	if( p_mad_send->p_send_mad->resp_expected )
	{
		status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
		if( status != IB_SUCCESS )
		{
			/* No response buffer available - fail the send back to the dispatcher. */
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_LOCAL_OP_ERR );
			AL_EXIT( AL_DBG_SMI );
			return status;
		}
		p_mad_response_buf = p_mad_response->p_mad_buf;
		/* Copy MAD to dispatch locally in case CA doesn't handle it. */
		*p_mad_response_buf = *p_mad;
	}
	else
	{
			p_mad_response_buf = NULL;
	}

	/* Adjust directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		CL_ASSERT( !ib_smp_is_response( p_smp ) );

		/*
		 * If this was a self addressed, directed route SMP, increment
		 * the hop pointer in the request before delivery as required
		 * by IBA.  Otherwise, adjustment for remote requests occurs
		 * during inbound processing.
		 */
		if( p_smp->hop_count == 0 )
			p_smp->hop_ptr++;
	}

	/* Forward the locally addressed MAD to the CA interface. */
	status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
		p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf );

	/* Reset directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		/*
		 * If this was a self addressed, directed route SMP, decrement
		 * the hop pointer in the response before delivery as required
		 * by IBA.  Otherwise, adjustment for remote responses occurs
		 * during outbound processing.
		 */
		if( p_smp->hop_count == 0 )
		{
			/* Adjust the request SMP. */
			p_smp->hop_ptr--;

			/* Adjust the response SMP. */
			if( p_mad_response_buf )
			{
				/* NOTE: from here on p_smp tracks the response, not the request. */
				p_smp = (ib_smp_t*)p_mad_response_buf;
				p_smp->hop_ptr--;
			}
		}
	}

	if( status != IB_SUCCESS )
	{
		/* Release the unused response element and fail the send. */
		if( p_mad_response )
			ib_put_mad( p_mad_response );

		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return status;
	}

	/* Check the completion status of this simulated send. */
	if( p_mad_send->p_send_mad->resp_expected )
	{
		/*
		 * The SMI is uses PnP polling to refresh the base_lid and lmc.
		 * Polling takes time, so we update the values here to prevent
		 * the failure of LID routed MADs sent immediately following this
		 * assignment.  Check the response to see if the port info was set.
		 */
		if( smp_is_set )
		{
			ib_smp_t*		p_smp_response = NULL;

			switch( p_mad_response_buf->mgmt_class )
			{
			case IB_MCLASS_SUBN_DIR:
				/* For DR SMPs, p_smp already aliases the response buffer here. */
				if( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) 
				{
					p_smp_response = p_smp;
					//p_port_info =
					//	(ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );
				}
				break;

			case IB_MCLASS_SUBN_LID:
				if( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS )
				{
					p_smp_response = (ib_smp_t*)p_mad_response_buf;
					//p_port_info =
					//	(ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf);
				}
				break;

			default:
				break;
			}

			if( p_smp_response )
			{
				switch( p_smp_response->attr_id )
				{
					case IB_MAD_ATTR_PORT_INFO:
						{
							/* Mirror the freshly-set port attributes locally. */
							ib_port_info_t		*p_port_info =
								(ib_port_info_t*)ib_smp_get_payload_ptr(p_smp_response);
							p_spl_qp_svc->base_lid = p_port_info->base_lid;
							p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
							p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid;
							p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info );

							if(p_port_info->m_key)
								p_spl_qp_svc->m_key = p_port_info->m_key;
							/* High bit of subnet_timeout signals client reregistration. */
							if (p_port_info->subnet_timeout & 0x80)
							{
								AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
									("Client reregister event, setting sm_lid to 0.\n"));
								ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);
								p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->
									p_port_attr[p_port_info->local_port_num - 1].sm_lid= 0;
								ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);
							}
						}
						break;
					case IB_MAD_ATTR_P_KEY_TABLE:
					case IB_MAD_ATTR_GUID_INFO:
					case IB_MAD_ATTR_SLVL_TABLE:
					case IB_MAD_ATTR_VL_ARBITRATION:
						/* Keep the SMI attribute cache coherent with the Set. */
						spl_qp_svc_update_cache( p_spl_qp_svc, p_smp_response);
						break;
					default :
						break;
				}
			}
		}
		

		/* Construct the receive MAD element. */
		p_send_mad = p_mad_send->p_send_mad;
		p_mad_response->status = IB_WCS_SUCCESS;
		p_mad_response->grh_valid = p_send_mad->grh_valid;
		if( p_mad_response->grh_valid )
			*p_mad_response->p_grh	= *p_send_mad->p_grh;
		p_mad_response->path_bits	= p_send_mad->path_bits;
		p_mad_response->pkey_index	= p_send_mad->pkey_index;
		p_mad_response->remote_lid	= p_send_mad->remote_lid;
		p_mad_response->remote_qkey = p_send_mad->remote_qkey;
		p_mad_response->remote_qp	= p_send_mad->remote_qp;
		p_mad_response->remote_sl	= p_send_mad->remote_sl;
		if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
		{
			p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data;
			p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE;
		}

		/*
		 * Hand the receive MAD element to the dispatcher before completing
		 * the send.  This guarantees that the send request cannot time out.
		 */
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response );
		if( status != IB_SUCCESS )
			ib_put_mad( p_mad_response );
	}
	
	__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,IB_WCS_SUCCESS);

	
	
	/* If the SMP was a Get, no need to trigger a PnP poll. */
	if( status == IB_SUCCESS && !smp_is_set )
		status = IB_NOT_DONE;

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2442 \r
2443 \r
2444 \r
2445 /*\r
2446  * Asynchronous processing thread callback to send a local MAD.\r
2447  */\r
2448 void\r
2449 send_local_mad_cb(\r
2450         IN                              cl_async_proc_item_t*           p_item )\r
2451 {\r
2452         spl_qp_svc_t*                   p_spl_qp_svc;\r
2453         ib_api_status_t                 status;\r
2454 \r
2455         AL_ENTER( AL_DBG_SMI );\r
2456 \r
2457         CL_ASSERT( p_item );\r
2458         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
2459 \r
2460         /* Process a local MAD send work request. */\r
2461         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
2462         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
2463 \r
2464         /*\r
2465          * If we successfully processed a local MAD, which could have changed\r
2466          * something (e.g. the LID) on the HCA.  Scan for changes.\r
2467          */\r
2468         if( status == IB_SUCCESS )\r
2469                 pnp_poll();\r
2470 \r
2471         /*\r
2472          * Clear the local MAD pointer to allow processing of other MADs.\r
2473          * This is done after polling for attribute changes to ensure that\r
2474          * subsequent MADs pick up any changes performed by this one.\r
2475          */\r
2476         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2477         p_spl_qp_svc->local_mad_wr = NULL;\r
2478         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2479 \r
2480         /* Continue processing any queued MADs on the QP. */\r
2481         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2482 \r
2483         /* No longer in use by the asynchronous processing thread. */\r
2484         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2485 \r
2486         AL_EXIT( AL_DBG_SMI );\r
2487 }\r
2488 \r
2489 \r
2490 \r
2491 /*\r
2492  * Special QP send completion callback.\r
2493  */\r
2494 void\r
2495 spl_qp_send_comp_cb(\r
2496         IN              const   ib_cq_handle_t                          h_cq,\r
2497         IN                              void*                                           cq_context )\r
2498 {\r
2499         spl_qp_svc_t*                   p_spl_qp_svc;\r
2500 \r
2501         AL_ENTER( AL_DBG_SMI );\r
2502 \r
2503         UNREFERENCED_PARAMETER( h_cq );\r
2504 \r
2505         CL_ASSERT( cq_context );\r
2506         p_spl_qp_svc = cq_context;\r
2507 \r
2508 #if defined( CL_USE_MUTEX )\r
2509 \r
2510         /* Queue an asynchronous processing item to process sends. */\r
2511         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2512         if( !p_spl_qp_svc->send_async_queued )\r
2513         {\r
2514                 p_spl_qp_svc->send_async_queued = TRUE;\r
2515                 ref_al_obj( &p_spl_qp_svc->obj );\r
2516                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
2517         }\r
2518         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2519 \r
2520 #else\r
2521     cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2522         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2523         {\r
2524                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2525         AL_EXIT( AL_DBG_SMI );\r
2526                 return;\r
2527         }\r
2528         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2529         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2530 \r
2531     /* Queue the DPC. */\r
2532         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
2533     KeInsertQueueDpc( &p_spl_qp_svc->send_dpc, NULL, NULL );\r
2534 #endif\r
2535 \r
2536         AL_EXIT( AL_DBG_SMI );\r
2537 }\r
2538 \r
2539 \r
2540 void\r
2541 spl_qp_send_dpc_cb(\r
2542     IN              KDPC                        *p_dpc,\r
2543     IN              void                        *context,\r
2544     IN              void                        *arg1,\r
2545     IN              void                        *arg2\r
2546     )\r
2547 {\r
2548         spl_qp_svc_t*                   p_spl_qp_svc;\r
2549 \r
2550         AL_ENTER( AL_DBG_SMI );\r
2551 \r
2552         CL_ASSERT( context );\r
2553         p_spl_qp_svc = context;\r
2554 \r
2555     UNREFERENCED_PARAMETER( p_dpc );\r
2556     UNREFERENCED_PARAMETER( arg1 );\r
2557     UNREFERENCED_PARAMETER( arg2 );\r
2558 \r
2559         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );\r
2560 \r
2561         /* Continue processing any queued MADs on the QP. */\r
2562         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2563 \r
2564     cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2565 \r
2566     AL_EXIT( AL_DBG_SMI );\r
2567 }\r
2568 \r
2569 \r
#if defined( CL_USE_MUTEX )
/*
 * Async-thread worker that processes send completions (mutex build only).
 */
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;
	ib_api_status_t			resume_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Allow another async item to be queued for this service. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Process send completions, then restart any queued sends. */
	spl_qp_comp( p_svc, p_svc->h_send_cq, IB_WC_SEND );

	resume_status = special_qp_resume_sends( p_svc->h_qp );
	CL_ASSERT( resume_status == IB_SUCCESS );

	/* Release the reference taken when the item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2599 \r
2600 \r
2601 \r
2602 /*\r
2603  * Special QP receive completion callback.\r
2604  */\r
2605 void\r
2606 spl_qp_recv_comp_cb(\r
2607         IN              const   ib_cq_handle_t                          h_cq,\r
2608         IN                              void*                                           cq_context )\r
2609 {\r
2610         spl_qp_svc_t*                   p_spl_qp_svc;\r
2611 \r
2612         AL_ENTER( AL_DBG_SMI );\r
2613 \r
2614         UNREFERENCED_PARAMETER( h_cq );\r
2615 \r
2616         CL_ASSERT( cq_context );\r
2617         p_spl_qp_svc = cq_context;\r
2618 \r
2619 #if defined( CL_USE_MUTEX )\r
2620 \r
2621         /* Queue an asynchronous processing item to process receives. */\r
2622         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2623         if( !p_spl_qp_svc->recv_async_queued )\r
2624         {\r
2625                 p_spl_qp_svc->recv_async_queued = TRUE;\r
2626                 ref_al_obj( &p_spl_qp_svc->obj );\r
2627                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
2628         }\r
2629         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2630 \r
2631 #else\r
2632     cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2633         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2634         {\r
2635                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2636         AL_EXIT( AL_DBG_SMI );\r
2637                 return;\r
2638         }\r
2639         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2640         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2641 \r
2642     /* Queue the DPC. */\r
2643         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
2644     KeInsertQueueDpc( &p_spl_qp_svc->recv_dpc, NULL, NULL );\r
2645 #endif\r
2646 \r
2647         AL_EXIT( AL_DBG_SMI );\r
2648 }\r
2649 \r
2650 \r
2651 void\r
2652 spl_qp_recv_dpc_cb(\r
2653     IN              KDPC                        *p_dpc,\r
2654     IN              void                        *context,\r
2655     IN              void                        *arg1,\r
2656     IN              void                        *arg2\r
2657     )\r
2658 {\r
2659         spl_qp_svc_t*                   p_spl_qp_svc;\r
2660 \r
2661         AL_ENTER( AL_DBG_SMI );\r
2662 \r
2663         CL_ASSERT( context );\r
2664         p_spl_qp_svc = context;\r
2665 \r
2666     UNREFERENCED_PARAMETER( p_dpc );\r
2667     UNREFERENCED_PARAMETER( arg1 );\r
2668     UNREFERENCED_PARAMETER( arg2 );\r
2669 \r
2670         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );\r
2671 \r
2672     cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2673 \r
2674     AL_EXIT( AL_DBG_SMI );\r
2675 }\r
2676 \r
2677 \r
#if defined( CL_USE_MUTEX )
/*
 * Async-thread worker that processes receive completions (mutex build only).
 */
void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Allow another async item to be queued for this service. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Process receive completions from the receive CQ. */
	spl_qp_comp( p_svc, p_svc->h_recv_cq, IB_WC_RECV );

	/* Release the reference taken when the item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2702 \r
2703 \r
#define SPL_QP_MAX_POLL 16
/*
 * Special QP completion handler.
 *
 * Drains up to SPL_QP_MAX_POLL work completions of the given type from the
 * CQ.  If the poll budget runs out, the matching DPC is requeued (reusing
 * the in_use_cnt reference already held here) so the remainder is processed
 * later; otherwise the CQ is rearmed and the reference is released.
 */
void
spl_qp_comp(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN		const	ib_cq_handle_t				h_cq,
	IN				ib_wc_type_t				wc_type )
{
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc = &wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_mad_element_t*		p_mad_element;
	ib_smp_t*				p_smp;
	ib_api_status_t			status;
    int                     max_poll = SPL_QP_MAX_POLL;	/* bounds work done per invocation */

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( h_cq );

	/* Check the QP state and guard against error handling. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	wc.p_next = NULL;
	/* Process work completions.  A single stack WC is recycled per poll. */
	while( max_poll && ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
	{
		/* Process completions one at a time. */
		CL_ASSERT( p_done_wc );

		/* Flushed completions are handled elsewhere. */
		if( wc.status == IB_WCS_WR_FLUSHED_ERR )
		{
			p_free_wc = &wc;
			continue;
		}

		/*
		 * Process the work completion.  Per IBA specification, the
		 * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
		 * Use the wc_type parameter.
		 */
		switch( wc_type )
		{
		case IB_WC_SEND:
			/* Get a pointer to the MAD work request. */
			p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);

			/* Remove the MAD work request from the service tracking queue. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
			cl_qlist_remove_item( &p_spl_qp_svc->send_queue,
				&p_mad_wr->list_item );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Reset directed route SMPs as required by IBA. */
			p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );
			if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			{
				/* Undo the hop_ptr adjustment made on the outbound path. */
				if( ib_smp_is_response( p_smp ) )
					p_smp->hop_ptr++;
				else
					p_smp->hop_ptr--;
			}

			/* Report the send completion to the dispatcher. */
			mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );
			break;

		case IB_WC_RECV:

			/* Initialize pointers to the MAD element. */
			p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
			p_mad_element = &p_al_mad->element;

			/* Remove the AL MAD element from the service tracking list. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_mad->list_item );

			/* Replenish the receive buffer. */
			spl_qp_svc_post_recvs( p_spl_qp_svc );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Construct the MAD element from the receive work completion. */
			build_mad_recv( p_mad_element, &wc );

			/* Process the received MAD. */
			status = process_mad_recv( p_spl_qp_svc, p_mad_element );

			/* Discard this MAD on error. */
			if( status != IB_SUCCESS )
			{
				status = ib_put_mad( p_mad_element );
				CL_ASSERT( status == IB_SUCCESS );
			}
			break;

		default:
			CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );
			break;
		}

		if( wc.status != IB_WCS_SUCCESS )
		{
			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("special QP completion error: %s! internal syndrome 0x%I64x\n",
				ib_get_wc_status_str( wc.status ), wc.vendor_specific) );

			/* Reset the special QP service and return. */
			spl_qp_svc_reset( p_spl_qp_svc );
		}
		/* Return the WC to the free list and charge the poll budget. */
		p_free_wc = &wc;
        --max_poll;
	}

    if( max_poll == 0 )
    {
        /* We already have an in_use_cnt reference - use it to queue the DPC. */
        if( wc_type == IB_WC_SEND )
            KeInsertQueueDpc( &p_spl_qp_svc->send_dpc, NULL, NULL );
        else
            KeInsertQueueDpc( &p_spl_qp_svc->recv_dpc, NULL, NULL );
    }
    else
    {
	    /* Rearm the CQ. */
	    status = ib_rearm_cq( h_cq, FALSE );
	    CL_ASSERT( status == IB_SUCCESS );

	    cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
    }
	AL_EXIT( AL_DBG_SMI_CB );
}
2850 \r
2851 \r
2852 \r
2853 /*\r
2854  * Process a received MAD.\r
2855  */\r
2856 ib_api_status_t\r
2857 process_mad_recv(\r
2858         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2859         IN                              ib_mad_element_t*                       p_mad_element )\r
2860 {\r
2861         ib_smp_t*                               p_smp;\r
2862         mad_route_t                             route;\r
2863         ib_api_status_t                 status;\r
2864 \r
2865         AL_ENTER( AL_DBG_SMI );\r
2866 \r
2867         CL_ASSERT( p_spl_qp_svc );\r
2868         CL_ASSERT( p_mad_element );\r
2869 \r
2870         /*\r
2871          * If the CA has a HW agent then this MAD should have been\r
2872          * consumed below verbs.  The fact that it was received here\r
2873          * indicates that it should be forwarded to the dispatcher\r
2874          * for delivery to a class manager.  Otherwise, determine how\r
2875          * the MAD should be routed.\r
2876          */\r
2877         route = ROUTE_DISPATCHER;\r
2878         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
2879         {\r
2880                 /*\r
2881                  * SMP and GMP processing is branched here to handle overlaps\r
2882                  * between class methods and attributes.\r
2883                  */\r
2884                 switch( p_mad_element->p_mad_buf->mgmt_class )\r
2885                 {\r
2886                 case IB_MCLASS_SUBN_DIR:\r
2887                         /* Perform special checks on directed route SMPs. */\r
2888                         p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;\r
2889 \r
2890                         if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
2891                                 ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
2892                         {\r
2893                                 route = ROUTE_DISCARD;\r
2894                         }\r
2895                         else if( ib_smp_is_response( p_smp ) )\r
2896                         {\r
2897                                 /*\r
2898                                  * This node is the destination of the response.  Discard\r
2899                                  * the source LID or hop pointer are incorrect.\r
2900                                  */\r
2901                                 if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
2902                                 {\r
2903                                         if( p_smp->hop_ptr == 1 )\r
2904                                         {\r
2905                                                 p_smp->hop_ptr--;               /* Adjust ptr per IBA spec. */\r
2906                                         }\r
2907                                         else\r
2908                                         {\r
2909                                                 route = ROUTE_DISCARD;\r
2910                                         }\r
2911                                 }\r
2912                                 else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||\r
2913                                                  ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +\r
2914                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2915                                 {\r
2916                                                 route = ROUTE_DISCARD;\r
2917                                 }\r
2918                         }\r
2919                         else\r
2920                         {\r
2921                                 /*\r
2922                                  * This node is the destination of the request.  Discard\r
2923                                  * the destination LID or hop pointer are incorrect.\r
2924                                  */\r
2925                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
2926                                 {\r
2927                                         if( p_smp->hop_count == p_smp->hop_ptr )\r
2928                                         {\r
2929                                                 p_smp->return_path[ p_smp->hop_ptr++ ] =\r
2930                                                         p_spl_qp_svc->port_num; /* Set path per IBA spec. */\r
2931                                         }\r
2932                                         else\r
2933                                         {\r
2934                                                 route = ROUTE_DISCARD;\r
2935                                         }\r
2936                                 }\r
2937                                 else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||\r
2938                                                  ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +\r
2939                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2940                                 {\r
2941                                         route = ROUTE_DISCARD;\r
2942                                 }\r
2943                         }\r
2944 \r
2945                         if( route == ROUTE_DISCARD ) break;\r
2946                         /* else fall through next case */\r
2947 \r
2948                 case IB_MCLASS_SUBN_LID:\r
2949                         route = route_recv_smp( p_mad_element );\r
2950                         break;\r
2951 \r
2952                 case IB_MCLASS_PERF:\r
2953                         route = route_recv_perf( p_mad_element );\r
2954                         break;\r
2955 \r
2956                 case IB_MCLASS_BM:\r
2957                         route = route_recv_bm( p_mad_element );\r
2958                         break;\r
2959 \r
2960                 case IB_MLX_VENDOR_CLASS1:\r
2961                 case IB_MLX_VENDOR_CLASS2:\r
2962                         route = ROUTE_LOCAL;\r
2963                         break;\r
2964 \r
2965                 default:\r
2966                         break;\r
2967                 }\r
2968         }\r
2969 \r
2970         /* Route the MAD. */\r
2971         if( is_discard( route ) )\r
2972                 status = IB_ERROR;\r
2973         else if( is_dispatcher( route ) )\r
2974                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );\r
2975         else if( is_remote( route ) )\r
2976                 status = forward_sm_trap( p_spl_qp_svc, p_mad_element );\r
2977         else\r
2978                 status = recv_local_mad( p_spl_qp_svc, p_mad_element );\r
2979 \r
2980         AL_EXIT( AL_DBG_SMI );\r
2981         return status;\r
2982 }\r
2983 \r
2984 \r
2985 \r
2986 /*\r
2987  * Route a received SMP.\r
2988  */\r
2989 static mad_route_t\r
2990 route_recv_smp(\r
2991         IN                              ib_mad_element_t*                       p_mad_element )\r
2992 {\r
2993         mad_route_t                             route;\r
2994 \r
2995         AL_ENTER( AL_DBG_SMI );\r
2996 \r
2997         CL_ASSERT( p_mad_element );\r
2998 \r
2999         /* Process the received SMP. */\r
3000         switch( p_mad_element->p_mad_buf->method )\r
3001         {\r
3002         case IB_MAD_METHOD_GET:\r
3003         case IB_MAD_METHOD_SET:\r
3004                 route = route_recv_smp_attr( p_mad_element );\r
3005                 break;\r
3006 \r
3007         case IB_MAD_METHOD_TRAP:\r
3008                 /*\r
3009                  * Special check to route locally generated traps to the remote SM.\r
3010                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
3011                  * IB_RECV_OPT_FORWARD flag.\r
3012                  *\r
3013                  * Note that because forwarded traps use AL MAD services, the upper\r
3014                  * 32-bits of the TID are reserved by the access layer.  When matching\r
3015                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
3016                  * TID.\r
3017                  */\r
3018                 AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("Trap TID = 0x%08x:%08x \n",\r
3019                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
3020                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
3021 \r
3022                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
3023                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
3024                 break;\r
3025 \r
3026         case IB_MAD_METHOD_TRAP_REPRESS:\r
3027                 /*\r
3028                  * Note that because forwarded traps use AL MAD services, the upper\r
3029                  * 32-bits of the TID are reserved by the access layer.  When matching\r
3030                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
3031                  * TID.\r
3032                  */\r
3033                 AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("TrapRepress TID = 0x%08x:%08x \n",\r
3034                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
3035                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
3036 \r
3037                 route = ROUTE_LOCAL;\r
3038                 break;\r
3039 \r
3040         default:\r
3041                 route = ROUTE_DISPATCHER;\r
3042                 break;\r
3043         }\r
3044 \r
3045         AL_EXIT( AL_DBG_SMI );\r
3046         return route;\r
3047 }\r
3048 \r
3049 \r
3050 \r
3051 /*\r
3052  * Route received SMP attributes.\r
3053  */\r
3054 static mad_route_t\r
3055 route_recv_smp_attr(\r
3056         IN                              ib_mad_element_t*                       p_mad_element )\r
3057 {\r
3058         mad_route_t                             route;\r
3059 \r
3060         AL_ENTER( AL_DBG_SMI );\r
3061 \r
3062         CL_ASSERT( p_mad_element );\r
3063 \r
3064         /* Process the received SMP attributes. */\r
3065         switch( p_mad_element->p_mad_buf->attr_id )\r
3066         {\r
3067         case IB_MAD_ATTR_NODE_DESC:\r
3068         case IB_MAD_ATTR_NODE_INFO:\r
3069         case IB_MAD_ATTR_GUID_INFO:\r
3070         case IB_MAD_ATTR_PORT_INFO:\r
3071         case IB_MAD_ATTR_P_KEY_TABLE:\r
3072         case IB_MAD_ATTR_SLVL_TABLE:\r
3073         case IB_MAD_ATTR_VL_ARBITRATION:\r
3074         case IB_MAD_ATTR_VENDOR_DIAG:\r
3075         case IB_MAD_ATTR_LED_INFO:\r
3076         case IB_MAD_ATTR_SWITCH_INFO:\r
3077                 route = ROUTE_LOCAL;\r
3078                 break;\r
3079 \r
3080         default:\r
3081                 route = ROUTE_DISPATCHER;\r
3082                 break;\r
3083         }\r
3084 \r
3085         AL_EXIT( AL_DBG_SMI );\r
3086         return route;\r
3087 }\r
3088 \r
3089 \r
3090 static mad_route_t\r
3091 route_recv_bm(\r
3092         IN                              ib_mad_element_t*                       p_mad_element )\r
3093 {\r
3094         switch( p_mad_element->p_mad_buf->method )\r
3095         {\r
3096         case IB_MAD_METHOD_GET:\r
3097         case IB_MAD_METHOD_SET:\r
3098                 if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
3099                         return ROUTE_LOCAL;\r
3100                 break;\r
3101         default:\r
3102                 break;\r
3103         }\r
3104         return ROUTE_DISPATCHER;\r
3105 }\r
3106 \r
3107 static mad_route_t\r
3108 route_recv_perf(\r
3109         IN                              ib_mad_element_t*                       p_mad_element )\r
3110 {\r
3111         switch( p_mad_element->p_mad_buf->method )\r
3112         {\r
3113         case IB_MAD_METHOD_GET:\r
3114         case IB_MAD_METHOD_SET:\r
3115                 return ROUTE_LOCAL;\r
3116         default:\r
3117                 break;\r
3118         }\r
3119         return ROUTE_DISPATCHER;\r
3120 }\r
3121 \r
3122 /*\r
3123  * Forward a locally generated Subnet Management trap.\r
3124  */\r
3125 ib_api_status_t\r
3126 forward_sm_trap(\r
3127         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
3128         IN                              ib_mad_element_t*                       p_mad_element )\r
3129 {\r
3130         ib_av_attr_t                    av_attr;\r
3131         ib_api_status_t                 status;\r
3132 \r
3133         AL_ENTER( AL_DBG_SMI );\r
3134 \r