ib/mad: fix routing of vendor mads
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  * Copyright (c) 2006 Voltaire Corporation.  All rights reserved.\r
5  * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
6  *\r
7  * This software is available to you under the OpenIB.org BSD license\r
8  * below:\r
9  *\r
10  *     Redistribution and use in source and binary forms, with or\r
11  *     without modification, are permitted provided that the following\r
12  *     conditions are met:\r
13  *\r
14  *      - Redistributions of source code must retain the above\r
15  *        copyright notice, this list of conditions and the following\r
16  *        disclaimer.\r
17  *\r
18  *      - Redistributions in binary form must reproduce the above\r
19  *        copyright notice, this list of conditions and the following\r
20  *        disclaimer in the documentation and/or other materials\r
21  *        provided with the distribution.\r
22  *\r
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
30  * SOFTWARE.\r
31  *\r
32  * $Id$\r
33  */\r
34 \r
35 \r
36 #include <iba/ib_al.h>\r
37 #include <complib/cl_timer.h>\r
38 \r
39 #include "ib_common.h"\r
40 #include "al_common.h"\r
41 #include "al_debug.h"\r
42 #if defined(EVENT_TRACING)\r
43 #ifdef offsetof\r
44 #undef offsetof\r
45 #endif\r
46 #include "al_smi.tmh"\r
47 #endif\r
48 #include "al_verbs.h"\r
49 #include "al_mgr.h"\r
50 #include "al_pnp.h"\r
51 #include "al_qp.h"\r
52 #include "al_smi.h"\r
53 #include "al_av.h"\r
54 \r
55 \r
56 extern char                                             node_desc[IB_NODE_DESCRIPTION_SIZE];\r
57 \r
58 #define SMI_POLL_INTERVAL                       20000           /* Milliseconds */\r
59 #define LOCAL_MAD_TIMEOUT                       50                      /* Milliseconds */\r
60 #define DEFAULT_QP0_DEPTH                       256\r
61 #define DEFAULT_QP1_DEPTH                       1024\r
62 \r
63 uint32_t                                g_smi_poll_interval =   SMI_POLL_INTERVAL;\r
64 spl_qp_mgr_t*                   gp_spl_qp_mgr = NULL;\r
65 \r
66 \r
67 /*\r
68  * Function prototypes.\r
69  */\r
70 void\r
71 destroying_spl_qp_mgr(\r
72         IN                              al_obj_t*                                       p_obj );\r
73 \r
74 void\r
75 free_spl_qp_mgr(\r
76         IN                              al_obj_t*                                       p_obj );\r
77 \r
78 ib_api_status_t\r
79 spl_qp0_agent_pnp_cb(\r
80         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
81 \r
82 ib_api_status_t\r
83 spl_qp1_agent_pnp_cb(\r
84         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
85 \r
86 ib_api_status_t\r
87 spl_qp_agent_pnp(\r
88         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
89         IN                              ib_qp_type_t                            qp_type );\r
90 \r
91 ib_api_status_t\r
92 create_spl_qp_svc(\r
93         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
94         IN              const   ib_qp_type_t                            qp_type );\r
95 \r
96 void\r
97 destroying_spl_qp_svc(\r
98         IN                              al_obj_t*                                       p_obj );\r
99 \r
100 void\r
101 free_spl_qp_svc(\r
102         IN                              al_obj_t*                                       p_obj );\r
103 \r
104 void\r
105 spl_qp_svc_lid_change(\r
106         IN                              al_obj_t*                                       p_obj,\r
107         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
108 \r
109 ib_api_status_t\r
110 remote_mad_send(\r
111         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
112         IN                              al_mad_wr_t* const                      p_mad_wr );\r
113 \r
114 static ib_api_status_t\r
115 local_mad_send(\r
116         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
117         IN                              al_mad_wr_t* const                      p_mad_wr );\r
118 \r
119 static ib_api_status_t\r
120 loopback_mad(\r
121         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
122         IN                              al_mad_wr_t* const                      p_mad_wr );\r
123 \r
124 static ib_api_status_t\r
125 __process_subn_mad(\r
126         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
127         IN                              al_mad_wr_t* const                      p_mad_wr );\r
128 \r
129 static ib_api_status_t\r
130 fwd_local_mad(\r
131         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
132         IN                              al_mad_wr_t* const                      p_mad_wr );\r
133 \r
134 void\r
135 send_local_mad_cb(\r
136         IN                              cl_async_proc_item_t*           p_item );\r
137 \r
138 void\r
139 spl_qp_send_comp_cb(\r
140         IN              const   ib_cq_handle_t                          h_cq,\r
141         IN                              void                                            *cq_context );\r
142 \r
143 void\r
144 spl_qp_send_dpc_cb(\r
145     IN              KDPC                        *p_dpc,\r
146     IN              void                        *context,\r
147     IN              void                        *arg1,\r
148     IN              void                        *arg2\r
149     );\r
150 \r
151 void\r
152 spl_qp_recv_dpc_cb(\r
153     IN              KDPC                        *p_dpc,\r
154     IN              void                        *context,\r
155     IN              void                        *arg1,\r
156     IN              void                        *arg2\r
157     );\r
158 \r
159 void\r
160 spl_qp_recv_comp_cb(\r
161         IN              const   ib_cq_handle_t                          h_cq,\r
162         IN                              void                                            *cq_context );\r
163 \r
164 void\r
165 spl_qp_comp(\r
166         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
167         IN              const   ib_cq_handle_t                          h_cq,\r
168         IN                              ib_wc_type_t                            wc_type );\r
169 \r
170 ib_api_status_t\r
171 process_mad_recv(\r
172         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
173         IN                              ib_mad_element_t*                       p_mad_element );\r
174 \r
175 static mad_route_t\r
176 route_recv_smp(\r
177         IN                              ib_mad_element_t*                       p_mad_element );\r
178 \r
179 static mad_route_t\r
180 route_recv_smp_attr(\r
181         IN                              ib_mad_element_t*                       p_mad_element );\r
182 \r
183 mad_route_t\r
184 route_recv_dm_mad(\r
185         IN                              ib_mad_element_t*                       p_mad_element );\r
186 \r
187 static mad_route_t\r
188 route_recv_bm(\r
189         IN                              ib_mad_element_t*                       p_mad_element );\r
190 \r
191 static mad_route_t\r
192 route_recv_perf(\r
193         IN                              ib_mad_element_t*                       p_mad_element );\r
194 \r
195 static mad_route_t\r
196 route_recv_vendor(\r
197         IN                              ib_mad_element_t*                       p_mad_element );\r
198 \r
199 ib_api_status_t\r
200 forward_sm_trap(\r
201         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
202         IN                              ib_mad_element_t*                       p_mad_element );\r
203 \r
204 ib_api_status_t\r
205 recv_local_mad(\r
206         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
207         IN                              ib_mad_element_t*                       p_mad_request );\r
208 \r
209 void\r
210 spl_qp_alias_send_cb(\r
211         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
212         IN                              void                                            *mad_svc_context,\r
213         IN                              ib_mad_element_t                        *p_mad_element );\r
214 \r
215 void\r
216 spl_qp_alias_recv_cb(\r
217         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
218         IN                              void                                            *mad_svc_context,\r
219         IN                              ib_mad_element_t                        *p_mad_response );\r
220 \r
221 static ib_api_status_t\r
222 spl_qp_svc_post_recvs(\r
223         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
224 \r
225 void\r
226 spl_qp_svc_event_cb(\r
227         IN                              ib_async_event_rec_t            *p_event_rec );\r
228 \r
229 void\r
230 spl_qp_alias_event_cb(\r
231         IN                              ib_async_event_rec_t            *p_event_rec );\r
232 \r
233 void\r
234 spl_qp_svc_reset(\r
235         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
236 \r
237 void\r
238 spl_qp_svc_reset_cb(\r
239         IN                              cl_async_proc_item_t*           p_item );\r
240 \r
241 ib_api_status_t\r
242 acquire_svc_disp(\r
243         IN              const   cl_qmap_t* const                        p_svc_map,\r
244         IN              const   ib_net64_t                                      port_guid,\r
245                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
246 \r
247 void\r
248 smi_poll_timer_cb(\r
249         IN                              void*                                           context );\r
250 \r
251 void\r
252 smi_post_recvs(\r
253         IN                              cl_list_item_t* const           p_list_item,\r
254         IN                              void*                                           context );\r
255 \r
256 #if defined( CL_USE_MUTEX )\r
257 void\r
258 spl_qp_send_async_cb(\r
259         IN                              cl_async_proc_item_t*           p_item );\r
260 \r
261 void\r
262 spl_qp_recv_async_cb(\r
263         IN                              cl_async_proc_item_t*           p_item );\r
264 #endif\r
265 \r
/*
 * Create the special QP manager.
 *
 * Allocates and initializes the single, global special QP (SMI/GSI)
 * manager, attaches it to the given parent AL object, initializes the
 * SMI polling timer, and registers for port PnP events twice - once on
 * behalf of QP0 and once on behalf of QP1 - so each special QP gets its
 * own PnP context.
 *
 * p_parent_obj - parent AL object the manager is attached to.
 *
 * Returns IB_SUCCESS, or an error status if allocation, object
 * initialization, timer initialization, or PnP registration fails.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the SMI and GSI service lookup maps. */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* init_al_obj failed, so free directly rather than via pfn_destroy. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		/* From here on, cleanup goes through the object's destroy callback. */
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
385 \r
386 \r
387 \r
388 /*\r
389  * Pre-destroy the special QP manager.\r
390  */\r
391 void\r
392 destroying_spl_qp_mgr(\r
393         IN                              al_obj_t*                                       p_obj )\r
394 {\r
395         ib_api_status_t                 status;\r
396 \r
397         CL_ASSERT( p_obj );\r
398         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
399         UNUSED_PARAM( p_obj );\r
400 \r
401         /* Deregister for port PnP events for QP0. */\r
402         if( gp_spl_qp_mgr->h_qp0_pnp )\r
403         {\r
404                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
405                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
406                 CL_ASSERT( status == IB_SUCCESS );\r
407         }\r
408 \r
409         /* Deregister for port PnP events for QP1. */\r
410         if( gp_spl_qp_mgr->h_qp1_pnp )\r
411         {\r
412                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
413                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
414                 CL_ASSERT( status == IB_SUCCESS );\r
415         }\r
416 \r
417         /* Destroy the SMI polling timer. */\r
418         cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
419 }\r
420 \r
421 \r
422 \r
423 /*\r
424  * Free the special QP manager.\r
425  */\r
426 void\r
427 free_spl_qp_mgr(\r
428         IN                              al_obj_t*                                       p_obj )\r
429 {\r
430         CL_ASSERT( p_obj );\r
431         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
432         UNUSED_PARAM( p_obj );\r
433 \r
434         destroy_al_obj( &gp_spl_qp_mgr->obj );\r
435         cl_free( gp_spl_qp_mgr );\r
436         gp_spl_qp_mgr = NULL;\r
437 }\r
438 \r
439 \r
440 \r
441 /*\r
442  * Special QP0 agent PnP event callback.\r
443  */\r
444 ib_api_status_t\r
445 spl_qp0_agent_pnp_cb(\r
446         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
447 {\r
448         ib_api_status_t status;\r
449         AL_ENTER( AL_DBG_SMI );\r
450 \r
451         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
452 \r
453         AL_EXIT( AL_DBG_SMI );\r
454         return status;\r
455 }\r
456 \r
457 \r
458 \r
459 /*\r
460  * Special QP1 agent PnP event callback.\r
461  */\r
462 ib_api_status_t\r
463 spl_qp1_agent_pnp_cb(\r
464         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
465 {\r
466         ib_api_status_t status;\r
467         AL_ENTER( AL_DBG_SMI );\r
468 \r
469         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
470 \r
471         AL_EXIT( AL_DBG_SMI );\r
472         return status;\r
473 }\r
474 \r
475 \r
476 \r
477 /*\r
478  * Special QP agent PnP event callback.\r
479  */\r
480 ib_api_status_t\r
481 spl_qp_agent_pnp(\r
482         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
483         IN                              ib_qp_type_t                            qp_type )\r
484 {\r
485         ib_api_status_t                 status;\r
486         al_obj_t*                               p_obj;\r
487 \r
488         AL_ENTER( AL_DBG_SMI );\r
489 \r
490         CL_ASSERT( p_pnp_rec );\r
491         p_obj = p_pnp_rec->context;\r
492 \r
493         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI,\r
494                 ("p_pnp_rec->pnp_event = 0x%x (%s)\n",\r
495                 p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );\r
496         /* Dispatch based on the PnP event type. */\r
497         switch( p_pnp_rec->pnp_event )\r
498         {\r
499         case IB_PNP_PORT_ADD:\r
500                 CL_ASSERT( !p_obj );\r
501                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
502                 break;\r
503 \r
504         case IB_PNP_PORT_REMOVE:\r
505                 CL_ASSERT( p_obj );\r
506                 ref_al_obj( p_obj );\r
507                 p_obj->pfn_destroy( p_obj, NULL );\r
508                 status = IB_SUCCESS;\r
509                 break;\r
510 \r
511         case IB_PNP_LID_CHANGE:\r
512                 CL_ASSERT( p_obj );\r
513                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
514                 status = IB_SUCCESS;\r
515                 break;\r
516 \r
517         default:\r
518                 /* All other events are ignored. */\r
519                 status = IB_SUCCESS;\r
520                 break;\r
521         }\r
522 \r
523         AL_EXIT( AL_DBG_SMI );\r
524         return status;\r
525 }\r
526 \r
527 \r
528 \r
529 /*\r
530  * Create a special QP service.\r
531  */\r
532 ib_api_status_t\r
533 create_spl_qp_svc(\r
534         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
535         IN              const   ib_qp_type_t                            qp_type )\r
536 {\r
537         cl_status_t                             cl_status;\r
538         spl_qp_svc_t*                   p_spl_qp_svc;\r
539         ib_ca_handle_t                  h_ca;\r
540         ib_cq_create_t                  cq_create;\r
541         ib_qp_create_t                  qp_create;\r
542         ib_qp_attr_t                    qp_attr;\r
543         ib_mad_svc_t                    mad_svc;\r
544         ib_api_status_t                 status;\r
545 \r
546         AL_ENTER( AL_DBG_SMI );\r
547 \r
548         CL_ASSERT( p_pnp_rec );\r
549 \r
550         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
551         {\r
552                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
553                 return IB_INVALID_PARAMETER;\r
554         }\r
555 \r
556         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
557         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
558         CL_ASSERT( p_pnp_rec->p_port_attr );\r
559 \r
560         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
561         if( !p_spl_qp_svc )\r
562         {\r
563                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
564                         ("IB_INSUFFICIENT_MEMORY\n") );\r
565                 return IB_INSUFFICIENT_MEMORY;\r
566         }\r
567 \r
568         /* Tie the special QP service to the port by setting the port number. */\r
569         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
570         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
571         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
572 \r
573         /* Initialize the send and receive queues. */\r
574         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
575         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
576         cl_spinlock_init(&p_spl_qp_svc->cache_lock);\r
577 \r
578     /* Initialize the DPCs. */\r
579     KeInitializeDpc( &p_spl_qp_svc->send_dpc, spl_qp_send_dpc_cb, p_spl_qp_svc );\r
580     KeInitializeDpc( &p_spl_qp_svc->recv_dpc, spl_qp_recv_dpc_cb, p_spl_qp_svc );\r
581 \r
582     if( qp_type == IB_QPT_QP0 )\r
583     {\r
584         KeSetImportanceDpc( &p_spl_qp_svc->send_dpc, HighImportance );\r
585         KeSetImportanceDpc( &p_spl_qp_svc->recv_dpc, HighImportance );\r
586     }\r
587 \r
588 #if defined( CL_USE_MUTEX )\r
589         /* Initialize async callbacks and flags for send/receive processing. */\r
590         p_spl_qp_svc->send_async_queued = FALSE;\r
591         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
592         p_spl_qp_svc->recv_async_queued = FALSE;\r
593         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
594 #endif\r
595 \r
596         /* Initialize the async callback function to process local sends. */\r
597         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
598 \r
599         /* Initialize the async callback function to reset the QP on error. */\r
600         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
601 \r
602         /* Construct the special QP service object. */\r
603         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
604 \r
605         /* Initialize the special QP service object. */\r
606         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
607                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
608         if( status != IB_SUCCESS )\r
609         {\r
610                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
611                 return status;\r
612         }\r
613 \r
614         /* Attach the special QP service to the parent object. */\r
615         status = attach_al_obj(\r
616                 (al_obj_t*)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
617         if( status != IB_SUCCESS )\r
618         {\r
619                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
620                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
621                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
622                 return status;\r
623         }\r
624 \r
625         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
626         CL_ASSERT( h_ca );\r
627         if( !h_ca )\r
628         {\r
629                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
630                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
631                 return IB_INVALID_GUID;\r
632         }\r
633 \r
634         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
635 \r
636         /* Determine the maximum queue depth of the QP and CQs. */\r
637         p_spl_qp_svc->max_qp_depth =\r
638                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
639                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
640                 p_pnp_rec->p_ca_attr->max_wrs :\r
641                 p_pnp_rec->p_ca_attr->max_cqes;\r
642 \r
643         /* Compare this maximum to the default special queue depth. */\r
644         if( ( qp_type == IB_QPT_QP0 ) &&\r
645                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
646                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
647         if( ( qp_type == IB_QPT_QP1 ) &&\r
648                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
649                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
650 \r
651         /* Create the send CQ. */\r
652         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
653         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
654         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
655 \r
656         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
657                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
658 \r
659         if( status != IB_SUCCESS )\r
660         {\r
661                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
662                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
663                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
664                 return status;\r
665         }\r
666 \r
667         /* Reference the special QP service on behalf of ib_create_cq. */\r
668         ref_al_obj( &p_spl_qp_svc->obj );\r
669 \r
670         /* Check the result of the creation request. */\r
671         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
672         {\r
673                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
674                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
675                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
676                 return IB_INSUFFICIENT_RESOURCES;\r
677         }\r
678 \r
679         /* Create the receive CQ. */\r
680         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
681         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
682         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
683 \r
684         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
685                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
686 \r
687         if( status != IB_SUCCESS )\r
688         {\r
689                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
690                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
691                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
692                 return status;\r
693         }\r
694 \r
695         /* Reference the special QP service on behalf of ib_create_cq. */\r
696         ref_al_obj( &p_spl_qp_svc->obj );\r
697 \r
698         /* Check the result of the creation request. */\r
699         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
700         {\r
701                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
702                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
703                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
704                 return IB_INSUFFICIENT_RESOURCES;\r
705         }\r
706 \r
707         /* Create the special QP. */\r
708         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
709         qp_create.qp_type = qp_type;\r
710         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
711         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
712         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
713         qp_create.rq_sge = 1;\r
714         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
715         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
716         qp_create.sq_signaled = TRUE;\r
717 \r
718         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
719                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
720                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
721 \r
722         if( status != IB_SUCCESS )\r
723         {\r
724                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
725                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
726                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
727                 return status;\r
728         }\r
729 \r
730         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
731         ref_al_obj( &p_spl_qp_svc->obj );\r
732 \r
733         /* Check the result of the creation request. */\r
734         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
735         if( status != IB_SUCCESS )\r
736         {\r
737                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
738                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
739                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
740                 return status;\r
741         }\r
742 \r
743         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
744                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
745                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
746         {\r
747                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
748                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
749                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
750                 return IB_INSUFFICIENT_RESOURCES;\r
751         }\r
752 \r
753         /* Initialize the QP for use. */\r
754         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
755         if( status != IB_SUCCESS )\r
756         {\r
757                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
758                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
759                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
760                 return status;\r
761         }\r
762 \r
763         /* Post receive buffers. */\r
764         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
765         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
766         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
767         if( status != IB_SUCCESS )\r
768         {\r
769                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
770                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
771                         ("spl_qp_svc_post_recvs failed, %s\n",\r
772                         ib_get_err_str( status ) ) );\r
773                 return status;\r
774         }\r
775 \r
776         /* Create the MAD dispatcher. */\r
777         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
778                 &p_spl_qp_svc->h_mad_disp );\r
779         if( status != IB_SUCCESS )\r
780         {\r
781                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
782                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
783                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
784                 return status;\r
785         }\r
786 \r
787         /*\r
788          * Add this service to the special QP manager lookup lists.\r
789          * The service must be added to allow the creation of a QP alias.\r
790          */\r
791         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
792         if( qp_type == IB_QPT_QP0 )\r
793         {\r
794                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
795                         &p_spl_qp_svc->map_item );\r
796         }\r
797         else\r
798         {\r
799                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
800                         &p_spl_qp_svc->map_item );\r
801         }\r
802         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
803 \r
804         /*\r
805          * If the CA does not support HW agents, create a QP alias and register\r
806          * a MAD service for sending responses from the local MAD interface.\r
807          */\r
808         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
809         {\r
810                 /* Create a QP alias. */\r
811                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
812                 qp_create.qp_type =\r
813                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
814                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
815                 qp_create.sq_sge                = 1;\r
816                 qp_create.sq_signaled   = TRUE;\r
817 \r
818                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
819                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
820                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
821                         &p_spl_qp_svc->h_qp_alias );\r
822 \r
823                 if (status != IB_SUCCESS)\r
824                 {\r
825                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
826                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
827                                 ("ib_get_spl_qp alias failed, %s\n",\r
828                                 ib_get_err_str( status ) ) );\r
829                         return status;\r
830                 }\r
831 \r
832                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
833                 ref_al_obj( &p_spl_qp_svc->obj );\r
834 \r
835                 /* Register a MAD service for sends. */\r
836                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
837                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
838                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
839                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
840 \r
841                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
842                         &p_spl_qp_svc->h_mad_svc );\r
843 \r
844                 if( status != IB_SUCCESS )\r
845                 {\r
846                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
847                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
848                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
849                         return status;\r
850                 }\r
851         }\r
852 \r
853         /* Set the context of the PnP event to this child object. */\r
854         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
855 \r
856         /* The QP is ready.  Change the state. */\r
857         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
858 \r
859         /* Force a completion callback to rearm the CQs. */\r
860         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
861         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
862 \r
863         /* Start the polling thread timer. */\r
864         if( g_smi_poll_interval )\r
865         {\r
866                 cl_status =\r
867                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
868 \r
869                 if( cl_status != CL_SUCCESS )\r
870                 {\r
871                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
872                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
873                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
874                         return ib_convert_cl_status( cl_status );\r
875                 }\r
876         }\r
877 \r
878         /* Release the reference taken in init_al_obj. */\r
879         deref_al_obj( &p_spl_qp_svc->obj );\r
880 \r
881         AL_EXIT( AL_DBG_SMI );\r
882         return IB_SUCCESS;\r
883 }\r
884 \r
885 \r
886 \r
887 /*\r
888  * Return a work completion to the MAD dispatcher for the specified MAD.\r
889  */\r
890 static void\r
891 __complete_send_mad(\r
892         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
893         IN                              al_mad_wr_t* const                      p_mad_wr,\r
894         IN              const   ib_wc_status_t                          wc_status )\r
895 {\r
896         ib_wc_t                 wc;\r
897 \r
898         /* Construct a send work completion. */\r
899         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
900         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
901         wc.wc_type      = IB_WC_SEND;\r
902         wc.status       = wc_status;\r
903 \r
904         /* Set the send size if we were successful with the send. */\r
905         if( wc_status == IB_WCS_SUCCESS )\r
906                 wc.length = MAD_BLOCK_SIZE;\r
907 \r
908         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
909 }\r
910 \r
911 \r
912 \r
913 /*\r
914  * Pre-destroy a special QP service.\r
915  */\r
916 void\r
917 destroying_spl_qp_svc(\r
918         IN                              al_obj_t*                                       p_obj )\r
919 {\r
920         spl_qp_svc_t*                   p_spl_qp_svc;\r
921         cl_list_item_t*                 p_list_item;\r
922         al_mad_wr_t*                    p_mad_wr;\r
923 \r
924         ib_api_status_t                 status;\r
925 \r
926         AL_ENTER( AL_DBG_SMI );\r
927 \r
928         CL_ASSERT( p_obj );\r
929         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
930 \r
931         /* Change the state to prevent processing new send requests. */\r
932         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
933         p_spl_qp_svc->state = SPL_QP_DESTROYING;\r
934         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
935 \r
936         /* Wait here until the special QP service is no longer in use. */\r
937         while( p_spl_qp_svc->in_use_cnt )\r
938         {\r
939                 cl_thread_suspend( 0 );\r
940         }\r
941 \r
942         /* Destroy the special QP. */\r
943         if( p_spl_qp_svc->h_qp )\r
944         {\r
945                 /* If present, remove the special QP service from the tracking map. */\r
946                 cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
947                 if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )\r
948                 {\r
949                         cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );\r
950                 }\r
951                 else\r
952                 {\r
953                         cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );\r
954                 }\r
955                 cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
956 \r
957                 status = ib_destroy_qp( p_spl_qp_svc->h_qp,\r
958                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
959                 CL_ASSERT( status == IB_SUCCESS );\r
960 \r
961                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
962 \r
963                 /* Complete any outstanding MAD sends operations as "flushed". */\r
964                 for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );\r
965                          p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );\r
966                          p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )\r
967                 {\r
968                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
969                         p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
970                         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
971                                 IB_WCS_WR_FLUSHED_ERR );\r
972                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
973                 }\r
974 \r
975                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
976                 /* Receive MAD elements are returned to the pool by the free routine. */\r
977         }\r
978 \r
979         /* Destroy the special QP alias and CQs. */\r
980         if( p_spl_qp_svc->h_qp_alias )\r
981         {\r
982                 status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,\r
983                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
984                 CL_ASSERT( status == IB_SUCCESS );\r
985         }\r
986         if( p_spl_qp_svc->h_send_cq )\r
987         {\r
988                 status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,\r
989                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
990                 CL_ASSERT( status == IB_SUCCESS );\r
991         }\r
992         if( p_spl_qp_svc->h_recv_cq )\r
993         {\r
994                 status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,\r
995                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
996                 CL_ASSERT( status == IB_SUCCESS );\r
997         }\r
998 \r
999         AL_EXIT( AL_DBG_SMI );\r
1000 }\r
1001 \r
1002 \r
1003 \r
1004 /*\r
1005  * Free a special QP service.\r
1006  */\r
1007 void\r
1008 free_spl_qp_svc(\r
1009         IN                              al_obj_t*                                       p_obj )\r
1010 {\r
1011         spl_qp_svc_t*                   p_spl_qp_svc;\r
1012         cl_list_item_t*                 p_list_item;\r
1013         al_mad_element_t*               p_al_mad;\r
1014         ib_api_status_t                 status;\r
1015 \r
1016         AL_ENTER( AL_DBG_SMI );\r
1017 \r
1018         CL_ASSERT( p_obj );\r
1019         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1020 \r
1021         /* Dereference the CA. */\r
1022         if( p_spl_qp_svc->obj.p_ci_ca )\r
1023                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
1024 \r
1025         /* Return receive MAD elements to the pool. */\r
1026         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
1027                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
1028                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
1029         {\r
1030                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
1031 \r
1032                 status = ib_put_mad( &p_al_mad->element );\r
1033                 CL_ASSERT( status == IB_SUCCESS );\r
1034         }\r
1035 \r
1036         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
1037 \r
1038         destroy_al_obj( &p_spl_qp_svc->obj );\r
1039         cl_free( p_spl_qp_svc );\r
1040 \r
1041         AL_EXIT( AL_DBG_SMI );\r
1042 }\r
1043 \r
1044 \r
1045 \r
1046 /*\r
1047  * Update the base LID of a special QP service.\r
1048  */\r
1049 void\r
1050 spl_qp_svc_lid_change(\r
1051         IN                              al_obj_t*                                       p_obj,\r
1052         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1053 {\r
1054         spl_qp_svc_t*                   p_spl_qp_svc;\r
1055 \r
1056         AL_ENTER( AL_DBG_SMI );\r
1057 \r
1058         CL_ASSERT( p_obj );\r
1059         CL_ASSERT( p_pnp_rec );\r
1060         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1061 \r
1062         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1063 \r
1064         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1065         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1066 \r
1067         AL_EXIT( AL_DBG_SMI );\r
1068 }\r
1069 \r
1070 \r
1071 \r
1072 /*\r
1073  * Route a send work request.\r
1074  */\r
1075 mad_route_t\r
1076 route_mad_send(\r
1077         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1078         IN                              ib_send_wr_t* const                     p_send_wr )\r
1079 {\r
1080         al_mad_wr_t*                    p_mad_wr;\r
1081         al_mad_send_t*                  p_mad_send;\r
1082         ib_mad_t*                               p_mad;\r
1083         ib_smp_t*                               p_smp;\r
1084         ib_av_handle_t                  h_av;\r
1085         mad_route_t                             route;\r
1086         boolean_t                               local, loopback, discard;\r
1087 \r
1088         AL_ENTER( AL_DBG_SMI );\r
1089 \r
1090         CL_ASSERT( p_spl_qp_svc );\r
1091         CL_ASSERT( p_send_wr );\r
1092 \r
1093         /* Initialize a pointers to the MAD work request and the MAD. */\r
1094         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1095         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1096         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1097         p_smp = (ib_smp_t*)p_mad;\r
1098 \r
1099         /* Check if the CA has a local MAD interface. */\r
1100         local = loopback = discard = FALSE;\r
1101         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
1102         {\r
1103                 /*\r
1104                  * If the MAD is a locally addressed Subnet Management, Performance\r
1105                  * Management, or Connection Management datagram, process the work\r
1106                  * request locally.\r
1107                  */\r
1108                 h_av = p_send_wr->dgrm.ud.h_av;\r
1109                 switch( p_mad->mgmt_class )\r
1110                 {\r
1111                 case IB_MCLASS_SUBN_DIR:\r
1112                         /* Perform special checks on directed route SMPs. */\r
1113                         if( ib_smp_is_response( p_smp ) )\r
1114                         {\r
1115                                 /*\r
1116                                  * This node is the originator of the response.  Discard\r
1117                                  * if the hop count or pointer is zero, an intermediate hop,\r
1118                                  * out of bounds hop, or if the first port of the directed\r
1119                                  * route retrun path is not this port.\r
1120                                  */\r
1121                                 if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )\r
1122                                 {\r
1123                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1124                                                 ("hop cnt or hop ptr set to 0...discarding\n") );\r
1125                                         discard = TRUE;\r
1126                                 }\r
1127                                 else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )\r
1128                                 {\r
1129                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1130                                                 ("hop cnt != (hop ptr - 1)...discarding\n") );\r
1131                                         discard = TRUE;\r
1132                                 }\r
1133                                 else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )\r
1134                                 {\r
1135                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1136                                                 ("hop cnt > max hops...discarding\n") );\r
1137                                         discard = TRUE;\r
1138                                 }\r
1139                                 else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&\r
1140                                                  ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=\r
1141                                                         p_spl_qp_svc->port_num ) )\r
1142                                 {\r
1143                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1144                                                 ("return path[hop ptr - 1] != port num...discarding\n") );\r
1145                                         discard = TRUE;\r
1146                                 }\r
1147                         }\r
1148                         else\r
1149                         {\r
1150                                 /* The SMP is a request. */\r
1151                                 if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
1152                                         ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
1153                                 {\r
1154                                         discard = TRUE;\r
1155                                 }\r
1156                                 else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )\r
1157                                 {\r
1158                                         /* Self Addressed: Sent locally, routed locally. */\r
1159                                         local = TRUE;\r
1160                                         discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||\r
1161                                                           ( p_smp->dr_dlid != IB_LID_PERMISSIVE );\r
1162                                 }\r
1163                                 else if( ( p_smp->hop_count != 0 ) &&\r
1164                                                  ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )\r
1165                                 {\r
1166                                         /* End of Path: Sent remotely, routed locally. */\r
1167                                         local = TRUE;\r
1168                                 }\r
1169                                 else if( ( p_smp->hop_count != 0 ) &&\r
1170                                                  ( p_smp->hop_ptr       == 0 ) )\r
1171                                 {\r
1172                                         /* Beginning of Path: Sent locally, routed remotely. */\r
1173                                         if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1174                                         {\r
1175                                                 discard =\r
1176                                                         ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=\r
1177                                                           p_spl_qp_svc->port_num );\r
1178                                         }\r
1179                                 }\r
1180                                 else\r
1181                                 {\r
1182                                         /* Intermediate hop. */\r
1183                                         discard = TRUE;\r
1184                                 }\r
1185                         }\r
1186                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1187                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1188                         break;\r
1189 \r
1190                 case IB_MCLASS_SUBN_LID:\r
1191                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1192                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1193 \r
1194                         /* Fall through to check for a local MAD. */\r
1195 \r
1196                 case IB_MCLASS_PERF:\r
1197                 case IB_MCLASS_BM:\r
1198                 case IB_MLX_VENDOR_CLASS1:\r
1199                 case IB_MLX_VENDOR_CLASS2:\r
1200                         local = !(p_mad->method & IB_MAD_METHOD_RESP_MASK) && ( h_av &&\r
1201                                 ( h_av->av_attr.dlid ==\r
1202                                 ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
1203                         break;\r
1204                 }\r
1205         }\r
1206 \r
1207         route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?\r
1208                 ROUTE_LOCAL : ROUTE_REMOTE;\r
1209         if( local ) route = ROUTE_LOCAL;\r
1210         if( loopback && local ) route = ROUTE_LOOPBACK;\r
1211         if( discard ) route = ROUTE_DISCARD;\r
1212 \r
1213         AL_EXIT( AL_DBG_SMI );\r
1214         return route;\r
1215 }\r
1216 \r
1217 \r
1218 \r
1219 /*\r
1220  * Send a work request on the special QP.\r
1221  */\r
1222 ib_api_status_t\r
1223 spl_qp_svc_send(\r
1224         IN              const   ib_qp_handle_t                          h_qp,\r
1225         IN                              ib_send_wr_t* const                     p_send_wr )\r
1226 {\r
1227         spl_qp_svc_t*                   p_spl_qp_svc;\r
1228         al_mad_wr_t*                    p_mad_wr;\r
1229         mad_route_t                             route;\r
1230         ib_api_status_t                 status;\r
1231 \r
1232         AL_ENTER( AL_DBG_SMI );\r
1233 \r
1234         CL_ASSERT( h_qp );\r
1235         CL_ASSERT( p_send_wr );\r
1236 \r
1237         /* Get the special QP service. */\r
1238         p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;\r
1239         CL_ASSERT( p_spl_qp_svc );\r
1240         CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );\r
1241 \r
1242         /* Determine how to route the MAD. */\r
1243         route = route_mad_send( p_spl_qp_svc, p_send_wr );\r
1244 \r
1245         /*\r
1246          * Check the QP state and guard against error handling.  Also,\r
1247          * to maintain proper order of work completions, delay processing\r
1248          * a local MAD until any remote MAD work requests have completed,\r
1249          * and delay processing a remote MAD until local MAD work requests\r
1250          * have completed.\r
1251          */\r
1252         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1253         if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||\r
1254                 (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||\r
1255                 ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=\r
1256                         p_spl_qp_svc->max_qp_depth ) )\r
1257         {\r
1258                 /*\r
1259                  * Return busy status.\r
1260                  * The special QP will resume sends at this point.\r
1261                  */\r
1262                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1263 \r
1264                 AL_EXIT( AL_DBG_SMI );\r
1265                 return IB_RESOURCE_BUSY;\r
1266         }\r
1267 \r
1268         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1269 \r
1270         if( is_local( route ) )\r
1271         {\r
1272                 /* Save the local MAD work request for processing. */\r
1273                 p_spl_qp_svc->local_mad_wr = p_mad_wr;\r
1274 \r
1275                 /* Flag the service as in use by the asynchronous processing thread. */\r
1276                 cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
1277 \r
1278                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1279 \r
1280                 status = local_mad_send( p_spl_qp_svc, p_mad_wr );\r
1281         }\r
1282         else\r
1283         {\r
1284                 /* Process a remote MAD send work request. */\r
1285                 status = remote_mad_send( p_spl_qp_svc, p_mad_wr );\r
1286 \r
1287                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1288         }\r
1289 \r
1290         AL_EXIT( AL_DBG_SMI );\r
1291         return status;\r
1292 }\r
1293 \r
1294 \r
1295 \r
1296 /*\r
1297  * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.\r
1298  */\r
1299 ib_api_status_t\r
1300 remote_mad_send(\r
1301         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1302         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1303 {\r
1304         ib_smp_t*                               p_smp;\r
1305         ib_api_status_t                 status;\r
1306 \r
1307         AL_ENTER( AL_DBG_SMI );\r
1308 \r
1309         CL_ASSERT( p_spl_qp_svc );\r
1310         CL_ASSERT( p_mad_wr );\r
1311 \r
1312         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1313         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1314 \r
1315         /* Perform outbound MAD processing. */\r
1316 \r
1317         /* Adjust directed route SMPs as required by IBA. */\r
1318         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1319         {\r
1320                 if( ib_smp_is_response( p_smp ) )\r
1321                 {\r
1322                         if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1323                                 p_smp->hop_ptr--;\r
1324                 }\r
1325                 else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1326                 {\r
1327                         /*\r
1328                          * Only update the pointer if the hw_agent is not implemented.\r
1329                          * Fujitsu implements SMI in hardware, so the following has to\r
1330                          * be passed down to the hardware SMI.\r
1331                          */\r
1332                         ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1333                         if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )\r
1334                                 p_smp->hop_ptr++;\r
1335                         ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1336                 }\r
1337         }\r
1338 \r
1339         /* Always generate send completions. */\r
1340         p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;\r
1341 \r
1342         /* Queue the MAD work request on the service tracking queue. */\r
1343         cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1344 \r
1345         status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );\r
1346 \r
1347         if( status != IB_SUCCESS )\r
1348         {\r
1349                 cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1350 \r
1351                 /* Reset directed route SMPs as required by IBA. */\r
1352                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1353                 {\r
1354                         if( ib_smp_is_response( p_smp ) )\r
1355                         {\r
1356                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1357                                         p_smp->hop_ptr++;\r
1358                         }\r
1359                         else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1360                         {\r
1361                                 /* Only update if the hw_agent is not implemented. */\r
1362                                 ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1363                                 if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )\r
1364                                         p_smp->hop_ptr--;\r
1365                                 ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1366                         }\r
1367                 }\r
1368         }\r
1369 \r
1370         AL_EXIT( AL_DBG_SMI );\r
1371         return status;\r
1372 }\r
1373 \r
1374 \r
1375 /*\r
1376  * Handle a MAD destined for the local CA, using cached data\r
1377  * as much as possible.\r
1378  */\r
1379 static ib_api_status_t\r
1380 local_mad_send(\r
1381         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1382         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1383 {\r
1384         mad_route_t                             route;\r
1385         ib_api_status_t                 status = IB_SUCCESS;\r
1386 \r
1387         AL_ENTER( AL_DBG_SMI );\r
1388 \r
1389         CL_ASSERT( p_spl_qp_svc );\r
1390         CL_ASSERT( p_mad_wr );\r
1391 \r
1392         /* Determine how to route the MAD. */\r
1393         route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );\r
1394 \r
1395         /* Check if this MAD should be discarded. */\r
1396         if( is_discard( route ) )\r
1397         {\r
1398                 /* Deliver a "work completion" to the dispatcher. */\r
1399                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1400                         IB_WCS_LOCAL_OP_ERR );\r
1401                 status = IB_INVALID_SETTING;\r
1402         }\r
1403         else if( is_loopback( route ) )\r
1404         {\r
1405                 /* Loopback local SM to SM "heartbeat" messages. */\r
1406                 status = loopback_mad( p_spl_qp_svc, p_mad_wr );\r
1407         }\r
1408         else\r
1409         {\r
1410                 switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )\r
1411                 {\r
1412                 case IB_MCLASS_SUBN_DIR:\r
1413                 case IB_MCLASS_SUBN_LID:\r
1414                         //DO not use the cache in order to force Mkey  check\r
1415                         status = __process_subn_mad( p_spl_qp_svc, p_mad_wr );\r
1416                         //status = IB_NOT_DONE;\r
1417                         break;\r
1418 \r
1419                 default:\r
1420                         status = IB_NOT_DONE;\r
1421                 }\r
1422         }\r
1423 \r
1424         if( status == IB_NOT_DONE )\r
1425         {\r
1426                 /* Queue an asynchronous processing item to process the local MAD. */\r
1427                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );\r
1428         }\r
1429         else\r
1430         {\r
1431                 /*\r
1432                  * Clear the local MAD pointer to allow processing of other MADs.\r
1433                  * This is done after polling for attribute changes to ensure that\r
1434                  * subsequent MADs pick up any changes performed by this one.\r
1435                  */\r
1436                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1437                 p_spl_qp_svc->local_mad_wr = NULL;\r
1438                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1439 \r
1440                 /* No longer in use by the asynchronous processing thread. */\r
1441                 cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1442 \r
1443                 /* Special QP operations will resume by unwinding. */\r
1444         }\r
1445 \r
1446         AL_EXIT( AL_DBG_SMI );\r
1447         return IB_SUCCESS;\r
1448 }\r
1449 \r
1450 \r
1451 static ib_api_status_t\r
1452 get_resp_mad(\r
1453         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1454         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1455                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1456 {\r
1457         ib_api_status_t                 status;\r
1458 \r
1459         AL_ENTER( AL_DBG_SMI );\r
1460 \r
1461         CL_ASSERT( p_spl_qp_svc );\r
1462         CL_ASSERT( p_mad_wr );\r
1463         CL_ASSERT( pp_mad_resp );\r
1464 \r
1465         /* Get a MAD element from the pool for the response. */\r
1466         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1467                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1468         if( status != IB_SUCCESS )\r
1469         {\r
1470                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1471                         IB_WCS_LOCAL_OP_ERR );\r
1472         }\r
1473 \r
1474         AL_EXIT( AL_DBG_SMI );\r
1475         return status;\r
1476 }\r
1477 \r
1478 \r
1479 static ib_api_status_t\r
1480 complete_local_mad(\r
1481         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1482         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1483         IN                              ib_mad_element_t* const         p_mad_resp )\r
1484 {\r
1485         ib_api_status_t                 status;\r
1486 \r
1487         AL_ENTER( AL_DBG_SMI );\r
1488 \r
1489         CL_ASSERT( p_spl_qp_svc );\r
1490         CL_ASSERT( p_mad_wr );\r
1491         CL_ASSERT( p_mad_resp );\r
1492 \r
1493         /* Construct the receive MAD element. */\r
1494         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1495         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1496         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1497         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1498         {\r
1499                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1500                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1501         }\r
1502 \r
1503         /*\r
1504          * Hand the receive MAD element to the dispatcher before completing\r
1505          * the send.  This guarantees that the send request cannot time out.\r
1506          */\r
1507         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1508 \r
1509         /* Forward the send work completion to the dispatcher. */\r
1510         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1511 \r
1512         AL_EXIT( AL_DBG_SMI );\r
1513         return status;\r
1514 }\r
1515 \r
1516 \r
1517 static ib_api_status_t\r
1518 loopback_mad(\r
1519         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1520         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1521 {\r
1522         ib_mad_t                                *p_mad;\r
1523         ib_mad_element_t                *p_mad_resp;\r
1524         ib_api_status_t                 status;\r
1525 \r
1526         AL_ENTER( AL_DBG_SMI );\r
1527 \r
1528         CL_ASSERT( p_spl_qp_svc );\r
1529         CL_ASSERT( p_mad_wr );\r
1530 \r
1531         /* Get a MAD element from the pool for the response. */\r
1532         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1533         if( status == IB_SUCCESS )\r
1534         {\r
1535                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1536                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1537 \r
1538                 /* Simulate a send/receive between local managers. */\r
1539                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1540 \r
1541                 /* Construct the receive MAD element. */\r
1542                 p_mad_resp->status              = IB_WCS_SUCCESS;\r
1543                 p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1544                 p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1545                 if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1546                 {\r
1547                         p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1548                         p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1549                 }\r
1550 \r
1551                 /*\r
1552                  * Hand the receive MAD element to the dispatcher before completing\r
1553                  * the send.  This guarantees that the send request cannot time out.\r
1554                  */\r
1555                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1556 \r
1557                 /* Forward the send work completion to the dispatcher. */\r
1558                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1559 \r
1560         }\r
1561 \r
1562         AL_EXIT( AL_DBG_SMI );\r
1563         return status;\r
1564 }\r
1565 \r
1566 \r
1567 static void\r
1568 __update_guid_info(\r
1569         IN                              spl_qp_cache_t* const                   p_cache,\r
1570         IN              const   ib_smp_t* const                         p_mad )\r
1571 {\r
1572         uint32_t                        idx;\r
1573 \r
1574         /* Get the table selector from the attribute */\r
1575         idx = cl_ntoh32( p_mad->attr_mod );\r
1576 \r
1577         /*\r
1578          * We only get successful MADs here, so invalid settings\r
1579          * shouldn't happen.\r
1580          */\r
1581         CL_ASSERT( idx <= 31 );\r
1582 \r
1583         cl_memcpy( &p_cache->guid_block[idx].tbl,\r
1584                 ib_smp_get_payload_ptr( p_mad ),\r
1585                 sizeof(ib_guid_info_t) );\r
1586         p_cache->guid_block[idx].valid = TRUE;\r
1587 }\r
1588 \r
1589 \r
1590 static  void\r
1591 __update_pkey_table(\r
1592         IN                              spl_qp_cache_t* const                   p_cache,\r
1593         IN              const   ib_smp_t* const                         p_mad )\r
1594 {\r
1595         uint16_t                        idx;\r
1596 \r
1597         /* Get the table selector from the attribute */\r
1598         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1599 \r
1600         CL_ASSERT( idx <= 2047 );\r
1601 \r
1602         cl_memcpy( &p_cache->pkey_tbl[idx].tbl,\r
1603                 ib_smp_get_payload_ptr( p_mad ),\r
1604                 sizeof(ib_pkey_table_t) );\r
1605         p_cache->pkey_tbl[idx].valid = TRUE;\r
1606 }\r
1607 \r
1608 \r
1609 static void\r
1610 __update_sl_vl_table(\r
1611         IN                              spl_qp_cache_t* const                   p_cache,\r
1612         IN              const   ib_smp_t* const                         p_mad )\r
1613 {\r
1614         cl_memcpy( &p_cache->sl_vl.tbl,\r
1615                 ib_smp_get_payload_ptr( p_mad ),\r
1616                 sizeof(ib_slvl_table_t) );\r
1617         p_cache->sl_vl.valid = TRUE;\r
1618 }\r
1619 \r
1620 \r
1621 static void\r
1622 __update_vl_arb_table(\r
1623         IN                              spl_qp_cache_t* const                   p_cache,\r
1624         IN              const   ib_smp_t* const                         p_mad )\r
1625 {\r
1626         uint16_t                        idx;\r
1627 \r
1628         /* Get the table selector from the attribute */\r
1629         idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
1630 \r
1631         CL_ASSERT( idx <= 3 );\r
1632 \r
1633         cl_memcpy( &p_cache->vl_arb[idx].tbl,\r
1634                 ib_smp_get_payload_ptr( p_mad ),\r
1635                 sizeof(ib_vl_arb_table_t) );\r
1636         p_cache->vl_arb[idx].valid = TRUE;\r
1637 }\r
1638 \r
1639 \r
1640 \r
1641 void\r
1642 spl_qp_svc_update_cache(\r
1643         IN                              spl_qp_svc_t                            *p_spl_qp_svc,\r
1644         IN                              ib_smp_t                                        *p_mad )\r
1645 {\r
1646 \r
1647 \r
1648 \r
1649         CL_ASSERT( p_spl_qp_svc );\r
1650         CL_ASSERT( p_mad );\r
1651         CL_ASSERT( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1652                                  p_mad->mgmt_class == IB_MCLASS_SUBN_LID);\r
1653         CL_ASSERT(!p_mad->status);\r
1654 \r
1655         cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);\r
1656         \r
1657         switch( p_mad->attr_id )\r
1658         {\r
1659         case IB_MAD_ATTR_GUID_INFO:\r
1660                 __update_guid_info(\r
1661                         &p_spl_qp_svc->cache, p_mad );\r
1662                 break;\r
1663 \r
1664         case IB_MAD_ATTR_P_KEY_TABLE:\r
1665                 __update_pkey_table(\r
1666                         &p_spl_qp_svc->cache, p_mad );\r
1667                 break;\r
1668 \r
1669         case IB_MAD_ATTR_SLVL_TABLE:\r
1670                 __update_sl_vl_table(\r
1671                         &p_spl_qp_svc->cache, p_mad );\r
1672                 break;\r
1673 \r
1674         case IB_MAD_ATTR_VL_ARBITRATION:\r
1675                 __update_vl_arb_table(\r
1676                         &p_spl_qp_svc->cache, p_mad );\r
1677                 break;\r
1678 \r
1679         default:\r
1680                 break;\r
1681         }\r
1682         \r
1683         cl_spinlock_release(&p_spl_qp_svc->cache_lock);\r
1684 }\r
1685 \r
1686 \r
1687 \r
/*
 * Answer a NodeInfo GET locally from the CA/port attributes, without
 * sending the SMP down to the HCA.  Any other method is failed with a
 * local operation error.
 */
static ib_api_status_t
__process_node_info(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the MAD work request's outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeInfo is a GET-only attribute.  (The old comment said
		 * "Node description" — it was swapped with __process_node_desc.) */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request, converting it into a successful response. */
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		p_node_info->sys_guid = p_ca_attr->system_image_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		/* Vendor ID occupies the low 24 bits; port number the high byte. */
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		/* Deliver the response and complete the send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1760 \r
1761 \r
/*
 * Answer a NodeDescription GET locally with the machine name, without
 * sending the SMP down to the HCA.  Any other method is failed with a
 * local operation error.
 */
static ib_api_status_t
__process_node_desc(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the MAD work request's outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeDescription is a GET-only attribute.  (The old comment said
		 * "Node info" — it was swapped with __process_node_info.) */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request, converting it into a successful response. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method =
			(IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, 
			node_desc, sizeof(node_desc) );

		/* Deliver the response and complete the send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1808 \r
1809 static ib_api_status_t\r
1810 __process_guid_info(\r
1811         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1812         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1813 {\r
1814         \r
1815         ib_mad_t                                *p_mad;\r
1816         ib_mad_element_t                *p_mad_resp;\r
1817         ib_smp_t                                *p_smp;\r
1818         ib_guid_info_t                  *p_guid_info;\r
1819         uint16_t                                idx;\r
1820         ib_api_status_t         status;\r
1821 \r
1822 \r
1823         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1824         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1825 \r
1826         /* Get the table selector from the attribute */\r
1827         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1828         \r
1829         /*\r
1830          * TODO : Setup the response to fail the MAD instead of sending\r
1831          * it down to the HCA.\r
1832          */\r
1833         if( idx > 31 )\r
1834         {\r
1835                 AL_EXIT( AL_DBG_SMI );\r
1836                 return IB_NOT_DONE;\r
1837         }\r
1838         if( !p_spl_qp_svc->cache.guid_block[idx].valid )\r
1839         {\r
1840                 AL_EXIT( AL_DBG_SMI );\r
1841                 return IB_NOT_DONE;\r
1842         }\r
1843 \r
1844         /*\r
1845          * If a SET, see if the set is identical to the cache,\r
1846          * in which case it's a no-op.\r
1847          */\r
1848         if( p_mad->method == IB_MAD_METHOD_SET )\r
1849         {\r
1850                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
1851                         &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) ) )\r
1852                 {\r
1853                         /* The set is requesting a change. */\r
1854                         return IB_NOT_DONE;\r
1855                 }\r
1856         }\r
1857         \r
1858         /* Get a MAD element from the pool for the response. */\r
1859         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1860         if( status == IB_SUCCESS )\r
1861         {\r
1862                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1863 \r
1864                 /* Setup the response mad. */\r
1865                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1866                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
1867                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1868                         p_smp->status = IB_SMP_DIRECTION;\r
1869                 else\r
1870                         p_smp->status = 0;\r
1871 \r
1872                 p_guid_info = (ib_guid_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1873 \r
1874                 // TODO: do we need lock on the cache ?????\r
1875 \r
1876                 \r
1877                 /* Copy the cached data. */\r
1878                 cl_memcpy( p_guid_info,\r
1879                         &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) );\r
1880 \r
1881                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1882         }\r
1883 \r
1884         AL_EXIT( AL_DBG_SMI );\r
1885         return status;\r
1886 }\r
1887 \r
1888 \r
1889 static ib_api_status_t\r
1890 __process_pkey_table(\r
1891         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1892         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1893 {\r
1894 \r
1895         ib_mad_t                                *p_mad;\r
1896         ib_mad_element_t                *p_mad_resp;\r
1897         ib_smp_t                                *p_smp;\r
1898         ib_pkey_table_t         *p_pkey_table;\r
1899         uint16_t                                idx;\r
1900         ib_api_status_t         status;\r
1901 \r
1902         AL_ENTER( AL_DBG_SMI );\r
1903 \r
1904         CL_ASSERT( p_spl_qp_svc );\r
1905         CL_ASSERT( p_mad_wr );\r
1906 \r
1907         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1908         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1909 \r
1910         /* Get the table selector from the attribute */\r
1911         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1912         \r
1913         /*\r
1914          * TODO : Setup the response to fail the MAD instead of sending\r
1915          * it down to the HCA.\r
1916          */\r
1917         if( idx > 2047 )\r
1918         {\r
1919                 AL_EXIT( AL_DBG_SMI );\r
1920                 return IB_NOT_DONE;\r
1921         }\r
1922 \r
1923 \r
1924         if( !p_spl_qp_svc->cache.pkey_tbl[idx].valid )\r
1925         {\r
1926                 AL_EXIT( AL_DBG_SMI );\r
1927                 return IB_NOT_DONE;\r
1928         }\r
1929 \r
1930         /*\r
1931          * If a SET, see if the set is identical to the cache,\r
1932          * in which case it's a no-op.\r
1933          */\r
1934         if( p_mad->method == IB_MAD_METHOD_SET )\r
1935         {\r
1936                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
1937                         &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ) )\r
1938                 {\r
1939                         /* The set is requesting a change. */\r
1940                         AL_EXIT( AL_DBG_SMI );\r
1941                         return IB_NOT_DONE;\r
1942                 }\r
1943         }\r
1944         \r
1945         /* Get a MAD element from the pool for the response. */\r
1946         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1947         if( status == IB_SUCCESS )\r
1948         {\r
1949                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1950 \r
1951                 /* Setup the response mad. */\r
1952                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1953                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
1954                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1955                         p_smp->status = IB_SMP_DIRECTION;\r
1956                 else\r
1957                         p_smp->status = 0;\r
1958 \r
1959                 p_pkey_table = (ib_pkey_table_t*)ib_smp_get_payload_ptr( p_smp );\r
1960 \r
1961                 // TODO: do we need lock on the cache ?????\r
1962 \r
1963                 \r
1964                 /* Copy the cached data. */\r
1965                 cl_memcpy( p_pkey_table,\r
1966                         &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) );\r
1967 \r
1968                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1969         }\r
1970 \r
1971         AL_EXIT( AL_DBG_SMI );\r
1972         return status;\r
1973 }\r
1974 \r
1975 \r
1976 static ib_api_status_t\r
1977 __process_slvl_table(\r
1978         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1979         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1980 {\r
1981 \r
1982 \r
1983         ib_mad_t                                *p_mad;\r
1984         ib_mad_element_t                *p_mad_resp;\r
1985         ib_smp_t                                *p_smp;\r
1986         ib_slvl_table_t                 *p_slvl_table;\r
1987         ib_api_status_t         status;\r
1988 \r
1989         AL_ENTER( AL_DBG_SMI );\r
1990 \r
1991         CL_ASSERT( p_spl_qp_svc );\r
1992         CL_ASSERT( p_mad_wr );\r
1993 \r
1994         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1995         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1996 \r
1997         if( !p_spl_qp_svc->cache.sl_vl.valid )\r
1998         {\r
1999                 AL_EXIT( AL_DBG_SMI );\r
2000                 return IB_NOT_DONE;\r
2001         }\r
2002 \r
2003         /*\r
2004          * If a SET, see if the set is identical to the cache,\r
2005          * in which case it's a no-op.\r
2006          */\r
2007         if( p_mad->method == IB_MAD_METHOD_SET )\r
2008         {\r
2009                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
2010                         &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) ) )\r
2011                 {\r
2012                         /* The set is requesting a change. */\r
2013                         AL_EXIT( AL_DBG_SMI );\r
2014                         return IB_NOT_DONE;\r
2015                 }\r
2016         }\r
2017         \r
2018         /* Get a MAD element from the pool for the response. */\r
2019         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
2020         if( status == IB_SUCCESS )\r
2021         {\r
2022                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
2023 \r
2024                 /* Setup the response mad. */\r
2025                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
2026                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
2027                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2028                         p_smp->status = IB_SMP_DIRECTION;\r
2029                 else\r
2030                         p_smp->status = 0;\r
2031 \r
2032                 p_slvl_table = (ib_slvl_table_t*)ib_smp_get_payload_ptr( p_smp );\r
2033 \r
2034                 // TODO: do we need lock on the cache ?????\r
2035 \r
2036                 \r
2037                 /* Copy the cached data. */\r
2038                 cl_memcpy( p_slvl_table,\r
2039                         &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) );\r
2040 \r
2041                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
2042         }\r
2043 \r
2044         AL_EXIT( AL_DBG_SMI );\r
2045         return status;\r
2046 }\r
2047 \r
2048 \r
2049 \r
2050 static ib_api_status_t\r
2051 __process_vl_arb_table(\r
2052         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2053         IN                              al_mad_wr_t* const                      p_mad_wr )\r
2054 {\r
2055 \r
2056         ib_mad_t                                *p_mad;\r
2057         ib_mad_element_t                *p_mad_resp;\r
2058         ib_smp_t                                *p_smp;\r
2059         ib_vl_arb_table_t               *p_vl_arb_table;\r
2060         uint16_t                                idx;\r
2061         ib_api_status_t         status;\r
2062 \r
2063         AL_ENTER( AL_DBG_SMI );\r
2064 \r
2065         CL_ASSERT( p_spl_qp_svc );\r
2066         CL_ASSERT( p_mad_wr );\r
2067 \r
2068         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
2069         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
2070 \r
2071         /* Get the table selector from the attribute */\r
2072         idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
2073         \r
2074         /*\r
2075          * TODO : Setup the response to fail the MAD instead of sending\r
2076          * it down to the HCA.\r
2077          */\r
2078         if( idx > 3 )\r
2079         {\r
2080                 AL_EXIT( AL_DBG_SMI );\r
2081                 return IB_NOT_DONE;\r
2082         }\r
2083 \r
2084 \r
2085         if( !p_spl_qp_svc->cache.vl_arb[idx].valid )\r
2086         {\r
2087                 AL_EXIT( AL_DBG_SMI );\r
2088                 return IB_NOT_DONE;\r
2089         }\r
2090 \r
2091         /*\r
2092          * If a SET, see if the set is identical to the cache,\r
2093          * in which case it's a no-op.\r
2094          */\r
2095         if( p_mad->method == IB_MAD_METHOD_SET )\r
2096         {\r
2097                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
2098                         &p_spl_qp_svc->cache.vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) )\r
2099                 {\r
2100                         /* The set is requesting a change. */\r
2101                         AL_EXIT( AL_DBG_SMI );\r
2102                         return IB_NOT_DONE;\r
2103                 }\r
2104         }\r
2105         \r
2106         /* Get a MAD element from the pool for the response. */\r
2107         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
2108         if( status == IB_SUCCESS )\r
2109         {\r
2110                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
2111 \r
2112                 /* Setup the response mad. */\r
2113                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
2114                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
2115                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2116                         p_smp->status = IB_SMP_DIRECTION;\r
2117                 else\r
2118                         p_smp->status = 0;\r
2119 \r
2120                 p_vl_arb_table = (ib_vl_arb_table_t*)ib_smp_get_payload_ptr( p_smp );\r
2121 \r
2122                 // TODO: do we need lock on the cache ?????\r
2123 \r
2124                 \r
2125                 /* Copy the cached data. */\r
2126                 cl_memcpy( p_vl_arb_table,\r
2127                         &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_vl_arb_table_t) );\r
2128 \r
2129                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
2130         }\r
2131 \r
2132         AL_EXIT( AL_DBG_SMI );\r
2133         return status;\r
2134 }\r
2135 \r
2136 \r
2137 \r
2138 \r
2139 /*\r
2140  * Process subnet administration MADs using cached data if possible.\r
2141  */\r
/*
 * Process subnet administration MADs using cached data if possible.
 * Returns IB_NOT_DONE when the MAD must be sent down to the HCA
 * (M_Key mismatch, cache disabled/cold, or an attribute we don't cache).
 */
static ib_api_status_t
__process_subn_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_api_status_t		status;
	ib_smp_t			*p_smp;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||
		p_smp->mgmt_class == IB_MCLASS_SUBN_LID );

	/*
	 * Simple m-key check: serve from the cache only when the service has
	 * a non-zero M_Key and the SMP's M_Key matches it exactly.
	 * NOTE(review): when p_spl_qp_svc->m_key is 0 this takes the failure
	 * path and disables the cache; the IBA spec treats M_Key 0 as "no
	 * protection", so sending to the HCA is safe but the warning log may
	 * be misleading — confirm intent.
	 */
	if( p_spl_qp_svc->m_key && p_smp->m_key == p_spl_qp_svc->m_key )
	{
		if(!p_spl_qp_svc->cache_en )
		{
			/* First MAD that passes the check re-arms the cache, but is
			 * still forwarded to the HCA so its response re-warms it. */
			p_spl_qp_svc->cache_en = TRUE;
			AL_EXIT( AL_DBG_SMI );
			return IB_NOT_DONE;
		}
	}
	else
	{
		AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check failed \n"));
		AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check SMP= 0x%08x:%08x  SVC = 0x%08x:%08x \n",
									((uint32_t*)&p_smp->m_key)[0],((uint32_t*)&p_smp->m_key)[1],
									((uint32_t*)&p_spl_qp_svc->m_key)[0],((uint32_t*)&p_spl_qp_svc->m_key)[1]));

		/* Disable cached replies until a MAD passes the M_Key check. */
		p_spl_qp_svc->cache_en = FALSE;
		AL_EXIT( AL_DBG_SMI );
		return IB_NOT_DONE;
	}

	/* Serialize cache reads against spl_qp_svc_update_cache. */
	cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);
	
	switch( p_smp->attr_id )
	{
	case IB_MAD_ATTR_NODE_INFO:
		status = __process_node_info( p_spl_qp_svc, p_mad_wr );
		break;

	case IB_MAD_ATTR_NODE_DESC:
		status = __process_node_desc( p_spl_qp_svc, p_mad_wr );
		break;

	case IB_MAD_ATTR_GUID_INFO:
		status = __process_guid_info( p_spl_qp_svc, p_mad_wr );
		break;

	case IB_MAD_ATTR_P_KEY_TABLE:
		status = __process_pkey_table( p_spl_qp_svc, p_mad_wr );
		break;
		
	case IB_MAD_ATTR_SLVL_TABLE:
		status = __process_slvl_table( p_spl_qp_svc, p_mad_wr );
		break;
		
	case IB_MAD_ATTR_VL_ARBITRATION:
		status = __process_vl_arb_table( p_spl_qp_svc, p_mad_wr );
		break;
		
	default:
		/* Attributes we do not cache are sent to the HCA. */
		status = IB_NOT_DONE;
		break;
	}

	cl_spinlock_release(&p_spl_qp_svc->cache_lock);

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2220 \r
2221 \r
2222 /*\r
2223  * Process a local MAD send work request.\r
2224  */\r
2225 static ib_api_status_t\r
2226 fwd_local_mad(\r
2227         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2228         IN                              al_mad_wr_t* const                      p_mad_wr )\r
2229 {\r
2230         ib_mad_t*                               p_mad;\r
2231         ib_smp_t*                               p_smp;\r
2232         al_mad_send_t*                  p_mad_send;\r
2233         ib_mad_element_t*               p_send_mad;\r
2234         ib_mad_element_t*               p_mad_response = NULL;\r
2235         ib_mad_t*                               p_mad_response_buf;\r
2236         ib_api_status_t                 status = IB_SUCCESS;\r
2237         boolean_t                               smp_is_set;\r
2238 \r
2239         AL_ENTER( AL_DBG_SMI );\r
2240 \r
2241         CL_ASSERT( p_spl_qp_svc );\r
2242         CL_ASSERT( p_mad_wr );\r
2243 \r
2244         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
2245         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
2246         p_smp = (ib_smp_t*)p_mad;\r
2247 \r
2248         smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);\r
2249 \r
2250         /* Get a MAD element from the pool for the response. */\r
2251         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
2252         if( p_mad_send->p_send_mad->resp_expected )\r
2253         {\r
2254                 status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );\r
2255                 if( status != IB_SUCCESS )\r
2256                 {\r
2257                         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
2258                                 IB_WCS_LOCAL_OP_ERR );\r
2259                         AL_EXIT( AL_DBG_SMI );\r
2260                         return status;\r
2261                 }\r
2262                 p_mad_response_buf = p_mad_response->p_mad_buf;\r
2263                 /* Copy MAD to dispatch locally in case CA doesn't handle it. */\r
2264                 *p_mad_response_buf = *p_mad;\r
2265         }\r
2266         else\r
2267         {\r
2268                         p_mad_response_buf = NULL;\r
2269         }\r
2270 \r
2271         /* Adjust directed route SMPs as required by IBA. */\r
2272         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2273         {\r
2274                 CL_ASSERT( !ib_smp_is_response( p_smp ) );\r
2275 \r
2276                 /*\r
2277                  * If this was a self addressed, directed route SMP, increment\r
2278                  * the hop pointer in the request before delivery as required\r
2279                  * by IBA.  Otherwise, adjustment for remote requests occurs\r
2280                  * during inbound processing.\r
2281                  */\r
2282                 if( p_smp->hop_count == 0 )\r
2283                         p_smp->hop_ptr++;\r
2284         }\r
2285 \r
2286         /* Forward the locally addressed MAD to the CA interface. */\r
2287         status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
2288                 p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf );\r
2289 \r
2290         /* Reset directed route SMPs as required by IBA. */\r
2291         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2292         {\r
2293                 /*\r
2294                  * If this was a self addressed, directed route SMP, decrement\r
2295                  * the hop pointer in the response before delivery as required\r
2296                  * by IBA.  Otherwise, adjustment for remote responses occurs\r
2297                  * during outbound processing.\r
2298                  */\r
2299                 if( p_smp->hop_count == 0 )\r
2300                 {\r
2301                         /* Adjust the request SMP. */\r
2302                         p_smp->hop_ptr--;\r
2303 \r
2304                         /* Adjust the response SMP. */\r
2305                         if( p_mad_response_buf )\r
2306                         {\r
2307                                 p_smp = (ib_smp_t*)p_mad_response_buf;\r
2308                                 p_smp->hop_ptr--;\r
2309                         }\r
2310                 }\r
2311         }\r
2312 \r
2313         if( status != IB_SUCCESS )\r
2314         {\r
2315                 if( p_mad_response )\r
2316                         ib_put_mad( p_mad_response );\r
2317 \r
2318                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
2319                         IB_WCS_LOCAL_OP_ERR );\r
2320                 AL_EXIT( AL_DBG_SMI );\r
2321                 return status;\r
2322         }\r
2323 \r
2324         /* Check the completion status of this simulated send. */\r
2325         if( p_mad_send->p_send_mad->resp_expected )\r
2326         {\r
2327                 /*\r
2328                  * The SMI is uses PnP polling to refresh the base_lid and lmc.\r
2329                  * Polling takes time, so we update the values here to prevent\r
2330                  * the failure of LID routed MADs sent immediately following this\r
2331                  * assignment.  Check the response to see if the port info was set.\r
2332                  */\r
2333                 if( smp_is_set )\r
2334                 {\r
2335                         ib_smp_t*               p_smp_response = NULL;\r
2336 \r
2337                         switch( p_mad_response_buf->mgmt_class )\r
2338                         {\r
2339                         case IB_MCLASS_SUBN_DIR:\r
2340                                 if( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) \r
2341                                 {\r
2342                                         p_smp_response = p_smp;\r
2343                                         //p_port_info =\r
2344                                         //      (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );\r
2345                                 }\r
2346                                 break;\r
2347 \r
2348                         case IB_MCLASS_SUBN_LID:\r
2349                                 if( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS )\r
2350                                 {\r
2351                                         p_smp_response = (ib_smp_t*)p_mad_response_buf;\r
2352                                         //p_port_info =\r
2353                                         //      (ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf);\r
2354                                 }\r
2355                                 break;\r
2356 \r
2357                         default:\r
2358                                 break;\r
2359                         }\r
2360 \r
2361                         if( p_smp_response )\r
2362                         {\r
2363                                 switch( p_smp_response->attr_id )\r
2364                                 {\r
2365                                         case IB_MAD_ATTR_PORT_INFO:\r
2366                                                 {\r
2367                                                         ib_port_info_t          *p_port_info =\r
2368                                                                 (ib_port_info_t*)ib_smp_get_payload_ptr(p_smp_response);\r
2369                                                         p_spl_qp_svc->base_lid = p_port_info->base_lid;\r
2370                                                         p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );\r
2371                                                         p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid;\r
2372                                                         p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info );\r
2373 \r
2374                                                         if(p_port_info->m_key)\r
2375                                                                 p_spl_qp_svc->m_key = p_port_info->m_key;\r
2376                                                         if (p_port_info->subnet_timeout & 0x80)\r
2377                                                         {\r
2378                                                                 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
2379                                                                         ("Client reregister event, setting sm_lid to 0.\n"));\r
2380                                                                 ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
2381                                                                 p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->\r
2382                                                                         p_port_attr[p_port_info->local_port_num - 1].sm_lid= 0;\r
2383                                                                 ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
2384                                                         }\r
2385                                                 }\r
2386                                                 break;\r
2387                                         case IB_MAD_ATTR_P_KEY_TABLE:\r
2388                                         case IB_MAD_ATTR_GUID_INFO:\r
2389                                         case IB_MAD_ATTR_SLVL_TABLE:\r
2390                                         case IB_MAD_ATTR_VL_ARBITRATION:\r
2391                                                 spl_qp_svc_update_cache( p_spl_qp_svc, p_smp_response);\r
2392                                                 break;\r
2393                                         default :\r
2394                                                 break;\r
2395                                 }\r
2396                         }\r
2397                 }\r
2398                 \r
2399 \r
2400                 /* Construct the receive MAD element. */\r
2401                 p_send_mad = p_mad_send->p_send_mad;\r
2402                 p_mad_response->status = IB_WCS_SUCCESS;\r
2403                 p_mad_response->grh_valid = p_send_mad->grh_valid;\r
2404                 if( p_mad_response->grh_valid )\r
2405                         *p_mad_response->p_grh  = *p_send_mad->p_grh;\r
2406                 p_mad_response->path_bits   = p_send_mad->path_bits;\r
2407                 p_mad_response->pkey_index  = p_send_mad->pkey_index;\r
2408                 p_mad_response->remote_lid  = p_send_mad->remote_lid;\r
2409                 p_mad_response->remote_qkey = p_send_mad->remote_qkey;\r
2410                 p_mad_response->remote_qp   = p_send_mad->remote_qp;\r
2411                 p_mad_response->remote_sl   = p_send_mad->remote_sl;\r
2412                 if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
2413                 {\r
2414                         p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data;\r
2415                         p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
2416                 }\r
2417 \r
2418                 /*\r
2419                  * Hand the receive MAD element to the dispatcher before completing\r
2420                  * the send.  This guarantees that the send request cannot time out.\r
2421                  */\r
2422                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response );\r
2423                 if( status != IB_SUCCESS )\r
2424                         ib_put_mad( p_mad_response );\r
2425         }\r
2426         \r
2427         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,IB_WCS_SUCCESS);\r
2428 \r
2429         \r
2430         \r
2431         /* If the SMP was a Get, no need to trigger a PnP poll. */\r
2432         if( status == IB_SUCCESS && !smp_is_set )\r
2433                 status = IB_NOT_DONE;\r
2434 \r
2435         AL_EXIT( AL_DBG_SMI );\r
2436         return status;\r
2437 }\r
2438 \r
2439 \r
2440 \r
2441 /*\r
2442  * Asynchronous processing thread callback to send a local MAD.\r
2443  */\r
2444 void\r
2445 send_local_mad_cb(\r
2446         IN                              cl_async_proc_item_t*           p_item )\r
2447 {\r
2448         spl_qp_svc_t*                   p_spl_qp_svc;\r
2449         ib_api_status_t                 status;\r
2450 \r
2451         AL_ENTER( AL_DBG_SMI );\r
2452 \r
2453         CL_ASSERT( p_item );\r
2454         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
2455 \r
2456         /* Process a local MAD send work request. */\r
2457         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
2458         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
2459 \r
2460         /*\r
2461          * If we successfully processed a local MAD, which could have changed\r
2462          * something (e.g. the LID) on the HCA.  Scan for changes.\r
2463          */\r
2464         if( status == IB_SUCCESS )\r
2465                 pnp_poll();\r
2466 \r
2467         /*\r
2468          * Clear the local MAD pointer to allow processing of other MADs.\r
2469          * This is done after polling for attribute changes to ensure that\r
2470          * subsequent MADs pick up any changes performed by this one.\r
2471          */\r
2472         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2473         p_spl_qp_svc->local_mad_wr = NULL;\r
2474         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2475 \r
2476         /* Continue processing any queued MADs on the QP. */\r
2477         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2478 \r
2479         /* No longer in use by the asynchronous processing thread. */\r
2480         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2481 \r
2482         AL_EXIT( AL_DBG_SMI );\r
2483 }\r
2484 \r
2485 \r
2486 \r
2487 /*\r
2488  * Special QP send completion callback.\r
2489  */\r
2490 void\r
2491 spl_qp_send_comp_cb(\r
2492         IN              const   ib_cq_handle_t                          h_cq,\r
2493         IN                              void*                                           cq_context )\r
2494 {\r
2495         spl_qp_svc_t*                   p_spl_qp_svc;\r
2496 \r
2497         AL_ENTER( AL_DBG_SMI );\r
2498 \r
2499         UNREFERENCED_PARAMETER( h_cq );\r
2500 \r
2501         CL_ASSERT( cq_context );\r
2502         p_spl_qp_svc = cq_context;\r
2503 \r
2504 #if defined( CL_USE_MUTEX )\r
2505 \r
2506         /* Queue an asynchronous processing item to process sends. */\r
2507         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2508         if( !p_spl_qp_svc->send_async_queued )\r
2509         {\r
2510                 p_spl_qp_svc->send_async_queued = TRUE;\r
2511                 ref_al_obj( &p_spl_qp_svc->obj );\r
2512                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
2513         }\r
2514         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2515 \r
2516 #else\r
2517     cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2518         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2519         {\r
2520                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2521         AL_EXIT( AL_DBG_SMI );\r
2522                 return;\r
2523         }\r
2524         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2525         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2526 \r
2527     /* Queue the DPC. */\r
2528         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
2529     KeInsertQueueDpc( &p_spl_qp_svc->send_dpc, NULL, NULL );\r
2530 #endif\r
2531 \r
2532         AL_EXIT( AL_DBG_SMI );\r
2533 }\r
2534 \r
2535 \r
2536 void\r
2537 spl_qp_send_dpc_cb(\r
2538     IN              KDPC                        *p_dpc,\r
2539     IN              void                        *context,\r
2540     IN              void                        *arg1,\r
2541     IN              void                        *arg2\r
2542     )\r
2543 {\r
2544         spl_qp_svc_t*                   p_spl_qp_svc;\r
2545 \r
2546         AL_ENTER( AL_DBG_SMI );\r
2547 \r
2548         CL_ASSERT( context );\r
2549         p_spl_qp_svc = context;\r
2550 \r
2551     UNREFERENCED_PARAMETER( p_dpc );\r
2552     UNREFERENCED_PARAMETER( arg1 );\r
2553     UNREFERENCED_PARAMETER( arg2 );\r
2554 \r
2555         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );\r
2556 \r
2557         /* Continue processing any queued MADs on the QP. */\r
2558         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2559 \r
2560     cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2561 \r
2562     AL_EXIT( AL_DBG_SMI );\r
2563 }\r
2564 \r
2565 \r
#if defined( CL_USE_MUTEX )
/*
 * Asynchronous worker that processes send completions when the library
 * is built to use mutexes instead of DPCs.
 */
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;
	ib_api_status_t			resume_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Clear the queued flag first so a new completion can requeue us. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain completions from the send CQ. */
	spl_qp_comp( p_svc, p_svc->h_send_cq, IB_WC_SEND );

	/* Kick any MADs that queued while we were busy. */
	resume_status = special_qp_resume_sends( p_svc->h_qp );
	CL_ASSERT( resume_status == IB_SUCCESS );

	/* Drop the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2595 \r
2596 \r
2597 \r
2598 /*\r
2599  * Special QP receive completion callback.\r
2600  */\r
2601 void\r
2602 spl_qp_recv_comp_cb(\r
2603         IN              const   ib_cq_handle_t                          h_cq,\r
2604         IN                              void*                                           cq_context )\r
2605 {\r
2606         spl_qp_svc_t*                   p_spl_qp_svc;\r
2607 \r
2608         AL_ENTER( AL_DBG_SMI );\r
2609 \r
2610         UNREFERENCED_PARAMETER( h_cq );\r
2611 \r
2612         CL_ASSERT( cq_context );\r
2613         p_spl_qp_svc = cq_context;\r
2614 \r
2615 #if defined( CL_USE_MUTEX )\r
2616 \r
2617         /* Queue an asynchronous processing item to process receives. */\r
2618         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2619         if( !p_spl_qp_svc->recv_async_queued )\r
2620         {\r
2621                 p_spl_qp_svc->recv_async_queued = TRUE;\r
2622                 ref_al_obj( &p_spl_qp_svc->obj );\r
2623                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
2624         }\r
2625         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2626 \r
2627 #else\r
2628     cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2629         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2630         {\r
2631                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2632         AL_EXIT( AL_DBG_SMI );\r
2633                 return;\r
2634         }\r
2635         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2636         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2637 \r
2638     /* Queue the DPC. */\r
2639         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
2640     KeInsertQueueDpc( &p_spl_qp_svc->recv_dpc, NULL, NULL );\r
2641 #endif\r
2642 \r
2643         AL_EXIT( AL_DBG_SMI );\r
2644 }\r
2645 \r
2646 \r
2647 void\r
2648 spl_qp_recv_dpc_cb(\r
2649     IN              KDPC                        *p_dpc,\r
2650     IN              void                        *context,\r
2651     IN              void                        *arg1,\r
2652     IN              void                        *arg2\r
2653     )\r
2654 {\r
2655         spl_qp_svc_t*                   p_spl_qp_svc;\r
2656 \r
2657         AL_ENTER( AL_DBG_SMI );\r
2658 \r
2659         CL_ASSERT( context );\r
2660         p_spl_qp_svc = context;\r
2661 \r
2662     UNREFERENCED_PARAMETER( p_dpc );\r
2663     UNREFERENCED_PARAMETER( arg1 );\r
2664     UNREFERENCED_PARAMETER( arg2 );\r
2665 \r
2666         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );\r
2667 \r
2668     cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2669 \r
2670     AL_EXIT( AL_DBG_SMI );\r
2671 }\r
2672 \r
2673 \r
#if defined( CL_USE_MUTEX )
/*
 * Asynchronous worker that processes receive completions when the library
 * is built to use mutexes instead of DPCs.
 */
void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Clear the queued flag first so a new completion can requeue us. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain completions from the receive CQ. */
	spl_qp_comp( p_svc, p_svc->h_recv_cq, IB_WC_RECV );

	/* Drop the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2698 \r
2699 \r
#define SPL_QP_MAX_POLL 16
/*
 * Special QP completion handler.
 *
 * Drains up to SPL_QP_MAX_POLL completions from the given CQ.  Send
 * completions are unlinked from the service's send queue and reported to
 * the MAD dispatcher; receive completions are turned into MAD elements
 * and handed to process_mad_recv.  If the poll budget is exhausted the
 * matching DPC is requeued (reusing the in_use_cnt reference already
 * held) to continue draining; otherwise the CQ is rearmed and the
 * reference is dropped.
 */
void
spl_qp_comp(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN		const	ib_cq_handle_t				h_cq,
	IN				ib_wc_type_t				wc_type )
{
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc = &wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_mad_element_t*		p_mad_element;
	ib_smp_t*				p_smp;
	ib_api_status_t			status;
	int						max_poll = SPL_QP_MAX_POLL;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( h_cq );

	/* Check the QP state and guard against error handling. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		/* NOTE(review): returns without the AL_EXIT trace - confirm the
		 * asymmetric trace is intentional. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	wc.p_next = NULL;
	/* Process work completions.  A single wc buffer is recycled, so each
	 * ib_poll_cq call returns at most one completion into &wc. */
	while( max_poll && ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
	{
		/* Process completions one at a time. */
		CL_ASSERT( p_done_wc );

		/* Flushed completions are handled elsewhere. */
		if( wc.status == IB_WCS_WR_FLUSHED_ERR )
		{
			p_free_wc = &wc;
			continue;
		}

		/*
		 * Process the work completion.  Per IBA specification, the
		 * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
		 * Use the wc_type parameter.
		 */
		switch( wc_type )
		{
		case IB_WC_SEND:
			/* Get a pointer to the MAD work request. */
			p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);

			/* Remove the MAD work request from the service tracking queue. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
			cl_qlist_remove_item( &p_spl_qp_svc->send_queue,
				&p_mad_wr->list_item );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Reset directed route SMPs as required by IBA:  undo the
			 * hop_ptr adjustment made on the outbound path. */
			p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );
			if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			{
				if( ib_smp_is_response( p_smp ) )
					p_smp->hop_ptr++;
				else
					p_smp->hop_ptr--;
			}

			/* Report the send completion to the dispatcher. */
			mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );
			break;

		case IB_WC_RECV:

			/* Initialize pointers to the MAD element. */
			p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
			p_mad_element = &p_al_mad->element;

			/* Remove the AL MAD element from the service tracking list. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_mad->list_item );

			/* Replenish the receive buffer. */
			spl_qp_svc_post_recvs( p_spl_qp_svc );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Construct the MAD element from the receive work completion. */
			build_mad_recv( p_mad_element, &wc );

			/* Process the received MAD. */
			status = process_mad_recv( p_spl_qp_svc, p_mad_element );

			/* Discard this MAD on error. */
			if( status != IB_SUCCESS )
			{
				status = ib_put_mad( p_mad_element );
				CL_ASSERT( status == IB_SUCCESS );
			}
			break;

		default:
			CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );
			break;
		}

		if( wc.status != IB_WCS_SUCCESS )
		{
			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("special QP completion error: %s! internal syndrome 0x%I64x\n",
				ib_get_wc_status_str( wc.status ), wc.vendor_specific) );

			/* Reset the special QP service and return. */
			spl_qp_svc_reset( p_spl_qp_svc );
		}
		/* Recycle the single wc buffer and charge the poll budget. */
		p_free_wc = &wc;
		--max_poll;
	}

	if( max_poll == 0 )
	{
		/* We already have an in_use_cnt reference - use it to queue the DPC. */
		if( wc_type == IB_WC_SEND )
			KeInsertQueueDpc( &p_spl_qp_svc->send_dpc, NULL, NULL );
		else
			KeInsertQueueDpc( &p_spl_qp_svc->recv_dpc, NULL, NULL );
	}
	else
	{
		/* Rearm the CQ. */
		status = ib_rearm_cq( h_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );

		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
	}
	AL_EXIT( AL_DBG_SMI_CB );
}
2846 \r
2847 \r
2848 \r
2849 /*\r
2850  * Process a received MAD.\r
2851  */\r
2852 ib_api_status_t\r
2853 process_mad_recv(\r
2854         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2855         IN                              ib_mad_element_t*                       p_mad_element )\r
2856 {\r
2857         ib_smp_t*                               p_smp;\r
2858         mad_route_t                             route;\r
2859         ib_api_status_t                 status;\r
2860 \r
2861         AL_ENTER( AL_DBG_SMI );\r
2862 \r
2863         CL_ASSERT( p_spl_qp_svc );\r
2864         CL_ASSERT( p_mad_element );\r
2865 \r
2866         /*\r
2867          * If the CA has a HW agent then this MAD should have been\r
2868          * consumed below verbs.  The fact that it was received here\r
2869          * indicates that it should be forwarded to the dispatcher\r
2870          * for delivery to a class manager.  Otherwise, determine how\r
2871          * the MAD should be routed.\r
2872          */\r
2873         route = ROUTE_DISPATCHER;\r
2874         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
2875         {\r
2876                 /*\r
2877                  * SMP and GMP processing is branched here to handle overlaps\r
2878                  * between class methods and attributes.\r
2879                  */\r
2880                 switch( p_mad_element->p_mad_buf->mgmt_class )\r
2881                 {\r
2882                 case IB_MCLASS_SUBN_DIR:\r
2883                         /* Perform special checks on directed route SMPs. */\r
2884                         p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;\r
2885 \r
2886                         if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
2887                                 ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
2888                         {\r
2889                                 route = ROUTE_DISCARD;\r
2890                         }\r
2891                         else if( ib_smp_is_response( p_smp ) )\r
2892                         {\r
2893                                 /*\r
2894                                  * This node is the destination of the response.  Discard\r
2895                                  * the source LID or hop pointer are incorrect.\r
2896                                  */\r
2897                                 if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
2898                                 {\r
2899                                         if( p_smp->hop_ptr == 1 )\r
2900                                         {\r
2901                                                 p_smp->hop_ptr--;               /* Adjust ptr per IBA spec. */\r
2902                                         }\r
2903                                         else\r
2904                                         {\r
2905                                                 route = ROUTE_DISCARD;\r
2906                                         }\r
2907                                 }\r
2908                                 else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||\r
2909                                                  ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +\r
2910                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2911                                 {\r
2912                                                 route = ROUTE_DISCARD;\r
2913                                 }\r
2914                         }\r
2915                         else\r
2916                         {\r
2917                                 /*\r
2918                                  * This node is the destination of the request.  Discard\r
2919                                  * the destination LID or hop pointer are incorrect.\r
2920                                  */\r
2921                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
2922                                 {\r
2923                                         if( p_smp->hop_count == p_smp->hop_ptr )\r
2924                                         {\r
2925                                                 p_smp->return_path[ p_smp->hop_ptr++ ] =\r
2926                                                         p_spl_qp_svc->port_num; /* Set path per IBA spec. */\r
2927                                         }\r
2928                                         else\r
2929                                         {\r
2930                                                 route = ROUTE_DISCARD;\r
2931                                         }\r
2932                                 }\r
2933                                 else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||\r
2934                                                  ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +\r
2935                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2936                                 {\r
2937                                         route = ROUTE_DISCARD;\r
2938                                 }\r
2939                         }\r
2940 \r
2941                         if( route == ROUTE_DISCARD ) break;\r
2942                         /* else fall through next case */\r
2943 \r
2944                 case IB_MCLASS_SUBN_LID:\r
2945                         route = route_recv_smp( p_mad_element );\r
2946                         break;\r
2947 \r
2948                 case IB_MCLASS_PERF:\r
2949                         route = route_recv_perf( p_mad_element );\r
2950                         break;\r
2951 \r
2952                 case IB_MCLASS_BM:\r
2953                         route = route_recv_bm( p_mad_element );\r
2954                         break;\r
2955 \r
2956                 case IB_MLX_VENDOR_CLASS1:\r
2957                 case IB_MLX_VENDOR_CLASS2:\r
2958                         route = route_recv_vendor( p_mad_element );\r
2959                         break;\r
2960 \r
2961                 default:\r
2962                         break;\r
2963                 }\r
2964         }\r
2965 \r
2966         /* Route the MAD. */\r
2967         if( is_discard( route ) )\r
2968                 status = IB_ERROR;\r
2969         else if( is_dispatcher( route ) )\r
2970                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );\r
2971         else if( is_remote( route ) )\r
2972                 status = forward_sm_trap( p_spl_qp_svc, p_mad_element );\r
2973         else\r
2974                 status = recv_local_mad( p_spl_qp_svc, p_mad_element );\r
2975 \r
2976         AL_EXIT( AL_DBG_SMI );\r
2977         return status;\r
2978 }\r
2979 \r
2980 \r
2981 \r
2982 /*\r
2983  * Route a received SMP.\r
2984  */\r
2985 static mad_route_t\r
2986 route_recv_smp(\r
2987         IN                              ib_mad_element_t*                       p_mad_element )\r
2988 {\r
2989         mad_route_t                             route;\r
2990 \r
2991         AL_ENTER( AL_DBG_SMI );\r
2992 \r
2993         CL_ASSERT( p_mad_element );\r
2994 \r
2995         /* Process the received SMP. */\r
2996         switch( p_mad_element->p_mad_buf->method )\r
2997         {\r
2998         case IB_MAD_METHOD_GET:\r
2999         case IB_MAD_METHOD_SET:\r
3000                 route = route_recv_smp_attr( p_mad_element );\r
3001                 break;\r
3002 \r
3003         case IB_MAD_METHOD_TRAP:\r
3004                 /*\r
3005                  * Special check to route locally generated traps to the remote SM.\r
3006                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
3007                  * IB_RECV_OPT_FORWARD flag.\r
3008                  *\r
3009                  * Note that because forwarded traps use AL MAD services, the upper\r
3010                  * 32-bits of the TID are reserved by the access layer.  When matching\r
3011                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
3012                  * TID.\r
3013                  */\r
3014                 AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("Trap TID = 0x%08x:%08x \n",\r
3015                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
3016                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
3017 \r
3018                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
3019                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
3020                 break;\r
3021 \r
3022         case IB_MAD_METHOD_TRAP_REPRESS:\r
3023                 /*\r
3024                  * Note that because forwarded traps use AL MAD services, the upper\r
3025                  * 32-bits of the TID are reserved by the access layer.  When matching\r
3026                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
3027                  * TID.\r
3028                  */\r
3029                 AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("TrapRepress TID = 0x%08x:%08x \n",\r
3030                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
3031                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
3032 \r
3033                 route = ROUTE_LOCAL;\r
3034                 break;\r
3035 \r
3036         default:\r
3037                 route = ROUTE_DISPATCHER;\r
3038                 break;\r
3039         }\r
3040 \r
3041         AL_EXIT( AL_DBG_SMI );\r
3042         return route;\r
3043 }\r
3044 \r
3045 \r
3046 \r
3047 /*\r
3048  * Route received SMP attributes.\r
3049  */\r
3050 static mad_route_t\r
3051 route_recv_smp_attr(\r
3052         IN                              ib_mad_element_t*                       p_mad_element )\r
3053 {\r
3054         mad_route_t                             route;\r
3055 \r
3056         AL_ENTER( AL_DBG_SMI );\r
3057 \r
3058         CL_ASSERT( p_mad_element );\r
3059 \r
3060         /* Process the received SMP attributes. */\r
3061         switch( p_mad_element->p_mad_buf->attr_id )\r
3062         {\r
3063         case IB_MAD_ATTR_NODE_DESC:\r
3064         case IB_MAD_ATTR_NODE_INFO:\r
3065         case IB_MAD_ATTR_GUID_INFO:\r
3066         case IB_MAD_ATTR_PORT_INFO:\r
3067         case IB_MAD_ATTR_P_KEY_TABLE:\r
3068         case IB_MAD_ATTR_SLVL_TABLE:\r
3069         case IB_MAD_ATTR_VL_ARBITRATION:\r
3070         case IB_MAD_ATTR_VENDOR_DIAG:\r
3071         case IB_MAD_ATTR_LED_INFO:\r
3072         case IB_MAD_ATTR_SWITCH_INFO:\r
3073                 route = ROUTE_LOCAL;\r
3074                 break;\r
3075 \r
3076         default:\r
3077                 route = ROUTE_DISPATCHER;\r
3078                 break;\r
3079         }\r
3080 \r
3081         AL_EXIT( AL_DBG_SMI );\r
3082         return route;\r
3083 }\r
3084 \r
3085 \r
3086 static mad_route_t\r
3087 route_recv_bm(\r
3088         IN                              ib_mad_element_t*                       p_mad_element )\r
3089 {\r
3090         switch( p_mad_element->p_mad_buf->method )\r
3091         {\r
3092         case IB_MAD_METHOD_GET:\r
3093         case IB_MAD_METHOD_SET:\r
3094                 if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
3095                         return ROUTE_LOCAL;\r
3096                 break;\r
3097         default:\r
3098                 break;\r
3099         }\r
3100         return ROUTE_DISPATCHER;\r
3101 }\r
3102 \r
3103 static mad_route_t\r
3104 route_recv_perf(\r
3105         IN                              ib_mad_element_t*                       p_mad_element )\r
3106 {\r
3107         switch( p_mad_element->p_mad_buf->method )\r
3108         {\r
3109         case IB_MAD_METHOD_GET:\r
3110         case IB_MAD_METHOD_SET:\r
3111                 return ROUTE_LOCAL;\r
3112         default:\r
3113                 break;\r
3114         }\r
3115         return ROUTE_DISPATCHER;\r
3116 }\r
3117 \r
3118 static mad_route_t\r
3119 route_recv_vendor(\r
3120         IN                              ib_mad_element_t*                       p_mad_element )\r
3121 {\r
3122         return ( p_mad_element->p_mad_buf->method & IB_MAD_METHOD_RESP_MASK ) ?\r
3123                 ROUTE_DISPATCHER : ROUTE_LOCAL;\r
3124 }\r
3125 \r
3126 /*\r
3127  * Forward a locally generated Subnet Management trap.\r
3128  */\r
3129 ib_api_status_t\r
3130 forward_sm_trap(\r
3131         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
3132         IN                              ib_mad_element_t*                       p_mad_element )\r
3133 {\r
3134         ib_av_attr_t                    av_attr;\r
3135         ib_api_status_t                 status;\r
3136 \r
3137         AL_ENTER( AL_DBG_SMI );\r
3138 \r
3139         CL_ASSERT( p_spl_qp_svc );\r
3140         CL_ASSERT( p_mad_element );\r
3141 \r
3142         /* Check the SMP class. */\r
3143         if( p_mad_element->p_mad_buf->mgmt_class != IB