core/al/kernel/al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include <iba/ib_al.h>\r
35 #include <complib/cl_timer.h>\r
36 \r
37 #include "ib_common.h"\r
38 #include "al_common.h"\r
39 #include "al_debug.h"\r
40 #if defined(EVENT_TRACING)\r
41 #ifdef offsetof\r
42 #undef offsetof\r
43 #endif\r
44 #include "al_smi.tmh"\r
45 #endif\r
46 #include "al_verbs.h"\r
47 #include "al_mgr.h"\r
48 #include "al_pnp.h"\r
49 #include "al_qp.h"\r
50 #include "al_smi.h"\r
51 #include "al_av.h"\r
52 \r
53 \r
54 extern char                                             node_desc[IB_NODE_DESCRIPTION_SIZE];\r
55 \r
56 #define SMI_POLL_INTERVAL                       20000           /* Milliseconds */\r
57 #define LOCAL_MAD_TIMEOUT                       50                      /* Milliseconds */\r
58 #define DEFAULT_QP0_DEPTH                       256\r
59 #define DEFAULT_QP1_DEPTH                       1024\r
60 \r
61 uint32_t                                g_smi_poll_interval =   SMI_POLL_INTERVAL;\r
62 spl_qp_mgr_t*                   gp_spl_qp_mgr = NULL;\r
63 \r
64 \r
65 /*\r
66  * Function prototypes.\r
67  */\r
68 void\r
69 destroying_spl_qp_mgr(\r
70         IN                              al_obj_t*                                       p_obj );\r
71 \r
72 void\r
73 free_spl_qp_mgr(\r
74         IN                              al_obj_t*                                       p_obj );\r
75 \r
76 ib_api_status_t\r
77 spl_qp0_agent_pnp_cb(\r
78         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
79 \r
80 ib_api_status_t\r
81 spl_qp1_agent_pnp_cb(\r
82         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
83 \r
84 ib_api_status_t\r
85 spl_qp_agent_pnp(\r
86         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
87         IN                              ib_qp_type_t                            qp_type );\r
88 \r
89 ib_api_status_t\r
90 create_spl_qp_svc(\r
91         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
92         IN              const   ib_qp_type_t                            qp_type );\r
93 \r
94 void\r
95 destroying_spl_qp_svc(\r
96         IN                              al_obj_t*                                       p_obj );\r
97 \r
98 void\r
99 free_spl_qp_svc(\r
100         IN                              al_obj_t*                                       p_obj );\r
101 \r
102 void\r
103 spl_qp_svc_lid_change(\r
104         IN                              al_obj_t*                                       p_obj,\r
105         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
106 \r
107 ib_api_status_t\r
108 remote_mad_send(\r
109         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
110         IN                              al_mad_wr_t* const                      p_mad_wr );\r
111 \r
112 static ib_api_status_t\r
113 local_mad_send(\r
114         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
115         IN                              al_mad_wr_t* const                      p_mad_wr );\r
116 \r
117 static ib_api_status_t\r
118 loopback_mad(\r
119         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
120         IN                              al_mad_wr_t* const                      p_mad_wr );\r
121 \r
122 static ib_api_status_t\r
123 process_subn_mad(\r
124         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
125         IN                              al_mad_wr_t* const                      p_mad_wr );\r
126 \r
127 static ib_api_status_t\r
128 fwd_local_mad(\r
129         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
130         IN                              al_mad_wr_t* const                      p_mad_wr );\r
131 \r
132 void\r
133 send_local_mad_cb(\r
134         IN                              cl_async_proc_item_t*           p_item );\r
135 \r
136 void\r
137 spl_qp_send_comp_cb(\r
138         IN              const   ib_cq_handle_t                          h_cq,\r
139         IN                              void                                            *cq_context );\r
140 \r
141 void\r
142 spl_qp_recv_comp_cb(\r
143         IN              const   ib_cq_handle_t                          h_cq,\r
144         IN                              void                                            *cq_context );\r
145 \r
146 void\r
147 spl_qp_comp(\r
148         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
149         IN              const   ib_cq_handle_t                          h_cq,\r
150         IN                              ib_wc_type_t                            wc_type );\r
151 \r
152 ib_api_status_t\r
153 process_mad_recv(\r
154         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
155         IN                              ib_mad_element_t*                       p_mad_element );\r
156 \r
157 mad_route_t\r
158 route_recv_smp(\r
159         IN                              ib_mad_element_t*                       p_mad_element );\r
160 \r
161 mad_route_t\r
162 route_recv_smp_attr(\r
163         IN                              ib_mad_element_t*                       p_mad_element );\r
164 \r
165 mad_route_t\r
166 route_recv_dm_mad(\r
167         IN                              ib_mad_element_t*                       p_mad_element );\r
168 \r
169 mad_route_t\r
170 route_recv_gmp(\r
171         IN                              ib_mad_element_t*                       p_mad_element );\r
172 \r
173 mad_route_t\r
174 route_recv_gmp_attr(\r
175         IN                              ib_mad_element_t*                       p_mad_element );\r
176 \r
177 ib_api_status_t\r
178 forward_sm_trap(\r
179         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
180         IN                              ib_mad_element_t*                       p_mad_element );\r
181 \r
182 ib_api_status_t\r
183 recv_local_mad(\r
184         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
185         IN                              ib_mad_element_t*                       p_mad_request );\r
186 \r
187 void\r
188 spl_qp_alias_send_cb(\r
189         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
190         IN                              void                                            *mad_svc_context,\r
191         IN                              ib_mad_element_t                        *p_mad_element );\r
192 \r
193 void\r
194 spl_qp_alias_recv_cb(\r
195         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
196         IN                              void                                            *mad_svc_context,\r
197         IN                              ib_mad_element_t                        *p_mad_response );\r
198 \r
199 static ib_api_status_t\r
200 spl_qp_svc_post_recvs(\r
201         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
202 \r
203 void\r
204 spl_qp_svc_event_cb(\r
205         IN                              ib_async_event_rec_t            *p_event_rec );\r
206 \r
207 void\r
208 spl_qp_alias_event_cb(\r
209         IN                              ib_async_event_rec_t            *p_event_rec );\r
210 \r
211 void\r
212 spl_qp_svc_reset(\r
213         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
214 \r
215 void\r
216 spl_qp_svc_reset_cb(\r
217         IN                              cl_async_proc_item_t*           p_item );\r
218 \r
219 ib_api_status_t\r
220 acquire_svc_disp(\r
221         IN              const   cl_qmap_t* const                        p_svc_map,\r
222         IN              const   ib_net64_t                                      port_guid,\r
223                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
224 \r
225 void\r
226 smi_poll_timer_cb(\r
227         IN                              void*                                           context );\r
228 \r
229 void\r
230 smi_post_recvs(\r
231         IN                              cl_list_item_t* const           p_list_item,\r
232         IN                              void*                                           context );\r
233 \r
234 #if defined( CL_USE_MUTEX )\r
235 void\r
236 spl_qp_send_async_cb(\r
237         IN                              cl_async_proc_item_t*           p_item );\r
238 \r
239 void\r
240 spl_qp_recv_async_cb(\r
241         IN                              cl_async_proc_item_t*           p_item );\r
242 #endif\r
243 \r
244 /*\r
245  * Create the special QP manager.\r
246  */\r
247 ib_api_status_t\r
248 create_spl_qp_mgr(\r
249         IN                              al_obj_t*       const                   p_parent_obj )\r
250 {\r
251         ib_pnp_req_t                    pnp_req;\r
252         ib_api_status_t                 status;\r
253         cl_status_t                             cl_status;\r
254 \r
255         AL_ENTER( AL_DBG_SMI );\r
256 \r
257         CL_ASSERT( p_parent_obj );\r
258         CL_ASSERT( !gp_spl_qp_mgr );\r
259 \r
260         gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );\r
261         if( !gp_spl_qp_mgr )\r
262         {\r
263                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
264                         ("IB_INSUFFICIENT_MEMORY\n") );\r
265                 return IB_INSUFFICIENT_MEMORY;\r
266         }\r
267 \r
268         /* Construct the special QP manager. */\r
269         construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );\r
270         cl_timer_construct( &gp_spl_qp_mgr->poll_timer );\r
271 \r
272         /* Initialize the lists. */\r
273         cl_qmap_init( &gp_spl_qp_mgr->smi_map );\r
274         cl_qmap_init( &gp_spl_qp_mgr->gsi_map );\r
275 \r
276         /* Initialize the global SMI/GSI manager object. */\r
277         status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,\r
278                 destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );\r
279         if( status != IB_SUCCESS )\r
280         {\r
281                 free_spl_qp_mgr( &gp_spl_qp_mgr->obj );\r
282                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
283                         ("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );\r
284                 return status;\r
285         }\r
286 \r
287         /* Attach the special QP manager to the parent object. */\r
288         status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );\r
289         if( status != IB_SUCCESS )\r
290         {\r
291                 gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );\r
292                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
293                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
294                 return status;\r
295         }\r
296 \r
297         /* Initialize the SMI polling timer. */\r
298         cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,\r
299                 gp_spl_qp_mgr );\r
300         if( cl_status != CL_SUCCESS )\r
301         {\r
302                 gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );\r
303                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
304                         ("cl_timer_init failed, status 0x%x\n", cl_status ) );\r
305                 return ib_convert_cl_status( cl_status );\r
306         }\r
307 \r
308         /*\r
309          * Note: PnP registrations for port events must be done\r
310          * when the special QP manager is created.  This ensures that\r
311          * the registrations are listed sequentially and the reporting\r
312          * of PnP events occurs in the proper order.\r
313          */\r
314 \r
315         /*\r
316          * Separate context is needed for each special QP.  Therefore, a\r
317          * separate PnP event registration is performed for QP0 and QP1.\r
318          */\r
319 \r
320         /* Register for port PnP events for QP0. */\r
321         cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );\r
322         pnp_req.pnp_class       = IB_PNP_PORT;\r
323         pnp_req.pnp_context = &gp_spl_qp_mgr->obj;\r
324         pnp_req.pfn_pnp_cb      = spl_qp0_agent_pnp_cb;\r
325 \r
326         status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );\r
327 \r
328         if( status != IB_SUCCESS )\r
329         {\r
330                 gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );\r
331                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
332                         ("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );\r
333                 return status;\r
334         }\r
335 \r
336         /* Reference the special QP manager on behalf of the ib_reg_pnp call. */\r
337         ref_al_obj( &gp_spl_qp_mgr->obj );\r
338 \r
339         /* Register for port PnP events for QP1. */\r
340         cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );\r
341         pnp_req.pnp_class       = IB_PNP_PORT;\r
342         pnp_req.pnp_context = &gp_spl_qp_mgr->obj;\r
343         pnp_req.pfn_pnp_cb      = spl_qp1_agent_pnp_cb;\r
344 \r
345         status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );\r
346 \r
347         if( status != IB_SUCCESS )\r
348         {\r
349                 gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );\r
350                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
351                         ("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );\r
352                 return status;\r
353         }\r
354 \r
355         /*\r
356          * Note that we don't release the reference taken in init_al_obj\r
357          * because we need one on behalf of the ib_reg_pnp call.\r
358          */\r
359 \r
360         AL_EXIT( AL_DBG_SMI );\r
361         return IB_SUCCESS;\r
362 }\r
363 \r
364 \r
365 \r
366 /*\r
367  * Pre-destroy the special QP manager.\r
368  */\r
369 void\r
370 destroying_spl_qp_mgr(\r
371         IN                              al_obj_t*                                       p_obj )\r
372 {\r
373         ib_api_status_t                 status;\r
374 \r
375         CL_ASSERT( p_obj );\r
376         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
377         UNUSED_PARAM( p_obj );\r
378 \r
379         /* Deregister for port PnP events for QP0. */\r
380         if( gp_spl_qp_mgr->h_qp0_pnp )\r
381         {\r
382                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
383                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
384                 CL_ASSERT( status == IB_SUCCESS );\r
385         }\r
386 \r
387         /* Deregister for port PnP events for QP1. */\r
388         if( gp_spl_qp_mgr->h_qp1_pnp )\r
389         {\r
390                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
391                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
392                 CL_ASSERT( status == IB_SUCCESS );\r
393         }\r
394 \r
395         /* Destroy the SMI polling timer. */\r
396         cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
397 }\r
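
/*
 * Note: passing deref_al_obj as the destroy callback to ib_dereg_pnp releases
 * the reference taken on behalf of each ib_reg_pnp registration in
 * create_spl_qp_mgr, so the manager object cannot be freed until both PnP
 * deregistrations have completed.
 */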
398 \r
399 \r
400 \r
401 /*\r
402  * Free the special QP manager.\r
403  */\r
404 void\r
405 free_spl_qp_mgr(\r
406         IN                              al_obj_t*                                       p_obj )\r
407 {\r
408         CL_ASSERT( p_obj );\r
409         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
410         UNUSED_PARAM( p_obj );\r
411 \r
412         destroy_al_obj( &gp_spl_qp_mgr->obj );\r
413         cl_free( gp_spl_qp_mgr );\r
414         gp_spl_qp_mgr = NULL;\r
415 }\r
416 \r
417 \r
418 \r
419 /*\r
420  * Special QP0 agent PnP event callback.\r
421  */\r
422 ib_api_status_t\r
423 spl_qp0_agent_pnp_cb(\r
424         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
425 {\r
426         ib_api_status_t status;\r
427         AL_ENTER( AL_DBG_SMI_CB );\r
428 \r
429         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
430 \r
431         AL_EXIT( AL_DBG_SMI_CB );\r
432         return status;\r
433 }\r
434 \r
435 \r
436 \r
437 /*\r
438  * Special QP1 agent PnP event callback.\r
439  */\r
440 ib_api_status_t\r
441 spl_qp1_agent_pnp_cb(\r
442         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
443 {\r
444         ib_api_status_t status;\r
445         AL_ENTER( AL_DBG_SMI_CB );\r
446 \r
447         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
448 \r
449         AL_EXIT( AL_DBG_SMI_CB );\r
450         return status;\r
451 }\r
452 \r
453 \r
454 \r
455 /*\r
456  * Special QP agent PnP event callback.\r
457  */\r
458 ib_api_status_t\r
459 spl_qp_agent_pnp(\r
460         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
461         IN                              ib_qp_type_t                            qp_type )\r
462 {\r
463         ib_api_status_t                 status;\r
464         al_obj_t*                               p_obj;\r
465 \r
466         AL_ENTER( AL_DBG_SMI_CB );\r
467 \r
468         CL_ASSERT( p_pnp_rec );\r
469         p_obj = p_pnp_rec->context;\r
470 \r
471         /* Dispatch based on the PnP event type. */\r
472         switch( p_pnp_rec->pnp_event )\r
473         {\r
474         case IB_PNP_PORT_ADD:\r
475                 CL_ASSERT( !p_obj );\r
476                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
477                 break;\r
478 \r
479         case IB_PNP_PORT_REMOVE:\r
480                 CL_ASSERT( p_obj );\r
481                 ref_al_obj( p_obj );\r
482                 p_obj->pfn_destroy( p_obj, NULL );\r
483                 status = IB_SUCCESS;\r
484                 break;\r
485 \r
486         case IB_PNP_LID_CHANGE:\r
487                 CL_ASSERT( p_obj );\r
488                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
489                 status = IB_SUCCESS;\r
490                 break;\r
491 \r
492         default:\r
493                 /* All other events are ignored. */\r
494                 status = IB_SUCCESS;\r
495                 break;\r
496         }\r
497 \r
498         AL_EXIT( AL_DBG_SMI_CB );\r
499         return status;\r
500 }\r
501 \r
502 \r
503 \r
504 /*\r
505  * Create a special QP service.\r
506  */\r
507 ib_api_status_t\r
508 create_spl_qp_svc(\r
509         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
510         IN              const   ib_qp_type_t                            qp_type )\r
511 {\r
512         cl_status_t                             cl_status;\r
513         spl_qp_svc_t*                   p_spl_qp_svc;\r
514         ib_ca_handle_t                  h_ca;\r
515         ib_cq_create_t                  cq_create;\r
516         ib_qp_create_t                  qp_create;\r
517         ib_qp_attr_t                    qp_attr;\r
518         ib_mad_svc_t                    mad_svc;\r
519         ib_api_status_t                 status;\r
520 \r
521         AL_ENTER( AL_DBG_SMI );\r
522 \r
523         CL_ASSERT( p_pnp_rec );\r
524 \r
525         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
526         {\r
527                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
528                 return IB_INVALID_PARAMETER;\r
529         }\r
530 \r
531         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
532         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
533         CL_ASSERT( p_pnp_rec->p_port_attr );\r
534 \r
535         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
536         if( !p_spl_qp_svc )\r
537         {\r
538                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
539                         ("IB_INSUFFICIENT_MEMORY\n") );\r
540                 return IB_INSUFFICIENT_MEMORY;\r
541         }\r
542 \r
543         /* Tie the special QP service to the port by setting the port number. */\r
544         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
545         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
546         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
547 \r
548         /* Initialize the send and receive queues. */\r
549         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
550         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
551 \r
552 #if defined( CL_USE_MUTEX )\r
553         /* Initialize async callbacks and flags for send/receive processing. */\r
554         p_spl_qp_svc->send_async_queued = FALSE;\r
555         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
556         p_spl_qp_svc->recv_async_queued = FALSE;\r
557         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
558 #endif\r
559 \r
560         /* Initialize the async callback function to process local sends. */\r
561         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
562 \r
563         /* Initialize the async callback function to reset the QP on error. */\r
564         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
565 \r
566         /* Construct the special QP service object. */\r
567         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
568 \r
569         /* Initialize the special QP service object. */\r
570         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
571                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
572         if( status != IB_SUCCESS )\r
573         {\r
574                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
575                 return status;\r
576         }\r
577 \r
578         /* Attach the special QP service to the parent object. */\r
579         status = attach_al_obj(\r
580                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
581         if( status != IB_SUCCESS )\r
582         {\r
583                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
584                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
585                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
586                 return status;\r
587         }\r
588 \r
589         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
590         CL_ASSERT( h_ca );\r
591         if( !h_ca )\r
592         {\r
593                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
594                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
595                 return IB_INVALID_GUID;\r
596         }\r
597 \r
598         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
599 \r
600         /* Determine the maximum queue depth of the QP and CQs. */\r
601         p_spl_qp_svc->max_qp_depth =\r
602                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
603                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
604                 p_pnp_rec->p_ca_attr->max_wrs :\r
605                 p_pnp_rec->p_ca_attr->max_cqes;\r
606 \r
607         /* Compare this maximum to the default special queue depth. */\r
608         if( ( qp_type == IB_QPT_QP0 ) &&\r
609                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
610                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
611         if( ( qp_type == IB_QPT_QP1 ) &&\r
612                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
613                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
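        /*
         * Illustrative sketch only (hypothetical CA attributes): a CA reporting
         * max_wrs = 16384 and max_cqes = 65536 yields an initial depth of 16384,
         * which the caps above reduce to DEFAULT_QP0_DEPTH (256) for QP0 or
         * DEFAULT_QP1_DEPTH (1024) for QP1.  A CA advertising fewer WRs or CQEs
         * than the defaults keeps the smaller, hardware-limited value.
         */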
614 \r
615         /* Create the send CQ. */\r
616         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
617         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
618         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
619 \r
620         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
621                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
622 \r
623         if( status != IB_SUCCESS )\r
624         {\r
625                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
626                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
627                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
628                 return status;\r
629         }\r
630 \r
631         /* Reference the special QP service on behalf of ib_create_cq. */\r
632         ref_al_obj( &p_spl_qp_svc->obj );\r
633 \r
634         /* Check the result of the creation request. */\r
635         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
636         {\r
637                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
638                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
639                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
640                 return IB_INSUFFICIENT_RESOURCES;\r
641         }\r
642 \r
643         /* Create the receive CQ. */\r
644         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
645         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
646         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
647 \r
648         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
649                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
650 \r
651         if( status != IB_SUCCESS )\r
652         {\r
653                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
654                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
655                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
656                 return status;\r
657         }\r
658 \r
659         /* Reference the special QP service on behalf of ib_create_cq. */\r
660         ref_al_obj( &p_spl_qp_svc->obj );\r
661 \r
662         /* Check the result of the creation request. */\r
663         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
664         {\r
665                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
666                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
667                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
668                 return IB_INSUFFICIENT_RESOURCES;\r
669         }\r
670 \r
671         /* Create the special QP. */\r
672         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
673         qp_create.qp_type = qp_type;\r
674         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
675         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
676         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
677         qp_create.rq_sge = 1;\r
678         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
679         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
680         qp_create.sq_signaled = TRUE;\r
681 \r
682         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
683                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
684                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
685 \r
686         if( status != IB_SUCCESS )\r
687         {\r
688                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
689                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
690                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
691                 return status;\r
692         }\r
693 \r
694         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
695         ref_al_obj( &p_spl_qp_svc->obj );\r
696 \r
697         /* Check the result of the creation request. */\r
698         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
699         if( status != IB_SUCCESS )\r
700         {\r
701                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
702                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
703                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
704                 return status;\r
705         }\r
706 \r
707         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
708                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
709                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
710         {\r
711                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
712                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
713                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
714                 return IB_INSUFFICIENT_RESOURCES;\r
715         }\r
716 \r
717         /* Initialize the QP for use. */\r
718         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
719         if( status != IB_SUCCESS )\r
720         {\r
721                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
722                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
723                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
724                 return status;\r
725         }\r
726 \r
727         /* Post receive buffers. */\r
728         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
729         if( status != IB_SUCCESS )\r
730         {\r
731                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
732                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
733                         ("spl_qp_svc_post_recvs failed, %s\n",\r
734                         ib_get_err_str( status ) ) );\r
735                 return status;\r
736         }\r
737 \r
738         /* Create the MAD dispatcher. */\r
739         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
740                 &p_spl_qp_svc->h_mad_disp );\r
741         if( status != IB_SUCCESS )\r
742         {\r
743                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
744                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
745                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
746                 return status;\r
747         }\r
748 \r
749         /*\r
750          * Add this service to the special QP manager lookup lists.\r
751          * The service must be added to allow the creation of a QP alias.\r
752          */\r
753         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
754         if( qp_type == IB_QPT_QP0 )\r
755         {\r
756                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
757                         &p_spl_qp_svc->map_item );\r
758         }\r
759         else\r
760         {\r
761                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
762                         &p_spl_qp_svc->map_item );\r
763         }\r
764         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
765 \r
766         /*\r
767          * If the CA does not support HW agents, create a QP alias and register\r
768          * a MAD service for sending responses from the local MAD interface.\r
769          */\r
770         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
771         {\r
772                 /* Create a QP alias. */\r
773                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
774                 qp_create.qp_type =\r
775                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
776                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
777                 qp_create.sq_sge                = 1;\r
778                 qp_create.sq_signaled   = TRUE;\r
779 \r
780                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
781                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
782                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
783                         &p_spl_qp_svc->h_qp_alias );\r
784 \r
785                 if (status != IB_SUCCESS)\r
786                 {\r
787                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
788                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
789                                 ("ib_get_spl_qp alias failed, %s\n",\r
790                                 ib_get_err_str( status ) ) );\r
791                         return status;\r
792                 }\r
793 \r
794                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
795                 ref_al_obj( &p_spl_qp_svc->obj );\r
796 \r
797                 /* Register a MAD service for sends. */\r
798                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
799                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
800                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
801                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
802 \r
803                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
804                         &p_spl_qp_svc->h_mad_svc );\r
805 \r
806                 if( status != IB_SUCCESS )\r
807                 {\r
808                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
809                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
810                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
811                         return status;\r
812                 }\r
813         }\r
814 \r
815         /* Set the context of the PnP event to this child object. */\r
816         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
817 \r
818         /* The QP is ready.  Change the state. */\r
819         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
820 \r
821         /* Force a completion callback to rearm the CQs. */\r
822         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
823         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
824 \r
825         /* Start the polling thread timer. */\r
826         if( g_smi_poll_interval )\r
827         {\r
828                 cl_status =\r
829                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
830 \r
831                 if( cl_status != CL_SUCCESS )\r
832                 {\r
833                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
834                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
835                                 ("cl_timer_trim failed, status 0x%x\n", cl_status ) );\r
836                         return ib_convert_cl_status( cl_status );\r
837                 }\r
838         }\r
839 \r
840         /* Release the reference taken in init_al_obj. */\r
841         deref_al_obj( &p_spl_qp_svc->obj );\r
842 \r
843         AL_EXIT( AL_DBG_SMI );\r
844         return IB_SUCCESS;\r
845 }\r
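
/*
 * Reference-count sketch for create_spl_qp_svc, derived from the calls above:
 * init_al_obj takes the initial reference; each successful ib_create_cq and
 * ib_get_spl_qp call is matched by an explicit ref_al_obj; those references
 * are released later when the corresponding handles are destroyed with
 * deref_al_obj as the destroy callback (see destroying_spl_qp_svc).  The
 * initial reference is dropped at the end of this routine once the service
 * is active.
 */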
846 \r
847 \r
848 \r
849 /*\r
850  * Return a work completion to the MAD dispatcher for the specified MAD.\r
851  */\r
852 static void\r
853 __complete_send_mad(\r
854         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
855         IN                              al_mad_wr_t* const                      p_mad_wr,\r
856         IN              const   ib_wc_status_t                          wc_status )\r
857 {\r
858         ib_wc_t                 wc;\r
859 \r
860         /* Construct a send work completion. */\r
861         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
862         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
863         wc.wc_type      = IB_WC_SEND;\r
864         wc.status       = wc_status;\r
865 \r
866         /* Set the send size if we were successful with the send. */\r
867         if( wc_status == IB_WCS_SUCCESS )\r
868                 wc.length = MAD_BLOCK_SIZE;\r
869 \r
870         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
871 }\r
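
/*
 * Usage note: besides completing MADs that were actually posted, this helper
 * is used to flush the tracking queue with IB_WCS_WR_FLUSHED_ERR when the
 * service is torn down (see destroying_spl_qp_svc), and presumably by the
 * local and loopback send paths to synthesize completions for MADs that never
 * reach the wire.
 */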
872 \r
873 \r
874 \r
875 /*\r
876  * Pre-destroy a special QP service.\r
877  */\r
878 void\r
879 destroying_spl_qp_svc(\r
880         IN                              al_obj_t*                                       p_obj )\r
881 {\r
882         spl_qp_svc_t*                   p_spl_qp_svc;\r
883         cl_list_item_t*                 p_list_item;\r
884         al_mad_wr_t*                    p_mad_wr;\r
885 \r
886         ib_api_status_t                 status;\r
887 \r
888         AL_ENTER( AL_DBG_SMI );\r
889 \r
890         CL_ASSERT( p_obj );\r
891         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
892 \r
893         /* Change the state to prevent processing new send requests. */\r
894         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
895         p_spl_qp_svc->state = SPL_QP_DESTROYING;\r
896         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
897 \r
898         /* Wait here until the special QP service is no longer in use. */\r
899         while( p_spl_qp_svc->in_use_cnt )\r
900         {\r
901                 cl_thread_suspend( 0 );\r
902         }\r
903 \r
904         /* Destroy the special QP. */\r
905         if( p_spl_qp_svc->h_qp )\r
906         {\r
907                 /* If present, remove the special QP service from the tracking map. */\r
908                 cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
909                 if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )\r
910                 {\r
911                         cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );\r
912                 }\r
913                 else\r
914                 {\r
915                         cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );\r
916                 }\r
917                 cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
918 \r
919                 status = ib_destroy_qp( p_spl_qp_svc->h_qp,\r
920                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
921                 CL_ASSERT( status == IB_SUCCESS );\r
922 \r
923                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
924 \r
925                 /* Complete any outstanding MAD send operations as "flushed". */\r
926                 for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );\r
927                          p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );\r
928                          p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )\r
929                 {\r
930                         p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
931                         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
932                                 IB_WCS_WR_FLUSHED_ERR );\r
933                 }\r
934 \r
935                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
936                 /* Receive MAD elements are returned to the pool by the free routine. */\r
937         }\r
938 \r
939         /* Destroy the special QP alias and CQs. */\r
940         if( p_spl_qp_svc->h_qp_alias )\r
941         {\r
942                 status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,\r
943                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
944                 CL_ASSERT( status == IB_SUCCESS );\r
945         }\r
946         if( p_spl_qp_svc->h_send_cq )\r
947         {\r
948                 status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,\r
949                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
950                 CL_ASSERT( status == IB_SUCCESS );\r
951         }\r
952         if( p_spl_qp_svc->h_recv_cq )\r
953         {\r
954                 status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,\r
955                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
956                 CL_ASSERT( status == IB_SUCCESS );\r
957         }\r
958 \r
959         AL_EXIT( AL_DBG_SMI );\r
960 }\r
961 \r
962 \r
963 \r
964 /*\r
965  * Free a special QP service.\r
966  */\r
967 void\r
968 free_spl_qp_svc(\r
969         IN                              al_obj_t*                                       p_obj )\r
970 {\r
971         spl_qp_svc_t*                   p_spl_qp_svc;\r
972         cl_list_item_t*                 p_list_item;\r
973         al_mad_element_t*               p_al_mad;\r
974         ib_api_status_t                 status;\r
975 \r
976         AL_ENTER( AL_DBG_SMI );\r
977 \r
978         CL_ASSERT( p_obj );\r
979         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
980 \r
981         /* Dereference the CA. */\r
982         if( p_spl_qp_svc->obj.p_ci_ca )\r
983                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
984 \r
985         /* Return receive MAD elements to the pool. */\r
986         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
987                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
988                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
989         {\r
990                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
991 \r
992                 status = ib_put_mad( &p_al_mad->element );\r
993                 CL_ASSERT( status == IB_SUCCESS );\r
994         }\r
995 \r
996         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
997 \r
998         destroy_al_obj( &p_spl_qp_svc->obj );\r
999         cl_free( p_spl_qp_svc );\r
1000 \r
1001         AL_EXIT( AL_DBG_SMI );\r
1002 }\r
1003 \r
1004 \r
1005 \r
1006 /*\r
1007  * Update the base LID and LMC of a special QP service.\r
1008  */\r
1009 void\r
1010 spl_qp_svc_lid_change(\r
1011         IN                              al_obj_t*                                       p_obj,\r
1012         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1013 {\r
1014         spl_qp_svc_t*                   p_spl_qp_svc;\r
1015 \r
1016         AL_ENTER( AL_DBG_SMI );\r
1017 \r
1018         CL_ASSERT( p_obj );\r
1019         CL_ASSERT( p_pnp_rec );\r
1020         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1021 \r
1022         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1023 \r
1024         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1025         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1026 \r
1027         AL_EXIT( AL_DBG_SMI );\r
1028 }\r
1029 \r
1030 \r
1031 \r
1032 /*\r
1033  * Route a send work request.\r
1034  */\r
1035 mad_route_t\r
1036 route_mad_send(\r
1037         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1038         IN                              ib_send_wr_t* const                     p_send_wr )\r
1039 {\r
1040         al_mad_wr_t*                    p_mad_wr;\r
1041         al_mad_send_t*                  p_mad_send;\r
1042         ib_mad_t*                               p_mad;\r
1043         ib_smp_t*                               p_smp;\r
1044         ib_av_handle_t                  h_av;\r
1045         mad_route_t                             route;\r
1046         boolean_t                               local, loopback, discard;\r
1047 \r
1048         AL_ENTER( AL_DBG_SMI );\r
1049 \r
1050         CL_ASSERT( p_spl_qp_svc );\r
1051         CL_ASSERT( p_send_wr );\r
1052 \r
1053         /* Initialize pointers to the MAD work request and the MAD. */\r
1054         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1055         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1056         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1057         p_smp = (ib_smp_t*)p_mad;\r
1058 \r
1059         /* Check if the CA has a local MAD interface. */\r
1060         local = loopback = discard = FALSE;\r
1061         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
1062         {\r
1063                 /*\r
1064                  * If the MAD is a locally addressed Subnet Management, Performance\r
1065                  * Management, or Baseboard Management datagram, process the work\r
1066                  * request locally.\r
1067                  */\r
1068                 h_av = p_send_wr->dgrm.ud.h_av;\r
1069                 switch( p_mad->mgmt_class )\r
1070                 {\r
1071                 case IB_MCLASS_SUBN_DIR:\r
1072                         /* Perform special checks on directed route SMPs. */\r
1073                         if( ib_smp_is_response( p_smp ) )\r
1074                         {\r
1075                                 /*\r
1076                                  * This node is the originator of the response.  Discard the\r
1077                                  * MAD if the hop count or hop pointer is zero, if this is an\r
1078                                  * intermediate or out-of-bounds hop, or if the first port of\r
1079                                  * the directed route return path is not this port.\r
1080                                  */\r
1081                                 if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )\r
1082                                 {\r
1083                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1084                                                 ("hop cnt or hop ptr set to 0...discarding\n") );\r
1085                                         discard = TRUE;\r
1086                                 }\r
1087                                 else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )\r
1088                                 {\r
1089                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1090                                                 ("hop cnt != (hop ptr - 1)...discarding\n") );\r
1091                                         discard = TRUE;\r
1092                                 }\r
1093                                 else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )\r
1094                                 {\r
1095                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1096                                                 ("hop cnt >= max hops...discarding\n") );\r
1097                                         discard = TRUE;\r
1098                                 }\r
1099                                 else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&\r
1100                                                  ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=\r
1101                                                         p_spl_qp_svc->port_num ) )\r
1102                                 {\r
1103                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1104                                                 ("return path[hop ptr - 1] != port num...discarding\n") );\r
1105                                         discard = TRUE;\r
1106                                 }\r
1107                         }\r
1108                         else\r
1109                         {\r
1110                                 /* The SMP is a request. */\r
1111                                 if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
1112                                         ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
1113                                 {\r
1114                                         discard = TRUE;\r
1115                                 }\r
1116                                 else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )\r
1117                                 {\r
1118                                         /* Self Addressed: Sent locally, routed locally. */\r
1119                                         local = TRUE;\r
1120                                         discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||\r
1121                                                           ( p_smp->dr_dlid != IB_LID_PERMISSIVE );\r
1122                                 }\r
1123                                 else if( ( p_smp->hop_count != 0 ) &&\r
1124                                                  ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )\r
1125                                 {\r
1126                                         /* End of Path: Sent remotely, routed locally. */\r
1127                                         local = TRUE;\r
1128                                 }\r
1129                                 else if( ( p_smp->hop_count != 0 ) &&\r
1130                                                  ( p_smp->hop_ptr       == 0 ) )\r
1131                                 {\r
1132                                         /* Beginning of Path: Sent locally, routed remotely. */\r
1133                                         if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1134                                         {\r
1135                                                 discard =\r
1136                                                         ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=\r
1137                                                           p_spl_qp_svc->port_num );\r
1138                                         }\r
1139                                 }\r
1140                                 else\r
1141                                 {\r
1142                                         /* Intermediate hop. */\r
1143                                         discard = TRUE;\r
1144                                 }\r
1145                         }\r
1146                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1147                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1148                         break;\r
1149 \r
1150                 case IB_MCLASS_SUBN_LID:\r
1151                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1152                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1153 \r
1154                         /* Fall through to check for a local MAD. */\r
1155 \r
1156                 case IB_MCLASS_PERF:\r
1157                 case IB_MCLASS_BM:\r
1158                         local = ( h_av &&\r
1159                                 ( h_av->av_attr.dlid ==\r
1160                                 ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
1161                         break;\r
1162 \r
1163                 default:\r
1164                         /* Route vendor specific MADs to the HCA provider. */\r
1165                         if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )\r
1166                         {\r
1167                                 local = ( h_av &&\r
1168                                         ( h_av->av_attr.dlid ==\r
1169                                         ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
1170                         }\r
1171                         break;\r
1172                 }\r
1173         }\r
1174 \r
1175         route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?\r
1176                 ROUTE_LOCAL : ROUTE_REMOTE;\r
1177         if( local ) route = ROUTE_LOCAL;\r
1178         if( loopback && local ) route = ROUTE_LOOPBACK;\r
1179         if( discard ) route = ROUTE_DISCARD;\r
1180 \r
1181         AL_EXIT( AL_DBG_SMI );\r
1182         return route;\r
1183 }\r
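
/*
 * Sketch of the directed-route SMP decision logic implemented above (a
 * restatement of the checks, not of the IBA text):
 *
 *   Request,  hop_count == 0 && hop_ptr == 0        self-addressed; routed
 *                                                   locally, discarded unless
 *                                                   both DR LIDs are permissive.
 *   Request,  hop_count != 0 == hop_ptr - 1         end of path; routed locally.
 *   Request,  hop_ptr == 0 && hop_count != 0        beginning of path; routed
 *                                                   remotely (initial_path must
 *                                                   name this port when dr_slid
 *                                                   is permissive).
 *   Request,  anything else                         intermediate or out-of-range
 *                                                   hop; discarded.
 *   Response originated here                        discarded unless hop_count
 *                                                   and hop_ptr are consistent
 *                                                   and the return path names
 *                                                   this port.
 *
 * Locally addressed SMInfo ("heartbeat") MADs are additionally looped back.
 */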
1184 \r
1185 \r
1186 \r
1187 /*\r
1188  * Send a work request on the special QP.\r
1189  */\r
1190 ib_api_status_t\r
1191 spl_qp_svc_send(\r
1192         IN              const   ib_qp_handle_t                          h_qp,\r
1193         IN                              ib_send_wr_t* const                     p_send_wr )\r
1194 {\r
1195         spl_qp_svc_t*                   p_spl_qp_svc;\r
1196         al_mad_wr_t*                    p_mad_wr;\r
1197         mad_route_t                             route;\r
1198         ib_api_status_t                 status;\r
1199 \r
1200         AL_ENTER( AL_DBG_SMI );\r
1201 \r
1202         CL_ASSERT( h_qp );\r
1203         CL_ASSERT( p_send_wr );\r
1204 \r
1205         /* Get the special QP service. */\r
1206         p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;\r
1207         CL_ASSERT( p_spl_qp_svc );\r
1208         CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );\r
1209 \r
1210         /* Determine how to route the MAD. */\r
1211         route = route_mad_send( p_spl_qp_svc, p_send_wr );\r
1212 \r
1213         /*\r
1214          * Check the QP state and guard against error handling.  Also,\r
1215          * to maintain proper order of work completions, delay processing\r
1216          * a local MAD until any remote MAD work requests have completed,\r
1217          * and delay processing a remote MAD until local MAD work requests\r
1218          * have completed.\r
1219          */\r
1220         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1221         if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||\r
1222                 (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||\r
1223                 ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=\r
1224                         p_spl_qp_svc->max_qp_depth ) )\r
1225         {\r
1226                 /*\r
1227                  * Return busy status.  The special QP service will resume\r
1228                  * sends once the outstanding work requests complete.\r
1229                  */\r
1230                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1231 \r
1232                 AL_EXIT( AL_DBG_SMI );\r
1233                 return IB_RESOURCE_BUSY;\r
1234         }\r
1235 \r
1236         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1237 \r
1238         if( is_local( route ) )\r
1239         {\r
1240                 /* Save the local MAD work request for processing. */\r
1241                 p_spl_qp_svc->local_mad_wr = p_mad_wr;\r
1242 \r
1243                 /* Flag the service as in use by the asynchronous processing thread. */\r
1244                 cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
1245 \r
1246                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1247 \r
1248                 status = local_mad_send( p_spl_qp_svc, p_mad_wr );\r
1249         }\r
1250         else\r
1251         {\r
1252                 /* Process a remote MAD send work request. */\r
1253                 status = remote_mad_send( p_spl_qp_svc, p_mad_wr );\r
1254 \r
1255                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1256         }\r
1257 \r
1258         AL_EXIT( AL_DBG_SMI );\r
1259         return status;\r
1260 }\r
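
/*
 * Serialization note, restating the guard above: a send is rejected with
 * IB_RESOURCE_BUSY while the service is not active, while a local MAD is
 * still outstanding, when a local MAD would overtake queued remote sends, or
 * when the tracking queue has reached max_qp_depth.  This keeps local and
 * remote work completions in the order the requests were submitted.
 */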
1261 \r
1262 \r
1263 \r
1264 /*\r
1265  * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.\r
1266  */\r
1267 ib_api_status_t\r
1268 remote_mad_send(\r
1269         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1270         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1271 {\r
1272         ib_smp_t*                               p_smp;\r
1273         ib_api_status_t                 status;\r
1274 \r
1275         AL_ENTER( AL_DBG_SMI );\r
1276 \r
1277         CL_ASSERT( p_spl_qp_svc );\r
1278         CL_ASSERT( p_mad_wr );\r
1279 \r
1280         /* Initialize a pointer to the outbound MAD from the work request. */\r
1281         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1282 \r
1283         /* Perform outbound MAD processing. */\r
1284 \r
1285         /* Adjust directed route SMPs as required by IBA. */\r
1286         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1287         {\r
1288                 if( ib_smp_is_response( p_smp ) )\r
1289                 {\r
1290                         if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1291                                 p_smp->hop_ptr--;\r
1292                 }\r
1293                 else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1294                 {\r
1295                         /*\r
1296                          * Only update the pointer if the hw_agent is not implemented.\r
1297                          * Fujitsu implements SMI in hardware, so the following has to\r
1298                          * be passed down to the hardware SMI.\r
1299                          */\r
1300                         ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1301                         if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )\r
1302                                 p_smp->hop_ptr++;\r
1303                         ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1304                 }\r
1305         }\r
1306 \r
1307         /* Always generate send completions. */\r
1308         p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;\r
1309 \r
1310         /* Queue the MAD work request on the service tracking queue. */\r
1311         cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1312 \r
1313         status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );\r
1314 \r
1315         if( status != IB_SUCCESS )\r
1316         {\r
1317                 cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1318 \r
1319                 /* Reset directed route SMPs as required by IBA. */\r
1320                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1321                 {\r
1322                         if( ib_smp_is_response( p_smp ) )\r
1323                         {\r
1324                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1325                                         p_smp->hop_ptr++;\r
1326                         }\r
1327                         else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1328                         {\r
1329                                 /* Only update if the hw_agent is not implemented. */\r
1330                                 ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1331                                 if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )\r
1332                                         p_smp->hop_ptr--;\r
1333                                 ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1334                         }\r
1335                 }\r
1336         }\r
1337 \r
1338         AL_EXIT( AL_DBG_SMI );\r
1339         return status;\r
1340 }\r
1341 \r
1342 \r
1343 /*\r
1344  * Handle a MAD destined for the local CA, using cached data\r
1345  * as much as possible.\r
1346  */\r
1347 static ib_api_status_t\r
1348 local_mad_send(\r
1349         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1350         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1351 {\r
1352         mad_route_t                             route;\r
1353         ib_api_status_t                 status = IB_SUCCESS;\r
1354 \r
1355         AL_ENTER( AL_DBG_SMI );\r
1356 \r
1357         CL_ASSERT( p_spl_qp_svc );\r
1358         CL_ASSERT( p_mad_wr );\r
1359 \r
1360         /* Determine how to route the MAD. */\r
1361         route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );\r
1362 \r
1363         /* Check if this MAD should be discarded. */\r
1364         if( is_discard( route ) )\r
1365         {\r
1366                 /* Deliver a "work completion" to the dispatcher. */\r
1367                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1368                         IB_WCS_LOCAL_OP_ERR );\r
1369                 status = IB_INVALID_SETTING;\r
1370         }\r
1371         else if( is_loopback( route ) )\r
1372         {\r
1373                 /* Loopback local SM to SM "heartbeat" messages. */\r
1374                 status = loopback_mad( p_spl_qp_svc, p_mad_wr );\r
1375         }\r
1376         else\r
1377         {\r
1378                 switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )\r
1379                 {\r
1380                 case IB_MCLASS_SUBN_DIR:\r
1381                 case IB_MCLASS_SUBN_LID:\r
1382                         status = process_subn_mad( p_spl_qp_svc, p_mad_wr );\r
1383                         break;\r
1384 \r
1385                 default:\r
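                             /* Other classes cannot be served from cached data; hand them to the\r
                              * asynchronous thread, which forwards them to the CA (fwd_local_mad). */\r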
1386                         status = IB_NOT_DONE;\r
1387                 }\r
1388         }\r
1389 \r
1390         if( status == IB_NOT_DONE )\r
1391         {\r
1392                 /* Queue an asynchronous processing item to process the local MAD. */\r
1393                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );\r
1394         }\r
1395         else\r
1396         {\r
1397                 /*\r
1398                  * Clear the local MAD pointer to allow processing of other MADs.\r
1399                  * This is done after polling for attribute changes to ensure that\r
1400                  * subsequent MADs pick up any changes performed by this one.\r
1401                  */\r
1402                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1403                 p_spl_qp_svc->local_mad_wr = NULL;\r
1404                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1405 \r
1406                 /* No longer in use by the asynchronous processing thread. */\r
1407                 cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1408 \r
1409                 /* Special QP operations will resume by unwinding. */\r
1410         }\r
1411 \r
1412         AL_EXIT( AL_DBG_SMI );\r
1413         return IB_SUCCESS;\r
1414 }\r
1415 \r
1416 \r
1417 static ib_api_status_t\r
1418 get_resp_mad(\r
1419         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1420         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1421                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1422 {\r
1423         ib_api_status_t                 status;\r
1424 \r
1425         AL_ENTER( AL_DBG_SMI );\r
1426 \r
1427         CL_ASSERT( p_spl_qp_svc );\r
1428         CL_ASSERT( p_mad_wr );\r
1429         CL_ASSERT( pp_mad_resp );\r
1430 \r
1431         /* Get a MAD element from the pool for the response. */\r
1432         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1433                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1434         if( status != IB_SUCCESS )\r
1435         {\r
1436                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1437                         IB_WCS_LOCAL_OP_ERR );\r
1438         }\r
1439 \r
1440         AL_EXIT( AL_DBG_SMI );\r
1441         return status;\r
1442 }\r
1443 \r
1444 \r
1445 static ib_api_status_t\r
1446 complete_local_mad(\r
1447         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1448         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1449         IN                              ib_mad_element_t* const         p_mad_resp )\r
1450 {\r
1451         ib_api_status_t                 status;\r
1452 \r
1453         AL_ENTER( AL_DBG_SMI );\r
1454 \r
1455         CL_ASSERT( p_spl_qp_svc );\r
1456         CL_ASSERT( p_mad_wr );\r
1457         CL_ASSERT( p_mad_resp );\r
1458 \r
1459         /* Construct the receive MAD element. */\r
1460         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1461         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1462         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1463         if( p_mad_wr->send_wr.send_opt & IB_SEND_OPT_IMMEDIATE )\r
1464         {\r
1465                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1466                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1467         }\r
1468 \r
1469         /*\r
1470          * Hand the receive MAD element to the dispatcher before completing\r
1471          * the send.  This guarantees that the send request cannot time out.\r
1472          */\r
1473         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1474 \r
1475         /* Forward the send work completion to the dispatcher. */\r
1476         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1477 \r
1478         AL_EXIT( AL_DBG_SMI );\r
1479         return status;\r
1480 }\r
1481 \r
1482 \r
1483 static ib_api_status_t\r
1484 loopback_mad(\r
1485         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1486         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1487 {\r
1488         ib_mad_t                                *p_mad;\r
1489         ib_mad_element_t                *p_mad_resp;\r
1490         ib_api_status_t                 status;\r
1491 \r
1492         AL_ENTER( AL_DBG_SMI );\r
1493 \r
1494         CL_ASSERT( p_spl_qp_svc );\r
1495         CL_ASSERT( p_mad_wr );\r
1496 \r
1497         /* Get a MAD element from the pool for the response. */\r
1498         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1499         if( status == IB_SUCCESS )\r
1500         {\r
1501                 /* Initialize a pointer to the outbound MAD from the work request. */\r
1502                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1503 \r
1504                 /* Simulate a send/receive between local managers. */\r
1505                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1506 \r
1507                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1508         }\r
1509 \r
1510         AL_EXIT( AL_DBG_SMI );\r
1511         return status;\r
1512 }\r
1513 \r
1514 \r
1515 static ib_api_status_t\r
1516 process_node_info(\r
1517         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1518         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1519 {\r
1520         ib_mad_t                                *p_mad;\r
1521         ib_mad_element_t                *p_mad_resp;\r
1522         ib_smp_t                                *p_smp;\r
1523         ib_node_info_t                  *p_node_info;\r
1524         ib_ca_attr_t                    *p_ca_attr;\r
1525         ib_port_attr_t                  *p_port_attr;\r
1526         ib_api_status_t                 status;\r
1527 \r
1528         AL_ENTER( AL_DBG_SMI );\r
1529 \r
1530         CL_ASSERT( p_spl_qp_svc );\r
1531         CL_ASSERT( p_mad_wr );\r
1532 \r
1533         /* Initialize a pointer to the outbound MAD from the work request. */\r
1534         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1535         if( p_mad->method != IB_MAD_METHOD_GET )\r
1536         {\r
1537                 /* Node info is a GET-only attribute. */\r
1538                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1539                         IB_WCS_LOCAL_OP_ERR );\r
1540                 AL_EXIT( AL_DBG_SMI );\r
1541                 return IB_INVALID_SETTING;\r
1542         }\r
1543 \r
1544         /* Get a MAD element from the pool for the response. */\r
1545         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1546         if( status == IB_SUCCESS )\r
1547         {\r
1548                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1549                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1550                 p_smp->method |= IB_MAD_METHOD_RESP_MASK;\r
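                     /* Directed route responses carry the direction bit in the SMP status field. */\r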
1551                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1552                         p_smp->status = IB_SMP_DIRECTION;\r
1553                 else\r
1554                         p_smp->status = 0;\r
1555 \r
1556                 p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1557 \r
1558                 /*\r
1559                  * Fill in the node info, protecting against the\r
1560                  * attributes being changed by PnP.\r
1561                  */\r
1562                 cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
1563 \r
1564                 p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;\r
1565                 p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];\r
1566 \r
1567                 p_node_info->base_version = 1;\r
1568                 p_node_info->class_version = 1;\r
1569                 p_node_info->node_type = IB_NODE_TYPE_CA;\r
1570                 p_node_info->num_ports = p_ca_attr->num_ports;\r
1571                 /* TODO: Get some unique identifier for the system */\r
1572                 p_node_info->sys_guid = p_ca_attr->ca_guid;\r
1573                 p_node_info->node_guid = p_ca_attr->ca_guid;\r
1574                 p_node_info->port_guid = p_port_attr->port_guid;\r
1575                 p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );\r
1576                 p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );\r
1577                 p_node_info->revision = cl_hton32( p_ca_attr->revision );\r
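                     /* NodeInfo packs the local port number with the low 24 bits of the vendor ID. */\r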
1578                 p_node_info->port_num_vendor_id =\r
1579                         cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;\r
1580                 cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
1581 \r
1582                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1583         }\r
1584 \r
1585         AL_EXIT( AL_DBG_SMI );\r
1586         return status;\r
1587 }\r
1588 \r
1589 \r
1590 static ib_api_status_t\r
1591 process_node_desc(\r
1592         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1593         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1594 {\r
1595         ib_mad_t                                *p_mad;\r
1596         ib_mad_element_t                *p_mad_resp;\r
1597         ib_api_status_t                 status;\r
1598 \r
1599         AL_ENTER( AL_DBG_SMI );\r
1600 \r
1601         CL_ASSERT( p_spl_qp_svc );\r
1602         CL_ASSERT( p_mad_wr );\r
1603 \r
1604         /* Initialize a pointer to the outbound MAD from the work request. */\r
1605         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1606         if( p_mad->method != IB_MAD_METHOD_GET )\r
1607         {\r
1608                 /* Node description is a GET-only attribute. */\r
1609                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1610                         IB_WCS_LOCAL_OP_ERR );\r
1611                 AL_EXIT( AL_DBG_SMI );\r
1612                 return IB_INVALID_SETTING;\r
1613         }\r
1614 \r
1615         /* Get a MAD element from the pool for the response. */\r
1616         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1617         if( status == IB_SUCCESS )\r
1618         {\r
1619                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1620                 p_mad_resp->p_mad_buf->method |= IB_MAD_METHOD_RESP_MASK;\r
1621                 if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1622                         p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;\r
1623                 else\r
1624                         p_mad_resp->p_mad_buf->status = 0;\r
1625                 /* Set the node description to the machine name. */\r
1626                 cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, \r
1627                         node_desc, sizeof(node_desc) );\r
1628 \r
1629                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1630         }\r
1631 \r
1632         AL_EXIT( AL_DBG_SMI );\r
1633         return status;\r
1634 }\r
1635 \r
1636 \r
1637 /*\r
1638  * Process subnet administration MADs using cached data if possible.\r
1639  */\r
1640 static ib_api_status_t\r
1641 process_subn_mad(\r
1642         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1643         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1644 {\r
1645         ib_api_status_t         status;\r
1646         ib_smp_t                        *p_smp;\r
1647 \r
1648         AL_ENTER( AL_DBG_SMI );\r
1649 \r
1650         CL_ASSERT( p_spl_qp_svc );\r
1651         CL_ASSERT( p_mad_wr );\r
1652 \r
1653         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1654 \r
1655         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1656                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
1657 \r
1658         switch( p_smp->attr_id )\r
1659         {\r
1660         case IB_MAD_ATTR_NODE_INFO:\r
1661                 status = process_node_info( p_spl_qp_svc, p_mad_wr );\r
1662                 break;\r
1663 \r
1664         case IB_MAD_ATTR_NODE_DESC:\r
1665                 status = process_node_desc( p_spl_qp_svc, p_mad_wr );\r
1666                 break;\r
1667 \r
1668         default:\r
1669                 status = IB_NOT_DONE;\r
1670                 break;\r
1671         }\r
1672 \r
1673         AL_EXIT( AL_DBG_SMI );\r
1674         return status;\r
1675 }\r
1676 \r
1677 \r
1678 /*\r
1679  * Process a local MAD send work request.\r
1680  */\r
1681 ib_api_status_t\r
1682 fwd_local_mad(\r
1683         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1684         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1685 {\r
1686         ib_mad_t*                               p_mad;\r
1687         ib_smp_t*                               p_smp;\r
1688         al_mad_send_t*                  p_mad_send;\r
1689         ib_mad_element_t*               p_mad_response;\r
1690         ib_mad_t*                               p_mad_response_buf;\r
1691         ib_api_status_t                 status = IB_SUCCESS;\r
1692         boolean_t                               smp_is_set;\r
1693 \r
1694         AL_ENTER( AL_DBG_SMI );\r
1695 \r
1696         CL_ASSERT( p_spl_qp_svc );\r
1697         CL_ASSERT( p_mad_wr );\r
1698 \r
1699         /* Initialize pointers to the outbound MAD from the work request. */\r
1700         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1701         p_smp = (ib_smp_t*)p_mad;\r
1702 \r
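             /* Note whether this is a Set, so cached port attributes can be refreshed after completion. */\r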
1703         smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);\r
1704 \r
1705         /* Get a MAD element from the pool for the response. */\r
1706         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1707 //*** Commented code to work-around ib_local_mad() requiring a response MAD\r
1708 //*** as input.  Remove comments once the ib_local_mad() implementation allows\r
1709 //*** for a NULL response MAD, when one is not expected.\r
1710 //*** Note that an attempt to route an invalid response MAD in this case\r
1711 //*** will fail harmlessly.\r
1712 //***   if( p_mad_send->p_send_mad->resp_expected )\r
1713 //***   {\r
1714                 status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );\r
1715                 if( status != IB_SUCCESS )\r
1716                 {\r
1717                         AL_EXIT( AL_DBG_SMI );\r
1718                         return status;\r
1719                 }\r
1720                 p_mad_response_buf = p_mad_response->p_mad_buf;\r
1721 //***   }\r
1722 //***   else\r
1723 //***   {\r
1724 //***           p_mad_response_buf = NULL;\r
1725 //***   }\r
1726 \r
1727         /* Adjust directed route SMPs as required by IBA. */\r
1728         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1729         {\r
1730                 CL_ASSERT( !ib_smp_is_response( p_smp ) );\r
1731 \r
1732                 /*\r
1733                  * If this was a self addressed, directed route SMP, increment\r
1734                  * the hop pointer in the request before delivery as required\r
1735                  * by IBA.  Otherwise, adjustment for remote requests occurs\r
1736                  * during inbound processing.\r
1737                  */\r
1738                 if( p_smp->hop_count == 0 )\r
1739                         p_smp->hop_ptr++;\r
1740         }\r
1741 \r
1742         /* Forward the locally addressed MAD to the CA interface. */\r
1743         status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
1744                 p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );\r
1745 \r
1746         /* Reset directed route SMPs as required by IBA. */\r
1747         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1748         {\r
1749                 /*\r
1750                  * If this was a self addressed, directed route SMP, decrement\r
1751                  * the hop pointer in the response before delivery as required\r
1752                  * by IBA.  Otherwise, adjustment for remote responses occurs\r
1753                  * during outbound processing.\r
1754                  */\r
1755                 if( p_smp->hop_count == 0 )\r
1756                 {\r
1757                         /* Adjust the request SMP. */\r
1758                         p_smp->hop_ptr--;\r
1759 \r
1760                         /* Adjust the response SMP. */\r
1761                         if( p_mad_response_buf )\r
1762                         {\r
1763                                 p_smp = (ib_smp_t*)p_mad_response_buf;\r
1764                                 p_smp->hop_ptr--;\r
1765                         }\r
1766                 }\r
1767         }\r
1768 \r
1769         if( status != IB_SUCCESS )\r
1770         {\r
1771                 if( p_mad_response )\r
1772                         ib_put_mad( p_mad_response );\r
1773 \r
1774                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1775                         IB_WCS_LOCAL_OP_ERR );\r
1776                 AL_EXIT( AL_DBG_SMI );\r
1777                 return status;\r
1778         }\r
1779 \r
1780         /* Check the completion status of this simulated send. */\r
1781         if( p_mad_response_buf )\r
1782         {\r
1783                 /*\r
1784                  * The SMI uses PnP polling to refresh the base_lid and lmc.\r
1785                  * Polling takes time, so we update the values here to prevent\r
1786                  * the failure of LID routed MADs sent immediately following this\r
1787                  * assignment.  Check the response to see if the port info was set.\r
1788                  */\r
1789                 if( smp_is_set )\r
1790                 {\r
1791                         ib_port_info_t*         p_port_info = NULL;\r
1792 \r
1793                         switch( p_mad_response_buf->mgmt_class )\r
1794                         {\r
1795                         case IB_MCLASS_SUBN_DIR:\r
1796                                 if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1797                                         ( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )\r
1798                                 {\r
1799                                         p_port_info =\r
1800                                                 (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1801                                 }\r
1802                                 break;\r
1803 \r
1804                         case IB_MCLASS_SUBN_LID:\r
1805                                 if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1806                                         ( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )\r
1807                                 {\r
1808                                         p_port_info =\r
1809                                                 (ib_port_info_t*)( p_mad_response_buf + 1 );\r
1810                                 }\r
1811                                 break;\r
1812 \r
1813                         default:\r
1814                                 break;\r
1815                         }\r
1816 \r
1817                         if( p_port_info )\r
1818                         {\r
1819                                 p_spl_qp_svc->base_lid = p_port_info->base_lid;\r
1820                                 p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );\r
1821                                 if (p_port_info->subnet_timeout & 0x80)\r
1822                                 {\r
1823                                         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
1824                                                 ("Client reregister event, setting sm_lid to 0.\n"));\r
1825                                         ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1826                                         p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->\r
1827                                                 p_port_attr->sm_lid = 0;\r
1828                                         ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1829                                 }\r
1830                         }\r
1831                 }\r
1832         }\r
1833 \r
1834         status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );\r
1835 \r
1836         /* If the SMP was a Get, no need to trigger a PnP poll. */\r
1837         if( status == IB_SUCCESS && !smp_is_set )\r
1838                 status = IB_NOT_DONE;\r
1839 \r
1840         AL_EXIT( AL_DBG_SMI );\r
1841         return status;\r
1842 }\r
1843 \r
1844 \r
1845 \r
1846 /*\r
1847  * Asynchronous processing thread callback to send a local MAD.\r
1848  */\r
1849 void\r
1850 send_local_mad_cb(\r
1851         IN                              cl_async_proc_item_t*           p_item )\r
1852 {\r
1853         spl_qp_svc_t*                   p_spl_qp_svc;\r
1854         ib_api_status_t                 status;\r
1855 \r
1856         AL_ENTER( AL_DBG_SMI_CB );\r
1857 \r
1858         CL_ASSERT( p_item );\r
1859         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
1860 \r
1861         /* Process a local MAD send work request. */\r
1862         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
1863         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
1864 \r
1865         /*\r
1866          * If we successfully processed a local MAD, it could have changed\r
1867          * something (e.g. the LID) on the HCA, so scan for changes.\r
1868          */\r
1869         if( status == IB_SUCCESS )\r
1870                 pnp_poll();\r
1871 \r
1872         /*\r
1873          * Clear the local MAD pointer to allow processing of other MADs.\r
1874          * This is done after polling for attribute changes to ensure that\r
1875          * subsequent MADs pick up any changes performed by this one.\r
1876          */\r
1877         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1878         p_spl_qp_svc->local_mad_wr = NULL;\r
1879         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1880 \r
1881         /* Continue processing any queued MADs on the QP. */\r
1882         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1883 \r
1884         /* No longer in use by the asynchronous processing thread. */\r
1885         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1886 \r
1887         AL_EXIT( AL_DBG_SMI );\r
1888 }\r
1889 \r
1890 \r
1891 \r
1892 /*\r
1893  * Special QP send completion callback.\r
1894  */\r
1895 void\r
1896 spl_qp_send_comp_cb(\r
1897         IN              const   ib_cq_handle_t                          h_cq,\r
1898         IN                              void*                                           cq_context )\r
1899 {\r
1900         spl_qp_svc_t*                   p_spl_qp_svc;\r
1901 \r
1902         AL_ENTER( AL_DBG_SMI_CB );\r
1903 \r
1904         CL_ASSERT( cq_context );\r
1905         p_spl_qp_svc = cq_context;\r
1906 \r
1907 #if defined( CL_USE_MUTEX )\r
1908 \r
1909         /* Queue an asynchronous processing item to process sends. */\r
1910         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1911         if( !p_spl_qp_svc->send_async_queued )\r
1912         {\r
1913                 p_spl_qp_svc->send_async_queued = TRUE;\r
1914                 ref_al_obj( &p_spl_qp_svc->obj );\r
1915                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
1916         }\r
1917         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1918 \r
1919 #else\r
1920 \r
1921         /* Invoke the callback directly. */\r
1922         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
1923         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
1924 \r
1925         /* Continue processing any queued MADs on the QP. */\r
1926         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1927 \r
1928 #endif\r
1929 \r
1930         AL_EXIT( AL_DBG_SMI );\r
1931 }\r
1932 \r
1933 \r
1934 \r
1935 #if defined( CL_USE_MUTEX )\r
1936 void\r
1937 spl_qp_send_async_cb(\r
1938         IN                              cl_async_proc_item_t*           p_item )\r
1939 {\r
1940         spl_qp_svc_t*                   p_spl_qp_svc;\r
1941         ib_api_status_t                 status;\r
1942 \r
1943         AL_ENTER( AL_DBG_SMI_CB );\r
1944 \r
1945         CL_ASSERT( p_item );\r
1946         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );\r
1947 \r
1948         /* Reset asynchronous queue flag. */\r
1949         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1950         p_spl_qp_svc->send_async_queued = FALSE;\r
1951         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1952 \r
1953         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );\r
1954 \r
1955         /* Continue processing any queued MADs on the QP. */\r
1956         status = special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1957         CL_ASSERT( status == IB_SUCCESS );\r
1958 \r
1959         deref_al_obj( &p_spl_qp_svc->obj );\r
1960 \r
1961         AL_EXIT( AL_DBG_SMI );\r
1962 }\r
1963 #endif\r
1964 \r
1965 \r
1966 \r
1967 /*\r
1968  * Special QP receive completion callback.\r
1969  */\r
1970 void\r
1971 spl_qp_recv_comp_cb(\r
1972         IN              const   ib_cq_handle_t                          h_cq,\r
1973         IN                              void*                                           cq_context )\r
1974 {\r
1975         spl_qp_svc_t*                   p_spl_qp_svc;\r
1976 \r
1977         AL_ENTER( AL_DBG_SMI );\r
1978 \r
1979         CL_ASSERT( cq_context );\r
1980         p_spl_qp_svc = cq_context;\r
1981 \r
1982 #if defined( CL_USE_MUTEX )\r
1983 \r
1984         /* Queue an asynchronous processing item to process receives. */\r
1985         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1986         if( !p_spl_qp_svc->recv_async_queued )\r
1987         {\r
1988                 p_spl_qp_svc->recv_async_queued = TRUE;\r
1989                 ref_al_obj( &p_spl_qp_svc->obj );\r
1990                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
1991         }\r
1992         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1993 \r
1994 #else\r
1995 \r
1996         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
1997         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
1998 \r
1999 #endif\r
2000 \r
2001         AL_EXIT( AL_DBG_SMI );\r
2002 }\r
2003 \r
2004 \r
2005 \r
2006 #if defined( CL_USE_MUTEX )\r
2007 void\r
2008 spl_qp_recv_async_cb(\r
2009         IN                              cl_async_proc_item_t*           p_item )\r
2010 {\r
2011         spl_qp_svc_t*                   p_spl_qp_svc;\r
2012 \r
2013         AL_ENTER( AL_DBG_SMI );\r
2014 \r
2015         CL_ASSERT( p_item );\r
2016         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );\r
2017 \r
2018         /* Reset asynchronous queue flag. */\r
2019         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2020         p_spl_qp_svc->recv_async_queued = FALSE;\r
2021         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2022 \r
2023         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );\r
2024 \r
2025         deref_al_obj( &p_spl_qp_svc->obj );\r
2026 \r
2027         AL_EXIT( AL_DBG_SMI );\r
2028 }\r
2029 #endif\r
2030 \r
2031 \r
2032 \r
2033 /*\r
2034  * Special QP completion handler.\r
2035  */\r
2036 void\r
2037 spl_qp_comp(\r
2038         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2039         IN              const   ib_cq_handle_t                          h_cq,\r
2040         IN                              ib_wc_type_t                            wc_type )\r
2041 {\r
2042         ib_wc_t                                 wc;\r
2043         ib_wc_t*                                p_free_wc = &wc;\r
2044         ib_wc_t*                                p_done_wc;\r
2045         al_mad_wr_t*                    p_mad_wr;\r
2046         al_mad_element_t*               p_al_mad;\r
2047         ib_mad_element_t*               p_mad_element;\r
2048         ib_smp_t*                               p_smp;\r
2049         ib_api_status_t                 status;\r
2050 \r
2051         AL_ENTER( AL_DBG_SMI_CB );\r
2052 \r
2053         CL_ASSERT( p_spl_qp_svc );\r
2054         CL_ASSERT( h_cq );\r
2055 \r
2056         /* Check the QP state and guard against error handling. */\r
2057         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2058         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2059         {\r
2060                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2061                 return;\r
2062         }\r
2063         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2064         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2065 \r
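             /* Poll completions one at a time using a single work completion on the stack. */\r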
2066         wc.p_next = NULL;\r
2067         /* Process work completions. */\r
2068         while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )\r
2069         {\r
2070                 /* Process completions one at a time. */\r
2071                 CL_ASSERT( p_done_wc );\r
2072 \r
2073                 /* Flushed completions are handled elsewhere. */\r
2074                 if( wc.status == IB_WCS_WR_FLUSHED_ERR )\r
2075                 {\r
2076                         p_free_wc = &wc;\r
2077                         continue;\r
2078                 }\r
2079 \r
2080                 /*\r
2081                  * Process the work completion.  Per IBA specification, the\r
2082                  * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.\r
2083                  * Use the wc_type parameter.\r
2084                  */\r
2085                 switch( wc_type )\r
2086                 {\r
2087                 case IB_WC_SEND:\r
2088                         /* Get a pointer to the MAD work request. */\r
2089                         p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);\r
2090 \r
2091                         /* Remove the MAD work request from the service tracking queue. */\r
2092                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2093                         cl_qlist_remove_item( &p_spl_qp_svc->send_queue,\r
2094                                 &p_mad_wr->list_item );\r
2095                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2096 \r
2097                         /* Reset directed route SMPs as required by IBA. */\r
2098                         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
2099                         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2100                         {\r
2101                                 if( ib_smp_is_response( p_smp ) )\r
2102                                         p_smp->hop_ptr++;\r
2103                                 else\r
2104                                         p_smp->hop_ptr--;\r
2105                         }\r
2106 \r
2107                         /* Report the send completion to the dispatcher. */\r
2108                         mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );\r
2109                         break;\r
2110 \r
2111                 case IB_WC_RECV:\r
2112 \r
2113                         /* Initialize pointers to the MAD element. */\r
2114                         p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);\r
2115                         p_mad_element = &p_al_mad->element;\r
2116 \r
2117                         /* Remove the AL MAD element from the service tracking list. */\r
2118                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2119 \r
2120                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2121                                 &p_al_mad->list_item );\r
2122 \r
2123                         /* Replenish the receive buffer. */\r
2124                         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2125                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2126 \r
2127                         /* Construct the MAD element from the receive work completion. */\r
2128                         build_mad_recv( p_mad_element, &wc );\r
2129 \r
2130                         /* Process the received MAD. */\r
2131                         status = process_mad_recv( p_spl_qp_svc, p_mad_element );\r
2132 \r
2133                         /* Discard this MAD on error. */\r
2134                         if( status != IB_SUCCESS )\r
2135                         {\r
2136                                 status = ib_put_mad( p_mad_element );\r
2137                                 CL_ASSERT( status == IB_SUCCESS );\r
2138                         }\r
2139                         break;\r
2140 \r
2141                 default:\r
2142                         CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );\r
2143                         break;\r
2144                 }\r
2145 \r
2146                 if( wc.status != IB_WCS_SUCCESS )\r
2147                 {\r
2148                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2149                                 ("special QP completion error: %s! internal syndrome 0x%I64x\n",\r
2150                                 ib_get_wc_status_str( wc.status ), wc.vendor_specific) );\r
2151 \r
2152                         /* Reset the special QP service. */\r
2153                         spl_qp_svc_reset( p_spl_qp_svc );\r
2154                 }\r
2155                 p_free_wc = &wc;\r
2156         }\r
2157 \r
2158         /* Rearm the CQ. */\r
2159         status = ib_rearm_cq( h_cq, FALSE );\r
2160         CL_ASSERT( status == IB_SUCCESS );\r
2161 \r
2162         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2163         AL_EXIT( AL_DBG_SMI_CB );\r
2164 }\r
2165 \r
2166 \r
2167 \r
2168 /*\r
2169  * Process a received MAD.\r
2170  */\r
2171 ib_api_status_t\r
2172 process_mad_recv(\r
2173         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2174         IN                              ib_mad_element_t*                       p_mad_element )\r
2175 {\r
2176         ib_smp_t*                               p_smp;\r
2177         mad_route_t                             route;\r
2178         ib_api_status_t                 status;\r
2179 \r
2180         AL_ENTER( AL_DBG_SMI );\r
2181 \r
2182         CL_ASSERT( p_spl_qp_svc );\r
2183         CL_ASSERT( p_mad_element );\r
2184 \r
2185         /*\r
2186          * If the CA has a HW agent then this MAD should have been\r
2187          * consumed below verbs.  The fact that it was received here\r
2188          * indicates that it should be forwarded to the dispatcher\r
2189          * for delivery to a class manager.  Otherwise, determine how\r
2190          * the MAD should be routed.\r
2191          */\r
2192         route = ROUTE_DISPATCHER;\r
2193         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
2194         {\r
2195                 /*\r
2196                  * SMP and GMP processing is branched here to handle overlaps\r
2197                  * between class methods and attributes.\r
2198                  */\r
2199                 switch( p_mad_element->p_mad_buf->mgmt_class )\r
2200                 {\r
2201                 case IB_MCLASS_SUBN_DIR:\r
2202                         /* Perform special checks on directed route SMPs. */\r
2203                         p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;\r
2204 \r
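                             /* Discard directed route SMPs whose hop fields exceed the maximum path length. */\r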
2205                         if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
2206                                 ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
2207                         {\r
2208                                 route = ROUTE_DISCARD;\r
2209                         }\r
2210                         else if( ib_smp_is_response( p_smp ) )\r
2211                         {\r
2212                                 /*\r
2213                                  * This node is the destination of the response.  Discard if\r
2214                                  * the source LID or hop pointer is incorrect.\r
2215                                  */\r
2216                                 if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
2217                                 {\r
2218                                         if( p_smp->hop_ptr == 1 )\r
2219                                         {\r
2220                                                 p_smp->hop_ptr--;               /* Adjust ptr per IBA spec. */\r
2221                                         }\r
2222                                         else\r
2223                                         {\r
2224                                                 route = ROUTE_DISCARD;\r
2225                                         }\r
2226                                 }\r
2227                                 else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||\r
2228                                                  ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +\r
2229                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2230                                 {\r
2231                                                 route = ROUTE_DISCARD;\r
2232                                 }\r
2233                         }\r
2234                         else\r
2235                         {\r
2236                                 /*\r
2237                                  * This node is the destination of the request.  Discard if\r
2238                                  * the destination LID or hop pointer is incorrect.\r
2239                                  */\r
2240                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
2241                                 {\r
2242                                         if( p_smp->hop_count == p_smp->hop_ptr )\r
2243                                         {\r
2244                                                 p_smp->return_path[ p_smp->hop_ptr++ ] =\r
2245                                                         p_spl_qp_svc->port_num; /* Set path per IBA spec. */\r
2246                                         }\r
2247                                         else\r
2248                                         {\r
2249                                                 route = ROUTE_DISCARD;\r
2250                                         }\r
2251                                 }\r
2252                                 else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||\r
2253                                                  ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +\r
2254                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2255                                 {\r
2256                                                 route = ROUTE_DISCARD;\r
2257                                 }\r
2258                         }\r
2259 \r
2260                         if( route == ROUTE_DISCARD ) break;\r
2261                         /* else fall through to the next case */\r
2262 \r
2263                 case IB_MCLASS_SUBN_LID:\r
2264                         route = route_recv_smp( p_mad_element );\r
2265                         break;\r
2266 \r
2267                 case IB_MCLASS_PERF:\r
2268                         route = ROUTE_LOCAL;\r
2269                         break;\r
2270 \r
2271                 case IB_MCLASS_BM:\r
2272                         route = route_recv_gmp( p_mad_element );\r
2273                         break;\r
2274 \r
2275                 default:\r
2276                         /* Route vendor specific MADs to the HCA provider. */\r
2277                         if( ib_class_is_vendor_specific(\r
2278                                 p_mad_element->p_mad_buf->mgmt_class ) )\r
2279                         {\r
2280                                 route = route_recv_gmp( p_mad_element );\r
2281                         }\r
2282                         break;\r
2283                 }\r
2284         }\r
2285 \r
2286         /* Route the MAD. */\r
2287         if ( is_discard( route ) )\r
2288                 status = IB_ERROR;\r
2289         else if( is_dispatcher( route ) )\r
2290                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );\r
2291         else if( is_remote( route ) )\r
2292                 status = forward_sm_trap( p_spl_qp_svc, p_mad_element );\r
2293         else\r
2294                 status = recv_local_mad( p_spl_qp_svc, p_mad_element );\r
2295 \r
2296         AL_EXIT( AL_DBG_SMI );\r
2297         return status;\r
2298 }\r
2299 \r
2300 \r
2301 \r
2302 /*\r
2303  * Route a received SMP.\r
2304  */\r
2305 mad_route_t\r
2306 route_recv_smp(\r
2307         IN                              ib_mad_element_t*                       p_mad_element )\r
2308 {\r
2309         mad_route_t                             route;\r
2310 \r
2311         AL_ENTER( AL_DBG_SMI );\r
2312 \r
2313         CL_ASSERT( p_mad_element );\r
2314 \r
2315         /* Process the received SMP. */\r
2316         switch( p_mad_element->p_mad_buf->method )\r
2317         {\r
2318         case IB_MAD_METHOD_GET:\r
2319         case IB_MAD_METHOD_SET:\r
2320                 route = route_recv_smp_attr( p_mad_element );\r
2321                 break;\r
2322 \r
2323         case IB_MAD_METHOD_TRAP:\r
2324                 /*\r
2325                  * Special check to route locally generated traps to the remote SM.\r
2326                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2327                  * IB_RECV_OPT_FORWARD flag.\r
2328                  *\r
2329                  * Note that because forwarded traps use AL MAD services, the upper\r
2330                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2331                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2332                  * TID.\r
2333                  */\r
2334                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2335                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2336                 break;\r
2337 \r
2338         case IB_MAD_METHOD_TRAP_REPRESS:\r
2339                 /*\r
2340                  * Note that because forwarded traps use AL MAD services, the upper\r
2341                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2342                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2343                  * TID.\r
2344                  */\r
2345                 route = ROUTE_LOCAL;\r
2346                 break;\r
2347 \r
2348         default:\r
2349                 route = ROUTE_DISPATCHER;\r
2350                 break;\r
2351         }\r
2352 \r
2353         AL_EXIT( AL_DBG_SMI );\r
2354         return route;\r
2355 }\r
2356 \r
2357 \r
2358 \r
2359 /*\r
2360  * Route received SMP attributes.\r
2361  */\r
2362 mad_route_t\r
2363 route_recv_smp_attr(\r
2364         IN                              ib_mad_element_t*                       p_mad_element )\r
2365 {\r
2366         mad_route_t                             route;\r
2367 \r
2368         AL_ENTER( AL_DBG_SMI );\r
2369 \r
2370         CL_ASSERT( p_mad_element );\r
2371 \r
2372         /* Process the received SMP attributes. */\r
2373         switch( p_mad_element->p_mad_buf->attr_id )\r
2374         {\r
2375         case IB_MAD_ATTR_NODE_DESC:\r
2376         case IB_MAD_ATTR_NODE_INFO:\r
2377         case IB_MAD_ATTR_GUID_INFO:\r
2378         case IB_MAD_ATTR_PORT_INFO:\r
2379         case IB_MAD_ATTR_P_KEY_TABLE:\r
2380         case IB_MAD_ATTR_SLVL_TABLE:\r
2381         case IB_MAD_ATTR_VL_ARBITRATION:\r
2382         case IB_MAD_ATTR_VENDOR_DIAG:\r
2383         case IB_MAD_ATTR_LED_INFO:\r
2384                 route = ROUTE_LOCAL;\r
2385                 break;\r
2386 \r
2387         default:\r
2388                 route = ROUTE_DISPATCHER;\r
2389                 break;\r
2390         }\r
2391 \r
2392         AL_EXIT( AL_DBG_SMI );\r
2393         return route;\r
2394 }\r
2395 \r
2396 \r
2397 /*\r
2398  * Route a received GMP.\r
2399  */\r
2400 mad_route_t\r
2401 route_recv_gmp(\r
2402         IN                              ib_mad_element_t*                       p_mad_element )\r
2403 {\r
2404         mad_route_t                             route;\r
2405 \r
2406         AL_ENTER( AL_DBG_SMI );\r
2407 \r
2408         CL_ASSERT( p_mad_element );\r
2409 \r
2410         /* Process the received GMP. */\r
2411         switch( p_mad_element->p_mad_buf->method )\r
2412         {\r
2413         case IB_MAD_METHOD_GET:\r
2414         case IB_MAD_METHOD_SET:\r
2415                 /* Route vendor specific MADs to the HCA provider. */\r
2416                 if( ib_class_is_vendor_specific(\r
2417                         p_mad_element->p_mad_buf->mgmt_class ) )\r
2418                 {\r
2419                         route = ROUTE_LOCAL;\r
2420                 }\r
2421                 else\r
2422                 {\r
2423                         route = route_recv_gmp_attr( p_mad_element );\r
2424                 }\r
2425                 break;\r
2426 \r
2427         default:\r
2428                 route = ROUTE_DISPATCHER;\r
2429                 break;\r
2430         }\r
2431 \r
2432         AL_EXIT( AL_DBG_SMI );\r
2433         return route;\r
2434 }\r
2435 \r
2436 \r
2437 \r
2438 /*\r
2439  * Route received GMP attributes.\r
2440  */\r
2441 mad_route_t\r
2442 route_recv_gmp_attr(\r
2443         IN                              ib_mad_element_t*                       p_mad_element )\r
2444 {\r
2445         mad_route_t                             route;\r
2446 \r
2447         AL_ENTER( AL_DBG_SMI );\r
2448 \r
2449         CL_ASSERT( p_mad_element );\r
2450 \r
2451         /* Process the received GMP attributes. */\r
2452         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
2453                 route = ROUTE_LOCAL;\r
2454         else\r
2455                 route = ROUTE_DISPATCHER;\r
2456 \r
2457         AL_EXIT( AL_DBG_SMI );\r
2458         return route;\r
2459 }\r
2460 \r
2461 \r
2462 \r
2463 /*\r
2464  * Forward a locally generated Subnet Management trap.\r
2465  */\r
2466 ib_api_status_t\r
2467 forward_sm_trap(\r
2468         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2469         IN                              ib_mad_element_t*                       p_mad_element )\r
2470 {\r
2471         ib_av_attr_t                    av_attr;\r
2472         ib_api_status_t                 status;\r
2473 \r
2474         AL_ENTER( AL_DBG_SMI_CB );\r
2475 \r
2476         CL_ASSERT( p_spl_qp_svc );\r
2477         CL_ASSERT( p_mad_element );\r
2478 \r
2479         /* Check the SMP class. */\r
2480         if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )\r
2481         {\r
2482                 /*\r
2483                  * Per IBA Specification Release 1.1 Section 14.2.2.1,\r
2484                  * "C14-5: Only a SM shall originate a directed route SMP."\r
2485                  * Therefore all traps should be LID routed; drop this one.\r
2486                  */\r
2487                 AL_EXIT( AL_DBG_SMI_CB );\r
2488                 return IB_ERROR;\r
2489         }\r
2490 \r
2491         /* Create an address vector for the SM. */\r
2492         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2493         av_attr.port_num = p_spl_qp_svc->port_num;\r
2494         av_attr.sl = p_mad_element->remote_sl;\r
2495         av_attr.dlid = p_mad_element->remote_lid;\r
2496         if( p_mad_element->grh_valid )\r
2497         {\r
2498                 cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );\r
2499                 av_attr.grh.src_gid      = p_mad_element->p_grh->dest_gid;\r
2500                 av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;\r
2501                 av_attr.grh_valid = TRUE;\r
2502         }\r
2503 \r
2504         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2505                 &av_attr, &p_mad_element->h_av );\r
2506 \r
2507         if( status != IB_SUCCESS )\r
2508         {\r
2509                 AL_EXIT( AL_DBG_SMI_CB );\r
2510                 return status;\r
2511         }\r
2512 \r
2513         /* Complete the initialization of the MAD element. */\r
2514         p_mad_element->p_next = NULL;\r
2515         p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;\r
2516         p_mad_element->resp_expected = FALSE;\r
2517 \r
2518         /* Clear context1 for proper send completion callback processing. */\r
2519         p_mad_element->context1 = NULL;\r
2520 \r
2521         /*\r
2522          * Forward the trap.  Note that because forwarded traps use AL MAD\r
2523          * services, the upper 32-bits of the TID are reserved by the access\r
2524          * layer.  When matching a Trap Repress MAD, the SMA must only use\r
2525          * the lower 32-bits of the TID.\r
2526          */\r
2527         status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );\r
2528 \r
2529         if( status != IB_SUCCESS )\r
2530                 ib_destroy_av( p_mad_element->h_av );\r
2531 \r
2532         AL_EXIT( AL_DBG_SMI_CB );\r
2533         return status;\r
2534 }\r
2535 \r
2536 \r
2537 /*\r
2538  * Process a locally routed MAD received from the special QP.\r
2539  */\r
2540 ib_api_status_t\r
2541 recv_local_mad(\r
2542         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2543         IN                              ib_mad_element_t*                       p_mad_request )\r
2544 {\r
2545         ib_mad_t*                               p_mad_hdr;\r
2546         ib_api_status_t                 status;\r
2547 \r
2548         AL_ENTER( AL_DBG_SMI_CB );\r
2549 \r
2550         CL_ASSERT( p_spl_qp_svc );\r
2551         CL_ASSERT( p_mad_request );\r
2552 \r
2553         /* Initialize the MAD element. */\r
2554         p_mad_hdr = ib_get_mad_buf( p_mad_request );\r
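             /* context1 points back at the request so spl_qp_alias_recv_cb can recover it via send_context1. */\r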
2555         p_mad_request->context1 = p_mad_request;\r
2556 \r
2557         /* Save the TID. */\r
2558         p_mad_request->context2 =\r
2559                 (void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );\r
2560 /*\r
2561  * Disable warning about passing unaligned 64-bit value.\r
2562  * The value is always aligned given how buffers are allocated\r
2563  * and given the layout of a MAD.\r
2564  */\r
2565 #pragma warning( push, 3 )\r
2566         al_set_al_tid( &p_mad_hdr->trans_id, 0 );\r
2567 #pragma warning( pop )\r
2568 \r
2569         /*\r
2570          * We need to get a response from the local HCA to this MAD only if this\r
2571          * MAD is not itself a response.\r
2572          */\r
2573         p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||\r
2574                 ( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );\r
2575         p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;\r
2576         p_mad_request->send_opt = IB_SEND_OPT_LOCAL;\r
2577 \r
2578         /* Send the locally addressed MAD request to the CA for processing. */\r
2579         status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );\r
2580 \r
2581         AL_EXIT( AL_DBG_SMI_CB );\r
2582         return status;\r
2583 }\r
2584 \r
2585 \r
2586 \r
2587 /*\r
2588  * Special QP alias send completion callback.\r
2589  */\r
2590 void\r
2591 spl_qp_alias_send_cb(\r
2592         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2593         IN                              void*                                           mad_svc_context,\r
2594         IN                              ib_mad_element_t*                       p_mad_element )\r
2595 {\r
2596         ib_api_status_t                 status;\r
2597 \r
2598         AL_ENTER( AL_DBG_SMI_CB );\r
2599 \r
2600         UNUSED_PARAM( h_mad_svc );\r
2601         UNUSED_PARAM( mad_svc_context );\r
2602         CL_ASSERT( p_mad_element );\r
2603 \r
2604         if( p_mad_element->h_av )\r
2605         {\r
2606                 status = ib_destroy_av( p_mad_element->h_av );\r
2607                 CL_ASSERT( status == IB_SUCCESS );\r
2608         }\r
2609 \r
2610         status = ib_put_mad( p_mad_element );\r
2611         CL_ASSERT( status == IB_SUCCESS );\r
2612 \r
2613         AL_EXIT( AL_DBG_SMI_CB );\r
2614 }\r
2615 \r
2616 \r
2617 \r
2618 /*\r
2619  * Special QP alias receive completion callback.\r
2620  */\r
2621 void\r
2622 spl_qp_alias_recv_cb(\r
2623         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2624         IN                              void*                                           mad_svc_context,\r
2625         IN                              ib_mad_element_t*                       p_mad_response )\r
2626 {\r
2627         spl_qp_svc_t*                   p_spl_qp_svc;\r
2628         ib_mad_element_t*               p_mad_request;\r
2629         ib_mad_t*                               p_mad_hdr;\r
2630         ib_av_attr_t                    av_attr;\r
2631         ib_api_status_t                 status;\r
2632 \r
2633         AL_ENTER( AL_DBG_SMI_CB );\r
2634 \r
2635         CL_ASSERT( mad_svc_context );\r
2636         CL_ASSERT( p_mad_response );\r
2637         CL_ASSERT( p_mad_response->send_context1 );\r
2638 \r
2639         /* Initialize pointers. */\r
2640         p_spl_qp_svc = mad_svc_context;\r
2641         p_mad_request = p_mad_response->send_context1;\r
2642         p_mad_hdr = ib_get_mad_buf( p_mad_response );\r
2643 \r
2644         /* Restore the TID, so it will match on the remote side. */\r
2645 #pragma warning( push, 3 )\r
2646         al_set_al_tid( &p_mad_hdr->trans_id,\r
2647                 (uint32_t)(uintn_t)p_mad_response->send_context2 );\r
2648 #pragma warning( pop )\r
2649 \r
2650         /* Set the remote QP. */\r
2651         p_mad_response->remote_qp       = p_mad_request->remote_qp;\r
2652         p_mad_response->remote_qkey = p_mad_request->remote_qkey;\r
2653 \r
2654         /* Prepare an address vector that reverses the path of the request. */\r
2655         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2656         av_attr.port_num        = p_spl_qp_svc->port_num;\r
2657         av_attr.sl                      = p_mad_request->remote_sl;\r
2658         av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
2659         av_attr.path_bits       = p_mad_request->path_bits;\r
2660         if( p_mad_request->grh_valid )\r
2661         {\r
2662                 cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );\r
2663                 av_attr.grh.src_gid      = p_mad_request->p_grh->dest_gid;\r
2664                 av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;\r
2665                 av_attr.grh_valid = TRUE;\r
2666         }\r
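\r
             /*\r
              * Directed route SMPs that arrived with the permissive DR DLID\r
              * are answered using the permissive LID; all other requests are\r
              * answered at the requester's LID.\r
              */\r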
2667         if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&\r
2668                 ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )\r
2669                 av_attr.dlid = IB_LID_PERMISSIVE;\r
2670         else\r
2671                 av_attr.dlid = p_mad_request->remote_lid;\r
2672 \r
2673         /* Create an address vector. */\r
2674         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2675                 &av_attr, &p_mad_response->h_av );\r
2676 \r
2677         if( status != IB_SUCCESS )\r
2678         {\r
2679                 ib_put_mad( p_mad_response );\r
2680 \r
2681                 AL_EXIT( AL_DBG_SMI_CB );\r
2682                 return;\r
2683         }\r
2684 \r
2685         /* Send the response. */\r
2686         status = ib_send_mad( h_mad_svc, p_mad_response, NULL );\r
2687 \r
2688         if( status != IB_SUCCESS )\r
2689         {\r
2690                 ib_destroy_av( p_mad_response->h_av );\r
2691                 ib_put_mad( p_mad_response );\r
2692         }\r
2693 \r
2694         AL_EXIT( AL_DBG_SMI_CB );\r
2695 }\r
2696 \r
2697 \r
2698 \r
2699 /*\r
2700  * Post receive buffers to a special QP.\r
2701  */\r
2702 static ib_api_status_t\r
2703 spl_qp_svc_post_recvs(\r
2704         IN                              spl_qp_svc_t*   const           p_spl_qp_svc )\r
2705 {\r
2706         ib_mad_element_t*               p_mad_element;\r
2707         al_mad_element_t*               p_al_element;\r
2708         ib_recv_wr_t                    recv_wr;\r
2709         ib_api_status_t                 status = IB_SUCCESS;\r
2710 \r
2711         /* Attempt to post receive buffers up to the max_qp_depth limit. */\r
2712         while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <\r
2713                 (int32_t)p_spl_qp_svc->max_qp_depth )\r
2714         {\r
2715                 /* Get a MAD element from the pool. */\r
2716                 status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,\r
2717                         MAD_BLOCK_SIZE, &p_mad_element );\r
2718 \r
2719                 if( status != IB_SUCCESS ) break;\r
2720 \r
2721                 p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,\r
2722                         element );\r
2723 \r
2724                 /* Build the receive work request. */\r
2725                 recv_wr.p_next   = NULL;\r
2726                 recv_wr.wr_id    = (uintn_t)p_al_element;\r
2727                 recv_wr.num_ds = 1;\r
2728                 recv_wr.ds_array = &p_al_element->grh_ds;\r
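\r
                     /*\r
                      * wr_id carries the AL element pointer so that receive\r
                      * completion processing can recover the element, and the\r
                      * single data segment is expected to cover the GRH\r
                      * followed by the MAD buffer.\r
                      */\r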
2729 \r
2730                 /* Queue the receive on the service tracking list. */\r
2731                 cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,\r
2732                         &p_al_element->list_item );\r
2733 \r
2734                 /* Post the receive. */\r
2735                 status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );\r
2736 \r
2737                 if( status != IB_SUCCESS )\r
2738                 {\r
2739                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2740                                 ("Failed to post receive %016I64x\n",\r
2741                                 (LONG_PTR)p_al_element) );\r
2742                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2743                                 &p_al_element->list_item );\r
2744 \r
2745                         ib_put_mad( p_mad_element );\r
2746                         break;\r
2747                 }\r
2748         }\r
2749 \r
2750         return status;\r
2751 }\r
2752 \r
2753 \r
2754 \r
2755 /*\r
2756  * Special QP service asynchronous event callback.\r
2757  */\r
2758 void\r
2759 spl_qp_svc_event_cb(\r
2760         IN                              ib_async_event_rec_t            *p_event_rec )\r
2761 {\r
2762         spl_qp_svc_t*                   p_spl_qp_svc;\r
2763 \r
2764         AL_ENTER( AL_DBG_SMI_CB );\r
2765 \r
2766         CL_ASSERT( p_event_rec );\r
2767         CL_ASSERT( p_event_rec->context );\r
2768 \r
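             /*\r
              * A send queue drained event is informational and needs no\r
              * recovery; any other asynchronous event resets the special QP\r
              * service.\r
              */\r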
2769         if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
2770         {\r
2771                 AL_EXIT( AL_DBG_SMI_CB );\r
2772                 return;\r
2773         }\r
2774 \r
2775         p_spl_qp_svc = p_event_rec->context;\r
2776 \r
2777         spl_qp_svc_reset( p_spl_qp_svc );\r
2778 \r
2779         AL_EXIT( AL_DBG_SMI_CB );\r
2780 }\r
2781 \r
2782 \r
2783 \r
2784 /*\r
2785  * Special QP service reset.\r
2786  */\r
2787 void\r
2788 spl_qp_svc_reset(\r
2789         IN                              spl_qp_svc_t*                           p_spl_qp_svc )\r
2790 {\r
2791         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2792 \r
2793         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2794         {\r
2795                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2796                 return;\r
2797         }\r
2798 \r
2799         /* Change the special QP service to the error state. */\r
2800         p_spl_qp_svc->state = SPL_QP_ERROR;\r
2801 \r
2802         /* Flag the service as in use by the asynchronous processing thread. */\r
2803         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2804 \r
2805         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2806 \r
2807         /* Queue an asynchronous processing item to reset the special QP. */\r
2808         cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );\r
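\r
             /* The queued item is processed by spl_qp_svc_reset_cb. */\r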
2809 }\r
2810 \r
2811 \r
2812 \r
2813 /*\r
2814  * Asynchronous processing thread callback to reset the special QP service.\r
2815  */\r
2816 void\r
2817 spl_qp_svc_reset_cb(\r
2818         IN                              cl_async_proc_item_t*           p_item )\r
2819 {\r
2820         spl_qp_svc_t*                   p_spl_qp_svc;\r
2821         cl_list_item_t*                 p_list_item;\r
2822         ib_wc_t                                 wc;\r
2823         ib_wc_t*                                p_free_wc;\r
2824         ib_wc_t*                                p_done_wc;\r
2825         al_mad_wr_t*                    p_mad_wr;\r
2826         al_mad_element_t*               p_al_mad;\r
2827         ib_qp_mod_t                             qp_mod;\r
2828         ib_api_status_t                 status;\r
2829 \r
2830         AL_ENTER( AL_DBG_SMI_CB );\r
2831 \r
2832         CL_ASSERT( p_item );\r
2833         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );\r
2834 \r
2835         /* Wait here until the special QP service is only in use by this thread. */\r
2836         while( p_spl_qp_svc->in_use_cnt != 1 )\r
2837         {\r
2838                 cl_thread_suspend( 0 );\r
2839         }\r
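\r
             /*\r
              * spl_qp_svc_reset raised in_use_cnt before queuing this item;\r
              * waiting for the count to fall back to one ensures no other\r
              * path still has the service flagged as in use before the QP is\r
              * reset and rebuilt.\r
              */\r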
2840 \r
2841         /* Change the QP to the RESET state. */\r
2842         cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );\r
2843         qp_mod.req_state = IB_QPS_RESET;\r
2844 \r
2845         status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );\r
2846         CL_ASSERT( status == IB_SUCCESS );\r
2847 \r
2848         /* Return receive MAD elements to the pool. */\r
2849         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
2850                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
2851                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
2852         {\r
2853                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
2854 \r
2855                 status = ib_put_mad( &p_al_mad->element );\r
2856                 CL_ASSERT( status == IB_SUCCESS );\r
2857         }\r
2858 \r
2859         /* Re-initialize the QP. */\r
2860         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
2861         CL_ASSERT( status == IB_SUCCESS );\r
2862 \r
2863         /* Poll to remove any remaining send completions from the CQ. */\r
2864         do\r
2865         {\r
2866                 cl_memclr( &wc, sizeof( ib_wc_t ) );\r
2867                 p_free_wc = &wc;\r
2868                 status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );\r
2869 \r
2870         } while( status == IB_SUCCESS );\r
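\r
             /*\r
              * The drained completions are simply discarded; the MAD work\r
              * requests they describe are presumed to still be on the send\r
              * queue and are re-queued to the QP below.\r
              */\r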
2871 \r
2872         /* Post receive buffers. */\r
2873         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2874 \r
2875         /*\r
2876          * Re-queue any outstanding MAD send operations.\r
2877          * Work from tail to head to maintain the request order.\r
2878          */\r
2879         for( p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue );\r
2880                  p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );\r
2881                  p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue ) )\r
2882         {\r
2883                 p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
2884                 special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );\r
2885         }\r
2886 \r
2887         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2888         if( p_spl_qp_svc->state == SPL_QP_ERROR )\r
2889         {\r
2890                 /* The QP is ready.  Change the state. */\r
2891                 p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
2892                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2893 \r
2894                 /* Re-arm the CQs. */\r
2895                 status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );\r
2896                 CL_ASSERT( status == IB_SUCCESS );\r
2897                 status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );\r
2898                 CL_ASSERT( status == IB_SUCCESS );\r
2899 \r
2900                 /* Resume send processing. */\r
2901                 special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2902         }\r
2903         else\r
2904         {\r
2905                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2906         }\r
2907 \r
2908         /* No longer in use by the asynchronous processing thread. */\r
2909         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2910 \r
2911         AL_EXIT( AL_DBG_SMI_CB );\r
2912 }\r
2913 \r
2914 \r
2915 \r
2916 /*\r
2917  * Special QP alias asynchronous event callback.\r
2918  */\r
2919 void\r
2920 spl_qp_alias_event_cb(\r
2921         IN                              ib_async_event_rec_t            *p_event_rec )\r
2922 {\r
2923         UNUSED_PARAM( p_event_rec );\r
2924 }\r
2925 \r
2926 \r
2927 \r
2928 /*\r
2929  * Acquire the SMI dispatcher for the given port.\r
2930  */\r
2931 ib_api_status_t\r
2932 acquire_smi_disp(\r
2933         IN              const   ib_net64_t                                      port_guid,\r
2934                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2935 {\r
2936         CL_ASSERT( gp_spl_qp_mgr );\r
2937         return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );\r
2938 }\r
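\r
     /*\r
      * Typical use (sketch, names illustrative): a client acquires the\r
      * dispatcher for a port and later drops the reference taken on its\r
      * behalf, e.g.\r
      *\r
      *      al_mad_disp_handle_t    h_disp;\r
      *\r
      *      if( acquire_smi_disp( port_guid, &h_disp ) == IB_SUCCESS )\r
      *      {\r
      *              ...use the dispatcher...\r
      *              deref_al_obj( &h_disp->obj );\r
      *      }\r
      */\r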
2939 \r
2940 \r
2941 \r
2942 /*\r
2943  * Acquire the GSI dispatcher for the given port.\r
2944  */\r
2945 ib_api_status_t\r
2946 acquire_gsi_disp(\r
2947         IN              const   ib_net64_t                                      port_guid,\r
2948                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2949 {\r
2950         CL_ASSERT( gp_spl_qp_mgr );\r
2951         return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );\r
2952 }\r
2953 \r
2954 \r
2955 \r
2956 /*\r
2957  * Acquire the service dispatcher for the given port.\r
2958  */\r
2959 ib_api_status_t\r
2960 acquire_svc_disp(\r
2961         IN              const   cl_qmap_t* const                        p_svc_map,\r
2962         IN              const   ib_net64_t                                      port_guid,\r
2963                 OUT                     al_mad_disp_handle_t            *ph_mad_disp )\r
2964 {\r
2965         cl_map_item_t*                  p_svc_item;\r
2966         spl_qp_svc_t*                   p_spl_qp_svc;\r
2967 \r
2968         AL_ENTER( AL_DBG_SMI );\r
2969 \r
2970         CL_ASSERT( p_svc_map );\r
2971         CL_ASSERT( gp_spl_qp_mgr );\r
2972 \r
2973         /* Search for the SMI or GSI service for the given port. */\r
2974         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
2975         p_svc_item = cl_qmap_get( p_svc_map, port_guid );\r
2976         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
2977         if( p_svc_item == cl_qmap_end( p_svc_map ) )\r
2978         {\r
2979                 /* The port does not have an active agent. */\r
2980                 AL_EXIT( AL_DBG_SMI );\r
2981                 return IB_INVALID_GUID;\r
2982         }\r
2983 \r
2984         p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );\r
2985 \r
2986         /* Found a match.  Get MAD dispatcher handle. */\r
2987         *ph_mad_disp = p_spl_qp_svc->h_mad_disp;\r
2988 \r
2989         /* Reference the MAD dispatcher on behalf of the client. */\r
2990         ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );\r
2991 \r
2992         AL_EXIT( AL_DBG_SMI );\r
2993         return IB_SUCCESS;\r
2994 }\r
2995 \r
2996 \r
2997 \r
2998 /*\r
2999  * Force a poll for CA attribute changes.\r
3000  */\r
3001 void\r
3002 force_smi_poll(\r
3003         void )\r
3004 {\r
3005         AL_ENTER( AL_DBG_SMI_CB );\r
3006 \r
3007         /*\r
3008          * Rather than waiting for the poll timer to expire, invoke the\r
3009          * timer callback directly to avoid a thread context switch.\r
3010          */\r
3011         smi_poll_timer_cb( gp_spl_qp_mgr );\r
3012 \r
3013         AL_EXIT( AL_DBG_SMI_CB );\r
3014 }\r
3015 \r
3016 \r
3017 \r
3018 /*\r
3019  * Poll for CA port attribute changes.\r
3020  */\r
3021 void\r
3022 smi_poll_timer_cb(\r
3023         IN                              void*                                           context )\r
3024 {\r
3025         cl_status_t                     cl_status;\r
3026 \r
3027         AL_ENTER( AL_DBG_SMI_CB );\r
3028 \r
3029         CL_ASSERT( context );\r
3030         CL_ASSERT( gp_spl_qp_mgr == context );\r
3031         UNUSED_PARAM( context );\r
3032 \r
3033         /*\r
3034          * Scan for changes on the local HCAs.  Since the PnP manager has its\r
3035          * own thread for processing changes, we kick off that thread in\r
3036          * parallel with reposting receive buffers to the SQP agents.\r
3037          */\r
3038         pnp_poll();\r
3039 \r
3040         /*\r
3041          * force_smi_poll may invoke this callback directly while the timer\r
3042          * is expiring on its own thread.  Hold the manager lock so that\r
3043          * reposting receive buffers and restarting the poll timer are\r
3044          * serialized between the two callers.\r
3045          */\r
3046         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
3047 \r
3048         /* Repost receive buffers to each special QP agent. */\r
3049         cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,\r
3050                 gp_spl_qp_mgr );\r
3051 \r
3052         /* Determine if there are any special QP agents to poll. */\r
3053         if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )\r
3054         {\r
3055                 /* Restart the polling timer. */\r
3056                 cl_status =\r
3057                         cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
3058                 CL_ASSERT( cl_status == CL_SUCCESS );\r
3059         }\r
3060         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
3061 \r
3062         AL_EXIT( AL_DBG_SMI_CB );\r
3063 }\r
3064 \r
3065 \r
3066 \r
3067 /*\r
3068  * Post receive buffers to a special QP.\r
3069  */\r
3070 void\r
3071 smi_post_recvs(\r
3072         IN                              cl_list_item_t* const           p_list_item,\r
3073         IN                              void*                                           context )\r
3074 {\r
3075         al_obj_t*                               p_obj;\r
3076         spl_qp_svc_t*                   p_spl_qp_svc;\r
3077 \r
3078         AL_ENTER( AL_DBG_SMI_CB );\r
3079 \r
3080         CL_ASSERT( p_list_item );\r
3081         UNUSED_PARAM( context );\r
3082 \r
3083         p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
3084         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
3085 \r
3086         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
3087         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
3088         {\r
3089                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
                     AL_EXIT( AL_DBG_SMI_CB );\r
3090                 return;\r
3091         }\r
3092 \r
3093         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
3094         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3095 \r
3096         AL_EXIT( AL_DBG_SMI_CB );\r
3097 }\r