[IBAL] Fix locking around special QP service send and receive queues.
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include <iba/ib_al.h>\r
35 #include <complib/cl_timer.h>\r
36 \r
37 #include "ib_common.h"\r
38 #include "al_common.h"\r
39 #include "al_debug.h"\r
40 #if defined(EVENT_TRACING)\r
41 #ifdef offsetof\r
42 #undef offsetof\r
43 #endif\r
44 #include "al_smi.tmh"\r
45 #endif\r
46 #include "al_verbs.h"\r
47 #include "al_mgr.h"\r
48 #include "al_pnp.h"\r
49 #include "al_qp.h"\r
50 #include "al_smi.h"\r
51 #include "al_av.h"\r
52 \r
53 \r
54 extern char                                             node_desc[IB_NODE_DESCRIPTION_SIZE];\r
55 \r
56 #define SMI_POLL_INTERVAL                       20000           /* Milliseconds */\r
57 #define LOCAL_MAD_TIMEOUT                       50                      /* Milliseconds */\r
58 #define DEFAULT_QP0_DEPTH                       256\r
59 #define DEFAULT_QP1_DEPTH                       1024\r
60 \r
61 uint32_t                                g_smi_poll_interval =   SMI_POLL_INTERVAL;\r
62 spl_qp_mgr_t*                   gp_spl_qp_mgr = NULL;\r
63 \r
64 \r
65 /*\r
66  * Function prototypes.\r
67  */\r
68 void\r
69 destroying_spl_qp_mgr(\r
70         IN                              al_obj_t*                                       p_obj );\r
71 \r
72 void\r
73 free_spl_qp_mgr(\r
74         IN                              al_obj_t*                                       p_obj );\r
75 \r
76 ib_api_status_t\r
77 spl_qp0_agent_pnp_cb(\r
78         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
79 \r
80 ib_api_status_t\r
81 spl_qp1_agent_pnp_cb(\r
82         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
83 \r
84 ib_api_status_t\r
85 spl_qp_agent_pnp(\r
86         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
87         IN                              ib_qp_type_t                            qp_type );\r
88 \r
89 ib_api_status_t\r
90 create_spl_qp_svc(\r
91         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
92         IN              const   ib_qp_type_t                            qp_type );\r
93 \r
94 void\r
95 destroying_spl_qp_svc(\r
96         IN                              al_obj_t*                                       p_obj );\r
97 \r
98 void\r
99 free_spl_qp_svc(\r
100         IN                              al_obj_t*                                       p_obj );\r
101 \r
102 void\r
103 spl_qp_svc_lid_change(\r
104         IN                              al_obj_t*                                       p_obj,\r
105         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
106 \r
107 ib_api_status_t\r
108 remote_mad_send(\r
109         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
110         IN                              al_mad_wr_t* const                      p_mad_wr );\r
111 \r
112 static ib_api_status_t\r
113 local_mad_send(\r
114         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
115         IN                              al_mad_wr_t* const                      p_mad_wr );\r
116 \r
117 static ib_api_status_t\r
118 loopback_mad(\r
119         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
120         IN                              al_mad_wr_t* const                      p_mad_wr );\r
121 \r
122 static ib_api_status_t\r
123 process_subn_mad(\r
124         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
125         IN                              al_mad_wr_t* const                      p_mad_wr );\r
126 \r
127 static ib_api_status_t\r
128 fwd_local_mad(\r
129         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
130         IN                              al_mad_wr_t* const                      p_mad_wr );\r
131 \r
132 void\r
133 send_local_mad_cb(\r
134         IN                              cl_async_proc_item_t*           p_item );\r
135 \r
136 void\r
137 spl_qp_send_comp_cb(\r
138         IN              const   ib_cq_handle_t                          h_cq,\r
139         IN                              void                                            *cq_context );\r
140 \r
141 void\r
142 spl_qp_recv_comp_cb(\r
143         IN              const   ib_cq_handle_t                          h_cq,\r
144         IN                              void                                            *cq_context );\r
145 \r
146 void\r
147 spl_qp_comp(\r
148         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
149         IN              const   ib_cq_handle_t                          h_cq,\r
150         IN                              ib_wc_type_t                            wc_type );\r
151 \r
152 ib_api_status_t\r
153 process_mad_recv(\r
154         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
155         IN                              ib_mad_element_t*                       p_mad_element );\r
156 \r
157 mad_route_t\r
158 route_recv_smp(\r
159         IN                              ib_mad_element_t*                       p_mad_element );\r
160 \r
161 mad_route_t\r
162 route_recv_smp_attr(\r
163         IN                              ib_mad_element_t*                       p_mad_element );\r
164 \r
165 mad_route_t\r
166 route_recv_dm_mad(\r
167         IN                              ib_mad_element_t*                       p_mad_element );\r
168 \r
169 mad_route_t\r
170 route_recv_gmp(\r
171         IN                              ib_mad_element_t*                       p_mad_element );\r
172 \r
173 mad_route_t\r
174 route_recv_gmp_attr(\r
175         IN                              ib_mad_element_t*                       p_mad_element );\r
176 \r
177 ib_api_status_t\r
178 forward_sm_trap(\r
179         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
180         IN                              ib_mad_element_t*                       p_mad_element );\r
181 \r
182 ib_api_status_t\r
183 recv_local_mad(\r
184         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
185         IN                              ib_mad_element_t*                       p_mad_request );\r
186 \r
187 void\r
188 spl_qp_alias_send_cb(\r
189         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
190         IN                              void                                            *mad_svc_context,\r
191         IN                              ib_mad_element_t                        *p_mad_element );\r
192 \r
193 void\r
194 spl_qp_alias_recv_cb(\r
195         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
196         IN                              void                                            *mad_svc_context,\r
197         IN                              ib_mad_element_t                        *p_mad_response );\r
198 \r
199 static ib_api_status_t\r
200 spl_qp_svc_post_recvs(\r
201         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
202 \r
203 void\r
204 spl_qp_svc_event_cb(\r
205         IN                              ib_async_event_rec_t            *p_event_rec );\r
206 \r
207 void\r
208 spl_qp_alias_event_cb(\r
209         IN                              ib_async_event_rec_t            *p_event_rec );\r
210 \r
211 void\r
212 spl_qp_svc_reset(\r
213         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
214 \r
215 void\r
216 spl_qp_svc_reset_cb(\r
217         IN                              cl_async_proc_item_t*           p_item );\r
218 \r
219 ib_api_status_t\r
220 acquire_svc_disp(\r
221         IN              const   cl_qmap_t* const                        p_svc_map,\r
222         IN              const   ib_net64_t                                      port_guid,\r
223                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
224 \r
225 void\r
226 smi_poll_timer_cb(\r
227         IN                              void*                                           context );\r
228 \r
229 void\r
230 smi_post_recvs(\r
231         IN                              cl_list_item_t* const           p_list_item,\r
232         IN                              void*                                           context );\r
233 \r
234 #if defined( CL_USE_MUTEX )\r
235 void\r
236 spl_qp_send_async_cb(\r
237         IN                              cl_async_proc_item_t*           p_item );\r
238 \r
239 void\r
240 spl_qp_recv_async_cb(\r
241         IN                              cl_async_proc_item_t*           p_item );\r
242 #endif\r
243 \r
/*
 * Create the special QP manager.
 *
 * Allocates and initializes the single global SMI/GSI manager object
 * (gp_spl_qp_mgr), attaches it to p_parent_obj, initializes the SMI
 * polling timer, and registers for port PnP events on behalf of QP0
 * and QP1.  On any failure the partially built manager is torn down
 * through its AL object destroy path and an error status is returned.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the SMI and GSI service lookup maps (keyed by port GUID). */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* init_al_obj failed, so pfn_destroy is not usable yet; free directly. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the second ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
363 \r
364 \r
365 \r
366 /*\r
367  * Pre-destroy the special QP manager.\r
368  */\r
369 void\r
370 destroying_spl_qp_mgr(\r
371         IN                              al_obj_t*                                       p_obj )\r
372 {\r
373         ib_api_status_t                 status;\r
374 \r
375         CL_ASSERT( p_obj );\r
376         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
377         UNUSED_PARAM( p_obj );\r
378 \r
379         /* Deregister for port PnP events for QP0. */\r
380         if( gp_spl_qp_mgr->h_qp0_pnp )\r
381         {\r
382                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
383                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
384                 CL_ASSERT( status == IB_SUCCESS );\r
385         }\r
386 \r
387         /* Deregister for port PnP events for QP1. */\r
388         if( gp_spl_qp_mgr->h_qp1_pnp )\r
389         {\r
390                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
391                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
392                 CL_ASSERT( status == IB_SUCCESS );\r
393         }\r
394 \r
395         /* Destroy the SMI polling timer. */\r
396         cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
397 }\r
398 \r
399 \r
400 \r
401 /*\r
402  * Free the special QP manager.\r
403  */\r
404 void\r
405 free_spl_qp_mgr(\r
406         IN                              al_obj_t*                                       p_obj )\r
407 {\r
408         CL_ASSERT( p_obj );\r
409         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
410         UNUSED_PARAM( p_obj );\r
411 \r
412         destroy_al_obj( &gp_spl_qp_mgr->obj );\r
413         cl_free( gp_spl_qp_mgr );\r
414         gp_spl_qp_mgr = NULL;\r
415 }\r
416 \r
417 \r
418 \r
419 /*\r
420  * Special QP0 agent PnP event callback.\r
421  */\r
422 ib_api_status_t\r
423 spl_qp0_agent_pnp_cb(\r
424         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
425 {\r
426         ib_api_status_t status;\r
427         AL_ENTER( AL_DBG_SMI_CB );\r
428 \r
429         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
430 \r
431         AL_EXIT( AL_DBG_SMI_CB );\r
432         return status;\r
433 }\r
434 \r
435 \r
436 \r
437 /*\r
438  * Special QP1 agent PnP event callback.\r
439  */\r
440 ib_api_status_t\r
441 spl_qp1_agent_pnp_cb(\r
442         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
443 {\r
444         ib_api_status_t status;\r
445         AL_ENTER( AL_DBG_SMI_CB );\r
446 \r
447         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
448 \r
449         AL_EXIT( AL_DBG_SMI );\r
450         return status;\r
451 }\r
452 \r
453 \r
454 \r
455 /*\r
456  * Special QP agent PnP event callback.\r
457  */\r
458 ib_api_status_t\r
459 spl_qp_agent_pnp(\r
460         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
461         IN                              ib_qp_type_t                            qp_type )\r
462 {\r
463         ib_api_status_t                 status;\r
464         al_obj_t*                               p_obj;\r
465 \r
466         AL_ENTER( AL_DBG_SMI_CB );\r
467 \r
468         CL_ASSERT( p_pnp_rec );\r
469         p_obj = p_pnp_rec->context;\r
470 \r
471         /* Dispatch based on the PnP event type. */\r
472         switch( p_pnp_rec->pnp_event )\r
473         {\r
474         case IB_PNP_PORT_ADD:\r
475                 CL_ASSERT( !p_obj );\r
476                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
477                 break;\r
478 \r
479         case IB_PNP_PORT_REMOVE:\r
480                 CL_ASSERT( p_obj );\r
481                 ref_al_obj( p_obj );\r
482                 p_obj->pfn_destroy( p_obj, NULL );\r
483                 status = IB_SUCCESS;\r
484                 break;\r
485 \r
486         case IB_PNP_LID_CHANGE:\r
487                 CL_ASSERT( p_obj );\r
488                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
489                 status = IB_SUCCESS;\r
490                 break;\r
491 \r
492         default:\r
493                 /* All other events are ignored. */\r
494                 status = IB_SUCCESS;\r
495                 break;\r
496         }\r
497 \r
498         AL_EXIT( AL_DBG_SMI );\r
499         return status;\r
500 }\r
501 \r
502 \r
503 \r
504 /*\r
505  * Create a special QP service.\r
506  */\r
507 ib_api_status_t\r
508 create_spl_qp_svc(\r
509         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
510         IN              const   ib_qp_type_t                            qp_type )\r
511 {\r
512         cl_status_t                             cl_status;\r
513         spl_qp_svc_t*                   p_spl_qp_svc;\r
514         ib_ca_handle_t                  h_ca;\r
515         ib_cq_create_t                  cq_create;\r
516         ib_qp_create_t                  qp_create;\r
517         ib_qp_attr_t                    qp_attr;\r
518         ib_mad_svc_t                    mad_svc;\r
519         ib_api_status_t                 status;\r
520 \r
521         AL_ENTER( AL_DBG_SMI );\r
522 \r
523         CL_ASSERT( p_pnp_rec );\r
524 \r
525         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
526         {\r
527                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
528                 return IB_INVALID_PARAMETER;\r
529         }\r
530 \r
531         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
532         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
533         CL_ASSERT( p_pnp_rec->p_port_attr );\r
534 \r
535         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
536         if( !p_spl_qp_svc )\r
537         {\r
538                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
539                         ("IB_INSUFFICIENT_MEMORY\n") );\r
540                 return IB_INSUFFICIENT_MEMORY;\r
541         }\r
542 \r
543         /* Tie the special QP service to the port by setting the port number. */\r
544         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
545         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
546         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
547 \r
548         /* Initialize the send and receive queues. */\r
549         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
550         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
551 \r
552 #if defined( CL_USE_MUTEX )\r
553         /* Initialize async callbacks and flags for send/receive processing. */\r
554         p_spl_qp_svc->send_async_queued = FALSE;\r
555         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
556         p_spl_qp_svc->recv_async_queued = FALSE;\r
557         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
558 #endif\r
559 \r
560         /* Initialize the async callback function to process local sends. */\r
561         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
562 \r
563         /* Initialize the async callback function to reset the QP on error. */\r
564         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
565 \r
566         /* Construct the special QP service object. */\r
567         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
568 \r
569         /* Initialize the special QP service object. */\r
570         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
571                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
572         if( status != IB_SUCCESS )\r
573         {\r
574                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
575                 return status;\r
576         }\r
577 \r
578         /* Attach the special QP service to the parent object. */\r
579         status = attach_al_obj(\r
580                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
581         if( status != IB_SUCCESS )\r
582         {\r
583                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
584                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
585                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
586                 return status;\r
587         }\r
588 \r
589         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
590         CL_ASSERT( h_ca );\r
591         if( !h_ca )\r
592         {\r
593                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
594                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
595                 return IB_INVALID_GUID;\r
596         }\r
597 \r
598         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
599 \r
600         /* Determine the maximum queue depth of the QP and CQs. */\r
601         p_spl_qp_svc->max_qp_depth =\r
602                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
603                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
604                 p_pnp_rec->p_ca_attr->max_wrs :\r
605                 p_pnp_rec->p_ca_attr->max_cqes;\r
606 \r
607         /* Compare this maximum to the default special queue depth. */\r
608         if( ( qp_type == IB_QPT_QP0 ) &&\r
609                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
610                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
611         if( ( qp_type == IB_QPT_QP1 ) &&\r
612                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
613                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
614 \r
615         /* Create the send CQ. */\r
616         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
617         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
618         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
619 \r
620         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
621                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
622 \r
623         if( status != IB_SUCCESS )\r
624         {\r
625                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
626                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
627                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
628                 return status;\r
629         }\r
630 \r
631         /* Reference the special QP service on behalf of ib_create_cq. */\r
632         ref_al_obj( &p_spl_qp_svc->obj );\r
633 \r
634         /* Check the result of the creation request. */\r
635         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
636         {\r
637                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
638                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
639                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
640                 return IB_INSUFFICIENT_RESOURCES;\r
641         }\r
642 \r
643         /* Create the receive CQ. */\r
644         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
645         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
646         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
647 \r
648         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
649                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
650 \r
651         if( status != IB_SUCCESS )\r
652         {\r
653                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
654                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
655                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
656                 return status;\r
657         }\r
658 \r
659         /* Reference the special QP service on behalf of ib_create_cq. */\r
660         ref_al_obj( &p_spl_qp_svc->obj );\r
661 \r
662         /* Check the result of the creation request. */\r
663         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
664         {\r
665                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
666                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
667                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
668                 return IB_INSUFFICIENT_RESOURCES;\r
669         }\r
670 \r
671         /* Create the special QP. */\r
672         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
673         qp_create.qp_type = qp_type;\r
674         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
675         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
676         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
677         qp_create.rq_sge = 1;\r
678         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
679         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
680         qp_create.sq_signaled = TRUE;\r
681 \r
682         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
683                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
684                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
685 \r
686         if( status != IB_SUCCESS )\r
687         {\r
688                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
689                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
690                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
691                 return status;\r
692         }\r
693 \r
694         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
695         ref_al_obj( &p_spl_qp_svc->obj );\r
696 \r
697         /* Check the result of the creation request. */\r
698         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
699         if( status != IB_SUCCESS )\r
700         {\r
701                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
702                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
703                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
704                 return status;\r
705         }\r
706 \r
707         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
708                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
709                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
710         {\r
711                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
712                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
713                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
714                 return IB_INSUFFICIENT_RESOURCES;\r
715         }\r
716 \r
717         /* Initialize the QP for use. */\r
718         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
719         if( status != IB_SUCCESS )\r
720         {\r
721                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
722                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
723                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
724                 return status;\r
725         }\r
726 \r
727         /* Post receive buffers. */\r
728         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
729         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
730         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
731         if( status != IB_SUCCESS )\r
732         {\r
733                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
734                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
735                         ("spl_qp_svc_post_recvs failed, %s\n",\r
736                         ib_get_err_str( status ) ) );\r
737                 return status;\r
738         }\r
739 \r
740         /* Create the MAD dispatcher. */\r
741         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
742                 &p_spl_qp_svc->h_mad_disp );\r
743         if( status != IB_SUCCESS )\r
744         {\r
745                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
746                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
747                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
748                 return status;\r
749         }\r
750 \r
751         /*\r
752          * Add this service to the special QP manager lookup lists.\r
753          * The service must be added to allow the creation of a QP alias.\r
754          */\r
755         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
756         if( qp_type == IB_QPT_QP0 )\r
757         {\r
758                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
759                         &p_spl_qp_svc->map_item );\r
760         }\r
761         else\r
762         {\r
763                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
764                         &p_spl_qp_svc->map_item );\r
765         }\r
766         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
767 \r
768         /*\r
769          * If the CA does not support HW agents, create a QP alias and register\r
770          * a MAD service for sending responses from the local MAD interface.\r
771          */\r
772         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
773         {\r
774                 /* Create a QP alias. */\r
775                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
776                 qp_create.qp_type =\r
777                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
778                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
779                 qp_create.sq_sge                = 1;\r
780                 qp_create.sq_signaled   = TRUE;\r
781 \r
782                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
783                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
784                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
785                         &p_spl_qp_svc->h_qp_alias );\r
786 \r
787                 if (status != IB_SUCCESS)\r
788                 {\r
789                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
790                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
791                                 ("ib_get_spl_qp alias failed, %s\n",\r
792                                 ib_get_err_str( status ) ) );\r
793                         return status;\r
794                 }\r
795 \r
796                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
797                 ref_al_obj( &p_spl_qp_svc->obj );\r
798 \r
799                 /* Register a MAD service for sends. */\r
800                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
801                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
802                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
803                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
804 \r
805                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
806                         &p_spl_qp_svc->h_mad_svc );\r
807 \r
808                 if( status != IB_SUCCESS )\r
809                 {\r
810                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
811                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
812                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
813                         return status;\r
814                 }\r
815         }\r
816 \r
817         /* Set the context of the PnP event to this child object. */\r
818         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
819 \r
820         /* The QP is ready.  Change the state. */\r
821         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
822 \r
823         /* Force a completion callback to rearm the CQs. */\r
824         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
825         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
826 \r
827         /* Start the polling thread timer. */\r
828         if( g_smi_poll_interval )\r
829         {\r
830                 cl_status =\r
831                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
832 \r
833                 if( cl_status != CL_SUCCESS )\r
834                 {\r
835                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
836                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
837                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
838                         return ib_convert_cl_status( cl_status );\r
839                 }\r
840         }\r
841 \r
842         /* Release the reference taken in init_al_obj. */\r
843         deref_al_obj( &p_spl_qp_svc->obj );\r
844 \r
845         AL_EXIT( AL_DBG_SMI );\r
846         return IB_SUCCESS;\r
847 }\r
848 \r
849 \r
850 \r
851 /*\r
852  * Return a work completion to the MAD dispatcher for the specified MAD.\r
853  */\r
854 static void\r
855 __complete_send_mad(\r
856         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
857         IN                              al_mad_wr_t* const                      p_mad_wr,\r
858         IN              const   ib_wc_status_t                          wc_status )\r
859 {\r
860         ib_wc_t                 wc;\r
861 \r
862         /* Construct a send work completion. */\r
863         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
864         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
865         wc.wc_type      = IB_WC_SEND;\r
866         wc.status       = wc_status;\r
867 \r
868         /* Set the send size if we were successful with the send. */\r
869         if( wc_status == IB_WCS_SUCCESS )\r
870                 wc.length = MAD_BLOCK_SIZE;\r
871 \r
872         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
873 }\r
874 \r
875 \r
876 \r
877 /*\r
878  * Pre-destroy a special QP service.\r
879  */\r
880 void\r
881 destroying_spl_qp_svc(\r
882         IN                              al_obj_t*                                       p_obj )\r
883 {\r
884         spl_qp_svc_t*                   p_spl_qp_svc;\r
885         cl_list_item_t*                 p_list_item;\r
886         al_mad_wr_t*                    p_mad_wr;\r
887 \r
888         ib_api_status_t                 status;\r
889 \r
890         AL_ENTER( AL_DBG_SMI );\r
891 \r
892         CL_ASSERT( p_obj );\r
893         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
894 \r
895         /* Change the state to prevent processing new send requests. */\r
896         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
897         p_spl_qp_svc->state = SPL_QP_DESTROYING;\r
898         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
899 \r
900         /* Wait here until the special QP service is no longer in use. */\r
901         while( p_spl_qp_svc->in_use_cnt )\r
902         {\r
903                 cl_thread_suspend( 0 );\r
904         }\r
905 \r
906         /* Destroy the special QP. */\r
907         if( p_spl_qp_svc->h_qp )\r
908         {\r
909                 /* If present, remove the special QP service from the tracking map. */\r
910                 cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
911                 if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )\r
912                 {\r
913                         cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );\r
914                 }\r
915                 else\r
916                 {\r
917                         cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );\r
918                 }\r
919                 cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
920 \r
921                 status = ib_destroy_qp( p_spl_qp_svc->h_qp,\r
922                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
923                 CL_ASSERT( status == IB_SUCCESS );\r
924 \r
925                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
926 \r
927                 /* Complete any outstanding MAD sends operations as "flushed". */\r
928                 for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );\r
929                          p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );\r
930                          p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )\r
931                 {\r
932                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
933                         p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
934                         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
935                                 IB_WCS_WR_FLUSHED_ERR );\r
936                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
937                 }\r
938 \r
939                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
940                 /* Receive MAD elements are returned to the pool by the free routine. */\r
941         }\r
942 \r
943         /* Destroy the special QP alias and CQs. */\r
944         if( p_spl_qp_svc->h_qp_alias )\r
945         {\r
946                 status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,\r
947                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
948                 CL_ASSERT( status == IB_SUCCESS );\r
949         }\r
950         if( p_spl_qp_svc->h_send_cq )\r
951         {\r
952                 status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,\r
953                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
954                 CL_ASSERT( status == IB_SUCCESS );\r
955         }\r
956         if( p_spl_qp_svc->h_recv_cq )\r
957         {\r
958                 status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,\r
959                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
960                 CL_ASSERT( status == IB_SUCCESS );\r
961         }\r
962 \r
963         AL_EXIT( AL_DBG_SMI );\r
964 }\r
965 \r
966 \r
967 \r
968 /*\r
969  * Free a special QP service.\r
970  */\r
971 void\r
972 free_spl_qp_svc(\r
973         IN                              al_obj_t*                                       p_obj )\r
974 {\r
975         spl_qp_svc_t*                   p_spl_qp_svc;\r
976         cl_list_item_t*                 p_list_item;\r
977         al_mad_element_t*               p_al_mad;\r
978         ib_api_status_t                 status;\r
979 \r
980         AL_ENTER( AL_DBG_SMI );\r
981 \r
982         CL_ASSERT( p_obj );\r
983         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
984 \r
985         /* Dereference the CA. */\r
986         if( p_spl_qp_svc->obj.p_ci_ca )\r
987                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
988 \r
989         /* Return receive MAD elements to the pool. */\r
990         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
991                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
992                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
993         {\r
994                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
995 \r
996                 status = ib_put_mad( &p_al_mad->element );\r
997                 CL_ASSERT( status == IB_SUCCESS );\r
998         }\r
999 \r
1000         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
1001 \r
1002         destroy_al_obj( &p_spl_qp_svc->obj );\r
1003         cl_free( p_spl_qp_svc );\r
1004 \r
1005         AL_EXIT( AL_DBG_SMI );\r
1006 }\r
1007 \r
1008 \r
1009 \r
1010 /*\r
1011  * Update the base LID of a special QP service.\r
1012  */\r
1013 void\r
1014 spl_qp_svc_lid_change(\r
1015         IN                              al_obj_t*                                       p_obj,\r
1016         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1017 {\r
1018         spl_qp_svc_t*                   p_spl_qp_svc;\r
1019 \r
1020         AL_ENTER( AL_DBG_SMI );\r
1021 \r
1022         CL_ASSERT( p_obj );\r
1023         CL_ASSERT( p_pnp_rec );\r
1024         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1025 \r
1026         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1027 \r
1028         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1029         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1030 \r
1031         AL_EXIT( AL_DBG_SMI );\r
1032 }\r
1033 \r
1034 \r
1035 \r
1036 /*\r
1037  * Route a send work request.\r
1038  */\r
1039 mad_route_t\r
1040 route_mad_send(\r
1041         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1042         IN                              ib_send_wr_t* const                     p_send_wr )\r
1043 {\r
1044         al_mad_wr_t*                    p_mad_wr;\r
1045         al_mad_send_t*                  p_mad_send;\r
1046         ib_mad_t*                               p_mad;\r
1047         ib_smp_t*                               p_smp;\r
1048         ib_av_handle_t                  h_av;\r
1049         mad_route_t                             route;\r
1050         boolean_t                               local, loopback, discard;\r
1051 \r
1052         AL_ENTER( AL_DBG_SMI );\r
1053 \r
1054         CL_ASSERT( p_spl_qp_svc );\r
1055         CL_ASSERT( p_send_wr );\r
1056 \r
1057         /* Initialize a pointers to the MAD work request and the MAD. */\r
1058         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1059         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1060         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1061         p_smp = (ib_smp_t*)p_mad;\r
1062 \r
1063         /* Check if the CA has a local MAD interface. */\r
1064         local = loopback = discard = FALSE;\r
1065         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
1066         {\r
1067                 /*\r
1068                  * If the MAD is a locally addressed Subnet Management, Performance\r
1069                  * Management, or Connection Management datagram, process the work\r
1070                  * request locally.\r
1071                  */\r
1072                 h_av = p_send_wr->dgrm.ud.h_av;\r
1073                 switch( p_mad->mgmt_class )\r
1074                 {\r
1075                 case IB_MCLASS_SUBN_DIR:\r
1076                         /* Perform special checks on directed route SMPs. */\r
1077                         if( ib_smp_is_response( p_smp ) )\r
1078                         {\r
1079                                 /*\r
1080                                  * This node is the originator of the response.  Discard\r
1081                                  * if the hop count or pointer is zero, an intermediate hop,\r
1082                                  * out of bounds hop, or if the first port of the directed\r
1083                                  * route retrun path is not this port.\r
1084                                  */\r
1085                                 if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )\r
1086                                 {\r
1087                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1088                                                 ("hop cnt or hop ptr set to 0...discarding\n") );\r
1089                                         discard = TRUE;\r
1090                                 }\r
1091                                 else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )\r
1092                                 {\r
1093                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1094                                                 ("hop cnt != (hop ptr - 1)...discarding\n") );\r
1095                                         discard = TRUE;\r
1096                                 }\r
1097                                 else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )\r
1098                                 {\r
1099                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1100                                                 ("hop cnt > max hops...discarding\n") );\r
1101                                         discard = TRUE;\r
1102                                 }\r
1103                                 else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&\r
1104                                                  ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=\r
1105                                                         p_spl_qp_svc->port_num ) )\r
1106                                 {\r
1107                                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
1108                                                 ("return path[hop ptr - 1] != port num...discarding\n") );\r
1109                                         discard = TRUE;\r
1110                                 }\r
1111                         }\r
1112                         else\r
1113                         {\r
1114                                 /* The SMP is a request. */\r
1115                                 if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
1116                                         ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
1117                                 {\r
1118                                         discard = TRUE;\r
1119                                 }\r
1120                                 else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )\r
1121                                 {\r
1122                                         /* Self Addressed: Sent locally, routed locally. */\r
1123                                         local = TRUE;\r
1124                                         discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||\r
1125                                                           ( p_smp->dr_dlid != IB_LID_PERMISSIVE );\r
1126                                 }\r
1127                                 else if( ( p_smp->hop_count != 0 ) &&\r
1128                                                  ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )\r
1129                                 {\r
1130                                         /* End of Path: Sent remotely, routed locally. */\r
1131                                         local = TRUE;\r
1132                                 }\r
1133                                 else if( ( p_smp->hop_count != 0 ) &&\r
1134                                                  ( p_smp->hop_ptr       == 0 ) )\r
1135                                 {\r
1136                                         /* Beginning of Path: Sent locally, routed remotely. */\r
1137                                         if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1138                                         {\r
1139                                                 discard =\r
1140                                                         ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=\r
1141                                                           p_spl_qp_svc->port_num );\r
1142                                         }\r
1143                                 }\r
1144                                 else\r
1145                                 {\r
1146                                         /* Intermediate hop. */\r
1147                                         discard = TRUE;\r
1148                                 }\r
1149                         }\r
1150                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1151                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1152                         break;\r
1153 \r
1154                 case IB_MCLASS_SUBN_LID:\r
1155                         /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
1156                         loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
1157 \r
1158                         /* Fall through to check for a local MAD. */\r
1159 \r
1160                 case IB_MCLASS_PERF:\r
1161                 case IB_MCLASS_BM:\r
1162                         local = ( h_av &&\r
1163                                 ( h_av->av_attr.dlid ==\r
1164                                 ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
1165                         break;\r
1166 \r
1167                 default:\r
1168                         /* Route vendor specific MADs to the HCA provider. */\r
1169                         if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )\r
1170                         {\r
1171                                 local = ( h_av &&\r
1172                                         ( h_av->av_attr.dlid ==\r
1173                                         ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
1174                         }\r
1175                         break;\r
1176                 }\r
1177         }\r
1178 \r
1179         route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?\r
1180                 ROUTE_LOCAL : ROUTE_REMOTE;\r
1181         if( local ) route = ROUTE_LOCAL;\r
1182         if( loopback && local ) route = ROUTE_LOOPBACK;\r
1183         if( discard ) route = ROUTE_DISCARD;\r
1184 \r
1185         AL_EXIT( AL_DBG_SMI );\r
1186         return route;\r
1187 }\r
1188 \r
1189 \r
1190 \r
1191 /*\r
1192  * Send a work request on the special QP.\r
1193  */\r
1194 ib_api_status_t\r
1195 spl_qp_svc_send(\r
1196         IN              const   ib_qp_handle_t                          h_qp,\r
1197         IN                              ib_send_wr_t* const                     p_send_wr )\r
1198 {\r
1199         spl_qp_svc_t*                   p_spl_qp_svc;\r
1200         al_mad_wr_t*                    p_mad_wr;\r
1201         mad_route_t                             route;\r
1202         ib_api_status_t                 status;\r
1203 \r
1204         AL_ENTER( AL_DBG_SMI );\r
1205 \r
1206         CL_ASSERT( h_qp );\r
1207         CL_ASSERT( p_send_wr );\r
1208 \r
1209         /* Get the special QP service. */\r
1210         p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;\r
1211         CL_ASSERT( p_spl_qp_svc );\r
1212         CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );\r
1213 \r
1214         /* Determine how to route the MAD. */\r
1215         route = route_mad_send( p_spl_qp_svc, p_send_wr );\r
1216 \r
1217         /*\r
1218          * Check the QP state and guard against error handling.  Also,\r
1219          * to maintain proper order of work completions, delay processing\r
1220          * a local MAD until any remote MAD work requests have completed,\r
1221          * and delay processing a remote MAD until local MAD work requests\r
1222          * have completed.\r
1223          */\r
1224         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1225         if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||\r
1226                 (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||\r
1227                 ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=\r
1228                         p_spl_qp_svc->max_qp_depth ) )\r
1229         {\r
1230                 /*\r
1231                  * Return busy status.\r
1232                  * The special QP will resume sends at this point.\r
1233                  */\r
1234                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1235 \r
1236                 AL_EXIT( AL_DBG_SMI );\r
1237                 return IB_RESOURCE_BUSY;\r
1238         }\r
1239 \r
1240         p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
1241 \r
1242         if( is_local( route ) )\r
1243         {\r
1244                 /* Save the local MAD work request for processing. */\r
1245                 p_spl_qp_svc->local_mad_wr = p_mad_wr;\r
1246 \r
1247                 /* Flag the service as in use by the asynchronous processing thread. */\r
1248                 cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
1249 \r
1250                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1251 \r
1252                 status = local_mad_send( p_spl_qp_svc, p_mad_wr );\r
1253         }\r
1254         else\r
1255         {\r
1256                 /* Process a remote MAD send work request. */\r
1257                 status = remote_mad_send( p_spl_qp_svc, p_mad_wr );\r
1258 \r
1259                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1260         }\r
1261 \r
1262         AL_EXIT( AL_DBG_SMI );\r
1263         return status;\r
1264 }\r
1265 \r
1266 \r
1267 \r
1268 /*\r
1269  * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.\r
1270  */\r
1271 ib_api_status_t\r
1272 remote_mad_send(\r
1273         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1274         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1275 {\r
1276         ib_smp_t*                               p_smp;\r
1277         ib_api_status_t                 status;\r
1278 \r
1279         AL_ENTER( AL_DBG_SMI );\r
1280 \r
1281         CL_ASSERT( p_spl_qp_svc );\r
1282         CL_ASSERT( p_mad_wr );\r
1283 \r
1284         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1285         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1286 \r
1287         /* Perform outbound MAD processing. */\r
1288 \r
1289         /* Adjust directed route SMPs as required by IBA. */\r
1290         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1291         {\r
1292                 if( ib_smp_is_response( p_smp ) )\r
1293                 {\r
1294                         if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1295                                 p_smp->hop_ptr--;\r
1296                 }\r
1297                 else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1298                 {\r
1299                         /*\r
1300                          * Only update the pointer if the hw_agent is not implemented.\r
1301                          * Fujitsu implements SMI in hardware, so the following has to\r
1302                          * be passed down to the hardware SMI.\r
1303                          */\r
1304                         ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1305                         if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )\r
1306                                 p_smp->hop_ptr++;\r
1307                         ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1308                 }\r
1309         }\r
1310 \r
1311         /* Always generate send completions. */\r
1312         p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;\r
1313 \r
1314         /* Queue the MAD work request on the service tracking queue. */\r
1315         cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1316 \r
1317         status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );\r
1318 \r
1319         if( status != IB_SUCCESS )\r
1320         {\r
1321                 cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
1322 \r
1323                 /* Reset directed route SMPs as required by IBA. */\r
1324                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1325                 {\r
1326                         if( ib_smp_is_response( p_smp ) )\r
1327                         {\r
1328                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
1329                                         p_smp->hop_ptr++;\r
1330                         }\r
1331                         else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
1332                         {\r
1333                                 /* Only update if the hw_agent is not implemented. */\r
1334                                 ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1335                                 if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )\r
1336                                         p_smp->hop_ptr--;\r
1337                                 ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
1338                         }\r
1339                 }\r
1340         }\r
1341 \r
1342         AL_EXIT( AL_DBG_SMI );\r
1343         return status;\r
1344 }\r
1345 \r
1346 \r
1347 /*\r
1348  * Handle a MAD destined for the local CA, using cached data\r
1349  * as much as possible.\r
1350  */\r
1351 static ib_api_status_t\r
1352 local_mad_send(\r
1353         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1354         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1355 {\r
1356         mad_route_t                             route;\r
1357         ib_api_status_t                 status = IB_SUCCESS;\r
1358 \r
1359         AL_ENTER( AL_DBG_SMI );\r
1360 \r
1361         CL_ASSERT( p_spl_qp_svc );\r
1362         CL_ASSERT( p_mad_wr );\r
1363 \r
1364         /* Determine how to route the MAD. */\r
1365         route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );\r
1366 \r
1367         /* Check if this MAD should be discarded. */\r
1368         if( is_discard( route ) )\r
1369         {\r
1370                 /* Deliver a "work completion" to the dispatcher. */\r
1371                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1372                         IB_WCS_LOCAL_OP_ERR );\r
1373                 status = IB_INVALID_SETTING;\r
1374         }\r
1375         else if( is_loopback( route ) )\r
1376         {\r
1377                 /* Loopback local SM to SM "heartbeat" messages. */\r
1378                 status = loopback_mad( p_spl_qp_svc, p_mad_wr );\r
1379         }\r
1380         else\r
1381         {\r
1382                 switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )\r
1383                 {\r
1384                 case IB_MCLASS_SUBN_DIR:\r
1385                 case IB_MCLASS_SUBN_LID:\r
1386                         status = process_subn_mad( p_spl_qp_svc, p_mad_wr );\r
1387                         break;\r
1388 \r
1389                 default:\r
1390                         status = IB_NOT_DONE;\r
1391                 }\r
1392         }\r
1393 \r
1394         if( status == IB_NOT_DONE )\r
1395         {\r
1396                 /* Queue an asynchronous processing item to process the local MAD. */\r
1397                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );\r
1398         }\r
1399         else\r
1400         {\r
1401                 /*\r
1402                  * Clear the local MAD pointer to allow processing of other MADs.\r
1403                  * This is done after polling for attribute changes to ensure that\r
1404                  * subsequent MADs pick up any changes performed by this one.\r
1405                  */\r
1406                 cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1407                 p_spl_qp_svc->local_mad_wr = NULL;\r
1408                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1409 \r
1410                 /* No longer in use by the asynchronous processing thread. */\r
1411                 cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1412 \r
1413                 /* Special QP operations will resume by unwinding. */\r
1414         }\r
1415 \r
1416         AL_EXIT( AL_DBG_SMI );\r
1417         return IB_SUCCESS;\r
1418 }\r
1419 \r
1420 \r
1421 static ib_api_status_t\r
1422 get_resp_mad(\r
1423         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1424         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1425                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1426 {\r
1427         ib_api_status_t                 status;\r
1428 \r
1429         AL_ENTER( AL_DBG_SMI );\r
1430 \r
1431         CL_ASSERT( p_spl_qp_svc );\r
1432         CL_ASSERT( p_mad_wr );\r
1433         CL_ASSERT( pp_mad_resp );\r
1434 \r
1435         /* Get a MAD element from the pool for the response. */\r
1436         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1437                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1438         if( status != IB_SUCCESS )\r
1439         {\r
1440                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1441                         IB_WCS_LOCAL_OP_ERR );\r
1442         }\r
1443 \r
1444         AL_EXIT( AL_DBG_SMI );\r
1445         return status;\r
1446 }\r
1447 \r
1448 \r
1449 static ib_api_status_t\r
1450 complete_local_mad(\r
1451         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1452         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1453         IN                              ib_mad_element_t* const         p_mad_resp )\r
1454 {\r
1455         ib_api_status_t                 status;\r
1456 \r
1457         AL_ENTER( AL_DBG_SMI );\r
1458 \r
1459         CL_ASSERT( p_spl_qp_svc );\r
1460         CL_ASSERT( p_mad_wr );\r
1461         CL_ASSERT( p_mad_resp );\r
1462 \r
1463         /* Construct the receive MAD element. */\r
1464         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1465         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1466         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1467         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1468         {\r
1469                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1470                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1471         }\r
1472 \r
1473         /*\r
1474          * Hand the receive MAD element to the dispatcher before completing\r
1475          * the send.  This guarantees that the send request cannot time out.\r
1476          */\r
1477         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1478 \r
1479         /* Forward the send work completion to the dispatcher. */\r
1480         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1481 \r
1482         AL_EXIT( AL_DBG_SMI );\r
1483         return status;\r
1484 }\r
1485 \r
1486 \r
1487 static ib_api_status_t\r
1488 loopback_mad(\r
1489         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1490         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1491 {\r
1492         ib_mad_t                                *p_mad;\r
1493         ib_mad_element_t                *p_mad_resp;\r
1494         ib_api_status_t                 status;\r
1495 \r
1496         AL_ENTER( AL_DBG_SMI );\r
1497 \r
1498         CL_ASSERT( p_spl_qp_svc );\r
1499         CL_ASSERT( p_mad_wr );\r
1500 \r
1501         /* Get a MAD element from the pool for the response. */\r
1502         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1503         if( status == IB_SUCCESS )\r
1504         {\r
1505                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1506                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1507 \r
1508                 /* Simulate a send/receive between local managers. */\r
1509                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1510 \r
1511                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1512         }\r
1513 \r
1514         AL_EXIT( AL_DBG_SMI );\r
1515         return status;\r
1516 }\r
1517 \r
1518 \r
/*
 * Answer a NodeInfo (Get) SMP from the locally cached CA/port attributes,
 * avoiding a round trip below verbs.  Rejects any method other than Get.
 * Returns the status of the local completion, or IB_INVALID_SETTING for a
 * non-Get method.
 */
static ib_api_status_t
process_node_info(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD in the work request. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* Node info is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request header, then mark it as a response. */
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method |= IB_MAD_METHOD_RESP_MASK;
		/* Directed route responses carry the direction bit in status. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		/* NOTE(review): assumes p_pnp_attr is non-NULL while the QP
		 * service exists - confirm against CI CA lifetime rules. */
		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		/* TODO: Get some unique identifier for the system */
		p_node_info->sys_guid = p_ca_attr->ca_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		/* Low 24 bits are the vendor ID; the top byte is the port. */
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1592 \r
1593 \r
/*
 * Answer a NodeDescription (Get) SMP using the cached machine name,
 * avoiding a round trip below verbs.  Rejects any method other than Get.
 * Returns the status of the local completion, or IB_INVALID_SETTING for a
 * non-Get method.
 */
static ib_api_status_t
process_node_desc(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD in the work request. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* Node description is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request header, then mark it as a response. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method |= IB_MAD_METHOD_RESP_MASK;
		/* Directed route responses carry the direction bit in status. */
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, 
			node_desc, sizeof(node_desc) );

		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1639 \r
1640 \r
1641 /*\r
1642  * Process subnet administration MADs using cached data if possible.\r
1643  */\r
1644 static ib_api_status_t\r
1645 process_subn_mad(\r
1646         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1647         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1648 {\r
1649         ib_api_status_t         status;\r
1650         ib_smp_t                        *p_smp;\r
1651 \r
1652         AL_ENTER( AL_DBG_SMI );\r
1653 \r
1654         CL_ASSERT( p_spl_qp_svc );\r
1655         CL_ASSERT( p_mad_wr );\r
1656 \r
1657         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1658 \r
1659         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1660                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
1661 \r
1662         switch( p_smp->attr_id )\r
1663         {\r
1664         case IB_MAD_ATTR_NODE_INFO:\r
1665                 status = process_node_info( p_spl_qp_svc, p_mad_wr );\r
1666                 break;\r
1667 \r
1668         case IB_MAD_ATTR_NODE_DESC:\r
1669                 status = process_node_desc( p_spl_qp_svc, p_mad_wr );\r
1670                 break;\r
1671 \r
1672         default:\r
1673                 status = IB_NOT_DONE;\r
1674                 break;\r
1675         }\r
1676 \r
1677         AL_EXIT( AL_DBG_SMI );\r
1678         return status;\r
1679 }\r
1680 \r
1681 \r
/*
 * Process a local MAD send work request.
 *
 * Forwards a MAD addressed to the local CA port down to the CA interface
 * via ib_local_mad(), adjusting directed route hop pointers as required
 * by IBA before and after delivery, then completes the simulated
 * send/receive pair through the MAD dispatcher.
 *
 * Returns IB_SUCCESS when the MAD completed and a PnP poll should follow
 * (a Set may have changed port attributes), IB_NOT_DONE when the MAD
 * completed but no poll is needed (a Get), or an error status on failure.
 */
ib_api_status_t
fwd_local_mad(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	al_mad_send_t*			p_mad_send;
	ib_mad_element_t*		p_mad_response;
	ib_mad_t*				p_mad_response_buf;
	ib_api_status_t			status = IB_SUCCESS;
	boolean_t				smp_is_set;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Remember whether this is a Set; checked after completion below. */
	smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

	/* Get a MAD element from the pool for the response. */
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
//*** Commented code to work-around ib_local_mad() requiring a response MAD
//*** as input.  Remove comments once the ib_local_mad() implementation allows
//*** for a NULL response MAD, when one is not expected.
//*** Note that an attempt to route an invalid response MAD in this case
//*** will fail harmlessly.
//*** NOTE(review): if this conditional is ever restored, p_mad_response
//*** must be initialized to NULL, or the ib_put_mad() error path below
//*** would read an uninitialized pointer.
//***	if( p_mad_send->p_send_mad->resp_expected )
//***	{
		status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
		if( status != IB_SUCCESS )
		{
			/* get_resp_mad already completed the send in error. */
			AL_EXIT( AL_DBG_SMI );
			return status;
		}
		p_mad_response_buf = p_mad_response->p_mad_buf;
//***	}
//***	else
//***	{
//***		p_mad_response_buf = NULL;
//***	}

	/* Adjust directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		CL_ASSERT( !ib_smp_is_response( p_smp ) );

		/*
		 * If this was a self addressed, directed route SMP, increment
		 * the hop pointer in the request before delivery as required
		 * by IBA.  Otherwise, adjustment for remote requests occurs
		 * during inbound processing.
		 */
		if( p_smp->hop_count == 0 )
			p_smp->hop_ptr++;
	}

	/* Forward the locally addressed MAD to the CA interface. */
	status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
		p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );

	/* Reset directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		/*
		 * If this was a self addressed, directed route SMP, decrement
		 * the hop pointer in the response before delivery as required
		 * by IBA.  Otherwise, adjustment for remote responses occurs
		 * during outbound processing.
		 */
		if( p_smp->hop_count == 0 )
		{
			/* Adjust the request SMP. */
			p_smp->hop_ptr--;

			/* Adjust the response SMP.  Note that p_smp is re-aimed at
			 * the response buffer here and stays so for the rest of the
			 * function. */
			if( p_mad_response_buf )
			{
				p_smp = (ib_smp_t*)p_mad_response_buf;
				p_smp->hop_ptr--;
			}
		}
	}

	if( status != IB_SUCCESS )
	{
		/* Release the unused response element before failing the send. */
		if( p_mad_response )
			ib_put_mad( p_mad_response );

		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return status;
	}

	/* Check the completion status of this simulated send. */
	if( p_mad_response_buf )
	{
		/*
		 * The SMI is uses PnP polling to refresh the base_lid and lmc.
		 * Polling takes time, so we update the values here to prevent
		 * the failure of LID routed MADs sent immediately following this
		 * assignment.  Check the response to see if the port info was set.
		 */
		if( smp_is_set )
		{
			ib_port_info_t*		p_port_info = NULL;

			switch( p_mad_response_buf->mgmt_class )
			{
			case IB_MCLASS_SUBN_DIR:
				/* NOTE(review): p_smp points at the response buffer only
				 * when hop_count was 0 (see re-aim above); otherwise it
				 * still addresses the request SMP - confirm intended. */
				if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
					( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )
				{
					p_port_info =
						(ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );
				}
				break;

			case IB_MCLASS_SUBN_LID:
				/* For LID routed SMPs the payload follows the header. */
				if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
					( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )
				{
					p_port_info =
						(ib_port_info_t*)( p_mad_response_buf + 1 );
				}
				break;

			default:
				break;
			}

			if( p_port_info )
			{
				/* Cache the new LID/LMC ahead of the next PnP poll. */
				p_spl_qp_svc->base_lid = p_port_info->base_lid;
				p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
				/* Bit 7 of subnet_timeout is the client reregister flag. */
				if (p_port_info->subnet_timeout & 0x80)
				{
					AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
						("Client reregister event, setting sm_lid to 0.\n"));
					ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);
					p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->
						p_port_attr->sm_lid= 0;
					ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);
				}
			}
		}
	}

	status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );

	/* If the SMP was a Get, no need to trigger a PnP poll. */
	if( status == IB_SUCCESS && !smp_is_set )
		status = IB_NOT_DONE;

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1847 \r
1848 \r
1849 \r
1850 /*\r
1851  * Asynchronous processing thread callback to send a local MAD.\r
1852  */\r
1853 void\r
1854 send_local_mad_cb(\r
1855         IN                              cl_async_proc_item_t*           p_item )\r
1856 {\r
1857         spl_qp_svc_t*                   p_spl_qp_svc;\r
1858         ib_api_status_t                 status;\r
1859 \r
1860         AL_ENTER( AL_DBG_SMI_CB );\r
1861 \r
1862         CL_ASSERT( p_item );\r
1863         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
1864 \r
1865         /* Process a local MAD send work request. */\r
1866         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
1867         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
1868 \r
1869         /*\r
1870          * If we successfully processed a local MAD, which could have changed\r
1871          * something (e.g. the LID) on the HCA.  Scan for changes.\r
1872          */\r
1873         if( status == IB_SUCCESS )\r
1874                 pnp_poll();\r
1875 \r
1876         /*\r
1877          * Clear the local MAD pointer to allow processing of other MADs.\r
1878          * This is done after polling for attribute changes to ensure that\r
1879          * subsequent MADs pick up any changes performed by this one.\r
1880          */\r
1881         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1882         p_spl_qp_svc->local_mad_wr = NULL;\r
1883         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1884 \r
1885         /* Continue processing any queued MADs on the QP. */\r
1886         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1887 \r
1888         /* No longer in use by the asynchronous processing thread. */\r
1889         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1890 \r
1891         AL_EXIT( AL_DBG_SMI );\r
1892 }\r
1893 \r
1894 \r
1895 \r
1896 /*\r
1897  * Special QP send completion callback.\r
1898  */\r
1899 void\r
1900 spl_qp_send_comp_cb(\r
1901         IN              const   ib_cq_handle_t                          h_cq,\r
1902         IN                              void*                                           cq_context )\r
1903 {\r
1904         spl_qp_svc_t*                   p_spl_qp_svc;\r
1905 \r
1906         AL_ENTER( AL_DBG_SMI_CB );\r
1907 \r
1908         CL_ASSERT( cq_context );\r
1909         p_spl_qp_svc = cq_context;\r
1910 \r
1911 #if defined( CL_USE_MUTEX )\r
1912 \r
1913         /* Queue an asynchronous processing item to process sends. */\r
1914         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1915         if( !p_spl_qp_svc->send_async_queued )\r
1916         {\r
1917                 p_spl_qp_svc->send_async_queued = TRUE;\r
1918                 ref_al_obj( &p_spl_qp_svc->obj );\r
1919                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
1920         }\r
1921         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1922 \r
1923 #else\r
1924 \r
1925         /* Invoke the callback directly. */\r
1926         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
1927         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
1928 \r
1929         /* Continue processing any queued MADs on the QP. */\r
1930         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1931 \r
1932 #endif\r
1933 \r
1934         AL_EXIT( AL_DBG_SMI );\r
1935 }\r
1936 \r
1937 \r
1938 \r
#if defined( CL_USE_MUTEX )
/*
 * Deferred send processing for mutex builds: clears the queued flag,
 * drains the send CQ, resumes queued sends, and drops the reference
 * taken when the item was queued.
 */
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*			p_item )
{
	spl_qp_svc_t*			p_svc;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Allow the completion callback to queue another item. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain the send CQ. */
	spl_qp_comp( p_svc, p_svc->h_send_cq, IB_WC_SEND );

	/* Restart any sends queued on the QP. */
	status = special_qp_resume_sends( p_svc->h_qp );
	CL_ASSERT( status == IB_SUCCESS );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
1968 \r
1969 \r
1970 \r
1971 /*\r
1972  * Special QP receive completion callback.\r
1973  */\r
1974 void\r
1975 spl_qp_recv_comp_cb(\r
1976         IN              const   ib_cq_handle_t                          h_cq,\r
1977         IN                              void*                                           cq_context )\r
1978 {\r
1979         spl_qp_svc_t*                   p_spl_qp_svc;\r
1980 \r
1981         AL_ENTER( AL_DBG_SMI );\r
1982 \r
1983         CL_ASSERT( cq_context );\r
1984         p_spl_qp_svc = cq_context;\r
1985 \r
1986 #if defined( CL_USE_MUTEX )\r
1987 \r
1988         /* Queue an asynchronous processing item to process receives. */\r
1989         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1990         if( !p_spl_qp_svc->recv_async_queued )\r
1991         {\r
1992                 p_spl_qp_svc->recv_async_queued = TRUE;\r
1993                 ref_al_obj( &p_spl_qp_svc->obj );\r
1994                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
1995         }\r
1996         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1997 \r
1998 #else\r
1999 \r
2000         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
2001         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
2002 \r
2003 #endif\r
2004 \r
2005         AL_EXIT( AL_DBG_SMI );\r
2006 }\r
2007 \r
2008 \r
2009 \r
#if defined( CL_USE_MUTEX )
/*
 * Deferred receive processing for mutex builds: clears the queued flag,
 * drains the receive CQ, and drops the reference taken when the item
 * was queued.
 */
void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*			p_item )
{
	spl_qp_svc_t*			p_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Allow the completion callback to queue another item. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain the receive CQ. */
	spl_qp_comp( p_svc, p_svc->h_recv_cq, IB_WC_RECV );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2034 \r
2035 \r
2036 \r
2037 /*\r
2038  * Special QP completion handler.\r
2039  */\r
2040 void\r
2041 spl_qp_comp(\r
2042         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2043         IN              const   ib_cq_handle_t                          h_cq,\r
2044         IN                              ib_wc_type_t                            wc_type )\r
2045 {\r
2046         ib_wc_t                                 wc;\r
2047         ib_wc_t*                                p_free_wc = &wc;\r
2048         ib_wc_t*                                p_done_wc;\r
2049         al_mad_wr_t*                    p_mad_wr;\r
2050         al_mad_element_t*               p_al_mad;\r
2051         ib_mad_element_t*               p_mad_element;\r
2052         ib_smp_t*                               p_smp;\r
2053         ib_api_status_t                 status;\r
2054 \r
2055         AL_ENTER( AL_DBG_SMI_CB );\r
2056 \r
2057         CL_ASSERT( p_spl_qp_svc );\r
2058         CL_ASSERT( h_cq );\r
2059 \r
2060         /* Check the QP state and guard against error handling. */\r
2061         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2062         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2063         {\r
2064                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2065                 return;\r
2066         }\r
2067         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2068         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2069 \r
2070         wc.p_next = NULL;\r
2071         /* Process work completions. */\r
2072         while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )\r
2073         {\r
2074                 /* Process completions one at a time. */\r
2075                 CL_ASSERT( p_done_wc );\r
2076 \r
2077                 /* Flushed completions are handled elsewhere. */\r
2078                 if( wc.status == IB_WCS_WR_FLUSHED_ERR )\r
2079                 {\r
2080                         p_free_wc = &wc;\r
2081                         continue;\r
2082                 }\r
2083 \r
2084                 /*\r
2085                  * Process the work completion.  Per IBA specification, the\r
2086                  * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.\r
2087                  * Use the wc_type parameter.\r
2088                  */\r
2089                 switch( wc_type )\r
2090                 {\r
2091                 case IB_WC_SEND:\r
2092                         /* Get a pointer to the MAD work request. */\r
2093                         p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);\r
2094 \r
2095                         /* Remove the MAD work request from the service tracking queue. */\r
2096                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2097                         cl_qlist_remove_item( &p_spl_qp_svc->send_queue,\r
2098                                 &p_mad_wr->list_item );\r
2099                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2100 \r
2101                         /* Reset directed route SMPs as required by IBA. */\r
2102                         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
2103                         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2104                         {\r
2105                                 if( ib_smp_is_response( p_smp ) )\r
2106                                         p_smp->hop_ptr++;\r
2107                                 else\r
2108                                         p_smp->hop_ptr--;\r
2109                         }\r
2110 \r
2111                         /* Report the send completion to the dispatcher. */\r
2112                         mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );\r
2113                         break;\r
2114 \r
2115                 case IB_WC_RECV:\r
2116 \r
2117                         /* Initialize pointers to the MAD element. */\r
2118                         p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);\r
2119                         p_mad_element = &p_al_mad->element;\r
2120 \r
2121                         /* Remove the AL MAD element from the service tracking list. */\r
2122                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2123 \r
2124                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2125                                 &p_al_mad->list_item );\r
2126 \r
2127                         /* Replenish the receive buffer. */\r
2128                         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2129                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2130 \r
2131                         /* Construct the MAD element from the receive work completion. */\r
2132                         build_mad_recv( p_mad_element, &wc );\r
2133 \r
2134                         /* Process the received MAD. */\r
2135                         status = process_mad_recv( p_spl_qp_svc, p_mad_element );\r
2136 \r
2137                         /* Discard this MAD on error. */\r
2138                         if( status != IB_SUCCESS )\r
2139                         {\r
2140                                 status = ib_put_mad( p_mad_element );\r
2141                                 CL_ASSERT( status == IB_SUCCESS );\r
2142                         }\r
2143                         break;\r
2144 \r
2145                 default:\r
2146                         CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );\r
2147                         break;\r
2148                 }\r
2149 \r
2150                 if( wc.status != IB_WCS_SUCCESS )\r
2151                 {\r
2152                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2153                                 ("special QP completion error: %s! internal syndrome 0x%I64x\n",\r
2154                                 ib_get_wc_status_str( wc.status ), wc.vendor_specific) );\r
2155 \r
2156                         /* Reset the special QP service and return. */\r
2157                         spl_qp_svc_reset( p_spl_qp_svc );\r
2158                 }\r
2159                 p_free_wc = &wc;\r
2160         }\r
2161 \r
2162         /* Rearm the CQ. */\r
2163         status = ib_rearm_cq( h_cq, FALSE );\r
2164         CL_ASSERT( status == IB_SUCCESS );\r
2165 \r
2166         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2167         AL_EXIT( AL_DBG_SMI_CB );\r
2168 }\r
2169 \r
2170 \r
2171 \r
/*
 * Process a MAD received on a special QP.
 *
 * Validates directed-route SMPs, classifies the MAD by management class,
 * and routes it: to the local HCA, to the remote SM, to the MAD
 * dispatcher, or discards it.  Returns IB_ERROR for discarded MADs so the
 * caller releases the element; otherwise returns the status of the
 * selected routing call.
 */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_smp_t*				p_smp;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			/* Discard SMPs whose hop fields exceed the maximum path length. */
			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * if the source LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * if the destination LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;	/* Set path per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through to the next case */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			/* Performance management MADs are handled by the local CA. */
			route = ROUTE_LOCAL;
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if ( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2303 \r
2304 \r
2305 \r
2306 /*\r
2307  * Route a received SMP.\r
2308  */\r
2309 mad_route_t\r
2310 route_recv_smp(\r
2311         IN                              ib_mad_element_t*                       p_mad_element )\r
2312 {\r
2313         mad_route_t                             route;\r
2314 \r
2315         AL_ENTER( AL_DBG_SMI );\r
2316 \r
2317         CL_ASSERT( p_mad_element );\r
2318 \r
2319         /* Process the received SMP. */\r
2320         switch( p_mad_element->p_mad_buf->method )\r
2321         {\r
2322         case IB_MAD_METHOD_GET:\r
2323         case IB_MAD_METHOD_SET:\r
2324                 route = route_recv_smp_attr( p_mad_element );\r
2325                 break;\r
2326 \r
2327         case IB_MAD_METHOD_TRAP:\r
2328                 /*\r
2329                  * Special check to route locally generated traps to the remote SM.\r
2330                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2331                  * IB_RECV_OPT_FORWARD flag.\r
2332                  *\r
2333                  * Note that because forwarded traps use AL MAD services, the upper\r
2334                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2335                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2336                  * TID.\r
2337                  */\r
2338                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2339                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2340                 break;\r
2341 \r
2342         case IB_MAD_METHOD_TRAP_REPRESS:\r
2343                 /*\r
2344                  * Note that because forwarded traps use AL MAD services, the upper\r
2345                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2346                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2347                  * TID.\r
2348                  */\r
2349                 route = ROUTE_LOCAL;\r
2350                 break;\r
2351 \r
2352         default:\r
2353                 route = ROUTE_DISPATCHER;\r
2354                 break;\r
2355         }\r
2356 \r
2357         AL_EXIT( AL_DBG_SMI );\r
2358         return route;\r
2359 }\r
2360 \r
2361 \r
2362 \r
2363 /*\r
2364  * Route received SMP attributes.\r
2365  */\r
2366 mad_route_t\r
2367 route_recv_smp_attr(\r
2368         IN                              ib_mad_element_t*                       p_mad_element )\r
2369 {\r
2370         mad_route_t                             route;\r
2371 \r
2372         AL_ENTER( AL_DBG_SMI );\r
2373 \r
2374         CL_ASSERT( p_mad_element );\r
2375 \r
2376         /* Process the received SMP attributes. */\r
2377         switch( p_mad_element->p_mad_buf->attr_id )\r
2378         {\r
2379         case IB_MAD_ATTR_NODE_DESC:\r
2380         case IB_MAD_ATTR_NODE_INFO:\r
2381         case IB_MAD_ATTR_GUID_INFO:\r
2382         case IB_MAD_ATTR_PORT_INFO:\r
2383         case IB_MAD_ATTR_P_KEY_TABLE:\r
2384         case IB_MAD_ATTR_SLVL_TABLE:\r
2385         case IB_MAD_ATTR_VL_ARBITRATION:\r
2386         case IB_MAD_ATTR_VENDOR_DIAG:\r
2387         case IB_MAD_ATTR_LED_INFO:\r
2388                 route = ROUTE_LOCAL;\r
2389                 break;\r
2390 \r
2391         default:\r
2392                 route = ROUTE_DISPATCHER;\r
2393                 break;\r
2394         }\r
2395 \r
2396         AL_EXIT( AL_DBG_SMI );\r
2397         return route;\r
2398 }\r
2399 \r
2400 \r
2401 /*\r
2402  * Route a received GMP.\r
2403  */\r
2404 mad_route_t\r
2405 route_recv_gmp(\r
2406         IN                              ib_mad_element_t*                       p_mad_element )\r
2407 {\r
2408         mad_route_t                             route;\r
2409 \r
2410         AL_ENTER( AL_DBG_SMI );\r
2411 \r
2412         CL_ASSERT( p_mad_element );\r
2413 \r
2414         /* Process the received GMP. */\r
2415         switch( p_mad_element->p_mad_buf->method )\r
2416         {\r
2417         case IB_MAD_METHOD_GET:\r
2418         case IB_MAD_METHOD_SET:\r
2419                 /* Route vendor specific MADs to the HCA provider. */\r
2420                 if( ib_class_is_vendor_specific(\r
2421                         p_mad_element->p_mad_buf->mgmt_class ) )\r
2422                 {\r
2423                         route = ROUTE_LOCAL;\r
2424                 }\r
2425                 else\r
2426                 {\r
2427                         route = route_recv_gmp_attr( p_mad_element );\r
2428                 }\r
2429                 break;\r
2430 \r
2431         default:\r
2432                 route = ROUTE_DISPATCHER;\r
2433                 break;\r
2434         }\r
2435 \r
2436         AL_EXIT( AL_DBG_SMI );\r
2437         return route;\r
2438 }\r
2439 \r
2440 \r
2441 \r
2442 /*\r
2443  * Route received GMP attributes.\r
2444  */\r
2445 mad_route_t\r
2446 route_recv_gmp_attr(\r
2447         IN                              ib_mad_element_t*                       p_mad_element )\r
2448 {\r
2449         mad_route_t                             route;\r
2450 \r
2451         AL_ENTER( AL_DBG_SMI );\r
2452 \r
2453         CL_ASSERT( p_mad_element );\r
2454 \r
2455         /* Process the received GMP attributes. */\r
2456         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
2457                 route = ROUTE_LOCAL;\r
2458         else\r
2459                 route = ROUTE_DISPATCHER;\r
2460 \r
2461         AL_EXIT( AL_DBG_SMI );\r
2462         return route;\r
2463 }\r
2464 \r
2465 \r
2466 \r
2467 /*\r
2468  * Forward a locally generated Subnet Management trap.\r
2469  */\r
2470 ib_api_status_t\r
2471 forward_sm_trap(\r
2472         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2473         IN                              ib_mad_element_t*                       p_mad_element )\r
2474 {\r
2475         ib_av_attr_t                    av_attr;\r
2476         ib_api_status_t                 status;\r
2477 \r
2478         AL_ENTER( AL_DBG_SMI_CB );\r
2479 \r
2480         CL_ASSERT( p_spl_qp_svc );\r
2481         CL_ASSERT( p_mad_element );\r
2482 \r
2483         /* Check the SMP class. */\r
2484         if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )\r
2485         {\r
2486                 /*\r
2487                  * Per IBA Specification Release 1.1 Section 14.2.2.1,\r
2488                  * "C14-5: Only a SM shall originate a directed route SMP."\r
2489                  * Therefore all traps should be LID routed; drop this one.\r
2490                  */\r
2491                 AL_EXIT( AL_DBG_SMI_CB );\r
2492                 return IB_ERROR;\r
2493         }\r
2494 \r
2495         /* Create an address vector for the SM. */\r
2496         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2497         av_attr.port_num = p_spl_qp_svc->port_num;\r
2498         av_attr.sl = p_mad_element->remote_sl;\r
2499         av_attr.dlid = p_mad_element->remote_lid;\r
2500         if( p_mad_element->grh_valid )\r
2501         {\r
2502                 cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );\r
2503                 av_attr.grh.src_gid      = p_mad_element->p_grh->dest_gid;\r
2504                 av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;\r
2505                 av_attr.grh_valid = TRUE;\r
2506         }\r
2507 \r
2508         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2509                 &av_attr, &p_mad_element->h_av );\r
2510 \r
2511         if( status != IB_SUCCESS )\r
2512         {\r
2513                 AL_EXIT( AL_DBG_SMI_CB );\r
2514                 return status;\r
2515         }\r
2516 \r
2517         /* Complete the initialization of the MAD element. */\r
2518         p_mad_element->p_next = NULL;\r
2519         p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;\r
2520         p_mad_element->resp_expected = FALSE;\r
2521 \r
2522         /* Clear context1 for proper send completion callback processing. */\r
2523         p_mad_element->context1 = NULL;\r
2524 \r
2525         /*\r
2526          * Forward the trap.  Note that because forwarded traps use AL MAD\r
2527          * services, the upper 32-bits of the TID are reserved by the access\r
2528          * layer.  When matching a Trap Repress MAD, the SMA must only use\r
2529          * the lower 32-bits of the TID.\r
2530          */\r
2531         status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );\r
2532 \r
2533         if( status != IB_SUCCESS )\r
2534                 ib_destroy_av( p_mad_element->h_av );\r
2535 \r
2536         AL_EXIT( AL_DBG_SMI_CB );\r
2537         return status;\r
2538 }\r
2539 \r
2540 \r
/*
 * Process a locally routed MAD received from the special QP.
 *
 * Re-sends the MAD to the local CA for processing, stashing enough
 * context on the element for the alias receive callback to match the
 * eventual response back to this request and restore its TID.
 */
ib_api_status_t
recv_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request )
{
	ib_mad_t*				p_mad_hdr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_request );

	/*
	 * Initialize the MAD element.  The request is stored in context1 so
	 * the alias receive callback can recover it from send_context1 on
	 * the response.
	 */
	p_mad_hdr = ib_get_mad_buf( p_mad_request );
	p_mad_request->context1 = p_mad_request;

	/* Save the TID; the receive callback restores it from send_context2. */
	p_mad_request->context2 =
		(void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );
/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )

	/*
	 * We need to get a response from the local HCA to this MAD only if this
	 * MAD is not itself a response.
	 */
	p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
		( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
	p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
	p_mad_request->send_opt = IB_SEND_OPT_LOCAL;

	/* Send the locally addressed MAD request to the CA for processing. */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );

	AL_EXIT( AL_DBG_SMI_CB );
	return status;
}
2588 \r
2589 \r
2590 \r
2591 /*\r
2592  * Special QP alias send completion callback.\r
2593  */\r
2594 void\r
2595 spl_qp_alias_send_cb(\r
2596         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2597         IN                              void*                                           mad_svc_context,\r
2598         IN                              ib_mad_element_t*                       p_mad_element )\r
2599 {\r
2600         ib_api_status_t                 status;\r
2601 \r
2602         AL_ENTER( AL_DBG_SMI_CB );\r
2603 \r
2604         UNUSED_PARAM( h_mad_svc );\r
2605         UNUSED_PARAM( mad_svc_context );\r
2606         CL_ASSERT( p_mad_element );\r
2607 \r
2608         if( p_mad_element->h_av )\r
2609         {\r
2610                 status = ib_destroy_av( p_mad_element->h_av );\r
2611                 CL_ASSERT( status == IB_SUCCESS );\r
2612         }\r
2613 \r
2614         status = ib_put_mad( p_mad_element );\r
2615         CL_ASSERT( status == IB_SUCCESS );\r
2616 \r
2617         AL_EXIT( AL_DBG_SMI_CB );\r
2618 }\r
2619 \r
2620 \r
2621 \r
2622 /*\r
2623  * Special QP alias receive completion callback.\r
2624  */\r
2625 void\r
2626 spl_qp_alias_recv_cb(\r
2627         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2628         IN                              void*                                           mad_svc_context,\r
2629         IN                              ib_mad_element_t*                       p_mad_response )\r
2630 {\r
2631         spl_qp_svc_t*                   p_spl_qp_svc;\r
2632         ib_mad_element_t*               p_mad_request;\r
2633         ib_mad_t*                               p_mad_hdr;\r
2634         ib_av_attr_t                    av_attr;\r
2635         ib_api_status_t                 status;\r
2636 \r
2637         AL_ENTER( AL_DBG_SMI_CB );\r
2638 \r
2639         CL_ASSERT( mad_svc_context );\r
2640         CL_ASSERT( p_mad_response );\r
2641         CL_ASSERT( p_mad_response->send_context1 );\r
2642 \r
2643         /* Initialize pointers. */\r
2644         p_spl_qp_svc = mad_svc_context;\r
2645         p_mad_request = p_mad_response->send_context1;\r
2646         p_mad_hdr = ib_get_mad_buf( p_mad_response );\r
2647 \r
2648         /* Restore the TID, so it will match on the remote side. */\r
2649 #pragma warning( push, 3 )\r
2650         al_set_al_tid( &p_mad_hdr->trans_id,\r
2651                 (uint32_t)(uintn_t)p_mad_response->send_context2 );\r
2652 #pragma warning( pop )\r
2653 \r
2654         /* Set the remote QP. */\r
2655         p_mad_response->remote_qp       = p_mad_request->remote_qp;\r
2656         p_mad_response->remote_qkey = p_mad_request->remote_qkey;\r
2657 \r
2658         /* Prepare to create an address vector. */\r
2659         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2660         av_attr.port_num        = p_spl_qp_svc->port_num;\r
2661         av_attr.sl                      = p_mad_request->remote_sl;\r
2662         av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
2663         av_attr.path_bits       = p_mad_request->path_bits;\r
2664         if( p_mad_request->grh_valid )\r
2665         {\r
2666                 cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );\r
2667                 av_attr.grh.src_gid      = p_mad_request->p_grh->dest_gid;\r
2668                 av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;\r
2669                 av_attr.grh_valid = TRUE;\r
2670         }\r
2671         if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&\r
2672                 ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )\r
2673                 av_attr.dlid = IB_LID_PERMISSIVE;\r
2674         else\r
2675                 av_attr.dlid = p_mad_request->remote_lid;\r
2676 \r
2677         /* Create an address vector. */\r
2678         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2679                 &av_attr, &p_mad_response->h_av );\r
2680 \r
2681         if( status != IB_SUCCESS )\r
2682         {\r
2683                 ib_put_mad( p_mad_response );\r
2684 \r
2685                 AL_EXIT( AL_DBG_SMI );\r
2686                 return;\r
2687         }\r
2688 \r
2689         /* Send the response. */\r
2690         status = ib_send_mad( h_mad_svc, p_mad_response, NULL );\r
2691 \r
2692         if( status != IB_SUCCESS )\r
2693         {\r
2694                 ib_destroy_av( p_mad_response->h_av );\r
2695                 ib_put_mad( p_mad_response );\r
2696         }\r
2697 \r
2698         AL_EXIT( AL_DBG_SMI_CB );\r
2699 }\r
2700 \r
2701 \r
2702 \r
/*
 * Post receive buffers to a special QP.
 *
 * Replenishes the QP's receive queue up to max_qp_depth, tracking each
 * posted element on recv_queue so it can be reclaimed on reset.  Stops
 * early and returns the failing status if the MAD pool is exhausted or
 * a post fails; returns IB_SUCCESS once the queue is full.
 *
 * NOTE(review): recv_queue is manipulated here without taking a lock;
 * the visible caller (spl_qp_svc_reset_cb) holds p_spl_qp_svc->obj.lock
 * across the call — confirm all callers do the same.
 */
static ib_api_status_t
spl_qp_svc_post_recvs(
	IN				spl_qp_svc_t*	const		p_spl_qp_svc )
{
	ib_mad_element_t*		p_mad_element;
	al_mad_element_t*		p_al_element;
	ib_recv_wr_t			recv_wr;
	ib_api_status_t			status = IB_SUCCESS;

	/* Attempt to post receive buffers up to the max_qp_depth limit. */
	while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <
		(int32_t)p_spl_qp_svc->max_qp_depth )
	{
		/* Get a MAD element from the pool. */
		status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,
			MAD_BLOCK_SIZE, &p_mad_element );

		if( status != IB_SUCCESS ) break;

		p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
			element );

		/* Build the receive work request. */
		recv_wr.p_next	 = NULL;
		recv_wr.wr_id	 = (uintn_t)p_al_element;
		recv_wr.num_ds = 1;
		recv_wr.ds_array = &p_al_element->grh_ds;

		/* Queue the receive on the service tracking list. */
		cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,
			&p_al_element->list_item );

		/* Post the receive. */
		status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );

		if( status != IB_SUCCESS )
		{
			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("Failed to post receive %016I64x\n",
				(LONG_PTR)p_al_element) );
			/* Undo the tracking insert and return the element to the pool. */
			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_element->list_item );

			ib_put_mad( p_mad_element );
			break;
		}
	}

	return status;
}
2756 \r
2757 \r
2758 \r
2759 /*\r
2760  * Special QP service asynchronous event callback.\r
2761  */\r
2762 void\r
2763 spl_qp_svc_event_cb(\r
2764         IN                              ib_async_event_rec_t            *p_event_rec )\r
2765 {\r
2766         spl_qp_svc_t*                   p_spl_qp_svc;\r
2767 \r
2768         AL_ENTER( AL_DBG_SMI_CB );\r
2769 \r
2770         CL_ASSERT( p_event_rec );\r
2771         CL_ASSERT( p_event_rec->context );\r
2772 \r
2773         if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
2774         {\r
2775                 AL_EXIT( AL_DBG_SMI );\r
2776                 return;\r
2777         }\r
2778 \r
2779         p_spl_qp_svc = p_event_rec->context;\r
2780 \r
2781         spl_qp_svc_reset( p_spl_qp_svc );\r
2782 \r
2783         AL_EXIT( AL_DBG_SMI_CB );\r
2784 }\r
2785 \r
2786 \r
2787 \r
2788 /*\r
2789  * Special QP service reset.\r
2790  */\r
2791 void\r
2792 spl_qp_svc_reset(\r
2793         IN                              spl_qp_svc_t*                           p_spl_qp_svc )\r
2794 {\r
2795         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2796 \r
2797         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2798         {\r
2799                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2800                 return;\r
2801         }\r
2802 \r
2803         /* Change the special QP service to the error state. */\r
2804         p_spl_qp_svc->state = SPL_QP_ERROR;\r
2805 \r
2806         /* Flag the service as in use by the asynchronous processing thread. */\r
2807         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2808 \r
2809         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2810 \r
2811         /* Queue an asynchronous processing item to reset the special QP. */\r
2812         cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );\r
2813 }\r
2814 \r
2815 \r
2816 \r
/*
 * Asynchronous processing thread callback to reset the special QP service.
 *
 * Waits for all other users of the service to drain, moves the QP to
 * RESET, reclaims posted receive elements, re-initializes the QP,
 * drains stale send completions, reposts receives, re-queues pending
 * sends, and finally re-activates the service and re-arms its CQs.
 */
void
spl_qp_svc_reset_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_qp_mod_t				qp_mod;
	ib_api_status_t			status;
	cl_qlist_t				mad_wr_list;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );

	/*
	 * Wait here until the special QP service is only in use by this
	 * thread (the count of 1 is the reference taken in spl_qp_svc_reset).
	 */
	while( p_spl_qp_svc->in_use_cnt != 1 )
	{
		cl_thread_suspend( 0 );
	}

	/* Change the QP to the RESET state. */
	cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
	qp_mod.req_state = IB_QPS_RESET;

	status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );
	CL_ASSERT( status == IB_SUCCESS );

	/* Return receive MAD elements to the pool; the lock guards recv_queue. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
		 p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
		 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
	{
		p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

		status = ib_put_mad( &p_al_mad->element );
		CL_ASSERT( status == IB_SUCCESS );
	}
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Re-initialize the QP. */
	status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
	CL_ASSERT( status == IB_SUCCESS );

	/* Poll to remove any remaining send completions from the CQ. */
	do
	{
		cl_memclr( &wc, sizeof( ib_wc_t ) );
		p_free_wc = &wc;
		status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );

	} while( status == IB_SUCCESS );

	/* Post receive buffers; the lock also guards send_queue below. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	spl_qp_svc_post_recvs( p_spl_qp_svc );

	/*
	 * Re-queue any outstanding MAD send operations.  The pending sends
	 * are moved to a local list under the lock, then re-queued outside
	 * it to avoid holding the lock across special_qp_queue_mad.
	 */
	cl_qlist_init( &mad_wr_list );
	cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	for( p_list_item = cl_qlist_remove_head( &mad_wr_list );
		 p_list_item != cl_qlist_end( &mad_wr_list );
		 p_list_item = cl_qlist_remove_head( &mad_wr_list ) )
	{
		p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
		special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );
	}

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state == SPL_QP_ERROR )
	{
		/* The QP is ready.  Change the state. */
		p_spl_qp_svc->state = SPL_QP_ACTIVE;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* Re-arm the CQs. */
		status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );
		status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );

		/* Resume send processing. */
		special_qp_resume_sends( p_spl_qp_svc->h_qp );
	}
	else
	{
		/* State changed while resetting; leave the service as-is. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	AL_EXIT( AL_DBG_SMI_CB );
}
2922 \r
2923 \r
2924 \r
/*
 * Special QP alias asynchronous event callback.
 *
 * Asynchronous events on the alias QP are intentionally ignored.
 */
void
spl_qp_alias_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec )
{
	UNUSED_PARAM( p_event_rec );
}
2934 \r
2935 \r
2936 \r
/*
 * Acquire the SMI dispatcher for the given port.
 *
 * Looks up the SMI service for port_guid and returns its MAD dispatcher
 * handle, referenced on behalf of the client.  Returns IB_INVALID_GUID
 * if the port has no active SMI service.
 */
ib_api_status_t
acquire_smi_disp(
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t* const	ph_mad_disp )
{
	CL_ASSERT( gp_spl_qp_mgr );
	return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );
}
2948 \r
2949 \r
2950 \r
/*
 * Acquire the GSI dispatcher for the given port.
 *
 * Looks up the GSI service for port_guid and returns its MAD dispatcher
 * handle, referenced on behalf of the client.  Returns IB_INVALID_GUID
 * if the port has no active GSI service.
 */
ib_api_status_t
acquire_gsi_disp(
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t* const	ph_mad_disp )
{
	CL_ASSERT( gp_spl_qp_mgr );
	return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );
}
2962 \r
2963 \r
2964 \r
2965 /*\r
2966  * Acquire the service dispatcher for the given port.\r
2967  */\r
2968 ib_api_status_t\r
2969 acquire_svc_disp(\r
2970         IN              const   cl_qmap_t* const                        p_svc_map,\r
2971         IN              const   ib_net64_t                                      port_guid,\r
2972                 OUT                     al_mad_disp_handle_t            *ph_mad_disp )\r
2973 {\r
2974         cl_map_item_t*                  p_svc_item;\r
2975         spl_qp_svc_t*                   p_spl_qp_svc;\r
2976 \r
2977         AL_ENTER( AL_DBG_SMI );\r
2978 \r
2979         CL_ASSERT( p_svc_map );\r
2980         CL_ASSERT( gp_spl_qp_mgr );\r
2981 \r
2982         /* Search for the SMI or GSI service for the given port. */\r
2983         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
2984         p_svc_item = cl_qmap_get( p_svc_map, port_guid );\r
2985         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
2986         if( p_svc_item == cl_qmap_end( p_svc_map ) )\r
2987         {\r
2988                 /* The port does not have an active agent. */\r
2989                 AL_EXIT( AL_DBG_SMI );\r
2990                 return IB_INVALID_GUID;\r
2991         }\r
2992 \r
2993         p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );\r
2994 \r
2995         /* Found a match.  Get MAD dispatcher handle. */\r
2996         *ph_mad_disp = p_spl_qp_svc->h_mad_disp;\r
2997 \r
2998         /* Reference the MAD dispatcher on behalf of the client. */\r
2999         ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );\r
3000 \r
3001         AL_EXIT( AL_DBG_SMI );\r
3002         return IB_SUCCESS;\r
3003 }\r
3004 \r
3005 \r
3006 \r
/*
 * Force a poll for CA attribute changes.
 *
 * Invokes the poll-timer callback directly instead of scheduling the
 * timer, avoiding a thread context switch.  Assumes the global special
 * QP manager (gp_spl_qp_mgr) has been initialized — TODO confirm
 * callers only reach this after manager creation.
 */
void
force_smi_poll(
	void )
{
	AL_ENTER( AL_DBG_SMI_CB );

	/*
	 * Stop the poll timer.  Just invoke the timer callback directly to
	 * save the thread context switching.
	 */
	smi_poll_timer_cb( gp_spl_qp_mgr );

	AL_EXIT( AL_DBG_SMI_CB );
}
3024 \r
3025 \r
3026 \r
/*
 * Poll for CA port attribute changes.
 *
 * Timer callback for the special QP manager's poll timer (also invoked
 * directly by force_smi_poll).  Kicks the PnP manager to scan for local
 * HCA changes, replenishes receive buffers on every special QP agent,
 * and restarts the poll timer while any agents remain registered.
 *
 * context must be the global special QP manager (gp_spl_qp_mgr).
 */
void
smi_poll_timer_cb(
	IN				void*						context )
{
	cl_status_t			cl_status;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( context );
	CL_ASSERT( gp_spl_qp_mgr == context );
	UNUSED_PARAM( context );

	/*
	 * Scan for changes on the local HCAs.  Since the PnP manager has its
	 * own thread for processing changes, we kick off that thread in parallel
	 * reposting receive buffers to the SQP agents.
	 */
	pnp_poll();

	/*
	 * To handle the case where force_smi_poll is called at the same time
	 * the timer expires, check if the asynchronous processing item is in
	 * use.  If it is already in use, it means that we're about to poll
	 * anyway, so just ignore this call.
	 *
	 * NOTE(review): the in-use check described above is not visible in
	 * this function; only the lock-protected list walk and timer restart
	 * follow.  Confirm whether the check was removed intentionally.
	 */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );

	/* Perform port processing on the special QP agents. */
	cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,
		gp_spl_qp_mgr );

	/* Determine if there are any special QP agents to poll. */
	if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )
	{
		/* Restart the polling timer. */
		cl_status =
			cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );
		CL_ASSERT( cl_status == CL_SUCCESS );
	}
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

	AL_EXIT( AL_DBG_SMI_CB );
}
3073 \r
3074 \r
3075 \r
3076 /*\r
3077  * Post receive buffers to a special QP.\r
3078  */\r
3079 void\r
3080 smi_post_recvs(\r
3081         IN                              cl_list_item_t* const           p_list_item,\r
3082         IN                              void*                                           context )\r
3083 {\r
3084         al_obj_t*                               p_obj;\r
3085         spl_qp_svc_t*                   p_spl_qp_svc;\r
3086 \r
3087         AL_ENTER( AL_DBG_SMI_CB );\r
3088 \r
3089         CL_ASSERT( p_list_item );\r
3090         UNUSED_PARAM( context );\r
3091 \r
3092         p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
3093         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
3094 \r
3095         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
3096         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
3097         {\r
3098                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3099                 return;\r
3100         }\r
3101 \r
3102         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
3103         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3104 \r
3105         AL_EXIT( AL_DBG_SMI );\r
3106 }\r