d15944c60a693799c1027964e1081d24bbebce6f
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  * Copyright (c) 2006 Voltaire Corporation.  All rights reserved.\r
5  *\r
6  * This software is available to you under the OpenIB.org BSD license\r
7  * below:\r
8  *\r
9  *     Redistribution and use in source and binary forms, with or\r
10  *     without modification, are permitted provided that the following\r
11  *     conditions are met:\r
12  *\r
13  *      - Redistributions of source code must retain the above\r
14  *        copyright notice, this list of conditions and the following\r
15  *        disclaimer.\r
16  *\r
17  *      - Redistributions in binary form must reproduce the above\r
18  *        copyright notice, this list of conditions and the following\r
19  *        disclaimer in the documentation and/or other materials\r
20  *        provided with the distribution.\r
21  *\r
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
29  * SOFTWARE.\r
30  *\r
31  * $Id$\r
32  */\r
33 \r
34 \r
35 #include <iba/ib_al.h>\r
36 #include <complib/cl_timer.h>\r
37 \r
38 #include "ib_common.h"\r
39 #include "al_common.h"\r
40 #include "al_debug.h"\r
41 #if defined(EVENT_TRACING)\r
42 #ifdef offsetof\r
43 #undef offsetof\r
44 #endif\r
45 #include "al_smi.tmh"\r
46 #endif\r
47 #include "al_verbs.h"\r
48 #include "al_mgr.h"\r
49 #include "al_pnp.h"\r
50 #include "al_qp.h"\r
51 #include "al_smi.h"\r
52 #include "al_av.h"\r
53 \r
54 \r
55 extern char                                             node_desc[IB_NODE_DESCRIPTION_SIZE];\r
56 \r
57 #define SMI_POLL_INTERVAL                       20000           /* Milliseconds */\r
58 #define LOCAL_MAD_TIMEOUT                       50                      /* Milliseconds */\r
59 #define DEFAULT_QP0_DEPTH                       256\r
60 #define DEFAULT_QP1_DEPTH                       1024\r
61 \r
62 uint32_t                                g_smi_poll_interval =   SMI_POLL_INTERVAL;\r
63 spl_qp_mgr_t*                   gp_spl_qp_mgr = NULL;\r
64 \r
65 \r
66 /*\r
67  * Function prototypes.\r
68  */\r
69 void\r
70 destroying_spl_qp_mgr(\r
71         IN                              al_obj_t*                                       p_obj );\r
72 \r
73 void\r
74 free_spl_qp_mgr(\r
75         IN                              al_obj_t*                                       p_obj );\r
76 \r
77 ib_api_status_t\r
78 spl_qp0_agent_pnp_cb(\r
79         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
80 \r
81 ib_api_status_t\r
82 spl_qp1_agent_pnp_cb(\r
83         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
84 \r
85 ib_api_status_t\r
86 spl_qp_agent_pnp(\r
87         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
88         IN                              ib_qp_type_t                            qp_type );\r
89 \r
90 ib_api_status_t\r
91 create_spl_qp_svc(\r
92         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
93         IN              const   ib_qp_type_t                            qp_type );\r
94 \r
95 void\r
96 destroying_spl_qp_svc(\r
97         IN                              al_obj_t*                                       p_obj );\r
98 \r
99 void\r
100 free_spl_qp_svc(\r
101         IN                              al_obj_t*                                       p_obj );\r
102 \r
103 void\r
104 spl_qp_svc_lid_change(\r
105         IN                              al_obj_t*                                       p_obj,\r
106         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
107 \r
108 ib_api_status_t\r
109 remote_mad_send(\r
110         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
111         IN                              al_mad_wr_t* const                      p_mad_wr );\r
112 \r
113 static ib_api_status_t\r
114 local_mad_send(\r
115         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
116         IN                              al_mad_wr_t* const                      p_mad_wr );\r
117 \r
118 static ib_api_status_t\r
119 loopback_mad(\r
120         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
121         IN                              al_mad_wr_t* const                      p_mad_wr );\r
122 \r
123 static ib_api_status_t\r
124 process_subn_mad(\r
125         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
126         IN                              al_mad_wr_t* const                      p_mad_wr );\r
127 \r
128 static ib_api_status_t\r
129 fwd_local_mad(\r
130         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
131         IN                              al_mad_wr_t* const                      p_mad_wr );\r
132 \r
133 void\r
134 send_local_mad_cb(\r
135         IN                              cl_async_proc_item_t*           p_item );\r
136 \r
137 void\r
138 spl_qp_send_comp_cb(\r
139         IN              const   ib_cq_handle_t                          h_cq,\r
140         IN                              void                                            *cq_context );\r
141 \r
142 void\r
143 spl_qp_recv_comp_cb(\r
144         IN              const   ib_cq_handle_t                          h_cq,\r
145         IN                              void                                            *cq_context );\r
146 \r
147 void\r
148 spl_qp_comp(\r
149         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
150         IN              const   ib_cq_handle_t                          h_cq,\r
151         IN                              ib_wc_type_t                            wc_type );\r
152 \r
153 ib_api_status_t\r
154 process_mad_recv(\r
155         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
156         IN                              ib_mad_element_t*                       p_mad_element );\r
157 \r
158 mad_route_t\r
159 route_recv_smp(\r
160         IN                              ib_mad_element_t*                       p_mad_element );\r
161 \r
162 mad_route_t\r
163 route_recv_smp_attr(\r
164         IN                              ib_mad_element_t*                       p_mad_element );\r
165 \r
166 mad_route_t\r
167 route_recv_dm_mad(\r
168         IN                              ib_mad_element_t*                       p_mad_element );\r
169 \r
170 mad_route_t\r
171 route_recv_gmp(\r
172         IN                              ib_mad_element_t*                       p_mad_element );\r
173 \r
174 mad_route_t\r
175 route_recv_gmp_attr(\r
176         IN                              ib_mad_element_t*                       p_mad_element );\r
177 \r
178 ib_api_status_t\r
179 forward_sm_trap(\r
180         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
181         IN                              ib_mad_element_t*                       p_mad_element );\r
182 \r
183 ib_api_status_t\r
184 recv_local_mad(\r
185         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
186         IN                              ib_mad_element_t*                       p_mad_request );\r
187 \r
188 void\r
189 spl_qp_alias_send_cb(\r
190         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
191         IN                              void                                            *mad_svc_context,\r
192         IN                              ib_mad_element_t                        *p_mad_element );\r
193 \r
194 void\r
195 spl_qp_alias_recv_cb(\r
196         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
197         IN                              void                                            *mad_svc_context,\r
198         IN                              ib_mad_element_t                        *p_mad_response );\r
199 \r
200 static ib_api_status_t\r
201 spl_qp_svc_post_recvs(\r
202         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
203 \r
204 void\r
205 spl_qp_svc_event_cb(\r
206         IN                              ib_async_event_rec_t            *p_event_rec );\r
207 \r
208 void\r
209 spl_qp_alias_event_cb(\r
210         IN                              ib_async_event_rec_t            *p_event_rec );\r
211 \r
212 void\r
213 spl_qp_svc_reset(\r
214         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
215 \r
216 void\r
217 spl_qp_svc_reset_cb(\r
218         IN                              cl_async_proc_item_t*           p_item );\r
219 \r
220 ib_api_status_t\r
221 acquire_svc_disp(\r
222         IN              const   cl_qmap_t* const                        p_svc_map,\r
223         IN              const   ib_net64_t                                      port_guid,\r
224                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
225 \r
226 void\r
227 smi_poll_timer_cb(\r
228         IN                              void*                                           context );\r
229 \r
230 void\r
231 smi_post_recvs(\r
232         IN                              cl_list_item_t* const           p_list_item,\r
233         IN                              void*                                           context );\r
234 \r
235 #if defined( CL_USE_MUTEX )\r
236 void\r
237 spl_qp_send_async_cb(\r
238         IN                              cl_async_proc_item_t*           p_item );\r
239 \r
240 void\r
241 spl_qp_recv_async_cb(\r
242         IN                              cl_async_proc_item_t*           p_item );\r
243 #endif\r
244 \r
/*
 * Create the special QP manager.
 *
 * Allocates and initializes the single, global special-QP manager
 * (gp_spl_qp_mgr), attaches it to p_parent_obj, starts the SMI polling
 * timer, and registers for port PnP events twice -- once for QP0 (SMI)
 * and once for QP1 (GSI).  Returns IB_SUCCESS or an error status; on
 * any failure the partially-constructed manager is torn down before
 * returning.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_SMI );

	/* The manager is a singleton; creating it twice is a caller bug. */
	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the port-GUID keyed lookup maps (one per special QP type). */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* init_al_obj failed, so pfn_destroy is not usable; free directly. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Reference the special QP manager on behalf of the QP0 ib_reg_pnp
	 * call; it is released by deref_al_obj when the registration is
	 * destroyed (see destroying_spl_qp_mgr).
	 */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the (QP1) ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
364 \r
365 \r
366 \r
367 /*\r
368  * Pre-destroy the special QP manager.\r
369  */\r
370 void\r
371 destroying_spl_qp_mgr(\r
372         IN                              al_obj_t*                                       p_obj )\r
373 {\r
374         ib_api_status_t                 status;\r
375 \r
376         CL_ASSERT( p_obj );\r
377         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
378         UNUSED_PARAM( p_obj );\r
379 \r
380         /* Deregister for port PnP events for QP0. */\r
381         if( gp_spl_qp_mgr->h_qp0_pnp )\r
382         {\r
383                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
384                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
385                 CL_ASSERT( status == IB_SUCCESS );\r
386         }\r
387 \r
388         /* Deregister for port PnP events for QP1. */\r
389         if( gp_spl_qp_mgr->h_qp1_pnp )\r
390         {\r
391                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
392                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
393                 CL_ASSERT( status == IB_SUCCESS );\r
394         }\r
395 \r
396         /* Destroy the SMI polling timer. */\r
397         cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
398 }\r
399 \r
400 \r
401 \r
402 /*\r
403  * Free the special QP manager.\r
404  */\r
405 void\r
406 free_spl_qp_mgr(\r
407         IN                              al_obj_t*                                       p_obj )\r
408 {\r
409         CL_ASSERT( p_obj );\r
410         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
411         UNUSED_PARAM( p_obj );\r
412 \r
413         destroy_al_obj( &gp_spl_qp_mgr->obj );\r
414         cl_free( gp_spl_qp_mgr );\r
415         gp_spl_qp_mgr = NULL;\r
416 }\r
417 \r
418 \r
419 \r
420 /*\r
421  * Special QP0 agent PnP event callback.\r
422  */\r
423 ib_api_status_t\r
424 spl_qp0_agent_pnp_cb(\r
425         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
426 {\r
427         ib_api_status_t status;\r
428         AL_ENTER( AL_DBG_SMI );\r
429 \r
430         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
431 \r
432         AL_EXIT( AL_DBG_SMI );\r
433         return status;\r
434 }\r
435 \r
436 \r
437 \r
438 /*\r
439  * Special QP1 agent PnP event callback.\r
440  */\r
441 ib_api_status_t\r
442 spl_qp1_agent_pnp_cb(\r
443         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
444 {\r
445         ib_api_status_t status;\r
446         AL_ENTER( AL_DBG_SMI );\r
447 \r
448         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
449 \r
450         AL_EXIT( AL_DBG_SMI );\r
451         return status;\r
452 }\r
453 \r
454 \r
455 \r
456 /*\r
457  * Special QP agent PnP event callback.\r
458  */\r
459 ib_api_status_t\r
460 spl_qp_agent_pnp(\r
461         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
462         IN                              ib_qp_type_t                            qp_type )\r
463 {\r
464         ib_api_status_t                 status;\r
465         al_obj_t*                               p_obj;\r
466 \r
467         AL_ENTER( AL_DBG_SMI );\r
468 \r
469         CL_ASSERT( p_pnp_rec );\r
470         p_obj = p_pnp_rec->context;\r
471 \r
472         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI,\r
473                 ("p_pnp_rec->pnp_event = 0x%x (%s)\n",\r
474                 p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );\r
475         /* Dispatch based on the PnP event type. */\r
476         switch( p_pnp_rec->pnp_event )\r
477         {\r
478         case IB_PNP_PORT_ADD:\r
479                 CL_ASSERT( !p_obj );\r
480                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
481                 break;\r
482 \r
483         case IB_PNP_PORT_REMOVE:\r
484                 CL_ASSERT( p_obj );\r
485                 ref_al_obj( p_obj );\r
486                 p_obj->pfn_destroy( p_obj, NULL );\r
487                 status = IB_SUCCESS;\r
488                 break;\r
489 \r
490         case IB_PNP_LID_CHANGE:\r
491                 CL_ASSERT( p_obj );\r
492                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
493                 status = IB_SUCCESS;\r
494                 break;\r
495 \r
496         default:\r
497                 /* All other events are ignored. */\r
498                 status = IB_SUCCESS;\r
499                 break;\r
500         }\r
501 \r
502         AL_EXIT( AL_DBG_SMI );\r
503         return status;\r
504 }\r
505 \r
506 \r
507 \r
508 /*\r
509  * Create a special QP service.\r
510  */\r
511 ib_api_status_t\r
512 create_spl_qp_svc(\r
513         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
514         IN              const   ib_qp_type_t                            qp_type )\r
515 {\r
516         cl_status_t                             cl_status;\r
517         spl_qp_svc_t*                   p_spl_qp_svc;\r
518         ib_ca_handle_t                  h_ca;\r
519         ib_cq_create_t                  cq_create;\r
520         ib_qp_create_t                  qp_create;\r
521         ib_qp_attr_t                    qp_attr;\r
522         ib_mad_svc_t                    mad_svc;\r
523         ib_api_status_t                 status;\r
524 \r
525         AL_ENTER( AL_DBG_SMI );\r
526 \r
527         CL_ASSERT( p_pnp_rec );\r
528 \r
529         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
530         {\r
531                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
532                 return IB_INVALID_PARAMETER;\r
533         }\r
534 \r
535         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
536         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
537         CL_ASSERT( p_pnp_rec->p_port_attr );\r
538 \r
539         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
540         if( !p_spl_qp_svc )\r
541         {\r
542                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
543                         ("IB_INSUFFICIENT_MEMORY\n") );\r
544                 return IB_INSUFFICIENT_MEMORY;\r
545         }\r
546 \r
547         /* Tie the special QP service to the port by setting the port number. */\r
548         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
549         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
550         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
551 \r
552         /* Initialize the send and receive queues. */\r
553         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
554         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
555 \r
556 #if defined( CL_USE_MUTEX )\r
557         /* Initialize async callbacks and flags for send/receive processing. */\r
558         p_spl_qp_svc->send_async_queued = FALSE;\r
559         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
560         p_spl_qp_svc->recv_async_queued = FALSE;\r
561         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
562 #endif\r
563 \r
564         /* Initialize the async callback function to process local sends. */\r
565         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
566 \r
567         /* Initialize the async callback function to reset the QP on error. */\r
568         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
569 \r
570         /* Construct the special QP service object. */\r
571         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
572 \r
573         /* Initialize the special QP service object. */\r
574         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
575                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
576         if( status != IB_SUCCESS )\r
577         {\r
578                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
579                 return status;\r
580         }\r
581 \r
582         /* Attach the special QP service to the parent object. */\r
583         status = attach_al_obj(\r
584                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
585         if( status != IB_SUCCESS )\r
586         {\r
587                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
588                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
589                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
590                 return status;\r
591         }\r
592 \r
593         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
594         CL_ASSERT( h_ca );\r
595         if( !h_ca )\r
596         {\r
597                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
598                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
599                 return IB_INVALID_GUID;\r
600         }\r
601 \r
602         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
603 \r
604         /* Determine the maximum queue depth of the QP and CQs. */\r
605         p_spl_qp_svc->max_qp_depth =\r
606                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
607                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
608                 p_pnp_rec->p_ca_attr->max_wrs :\r
609                 p_pnp_rec->p_ca_attr->max_cqes;\r
610 \r
611         /* Compare this maximum to the default special queue depth. */\r
612         if( ( qp_type == IB_QPT_QP0 ) &&\r
613                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
614                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
615         if( ( qp_type == IB_QPT_QP1 ) &&\r
616                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
617                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
618 \r
619         /* Create the send CQ. */\r
620         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
621         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
622         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
623 \r
624         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
625                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
626 \r
627         if( status != IB_SUCCESS )\r
628         {\r
629                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
630                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
631                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
632                 return status;\r
633         }\r
634 \r
635         /* Reference the special QP service on behalf of ib_create_cq. */\r
636         ref_al_obj( &p_spl_qp_svc->obj );\r
637 \r
638         /* Check the result of the creation request. */\r
639         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
640         {\r
641                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
642                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
643                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
644                 return IB_INSUFFICIENT_RESOURCES;\r
645         }\r
646 \r
647         /* Create the receive CQ. */\r
648         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
649         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
650         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
651 \r
652         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
653                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
654 \r
655         if( status != IB_SUCCESS )\r
656         {\r
657                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
658                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
659                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
660                 return status;\r
661         }\r
662 \r
663         /* Reference the special QP service on behalf of ib_create_cq. */\r
664         ref_al_obj( &p_spl_qp_svc->obj );\r
665 \r
666         /* Check the result of the creation request. */\r
667         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
668         {\r
669                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
670                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
671                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
672                 return IB_INSUFFICIENT_RESOURCES;\r
673         }\r
674 \r
675         /* Create the special QP. */\r
676         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
677         qp_create.qp_type = qp_type;\r
678         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
679         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
680         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
681         qp_create.rq_sge = 1;\r
682         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
683         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
684         qp_create.sq_signaled = TRUE;\r
685 \r
686         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
687                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
688                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
689 \r
690         if( status != IB_SUCCESS )\r
691         {\r
692                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
693                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
694                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
695                 return status;\r
696         }\r
697 \r
698         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
699         ref_al_obj( &p_spl_qp_svc->obj );\r
700 \r
701         /* Check the result of the creation request. */\r
702         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
703         if( status != IB_SUCCESS )\r
704         {\r
705                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
706                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
707                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
708                 return status;\r
709         }\r
710 \r
711         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
712                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
713                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
714         {\r
715                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
716                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
717                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
718                 return IB_INSUFFICIENT_RESOURCES;\r
719         }\r
720 \r
721         /* Initialize the QP for use. */\r
722         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
723         if( status != IB_SUCCESS )\r
724         {\r
725                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
726                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
727                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
728                 return status;\r
729         }\r
730 \r
731         /* Post receive buffers. */\r
732         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
733         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
734         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
735         if( status != IB_SUCCESS )\r
736         {\r
737                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
738                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
739                         ("spl_qp_svc_post_recvs failed, %s\n",\r
740                         ib_get_err_str( status ) ) );\r
741                 return status;\r
742         }\r
743 \r
744         /* Create the MAD dispatcher. */\r
745         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
746                 &p_spl_qp_svc->h_mad_disp );\r
747         if( status != IB_SUCCESS )\r
748         {\r
749                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
750                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
751                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
752                 return status;\r
753         }\r
754 \r
755         /*\r
756          * Add this service to the special QP manager lookup lists.\r
757          * The service must be added to allow the creation of a QP alias.\r
758          */\r
759         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
760         if( qp_type == IB_QPT_QP0 )\r
761         {\r
762                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
763                         &p_spl_qp_svc->map_item );\r
764         }\r
765         else\r
766         {\r
767                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
768                         &p_spl_qp_svc->map_item );\r
769         }\r
770         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
771 \r
772         /*\r
773          * If the CA does not support HW agents, create a QP alias and register\r
774          * a MAD service for sending responses from the local MAD interface.\r
775          */\r
776         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
777         {\r
778                 /* Create a QP alias. */\r
779                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
780                 qp_create.qp_type =\r
781                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
782                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
783                 qp_create.sq_sge                = 1;\r
784                 qp_create.sq_signaled   = TRUE;\r
785 \r
786                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
787                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
788                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
789                         &p_spl_qp_svc->h_qp_alias );\r
790 \r
791                 if (status != IB_SUCCESS)\r
792                 {\r
793                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
794                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
795                                 ("ib_get_spl_qp alias failed, %s\n",\r
796                                 ib_get_err_str( status ) ) );\r
797                         return status;\r
798                 }\r
799 \r
800                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
801                 ref_al_obj( &p_spl_qp_svc->obj );\r
802 \r
803                 /* Register a MAD service for sends. */\r
804                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
805                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
806                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
807                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
808 \r
809                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
810                         &p_spl_qp_svc->h_mad_svc );\r
811 \r
812                 if( status != IB_SUCCESS )\r
813                 {\r
814                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
815                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
816                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
817                         return status;\r
818                 }\r
819         }\r
820 \r
821         /* Set the context of the PnP event to this child object. */\r
822         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
823 \r
824         /* The QP is ready.  Change the state. */\r
825         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
826 \r
827         /* Force a completion callback to rearm the CQs. */\r
828         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
829         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
830 \r
831         /* Start the polling thread timer. */\r
832         if( g_smi_poll_interval )\r
833         {\r
834                 cl_status =\r
835                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
836 \r
837                 if( cl_status != CL_SUCCESS )\r
838                 {\r
839                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
840                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
841                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
842                         return ib_convert_cl_status( cl_status );\r
843                 }\r
844         }\r
845 \r
846         /* Release the reference taken in init_al_obj. */\r
847         deref_al_obj( &p_spl_qp_svc->obj );\r
848 \r
849         AL_EXIT( AL_DBG_SMI );\r
850         return IB_SUCCESS;\r
851 }\r
852 \r
853 \r
854 \r
855 /*\r
856  * Return a work completion to the MAD dispatcher for the specified MAD.\r
857  */\r
858 static void\r
859 __complete_send_mad(\r
860         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
861         IN                              al_mad_wr_t* const                      p_mad_wr,\r
862         IN              const   ib_wc_status_t                          wc_status )\r
863 {\r
864         ib_wc_t                 wc;\r
865 \r
866         /* Construct a send work completion. */\r
867         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
868         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
869         wc.wc_type      = IB_WC_SEND;\r
870         wc.status       = wc_status;\r
871 \r
872         /* Set the send size if we were successful with the send. */\r
873         if( wc_status == IB_WCS_SUCCESS )\r
874                 wc.length = MAD_BLOCK_SIZE;\r
875 \r
876         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
877 }\r
878 \r
879 \r
880 \r
881 /*\r
882  * Pre-destroy a special QP service.\r
883  */\r
void
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_wr_t*			p_mad_wr;

	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Change the state to prevent processing new send requests. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->state = SPL_QP_DESTROYING;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/*
	 * Wait here until the special QP service is no longer in use.
	 * NOTE(review): this busy-waits (yielding each iteration) until
	 * in_use_cnt drops to zero; the counter is decremented by the
	 * asynchronous local-MAD processing path.
	 */
	while( p_spl_qp_svc->in_use_cnt )
	{
		cl_thread_suspend( 0 );
	}

	/* Destroy the special QP. */
	if( p_spl_qp_svc->h_qp )
	{
		/* If present, remove the special QP service from the tracking map. */
		cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
		if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
		{
			cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
		}
		else
		{
			cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
		}
		cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

		/* deref_al_obj runs as the QP's destroy callback once teardown ends. */
		status = ib_destroy_qp( p_spl_qp_svc->h_qp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

		/* Complete any outstanding MAD sends operations as "flushed". */
		for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
			 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
			 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
		{
			/*
			 * Drop the lock while notifying the dispatcher; the entry was
			 * already unlinked from send_queue, so the list stays consistent.
			 */
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );
			p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_WR_FLUSHED_ERR );
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		}

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		/* Receive MAD elements are returned to the pool by the free routine. */
	}

	/* Destroy the special QP alias and CQs.  Each destroy callback drops a
	 * reference on this object via deref_al_obj. */
	if( p_spl_qp_svc->h_qp_alias )
	{
		status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_send_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_recv_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	AL_EXIT( AL_DBG_SMI );
}
969 \r
970 \r
971 \r
972 /*\r
973  * Free a special QP service.\r
974  */\r
975 void\r
976 free_spl_qp_svc(\r
977         IN                              al_obj_t*                                       p_obj )\r
978 {\r
979         spl_qp_svc_t*                   p_spl_qp_svc;\r
980         cl_list_item_t*                 p_list_item;\r
981         al_mad_element_t*               p_al_mad;\r
982         ib_api_status_t                 status;\r
983 \r
984         AL_ENTER( AL_DBG_SMI );\r
985 \r
986         CL_ASSERT( p_obj );\r
987         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
988 \r
989         /* Dereference the CA. */\r
990         if( p_spl_qp_svc->obj.p_ci_ca )\r
991                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
992 \r
993         /* Return receive MAD elements to the pool. */\r
994         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
995                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
996                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
997         {\r
998                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
999 \r
1000                 status = ib_put_mad( &p_al_mad->element );\r
1001                 CL_ASSERT( status == IB_SUCCESS );\r
1002         }\r
1003 \r
1004         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
1005 \r
1006         destroy_al_obj( &p_spl_qp_svc->obj );\r
1007         cl_free( p_spl_qp_svc );\r
1008 \r
1009         AL_EXIT( AL_DBG_SMI );\r
1010 }\r
1011 \r
1012 \r
1013 \r
1014 /*\r
1015  * Update the base LID of a special QP service.\r
1016  */\r
1017 void\r
1018 spl_qp_svc_lid_change(\r
1019         IN                              al_obj_t*                                       p_obj,\r
1020         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1021 {\r
1022         spl_qp_svc_t*                   p_spl_qp_svc;\r
1023 \r
1024         AL_ENTER( AL_DBG_SMI );\r
1025 \r
1026         CL_ASSERT( p_obj );\r
1027         CL_ASSERT( p_pnp_rec );\r
1028         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1029 \r
1030         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1031 \r
1032         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1033         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1034 \r
1035         AL_EXIT( AL_DBG_SMI );\r
1036 }\r
1037 \r
1038 \r
1039 \r
1040 /*\r
1041  * Route a send work request.\r
1042  */\r
mad_route_t
route_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_send_wr_t* const			p_send_wr )
{
	al_mad_wr_t*			p_mad_wr;
	al_mad_send_t*			p_mad_send;
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	ib_av_handle_t			h_av;
	mad_route_t				route;
	boolean_t				local, loopback, discard;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_send_wr );

	/* Initialize pointers to the MAD work request and the MAD itself. */
	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Check if the CA has a local MAD interface. */
	local = loopback = discard = FALSE;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * If the MAD is a locally addressed Subnet Management, Performance
		 * Management, or Connection Management datagram, process the work
		 * request locally.
		 */
		h_av = p_send_wr->dgrm.ud.h_av;
		switch( p_mad->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the originator of the response.  Discard
				 * if the hop count or pointer is zero, an intermediate hop,
				 * out of bounds hop, or if the first port of the directed
				 * route return path is not this port.
				 */
				if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt or hop ptr set to 0...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt != (hop ptr - 1)...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt > max hops...discarding\n") );
					discard = TRUE;
				}
				else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
						 ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
							p_spl_qp_svc->port_num ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("return path[hop ptr - 1] != port num...discarding\n") );
					discard = TRUE;
				}
			}
			else
			{
				/* The SMP is a request. */
				if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
					( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
				{
					discard = TRUE;
				}
				else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
				{
					/* Self Addressed: Sent locally, routed locally. */
					local = TRUE;
					discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
							  ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
				{
					/* End of Path: Sent remotely, routed locally. */
					local = TRUE;
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_ptr	 == 0 ) )
				{
					/* Beginning of Path: Sent locally, routed remotely. */
					if( p_smp->dr_slid == IB_LID_PERMISSIVE )
					{
						discard =
							( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
							  p_spl_qp_svc->port_num );
					}
				}
				else
				{
					/* Intermediate hop. */
					discard = TRUE;
				}
			}
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);
			break;

		case IB_MCLASS_SUBN_LID:
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

			/* Fall through to check for a local MAD. */

		case IB_MCLASS_PERF:
		case IB_MCLASS_BM:
			/* Local if the AV's DLID equals this port's LID (base | path bits). */
			local = ( h_av &&
				( h_av->av_attr.dlid ==
				( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
			{
				local = ( h_av &&
					( h_av->av_attr.dlid ==
					( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			}
			break;
		}
	}

	/*
	 * Precedence: an explicit IB_SEND_OPT_LOCAL request or a local
	 * destination selects local routing; loopback further refines a
	 * local route; discard overrides everything else.
	 */
	route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
		ROUTE_LOCAL : ROUTE_REMOTE;
	if( local ) route = ROUTE_LOCAL;
	if( loopback && local ) route = ROUTE_LOOPBACK;
	if( discard ) route = ROUTE_DISCARD;

	AL_EXIT( AL_DBG_SMI );
	return route;
}
1192 \r
1193 \r
1194 \r
1195 /*\r
1196  * Send a work request on the special QP.\r
1197  */\r
ib_api_status_t
spl_qp_svc_send(
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_send_wr_t* const			p_send_wr )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	al_mad_wr_t*			p_mad_wr;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( h_qp );
	CL_ASSERT( p_send_wr );

	/* Get the special QP service from the QP object's context. */
	p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, p_send_wr );

	/*
	 * Check the QP state and guard against error handling.  Also,
	 * to maintain proper order of work completions, delay processing
	 * a local MAD until any remote MAD work requests have completed,
	 * and delay processing a remote MAD until local MAD work requests
	 * have completed.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
		(is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
		( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
			p_spl_qp_svc->max_qp_depth ) )
	{
		/*
		 * Return busy status.
		 * The special QP will resume sends at this point.
		 */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		AL_EXIT( AL_DBG_SMI );
		return IB_RESOURCE_BUSY;
	}

	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

	if( is_local( route ) )
	{
		/* Save the local MAD work request for processing. */
		p_spl_qp_svc->local_mad_wr = p_mad_wr;

		/* Flag the service as in use by the asynchronous processing thread. */
		cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

		/* Drop the lock before local processing; state is already recorded. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		status = local_mad_send( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/*
		 * Process a remote MAD send work request.  Note that
		 * remote_mad_send expects to be called with the service lock held.
		 */
		status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1269 \r
1270 \r
1271 \r
1272 /*\r
1273  * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.\r
1274  */\r
ib_api_status_t
remote_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD, viewed as an SMP header. */
	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	/* Perform outbound MAD processing. */

	/* Adjust directed route SMPs as required by IBA. */
	if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		if( ib_smp_is_response( p_smp ) )
		{
			if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				p_smp->hop_ptr--;
		}
		else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
		{
			/*
			 * Only update the pointer if the hw_agent is not implemented.
			 * Fujitsu implements SMI in hardware, so the following has to
			 * be passed down to the hardware SMI.
			 */
			ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
			if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
				p_smp->hop_ptr++;
			ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
		}
	}

	/* Always generate send completions. */
	p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;

	/* Queue the MAD work request on the service tracking queue. */
	cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

	status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

	if( status != IB_SUCCESS )
	{
		/* The post failed: untrack the request. */
		cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

		/* Reset directed route SMPs as required by IBA, undoing the
		 * hop pointer adjustment made above. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
		{
			if( ib_smp_is_response( p_smp ) )
			{
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
					p_smp->hop_ptr++;
			}
			else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
			{
				/* Only update if the hw_agent is not implemented. */
				ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
				if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
					p_smp->hop_ptr--;
				ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
			}
		}
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1349 \r
1350 \r
1351 /*\r
1352  * Handle a MAD destined for the local CA, using cached data\r
1353  * as much as possible.\r
1354  */\r
static ib_api_status_t
local_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	mad_route_t				route;
	ib_api_status_t			status = IB_SUCCESS;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

	/* Check if this MAD should be discarded. */
	if( is_discard( route ) )
	{
		/* Deliver a "work completion" to the dispatcher. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		status = IB_INVALID_SETTING;
	}
	else if( is_loopback( route ) )
	{
		/* Loopback local SM to SM "heartbeat" messages. */
		status = loopback_mad( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
		case IB_MCLASS_SUBN_LID:
			/* Do not use the cache, in order to force an M_Key check. */
			//status = process_subn_mad( p_spl_qp_svc, p_mad_wr );
			status = IB_NOT_DONE;
			break;

		default:
			status = IB_NOT_DONE;
		}
	}

	if( status == IB_NOT_DONE )
	{
		/* Queue an asynchronous processing item to process the local MAD. */
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
	}
	else
	{
		/*
		 * Clear the local MAD pointer to allow processing of other MADs.
		 * This is done after polling for attribute changes to ensure that
		 * subsequent MADs pick up any changes performed by this one.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		p_spl_qp_svc->local_mad_wr = NULL;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* No longer in use by the asynchronous processing thread. */
		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

		/* Special QP operations will resume by unwinding. */
	}

	/*
	 * NOTE(review): IB_SUCCESS is returned unconditionally, even when the
	 * local status above indicates an error.  The discard path already
	 * delivered an error completion to the dispatcher; confirm loopback
	 * failures are similarly surfaced elsewhere.
	 */
	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
1425 \r
1426 \r
1427 static ib_api_status_t\r
1428 get_resp_mad(\r
1429         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1430         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1431                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1432 {\r
1433         ib_api_status_t                 status;\r
1434 \r
1435         AL_ENTER( AL_DBG_SMI );\r
1436 \r
1437         CL_ASSERT( p_spl_qp_svc );\r
1438         CL_ASSERT( p_mad_wr );\r
1439         CL_ASSERT( pp_mad_resp );\r
1440 \r
1441         /* Get a MAD element from the pool for the response. */\r
1442         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1443                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1444         if( status != IB_SUCCESS )\r
1445         {\r
1446                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1447                         IB_WCS_LOCAL_OP_ERR );\r
1448         }\r
1449 \r
1450         AL_EXIT( AL_DBG_SMI );\r
1451         return status;\r
1452 }\r
1453 \r
1454 \r
1455 static ib_api_status_t\r
1456 complete_local_mad(\r
1457         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1458         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1459         IN                              ib_mad_element_t* const         p_mad_resp )\r
1460 {\r
1461         ib_api_status_t                 status;\r
1462 \r
1463         AL_ENTER( AL_DBG_SMI );\r
1464 \r
1465         CL_ASSERT( p_spl_qp_svc );\r
1466         CL_ASSERT( p_mad_wr );\r
1467         CL_ASSERT( p_mad_resp );\r
1468 \r
1469         /* Construct the receive MAD element. */\r
1470         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1471         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1472         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1473         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1474         {\r
1475                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1476                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1477         }\r
1478 \r
1479         /*\r
1480          * Hand the receive MAD element to the dispatcher before completing\r
1481          * the send.  This guarantees that the send request cannot time out.\r
1482          */\r
1483         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1484 \r
1485         /* Forward the send work completion to the dispatcher. */\r
1486         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1487 \r
1488         AL_EXIT( AL_DBG_SMI );\r
1489         return status;\r
1490 }\r
1491 \r
1492 \r
1493 static ib_api_status_t\r
1494 loopback_mad(\r
1495         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1496         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1497 {\r
1498         ib_mad_t                                *p_mad;\r
1499         ib_mad_element_t                *p_mad_resp;\r
1500         ib_api_status_t                 status;\r
1501 \r
1502         AL_ENTER( AL_DBG_SMI );\r
1503 \r
1504         CL_ASSERT( p_spl_qp_svc );\r
1505         CL_ASSERT( p_mad_wr );\r
1506 \r
1507         /* Get a MAD element from the pool for the response. */\r
1508         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1509         if( status == IB_SUCCESS )\r
1510         {\r
1511                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1512                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1513 \r
1514                 /* Simulate a send/receive between local managers. */\r
1515                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1516 \r
1517                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1518         }\r
1519 \r
1520         AL_EXIT( AL_DBG_SMI );\r
1521         return status;\r
1522 }\r
1523 \r
1524 \r
/*
 * Respond locally to a SubnGet(NodeInfo) SMP using cached CA/port
 * attributes, avoiding a round trip to the hardware.
 */
static ib_api_status_t
process_node_info(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeInfo is a GET-only attribute; fail any other method. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request header, then convert it into a GetResp. */
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses must have the direction bit set. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		/* port_num is 1-based; the attribute array is 0-based. */
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		/* TODO: Get some unique identifier for the system */
		p_node_info->sys_guid = p_ca_attr->ca_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		/* Low 24 bits carry the vendor ID; top byte is the port number. */
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		/* Loop the response back as a simulated receive. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1598 \r
1599 \r
/*
 * Respond locally to a SubnGet(NodeDescription) SMP with this host's
 * machine name.
 */
static ib_api_status_t
process_node_desc(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeDescription is a GET-only attribute; fail any other method. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Echo the request header, then convert it into a GetResp. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method =
			(IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses must have the direction bit set. */
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, 
			node_desc, sizeof(node_desc) );

		/* Loop the response back as a simulated receive. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1646 \r
1647 \r
1648 /*\r
1649  * Process subnet administration MADs using cached data if possible.\r
1650  */\r
1651 static ib_api_status_t\r
1652 process_subn_mad(\r
1653         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1654         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1655 {\r
1656         ib_api_status_t         status;\r
1657         ib_smp_t                        *p_smp;\r
1658 \r
1659         AL_ENTER( AL_DBG_SMI );\r
1660 \r
1661         CL_ASSERT( p_spl_qp_svc );\r
1662         CL_ASSERT( p_mad_wr );\r
1663 \r
1664         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1665 \r
1666         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1667                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
1668 \r
1669         switch( p_smp->attr_id )\r
1670         {\r
1671         case IB_MAD_ATTR_NODE_INFO:\r
1672                 status = process_node_info( p_spl_qp_svc, p_mad_wr );\r
1673                 break;\r
1674 \r
1675         case IB_MAD_ATTR_NODE_DESC:\r
1676                 status = process_node_desc( p_spl_qp_svc, p_mad_wr );\r
1677                 break;\r
1678 \r
1679         default:\r
1680                 status = IB_NOT_DONE;\r
1681                 break;\r
1682         }\r
1683 \r
1684         AL_EXIT( AL_DBG_SMI );\r
1685         return status;\r
1686 }\r
1687 \r
1688 \r
1689 /*\r
1690  * Process a local MAD send work request.\r
1691  */\r
1692 ib_api_status_t\r
1693 fwd_local_mad(\r
1694         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1695         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1696 {\r
1697         ib_mad_t*                               p_mad;\r
1698         ib_smp_t*                               p_smp;\r
1699         al_mad_send_t*                  p_mad_send;\r
1700         ib_mad_element_t*               p_mad_response;\r
1701         ib_mad_t*                               p_mad_response_buf;\r
1702         ib_api_status_t                 status = IB_SUCCESS;\r
1703         boolean_t                               smp_is_set;\r
1704 \r
1705         AL_ENTER( AL_DBG_SMI );\r
1706 \r
1707         CL_ASSERT( p_spl_qp_svc );\r
1708         CL_ASSERT( p_mad_wr );\r
1709 \r
1710         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1711         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1712         p_smp = (ib_smp_t*)p_mad;\r
1713 \r
1714         smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);\r
1715 \r
1716         /* Get a MAD element from the pool for the response. */\r
1717         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1718 //*** Commented code to work-around ib_local_mad() requiring a response MAD\r
1719 //*** as input.  Remove comments once the ib_local_mad() implementation allows\r
1720 //*** for a NULL response MAD, when one is not expected.\r
1721 //*** Note that an attempt to route an invalid response MAD in this case\r
1722 //*** will fail harmlessly.\r
1723 //***   if( p_mad_send->p_send_mad->resp_expected )\r
1724 //***   {\r
1725                 status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );\r
1726                 if( status != IB_SUCCESS )\r
1727                 {\r
1728                         AL_EXIT( AL_DBG_SMI );\r
1729                         return status;\r
1730                 }\r
1731                 p_mad_response_buf = p_mad_response->p_mad_buf;\r
1732 //***   }\r
1733 //***   else\r
1734 //***   {\r
1735 //***           p_mad_response_buf = NULL;\r
1736 //***   }\r
1737 \r
1738         /* Adjust directed route SMPs as required by IBA. */\r
1739         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1740         {\r
1741                 CL_ASSERT( !ib_smp_is_response( p_smp ) );\r
1742 \r
1743                 /*\r
1744                  * If this was a self addressed, directed route SMP, increment\r
1745                  * the hop pointer in the request before delivery as required\r
1746                  * by IBA.  Otherwise, adjustment for remote requests occurs\r
1747                  * during inbound processing.\r
1748                  */\r
1749                 if( p_smp->hop_count == 0 )\r
1750                         p_smp->hop_ptr++;\r
1751         }\r
1752 \r
1753         /* Forward the locally addressed MAD to the CA interface. */\r
1754         status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
1755                 p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );\r
1756 \r
1757         /* Reset directed route SMPs as required by IBA. */\r
1758         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1759         {\r
1760                 /*\r
1761                  * If this was a self addressed, directed route SMP, decrement\r
1762                  * the hop pointer in the response before delivery as required\r
1763                  * by IBA.  Otherwise, adjustment for remote responses occurs\r
1764                  * during outbound processing.\r
1765                  */\r
1766                 if( p_smp->hop_count == 0 )\r
1767                 {\r
1768                         /* Adjust the request SMP. */\r
1769                         p_smp->hop_ptr--;\r
1770 \r
1771                         /* Adjust the response SMP. */\r
1772                         if( p_mad_response_buf )\r
1773                         {\r
1774                                 p_smp = (ib_smp_t*)p_mad_response_buf;\r
1775                                 p_smp->hop_ptr--;\r
1776                         }\r
1777                 }\r
1778         }\r
1779 \r
1780         if( status != IB_SUCCESS )\r
1781         {\r
1782                 if( p_mad_response )\r
1783                         ib_put_mad( p_mad_response );\r
1784 \r
1785                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1786                         IB_WCS_LOCAL_OP_ERR );\r
1787                 AL_EXIT( AL_DBG_SMI );\r
1788                 return status;\r
1789         }\r
1790 \r
1791         /* Check the completion status of this simulated send. */\r
1792         if( p_mad_response_buf )\r
1793         {\r
1794                 /*\r
1795                  * The SMI is uses PnP polling to refresh the base_lid and lmc.\r
1796                  * Polling takes time, so we update the values here to prevent\r
1797                  * the failure of LID routed MADs sent immediately following this\r
1798                  * assignment.  Check the response to see if the port info was set.\r
1799                  */\r
1800                 if( smp_is_set )\r
1801                 {\r
1802                         ib_port_info_t*         p_port_info = NULL;\r
1803 \r
1804                         switch( p_mad_response_buf->mgmt_class )\r
1805                         {\r
1806                         case IB_MCLASS_SUBN_DIR:\r
1807                                 if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1808                                         ( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )\r
1809                                 {\r
1810                                         p_port_info =\r
1811                                                 (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1812                                 }\r
1813                                 break;\r
1814 \r
1815                         case IB_MCLASS_SUBN_LID:\r
1816                                 if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1817                                         ( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )\r
1818                                 {\r
1819                                         p_port_info =\r
1820                                                 (ib_port_info_t*)( p_mad_response_buf + 1 );\r
1821                                 }\r
1822                                 break;\r
1823 \r
1824                         default:\r
1825                                 break;\r
1826                         }\r
1827 \r
1828                         if( p_port_info )\r
1829                         {\r
1830                                 p_spl_qp_svc->base_lid = p_port_info->base_lid;\r
1831                                 p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );\r
1832                                 if (p_port_info->subnet_timeout & 0x80)\r
1833                                 {\r
1834                                         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
1835                                                 ("Client reregister event, setting sm_lid to 0.\n"));\r
1836                                         ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1837                                         p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->\r
1838                                                 p_port_attr->sm_lid= 0;\r
1839                                         ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1840                                 }\r
1841                         }\r
1842                 }\r
1843         }\r
1844 \r
1845         status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );\r
1846 \r
1847         /* If the SMP was a Get, no need to trigger a PnP poll. */\r
1848         if( status == IB_SUCCESS && !smp_is_set )\r
1849                 status = IB_NOT_DONE;\r
1850 \r
1851         AL_EXIT( AL_DBG_SMI );\r
1852         return status;\r
1853 }\r
1854 \r
1855 \r
1856 \r
1857 /*\r
1858  * Asynchronous processing thread callback to send a local MAD.\r
1859  */\r
1860 void\r
1861 send_local_mad_cb(\r
1862         IN                              cl_async_proc_item_t*           p_item )\r
1863 {\r
1864         spl_qp_svc_t*                   p_spl_qp_svc;\r
1865         ib_api_status_t                 status;\r
1866 \r
1867         AL_ENTER( AL_DBG_SMI );\r
1868 \r
1869         CL_ASSERT( p_item );\r
1870         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
1871 \r
1872         /* Process a local MAD send work request. */\r
1873         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
1874         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
1875 \r
1876         /*\r
1877          * If we successfully processed a local MAD, which could have changed\r
1878          * something (e.g. the LID) on the HCA.  Scan for changes.\r
1879          */\r
1880         if( status == IB_SUCCESS )\r
1881                 pnp_poll();\r
1882 \r
1883         /*\r
1884          * Clear the local MAD pointer to allow processing of other MADs.\r
1885          * This is done after polling for attribute changes to ensure that\r
1886          * subsequent MADs pick up any changes performed by this one.\r
1887          */\r
1888         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1889         p_spl_qp_svc->local_mad_wr = NULL;\r
1890         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1891 \r
1892         /* Continue processing any queued MADs on the QP. */\r
1893         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1894 \r
1895         /* No longer in use by the asynchronous processing thread. */\r
1896         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1897 \r
1898         AL_EXIT( AL_DBG_SMI );\r
1899 }\r
1900 \r
1901 \r
1902 \r
1903 /*\r
1904  * Special QP send completion callback.\r
1905  */\r
1906 void\r
1907 spl_qp_send_comp_cb(\r
1908         IN              const   ib_cq_handle_t                          h_cq,\r
1909         IN                              void*                                           cq_context )\r
1910 {\r
1911         spl_qp_svc_t*                   p_spl_qp_svc;\r
1912 \r
1913         AL_ENTER( AL_DBG_SMI );\r
1914 \r
1915         CL_ASSERT( cq_context );\r
1916         p_spl_qp_svc = cq_context;\r
1917 \r
1918 #if defined( CL_USE_MUTEX )\r
1919 \r
1920         /* Queue an asynchronous processing item to process sends. */\r
1921         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1922         if( !p_spl_qp_svc->send_async_queued )\r
1923         {\r
1924                 p_spl_qp_svc->send_async_queued = TRUE;\r
1925                 ref_al_obj( &p_spl_qp_svc->obj );\r
1926                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
1927         }\r
1928         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1929 \r
1930 #else\r
1931 \r
1932         /* Invoke the callback directly. */\r
1933         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
1934         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
1935 \r
1936         /* Continue processing any queued MADs on the QP. */\r
1937         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1938 \r
1939 #endif\r
1940 \r
1941         AL_EXIT( AL_DBG_SMI );\r
1942 }\r
1943 \r
1944 \r
1945 \r
#if defined( CL_USE_MUTEX )
/*
 * Deferred send-completion processing (mutex build).  Drains the send
 * CQ, resumes queued sends, and drops the reference taken at queue time.
 */
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;
	ib_api_status_t			resume_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Allow another asynchronous send item to be queued. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain the send CQ. */
	spl_qp_comp( p_svc, p_svc->h_send_cq, IB_WC_SEND );

	/* Continue processing any queued MADs on the QP. */
	resume_status = special_qp_resume_sends( p_svc->h_qp );
	CL_ASSERT( resume_status == IB_SUCCESS );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
1975 \r
1976 \r
1977 \r
1978 /*\r
1979  * Special QP receive completion callback.\r
1980  */\r
1981 void\r
1982 spl_qp_recv_comp_cb(\r
1983         IN              const   ib_cq_handle_t                          h_cq,\r
1984         IN                              void*                                           cq_context )\r
1985 {\r
1986         spl_qp_svc_t*                   p_spl_qp_svc;\r
1987 \r
1988         AL_ENTER( AL_DBG_SMI );\r
1989 \r
1990         CL_ASSERT( cq_context );\r
1991         p_spl_qp_svc = cq_context;\r
1992 \r
1993 #if defined( CL_USE_MUTEX )\r
1994 \r
1995         /* Queue an asynchronous processing item to process receives. */\r
1996         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1997         if( !p_spl_qp_svc->recv_async_queued )\r
1998         {\r
1999                 p_spl_qp_svc->recv_async_queued = TRUE;\r
2000                 ref_al_obj( &p_spl_qp_svc->obj );\r
2001                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
2002         }\r
2003         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2004 \r
2005 #else\r
2006 \r
2007         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
2008         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
2009 \r
2010 #endif\r
2011 \r
2012         AL_EXIT( AL_DBG_SMI );\r
2013 }\r
2014 \r
2015 \r
2016 \r
#if defined( CL_USE_MUTEX )
/*
 * Deferred receive-completion processing (mutex build).  Drains the
 * receive CQ and drops the reference taken at queue time.
 */
void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Allow another asynchronous receive item to be queued. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain the receive CQ. */
	spl_qp_comp( p_svc, p_svc->h_recv_cq, IB_WC_RECV );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2041 \r
2042 \r
2043 \r
2044 /*\r
2045  * Special QP completion handler.\r
2046  */\r
2047 void\r
2048 spl_qp_comp(\r
2049         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2050         IN              const   ib_cq_handle_t                          h_cq,\r
2051         IN                              ib_wc_type_t                            wc_type )\r
2052 {\r
2053         ib_wc_t                                 wc;\r
2054         ib_wc_t*                                p_free_wc = &wc;\r
2055         ib_wc_t*                                p_done_wc;\r
2056         al_mad_wr_t*                    p_mad_wr;\r
2057         al_mad_element_t*               p_al_mad;\r
2058         ib_mad_element_t*               p_mad_element;\r
2059         ib_smp_t*                               p_smp;\r
2060         ib_api_status_t                 status;\r
2061 \r
2062         AL_ENTER( AL_DBG_SMI_CB );\r
2063 \r
2064         CL_ASSERT( p_spl_qp_svc );\r
2065         CL_ASSERT( h_cq );\r
2066 \r
2067         /* Check the QP state and guard against error handling. */\r
2068         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2069         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2070         {\r
2071                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2072                 return;\r
2073         }\r
2074         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2075         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2076 \r
2077         wc.p_next = NULL;\r
2078         /* Process work completions. */\r
2079         while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )\r
2080         {\r
2081                 /* Process completions one at a time. */\r
2082                 CL_ASSERT( p_done_wc );\r
2083 \r
2084                 /* Flushed completions are handled elsewhere. */\r
2085                 if( wc.status == IB_WCS_WR_FLUSHED_ERR )\r
2086                 {\r
2087                         p_free_wc = &wc;\r
2088                         continue;\r
2089                 }\r
2090 \r
2091                 /*\r
2092                  * Process the work completion.  Per IBA specification, the\r
2093                  * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.\r
2094                  * Use the wc_type parameter.\r
2095                  */\r
2096                 switch( wc_type )\r
2097                 {\r
2098                 case IB_WC_SEND:\r
2099                         /* Get a pointer to the MAD work request. */\r
2100                         p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);\r
2101 \r
2102                         /* Remove the MAD work request from the service tracking queue. */\r
2103                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2104                         cl_qlist_remove_item( &p_spl_qp_svc->send_queue,\r
2105                                 &p_mad_wr->list_item );\r
2106                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2107 \r
2108                         /* Reset directed route SMPs as required by IBA. */\r
2109                         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
2110                         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2111                         {\r
2112                                 if( ib_smp_is_response( p_smp ) )\r
2113                                         p_smp->hop_ptr++;\r
2114                                 else\r
2115                                         p_smp->hop_ptr--;\r
2116                         }\r
2117 \r
2118                         /* Report the send completion to the dispatcher. */\r
2119                         mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );\r
2120                         break;\r
2121 \r
2122                 case IB_WC_RECV:\r
2123 \r
2124                         /* Initialize pointers to the MAD element. */\r
2125                         p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);\r
2126                         p_mad_element = &p_al_mad->element;\r
2127 \r
2128                         /* Remove the AL MAD element from the service tracking list. */\r
2129                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2130 \r
2131                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2132                                 &p_al_mad->list_item );\r
2133 \r
2134                         /* Replenish the receive buffer. */\r
2135                         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2136                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2137 \r
2138                         /* Construct the MAD element from the receive work completion. */\r
2139                         build_mad_recv( p_mad_element, &wc );\r
2140 \r
2141                         /* Process the received MAD. */\r
2142                         status = process_mad_recv( p_spl_qp_svc, p_mad_element );\r
2143 \r
2144                         /* Discard this MAD on error. */\r
2145                         if( status != IB_SUCCESS )\r
2146                         {\r
2147                                 status = ib_put_mad( p_mad_element );\r
2148                                 CL_ASSERT( status == IB_SUCCESS );\r
2149                         }\r
2150                         break;\r
2151 \r
2152                 default:\r
2153                         CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );\r
2154                         break;\r
2155                 }\r
2156 \r
2157                 if( wc.status != IB_WCS_SUCCESS )\r
2158                 {\r
2159                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2160                                 ("special QP completion error: %s! internal syndrome 0x%I64x\n",\r
2161                                 ib_get_wc_status_str( wc.status ), wc.vendor_specific) );\r
2162 \r
2163                         /* Reset the special QP service and return. */\r
2164                         spl_qp_svc_reset( p_spl_qp_svc );\r
2165                 }\r
2166                 p_free_wc = &wc;\r
2167         }\r
2168 \r
2169         /* Rearm the CQ. */\r
2170         status = ib_rearm_cq( h_cq, FALSE );\r
2171         CL_ASSERT( status == IB_SUCCESS );\r
2172 \r
2173         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2174         AL_EXIT( AL_DBG_SMI_CB );\r
2175 }\r
2176 \r
2177 \r
2178 \r
/*
 * Process a received MAD.
 *
 * Validates and classifies a MAD received on a special QP, then routes it
 * to one of: the local HCA (ROUTE_LOCAL), the AL MAD dispatcher
 * (ROUTE_DISPATCHER), the remote SM (ROUTE_REMOTE, traps only), or the
 * bit bucket (ROUTE_DISCARD).
 *
 * Returns IB_SUCCESS if the MAD was handed off; IB_ERROR if it was
 * discarded (the caller then returns the element to the pool).
 */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_smp_t*				p_smp;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			/* Discard if the hop count or hop pointer is out of range. */
			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * it if the source LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					/* A returning DR response must arrive with hop_ptr 1. */
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				/* Discard if dr_slid is outside this port's LID range (base_lid + LMC window). */
				else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * it if the destination LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					/* At the destination, hop_ptr must equal hop_count. */
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						/* Record this port in the return path per IBA spec. */
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				/* Discard if dr_dlid is outside this port's LID range. */
				else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
					route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through to next case: a valid DR SMP routes like a LID SMP */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			/* Process the received GMP.  Only Get/Set go to the local CA. */
			switch( p_mad_element->p_mad_buf->method )
			{
			case IB_MAD_METHOD_GET:
			case IB_MAD_METHOD_SET:
				route = ROUTE_LOCAL;
				break;
			default:
				break;
			}
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		case IB_MCLASS_SUBN_ADM:
		case IB_MCLASS_DEV_MGMT:
		case IB_MCLASS_COMM_MGMT:
		case IB_MCLASS_SNMP:
			/* These classes always go to the dispatcher. */
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2325 \r
2326 \r
2327 \r
2328 /*\r
2329  * Route a received SMP.\r
2330  */\r
2331 mad_route_t\r
2332 route_recv_smp(\r
2333         IN                              ib_mad_element_t*                       p_mad_element )\r
2334 {\r
2335         mad_route_t                             route;\r
2336 \r
2337         AL_ENTER( AL_DBG_SMI );\r
2338 \r
2339         CL_ASSERT( p_mad_element );\r
2340 \r
2341         /* Process the received SMP. */\r
2342         switch( p_mad_element->p_mad_buf->method )\r
2343         {\r
2344         case IB_MAD_METHOD_GET:\r
2345         case IB_MAD_METHOD_SET:\r
2346                 route = route_recv_smp_attr( p_mad_element );\r
2347                 break;\r
2348 \r
2349         case IB_MAD_METHOD_TRAP:\r
2350                 /*\r
2351                  * Special check to route locally generated traps to the remote SM.\r
2352                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2353                  * IB_RECV_OPT_FORWARD flag.\r
2354                  *\r
2355                  * Note that because forwarded traps use AL MAD services, the upper\r
2356                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2357                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2358                  * TID.\r
2359                  */\r
2360                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2361                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2362                 break;\r
2363 \r
2364         case IB_MAD_METHOD_TRAP_REPRESS:\r
2365                 /*\r
2366                  * Note that because forwarded traps use AL MAD services, the upper\r
2367                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2368                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2369                  * TID.\r
2370                  */\r
2371                 route = ROUTE_LOCAL;\r
2372                 break;\r
2373 \r
2374         default:\r
2375                 route = ROUTE_DISPATCHER;\r
2376                 break;\r
2377         }\r
2378 \r
2379         AL_EXIT( AL_DBG_SMI );\r
2380         return route;\r
2381 }\r
2382 \r
2383 \r
2384 \r
2385 /*\r
2386  * Route received SMP attributes.\r
2387  */\r
2388 mad_route_t\r
2389 route_recv_smp_attr(\r
2390         IN                              ib_mad_element_t*                       p_mad_element )\r
2391 {\r
2392         mad_route_t                             route;\r
2393 \r
2394         AL_ENTER( AL_DBG_SMI );\r
2395 \r
2396         CL_ASSERT( p_mad_element );\r
2397 \r
2398         /* Process the received SMP attributes. */\r
2399         switch( p_mad_element->p_mad_buf->attr_id )\r
2400         {\r
2401         case IB_MAD_ATTR_NODE_DESC:\r
2402         case IB_MAD_ATTR_NODE_INFO:\r
2403         case IB_MAD_ATTR_GUID_INFO:\r
2404         case IB_MAD_ATTR_PORT_INFO:\r
2405         case IB_MAD_ATTR_P_KEY_TABLE:\r
2406         case IB_MAD_ATTR_SLVL_TABLE:\r
2407         case IB_MAD_ATTR_VL_ARBITRATION:\r
2408         case IB_MAD_ATTR_VENDOR_DIAG:\r
2409         case IB_MAD_ATTR_LED_INFO:\r
2410         case IB_MAD_ATTR_SWITCH_INFO:\r
2411                 route = ROUTE_LOCAL;\r
2412                 break;\r
2413 \r
2414         default:\r
2415                 route = ROUTE_DISPATCHER;\r
2416                 break;\r
2417         }\r
2418 \r
2419         AL_EXIT( AL_DBG_SMI );\r
2420         return route;\r
2421 }\r
2422 \r
2423 \r
2424 /*\r
2425  * Route a received GMP.\r
2426  */\r
2427 mad_route_t\r
2428 route_recv_gmp(\r
2429         IN                              ib_mad_element_t*                       p_mad_element )\r
2430 {\r
2431         mad_route_t                             route;\r
2432 \r
2433         AL_ENTER( AL_DBG_SMI );\r
2434 \r
2435         CL_ASSERT( p_mad_element );\r
2436 \r
2437         /* Process the received GMP. */\r
2438         switch( p_mad_element->p_mad_buf->method )\r
2439         {\r
2440         case IB_MAD_METHOD_GET:\r
2441         case IB_MAD_METHOD_SET:\r
2442                 /* Route vendor specific MADs to the HCA provider. */\r
2443                 if( ib_class_is_vendor_specific(\r
2444                         p_mad_element->p_mad_buf->mgmt_class ) )\r
2445                 {\r
2446                         route = ROUTE_LOCAL;\r
2447                 }\r
2448                 else\r
2449                 {\r
2450                         route = route_recv_gmp_attr( p_mad_element );\r
2451                 }\r
2452                 break;\r
2453 \r
2454         default:\r
2455                 route = ROUTE_DISPATCHER;\r
2456                 break;\r
2457         }\r
2458 \r
2459         AL_EXIT( AL_DBG_SMI );\r
2460         return route;\r
2461 }\r
2462 \r
2463 \r
2464 \r
2465 /*\r
2466  * Route received GMP attributes.\r
2467  */\r
2468 mad_route_t\r
2469 route_recv_gmp_attr(\r
2470         IN                              ib_mad_element_t*                       p_mad_element )\r
2471 {\r
2472         mad_route_t                             route;\r
2473 \r
2474         AL_ENTER( AL_DBG_SMI );\r
2475 \r
2476         CL_ASSERT( p_mad_element );\r
2477 \r
2478         /* Process the received GMP attributes. */\r
2479         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
2480                 route = ROUTE_LOCAL;\r
2481         else\r
2482                 route = ROUTE_DISPATCHER;\r
2483 \r
2484         AL_EXIT( AL_DBG_SMI );\r
2485         return route;\r
2486 }\r
2487 \r
2488 \r
2489 \r
/*
 * Forward a locally generated Subnet Management trap.
 *
 * Builds an address vector back to the SM (reversing the GRH if one was
 * received) and re-sends the trap through the SMI MAD service.  On send
 * failure the AV is destroyed and the error is returned; the caller is
 * responsible for returning the MAD element to its pool.
 */
ib_api_status_t
forward_sm_trap(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_av_attr_t			av_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/* Check the SMP class. */
	if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )
	{
		/*
		 * Per IBA Specification Release 1.1 Section 14.2.2.1,
		 * "C14-5: Only a SM shall originate a directed route SMP."
		 * Therefore all traps should be LID routed; drop this one.
		 */
		AL_EXIT( AL_DBG_SMI );
		return IB_ERROR;
	}

	/* Create an address vector for the SM, reflecting the received path. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num = p_spl_qp_svc->port_num;
	av_attr.sl = p_mad_element->remote_sl;
	av_attr.dlid = p_mad_element->remote_lid;
	if( p_mad_element->grh_valid )
	{
		/* Swap source and destination GIDs to send back along the GRH path. */
		cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );
		av_attr.grh.src_gid	 = p_mad_element->p_grh->dest_gid;
		av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;
		av_attr.grh_valid = TRUE;
	}

	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_element->h_av );

	if( status != IB_SUCCESS )
	{
		AL_EXIT( AL_DBG_SMI );
		return status;
	}

	/* Complete the initialization of the MAD element. */
	p_mad_element->p_next = NULL;
	p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
	p_mad_element->resp_expected = FALSE;

	/* Clear context1 for proper send completion callback processing. */
	p_mad_element->context1 = NULL;

	/*
	 * Forward the trap.  Note that because forwarded traps use AL MAD
	 * services, the upper 32-bits of the TID are reserved by the access
	 * layer.  When matching a Trap Repress MAD, the SMA must only use
	 * the lower 32-bits of the TID.
	 */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );

	if( status != IB_SUCCESS )
		ib_destroy_av( p_mad_element->h_av );
		/*
		 * NOTE(review): h_av is destroyed but p_mad_element->h_av is left
		 * pointing at the destroyed AV; presumably the caller's ib_put_mad
		 * path does not touch it — confirm before reusing the element.
		 */

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2562 \r
2563 \r
/*
 * Process a locally routed MAD received from the special QP.
 *
 * Re-sends the received request to the local CA through the alias MAD
 * service so the SMA/PMA below verbs can generate the response.  The
 * original AL TID is stashed in context2 and restored by
 * spl_qp_alias_recv_cb before the response is returned to the requester.
 */
ib_api_status_t
recv_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request )
{
	ib_mad_t*				p_mad_hdr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_request );

	/* Initialize the MAD element.  context1 links the request to itself
	 * so the completion path can recover it. */
	p_mad_hdr = ib_get_mad_buf( p_mad_request );
	p_mad_request->context1 = p_mad_request;

	/* Save the TID so it can be restored on the response. */
	p_mad_request->context2 =
		(void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );
/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
	/* Zero the AL-reserved upper TID bits before handing to AL MAD services. */
	al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )

	/*
	 * We need to get a response from the local HCA to this MAD only if this
	 * MAD is not itself a response.
	 */
	p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
		( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
	p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
	p_mad_request->send_opt = IB_SEND_OPT_LOCAL;

	/* Send the locally addressed MAD request to the CA for processing. */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2611 \r
2612 \r
2613 \r
2614 /*\r
2615  * Special QP alias send completion callback.\r
2616  */\r
2617 void\r
2618 spl_qp_alias_send_cb(\r
2619         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2620         IN                              void*                                           mad_svc_context,\r
2621         IN                              ib_mad_element_t*                       p_mad_element )\r
2622 {\r
2623         ib_api_status_t                 status;\r
2624 \r
2625         AL_ENTER( AL_DBG_SMI );\r
2626 \r
2627         UNUSED_PARAM( h_mad_svc );\r
2628         UNUSED_PARAM( mad_svc_context );\r
2629         CL_ASSERT( p_mad_element );\r
2630 \r
2631         if( p_mad_element->h_av )\r
2632         {\r
2633                 status = ib_destroy_av( p_mad_element->h_av );\r
2634                 CL_ASSERT( status == IB_SUCCESS );\r
2635         }\r
2636 \r
2637         status = ib_put_mad( p_mad_element );\r
2638         CL_ASSERT( status == IB_SUCCESS );\r
2639 \r
2640         AL_EXIT( AL_DBG_SMI );\r
2641 }\r
2642 \r
2643 \r
2644 \r
/*
 * Special QP alias receive completion callback.
 *
 * Invoked when the local CA has produced a response to a request that
 * recv_local_mad re-sent to it.  Restores the original requester's TID
 * and addressing, creates a reply address vector, and sends the
 * response back out the special QP.
 */
void
spl_qp_alias_recv_cb(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				void*						mad_svc_context,
	IN				ib_mad_element_t*			p_mad_response )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_mad_element_t*		p_mad_request;
	ib_mad_t*				p_mad_hdr;
	ib_av_attr_t			av_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( mad_svc_context );
	CL_ASSERT( p_mad_response );
	CL_ASSERT( p_mad_response->send_context1 );

	/* Initialize pointers.  send_context1 carries the original request
	 * (set by recv_local_mad via context1). */
	p_spl_qp_svc = mad_svc_context;
	p_mad_request = p_mad_response->send_context1;
	p_mad_hdr = ib_get_mad_buf( p_mad_response );

	/* Restore the TID, so it will match on the remote side. */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id,
		(uint32_t)(uintn_t)p_mad_response->send_context2 );
#pragma warning( pop )

	/* Set the remote QP from the original request. */
	p_mad_response->remote_qp	= p_mad_request->remote_qp;
	p_mad_response->remote_qkey = p_mad_request->remote_qkey;

	/* Prepare to create an address vector back to the requester. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num	= p_spl_qp_svc->port_num;
	av_attr.sl			= p_mad_request->remote_sl;
	/* NOTE(review): static rate is fixed at 10 Gb/s here regardless of
	 * link speed — presumably adequate for MAD traffic; confirm. */
	av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
	av_attr.path_bits	= p_mad_request->path_bits;
	if( p_mad_request->grh_valid )
	{
		/* Reverse the GRH so the response retraces the request's path. */
		cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );
		av_attr.grh.src_gid	 = p_mad_request->p_grh->dest_gid;
		av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;
		av_attr.grh_valid = TRUE;
	}
	/* A directed-route SMP with a permissive dr_dlid is answered with a
	 * permissive DLID per the directed-route rules. */
	if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&
		( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )
		av_attr.dlid = IB_LID_PERMISSIVE;
	else
		av_attr.dlid = p_mad_request->remote_lid;

	/* Create an address vector. */
	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_response->h_av );

	if( status != IB_SUCCESS )
	{
		/* No AV: drop the response and return the element to its pool. */
		ib_put_mad( p_mad_response );

		AL_EXIT( AL_DBG_SMI );
		return;
	}

	/* Send the response. */
	status = ib_send_mad( h_mad_svc, p_mad_response, NULL );

	if( status != IB_SUCCESS )
	{
		/* Send failed: release the AV and the element here since the
		 * send completion callback will not run. */
		ib_destroy_av( p_mad_response->h_av );
		ib_put_mad( p_mad_response );
	}

	AL_EXIT( AL_DBG_SMI );
}
2723 \r
2724 \r
2725 \r
2726 /*\r
2727  * Post receive buffers to a special QP.\r
2728  */\r
2729 static ib_api_status_t\r
2730 spl_qp_svc_post_recvs(\r
2731         IN                              spl_qp_svc_t*   const           p_spl_qp_svc )\r
2732 {\r
2733         ib_mad_element_t*               p_mad_element;\r
2734         al_mad_element_t*               p_al_element;\r
2735         ib_recv_wr_t                    recv_wr;\r
2736         ib_api_status_t                 status = IB_SUCCESS;\r
2737 \r
2738         /* Attempt to post receive buffers up to the max_qp_depth limit. */\r
2739         while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <\r
2740                 (int32_t)p_spl_qp_svc->max_qp_depth )\r
2741         {\r
2742                 /* Get a MAD element from the pool. */\r
2743                 status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,\r
2744                         MAD_BLOCK_SIZE, &p_mad_element );\r
2745 \r
2746                 if( status != IB_SUCCESS ) break;\r
2747 \r
2748                 p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,\r
2749                         element );\r
2750 \r
2751                 /* Build the receive work request. */\r
2752                 recv_wr.p_next   = NULL;\r
2753                 recv_wr.wr_id    = (uintn_t)p_al_element;\r
2754                 recv_wr.num_ds = 1;\r
2755                 recv_wr.ds_array = &p_al_element->grh_ds;\r
2756 \r
2757                 /* Queue the receive on the service tracking list. */\r
2758                 cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,\r
2759                         &p_al_element->list_item );\r
2760 \r
2761                 /* Post the receive. */\r
2762                 status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );\r
2763 \r
2764                 if( status != IB_SUCCESS )\r
2765                 {\r
2766                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2767                                 ("Failed to post receive %016I64x\n",\r
2768                                 (LONG_PTR)p_al_element) );\r
2769                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2770                                 &p_al_element->list_item );\r
2771 \r
2772                         ib_put_mad( p_mad_element );\r
2773                         break;\r
2774                 }\r
2775         }\r
2776 \r
2777         return status;\r
2778 }\r
2779 \r
2780 \r
2781 \r
2782 /*\r
2783  * Special QP service asynchronous event callback.\r
2784  */\r
2785 void\r
2786 spl_qp_svc_event_cb(\r
2787         IN                              ib_async_event_rec_t            *p_event_rec )\r
2788 {\r
2789         spl_qp_svc_t*                   p_spl_qp_svc;\r
2790 \r
2791         AL_ENTER( AL_DBG_SMI );\r
2792 \r
2793         CL_ASSERT( p_event_rec );\r
2794         CL_ASSERT( p_event_rec->context );\r
2795 \r
2796         if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
2797         {\r
2798                 AL_EXIT( AL_DBG_SMI );\r
2799                 return;\r
2800         }\r
2801 \r
2802         p_spl_qp_svc = p_event_rec->context;\r
2803 \r
2804         spl_qp_svc_reset( p_spl_qp_svc );\r
2805 \r
2806         AL_EXIT( AL_DBG_SMI );\r
2807 }\r
2808 \r
2809 \r
2810 \r
2811 /*\r
2812  * Special QP service reset.\r
2813  */\r
2814 void\r
2815 spl_qp_svc_reset(\r
2816         IN                              spl_qp_svc_t*                           p_spl_qp_svc )\r
2817 {\r
2818         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2819 \r
2820         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2821         {\r
2822                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2823                 return;\r
2824         }\r
2825 \r
2826         /* Change the special QP service to the error state. */\r
2827         p_spl_qp_svc->state = SPL_QP_ERROR;\r
2828 \r
2829         /* Flag the service as in use by the asynchronous processing thread. */\r
2830         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2831 \r
2832         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2833 \r
2834         /* Queue an asynchronous processing item to reset the special QP. */\r
2835         cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );\r
2836 }\r
2837 \r
2838 \r
2839 \r
/*
 * Asynchronous processing thread callback to reset the special QP service.
 *
 * Runs after spl_qp_svc_reset has moved the service to SPL_QP_ERROR.
 * Drains all other users, resets and re-initializes the QP, recycles
 * receive buffers, flushes stale send completions, re-queues pending
 * sends, and finally re-activates the service.
 */
void
spl_qp_svc_reset_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_qp_mod_t				qp_mod;
	ib_api_status_t			status;
	cl_qlist_t				mad_wr_list;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );

	/* Wait here until the special QP service is only in use by this thread.
	 * in_use_cnt was incremented on our behalf by spl_qp_svc_reset, so the
	 * target count is 1. */
	while( p_spl_qp_svc->in_use_cnt != 1 )
	{
		cl_thread_suspend( 0 );
	}

	/* Change the QP to the RESET state. */
	cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
	qp_mod.req_state = IB_QPS_RESET;

	status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );
	CL_ASSERT( status == IB_SUCCESS );

	/* Return receive MAD elements to the pool.  The QP is reset, so no
	 * completions can race with this drain. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
		 p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
		 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
	{
		p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

		status = ib_put_mad( &p_al_mad->element );
		CL_ASSERT( status == IB_SUCCESS );
	}
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Re-initialize the QP. */
	status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
	CL_ASSERT( status == IB_SUCCESS );

	/* Poll to remove any remaining send completions from the CQ. */
	do
	{
		cl_memclr( &wc, sizeof( ib_wc_t ) );
		p_free_wc = &wc;
		status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );

	} while( status == IB_SUCCESS );

	/* Post receive buffers. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	spl_qp_svc_post_recvs( p_spl_qp_svc );

	/* Re-queue any outstanding MAD send operations.  Move them to a local
	 * list under the lock, then re-queue outside the lock. */
	cl_qlist_init( &mad_wr_list );
	cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	for( p_list_item = cl_qlist_remove_head( &mad_wr_list );
		 p_list_item != cl_qlist_end( &mad_wr_list );
		 p_list_item = cl_qlist_remove_head( &mad_wr_list ) )
	{
		p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
		special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );
	}

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state == SPL_QP_ERROR )
	{
		/* The QP is ready.  Change the state. */
		p_spl_qp_svc->state = SPL_QP_ACTIVE;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* Re-arm the CQs. */
		status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );
		status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );

		/* Resume send processing. */
		special_qp_resume_sends( p_spl_qp_svc->h_qp );
	}
	else
	{
		/* The service moved out of the error state (e.g. it is being
		 * destroyed); leave it alone. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	AL_EXIT( AL_DBG_SMI );
}
2945 \r
2946 \r
2947 \r
2948 /*\r
2949  * Special QP alias asynchronous event callback.\r
2950  */\r
void
spl_qp_alias_event_cb(
	IN				ib_async_event_rec_t			*p_event_rec )
{
	/*
	 * Asynchronous events on the special QP alias require no handling;
	 * the callback exists only to satisfy the QP-creation interface.
	 */
	UNUSED_PARAM( p_event_rec );
}
2957 \r
2958 \r
2959 \r
2960 /*\r
2961  * Acquire the SMI dispatcher for the given port.\r
2962  */\r
2963 ib_api_status_t\r
2964 acquire_smi_disp(\r
2965         IN              const   ib_net64_t                                      port_guid,\r
2966                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2967 {\r
2968         CL_ASSERT( gp_spl_qp_mgr );\r
2969         return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );\r
2970 }\r
2971 \r
2972 \r
2973 \r
2974 /*\r
2975  * Acquire the GSI dispatcher for the given port.\r
2976  */\r
2977 ib_api_status_t\r
2978 acquire_gsi_disp(\r
2979         IN              const   ib_net64_t                                      port_guid,\r
2980                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2981 {\r
2982         CL_ASSERT( gp_spl_qp_mgr );\r
2983         return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );\r
2984 }\r
2985 \r
2986 \r
2987 \r
2988 /*\r
2989  * Acquire the service dispatcher for the given port.\r
2990  */\r
2991 ib_api_status_t\r
2992 acquire_svc_disp(\r
2993         IN              const   cl_qmap_t* const                        p_svc_map,\r
2994         IN              const   ib_net64_t                                      port_guid,\r
2995                 OUT                     al_mad_disp_handle_t            *ph_mad_disp )\r
2996 {\r
2997         cl_map_item_t*                  p_svc_item;\r
2998         spl_qp_svc_t*                   p_spl_qp_svc;\r
2999 \r
3000         AL_ENTER( AL_DBG_SMI );\r
3001 \r
3002         CL_ASSERT( p_svc_map );\r
3003         CL_ASSERT( gp_spl_qp_mgr );\r
3004 \r
3005         /* Search for the SMI or GSI service for the given port. */\r
3006         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
3007         p_svc_item = cl_qmap_get( p_svc_map, port_guid );\r
3008         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
3009         if( p_svc_item == cl_qmap_end( p_svc_map ) )\r
3010         {\r
3011                 /* The port does not have an active agent. */\r
3012                 AL_EXIT( AL_DBG_SMI );\r
3013                 return IB_INVALID_GUID;\r
3014         }\r
3015 \r
3016         p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );\r
3017 \r
3018         /* Found a match.  Get MAD dispatcher handle. */\r
3019         *ph_mad_disp = p_spl_qp_svc->h_mad_disp;\r
3020 \r
3021         /* Reference the MAD dispatcher on behalf of the client. */\r
3022         ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );\r
3023 \r
3024         AL_EXIT( AL_DBG_SMI );\r
3025         return IB_SUCCESS;\r
3026 }\r
3027 \r
3028 \r
3029 \r
3030 /*\r
3031  * Force a poll for CA attribute changes.\r
3032  */\r
void
force_smi_poll(
	void )
{
	AL_ENTER( AL_DBG_SMI );

	/*
	 * Invoke the poll timer callback directly instead of scheduling the
	 * timer, saving a thread context switch.  The callback itself restarts
	 * the periodic timer if any agents remain and polling is enabled, so
	 * the normal polling cycle continues afterwards.
	 */
	smi_poll_timer_cb( gp_spl_qp_mgr );

	AL_EXIT( AL_DBG_SMI );
}
3047 \r
3048 \r
3049 \r
3050 /*\r
3051  * Poll for CA port attribute changes.\r
3052  */\r
void
smi_poll_timer_cb(
	IN				void*						context )
{
	cl_status_t			cl_status;

	AL_ENTER( AL_DBG_SMI );

	/* The timer is always armed with the global manager as its context. */
	CL_ASSERT( context );
	CL_ASSERT( gp_spl_qp_mgr == context );
	UNUSED_PARAM( context );

	/*
	 * Scan for changes on the local HCAs.  Since the PnP manager has its
	 * own thread for processing changes, we kick off that thread in parallel
	 * reposting receive buffers to the SQP agents.
	 */
	pnp_poll();

	/*
	 * Hold the manager lock across both the agent walk and the timer
	 * restart decision so the agent list cannot change underneath us.
	 */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );

	/* Perform port processing on the special QP agents. */
	cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,
		gp_spl_qp_mgr );

	/* Determine if there are any special QP agents to poll. */
	if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )
	{
		/* Restart the polling timer. */
		cl_status =
			cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );
		CL_ASSERT( cl_status == CL_SUCCESS );
	}
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

	AL_EXIT( AL_DBG_SMI );
}
3096 \r
3097 \r
3098 \r
3099 /*\r
3100  * Post receive buffers to a special QP.\r
3101  */\r
3102 void\r
3103 smi_post_recvs(\r
3104         IN                              cl_list_item_t* const           p_list_item,\r
3105         IN                              void*                                           context )\r
3106 {\r
3107         al_obj_t*                               p_obj;\r
3108         spl_qp_svc_t*                   p_spl_qp_svc;\r
3109 \r
3110         AL_ENTER( AL_DBG_SMI );\r
3111 \r
3112         CL_ASSERT( p_list_item );\r
3113         UNUSED_PARAM( context );\r
3114 \r
3115         p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
3116         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
3117 \r
3118         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
3119         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
3120         {\r
3121                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3122                 return;\r
3123         }\r
3124 \r
3125         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
3126         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3127 \r
3128         AL_EXIT( AL_DBG_SMI );\r
3129 }\r