[IBAL, MT23108, MTHCA] Fix MAD response for cached attributes
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  * Copyright (c) 2006 Voltaire Corporation.  All rights reserved.\r
5  *\r
6  * This software is available to you under the OpenIB.org BSD license\r
7  * below:\r
8  *\r
9  *     Redistribution and use in source and binary forms, with or\r
10  *     without modification, are permitted provided that the following\r
11  *     conditions are met:\r
12  *\r
13  *      - Redistributions of source code must retain the above\r
14  *        copyright notice, this list of conditions and the following\r
15  *        disclaimer.\r
16  *\r
17  *      - Redistributions in binary form must reproduce the above\r
18  *        copyright notice, this list of conditions and the following\r
19  *        disclaimer in the documentation and/or other materials\r
20  *        provided with the distribution.\r
21  *\r
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
29  * SOFTWARE.\r
30  *\r
31  * $Id$\r
32  */\r
33 \r
34 \r
35 #include <iba/ib_al.h>\r
36 #include <complib/cl_timer.h>\r
37 \r
38 #include "ib_common.h"\r
39 #include "al_common.h"\r
40 #include "al_debug.h"\r
41 #if defined(EVENT_TRACING)\r
42 #ifdef offsetof\r
43 #undef offsetof\r
44 #endif\r
45 #include "al_smi.tmh"\r
46 #endif\r
47 #include "al_verbs.h"\r
48 #include "al_mgr.h"\r
49 #include "al_pnp.h"\r
50 #include "al_qp.h"\r
51 #include "al_smi.h"\r
52 #include "al_av.h"\r
53 \r
54 \r
/* Node description string, maintained elsewhere in AL. */
extern char						node_desc[IB_NODE_DESCRIPTION_SIZE];

#define SMI_POLL_INTERVAL			20000		/* Milliseconds */
#define LOCAL_MAD_TIMEOUT			50			/* Milliseconds */
#define DEFAULT_QP0_DEPTH			256			/* Cap for QP0 WR/CQE depth. */
#define DEFAULT_QP1_DEPTH			1024		/* Cap for QP1 WR/CQE depth. */

/* Interval between SMI polls, in milliseconds; defaults to SMI_POLL_INTERVAL. */
uint32_t				g_smi_poll_interval =	SMI_POLL_INTERVAL;
/* Global singleton special QP manager; NULL until create_spl_qp_mgr runs. */
spl_qp_mgr_t*			gp_spl_qp_mgr = NULL;
64 \r
65 \r
66 /*\r
67  * Function prototypes.\r
68  */\r
69 void\r
70 destroying_spl_qp_mgr(\r
71         IN                              al_obj_t*                                       p_obj );\r
72 \r
73 void\r
74 free_spl_qp_mgr(\r
75         IN                              al_obj_t*                                       p_obj );\r
76 \r
77 ib_api_status_t\r
78 spl_qp0_agent_pnp_cb(\r
79         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
80 \r
81 ib_api_status_t\r
82 spl_qp1_agent_pnp_cb(\r
83         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
84 \r
85 ib_api_status_t\r
86 spl_qp_agent_pnp(\r
87         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
88         IN                              ib_qp_type_t                            qp_type );\r
89 \r
90 ib_api_status_t\r
91 create_spl_qp_svc(\r
92         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
93         IN              const   ib_qp_type_t                            qp_type );\r
94 \r
95 void\r
96 destroying_spl_qp_svc(\r
97         IN                              al_obj_t*                                       p_obj );\r
98 \r
99 void\r
100 free_spl_qp_svc(\r
101         IN                              al_obj_t*                                       p_obj );\r
102 \r
103 void\r
104 spl_qp_svc_lid_change(\r
105         IN                              al_obj_t*                                       p_obj,\r
106         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
107 \r
108 ib_api_status_t\r
109 remote_mad_send(\r
110         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
111         IN                              al_mad_wr_t* const                      p_mad_wr );\r
112 \r
113 static ib_api_status_t\r
114 local_mad_send(\r
115         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
116         IN                              al_mad_wr_t* const                      p_mad_wr );\r
117 \r
118 static ib_api_status_t\r
119 loopback_mad(\r
120         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
121         IN                              al_mad_wr_t* const                      p_mad_wr );\r
122 \r
123 static ib_api_status_t\r
124 process_subn_mad(\r
125         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
126         IN                              al_mad_wr_t* const                      p_mad_wr );\r
127 \r
128 static ib_api_status_t\r
129 fwd_local_mad(\r
130         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
131         IN                              al_mad_wr_t* const                      p_mad_wr );\r
132 \r
133 void\r
134 send_local_mad_cb(\r
135         IN                              cl_async_proc_item_t*           p_item );\r
136 \r
137 void\r
138 spl_qp_send_comp_cb(\r
139         IN              const   ib_cq_handle_t                          h_cq,\r
140         IN                              void                                            *cq_context );\r
141 \r
142 void\r
143 spl_qp_recv_comp_cb(\r
144         IN              const   ib_cq_handle_t                          h_cq,\r
145         IN                              void                                            *cq_context );\r
146 \r
147 void\r
148 spl_qp_comp(\r
149         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
150         IN              const   ib_cq_handle_t                          h_cq,\r
151         IN                              ib_wc_type_t                            wc_type );\r
152 \r
153 ib_api_status_t\r
154 process_mad_recv(\r
155         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
156         IN                              ib_mad_element_t*                       p_mad_element );\r
157 \r
158 mad_route_t\r
159 route_recv_smp(\r
160         IN                              ib_mad_element_t*                       p_mad_element );\r
161 \r
162 mad_route_t\r
163 route_recv_smp_attr(\r
164         IN                              ib_mad_element_t*                       p_mad_element );\r
165 \r
166 mad_route_t\r
167 route_recv_dm_mad(\r
168         IN                              ib_mad_element_t*                       p_mad_element );\r
169 \r
170 mad_route_t\r
171 route_recv_gmp(\r
172         IN                              ib_mad_element_t*                       p_mad_element );\r
173 \r
174 mad_route_t\r
175 route_recv_gmp_attr(\r
176         IN                              ib_mad_element_t*                       p_mad_element );\r
177 \r
178 ib_api_status_t\r
179 forward_sm_trap(\r
180         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
181         IN                              ib_mad_element_t*                       p_mad_element );\r
182 \r
183 ib_api_status_t\r
184 recv_local_mad(\r
185         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
186         IN                              ib_mad_element_t*                       p_mad_request );\r
187 \r
188 void\r
189 spl_qp_alias_send_cb(\r
190         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
191         IN                              void                                            *mad_svc_context,\r
192         IN                              ib_mad_element_t                        *p_mad_element );\r
193 \r
194 void\r
195 spl_qp_alias_recv_cb(\r
196         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
197         IN                              void                                            *mad_svc_context,\r
198         IN                              ib_mad_element_t                        *p_mad_response );\r
199 \r
200 static ib_api_status_t\r
201 spl_qp_svc_post_recvs(\r
202         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
203 \r
204 void\r
205 spl_qp_svc_event_cb(\r
206         IN                              ib_async_event_rec_t            *p_event_rec );\r
207 \r
208 void\r
209 spl_qp_alias_event_cb(\r
210         IN                              ib_async_event_rec_t            *p_event_rec );\r
211 \r
212 void\r
213 spl_qp_svc_reset(\r
214         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
215 \r
216 void\r
217 spl_qp_svc_reset_cb(\r
218         IN                              cl_async_proc_item_t*           p_item );\r
219 \r
220 ib_api_status_t\r
221 acquire_svc_disp(\r
222         IN              const   cl_qmap_t* const                        p_svc_map,\r
223         IN              const   ib_net64_t                                      port_guid,\r
224                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
225 \r
226 void\r
227 smi_poll_timer_cb(\r
228         IN                              void*                                           context );\r
229 \r
230 void\r
231 smi_post_recvs(\r
232         IN                              cl_list_item_t* const           p_list_item,\r
233         IN                              void*                                           context );\r
234 \r
235 #if defined( CL_USE_MUTEX )\r
236 void\r
237 spl_qp_send_async_cb(\r
238         IN                              cl_async_proc_item_t*           p_item );\r
239 \r
240 void\r
241 spl_qp_recv_async_cb(\r
242         IN                              cl_async_proc_item_t*           p_item );\r
243 #endif\r
244 \r
/*
 * Create the global special QP (SMI/GSI) manager and register for port
 * PnP events on behalf of QP0 and QP1.
 *
 * p_parent_obj - AL object that will parent the special QP manager.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY if allocation fails, or the
 * status of the failing init/attach/timer-init/PnP-registration call.  On
 * any failure the partially constructed manager is torn down before
 * returning, so gp_spl_qp_mgr is only left valid on success.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t					pnp_req;
	ib_api_status_t					status;
	cl_status_t						cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the SMI and GSI service lookup maps (keyed by port GUID). */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* The object was never initialized, so free it directly. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
364 \r
365 \r
366 \r
367 /*\r
368  * Pre-destroy the special QP manager.\r
369  */\r
370 void\r
371 destroying_spl_qp_mgr(\r
372         IN                              al_obj_t*                                       p_obj )\r
373 {\r
374         ib_api_status_t                 status;\r
375 \r
376         CL_ASSERT( p_obj );\r
377         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
378         UNUSED_PARAM( p_obj );\r
379 \r
380         /* Deregister for port PnP events for QP0. */\r
381         if( gp_spl_qp_mgr->h_qp0_pnp )\r
382         {\r
383                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
384                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
385                 CL_ASSERT( status == IB_SUCCESS );\r
386         }\r
387 \r
388         /* Deregister for port PnP events for QP1. */\r
389         if( gp_spl_qp_mgr->h_qp1_pnp )\r
390         {\r
391                 status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
392                         (ib_pfn_destroy_cb_t)deref_al_obj );\r
393                 CL_ASSERT( status == IB_SUCCESS );\r
394         }\r
395 \r
396         /* Destroy the SMI polling timer. */\r
397         cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
398 }\r
399 \r
400 \r
401 \r
402 /*\r
403  * Free the special QP manager.\r
404  */\r
405 void\r
406 free_spl_qp_mgr(\r
407         IN                              al_obj_t*                                       p_obj )\r
408 {\r
409         CL_ASSERT( p_obj );\r
410         CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
411         UNUSED_PARAM( p_obj );\r
412 \r
413         destroy_al_obj( &gp_spl_qp_mgr->obj );\r
414         cl_free( gp_spl_qp_mgr );\r
415         gp_spl_qp_mgr = NULL;\r
416 }\r
417 \r
418 \r
419 \r
420 /*\r
421  * Special QP0 agent PnP event callback.\r
422  */\r
423 ib_api_status_t\r
424 spl_qp0_agent_pnp_cb(\r
425         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
426 {\r
427         ib_api_status_t status;\r
428         AL_ENTER( AL_DBG_SMI_CB );\r
429 \r
430         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
431 \r
432         AL_EXIT( AL_DBG_SMI_CB );\r
433         return status;\r
434 }\r
435 \r
436 \r
437 \r
438 /*\r
439  * Special QP1 agent PnP event callback.\r
440  */\r
441 ib_api_status_t\r
442 spl_qp1_agent_pnp_cb(\r
443         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
444 {\r
445         ib_api_status_t status;\r
446         AL_ENTER( AL_DBG_SMI_CB );\r
447 \r
448         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
449 \r
450         AL_EXIT( AL_DBG_SMI );\r
451         return status;\r
452 }\r
453 \r
454 \r
455 \r
456 /*\r
457  * Special QP agent PnP event callback.\r
458  */\r
459 ib_api_status_t\r
460 spl_qp_agent_pnp(\r
461         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
462         IN                              ib_qp_type_t                            qp_type )\r
463 {\r
464         ib_api_status_t                 status;\r
465         al_obj_t*                               p_obj;\r
466 \r
467         AL_ENTER( AL_DBG_SMI_CB );\r
468 \r
469         CL_ASSERT( p_pnp_rec );\r
470         p_obj = p_pnp_rec->context;\r
471 \r
472         /* Dispatch based on the PnP event type. */\r
473         switch( p_pnp_rec->pnp_event )\r
474         {\r
475         case IB_PNP_PORT_ADD:\r
476                 CL_ASSERT( !p_obj );\r
477                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
478                 break;\r
479 \r
480         case IB_PNP_PORT_REMOVE:\r
481                 CL_ASSERT( p_obj );\r
482                 ref_al_obj( p_obj );\r
483                 p_obj->pfn_destroy( p_obj, NULL );\r
484                 status = IB_SUCCESS;\r
485                 break;\r
486 \r
487         case IB_PNP_LID_CHANGE:\r
488                 CL_ASSERT( p_obj );\r
489                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
490                 status = IB_SUCCESS;\r
491                 break;\r
492 \r
493         default:\r
494                 /* All other events are ignored. */\r
495                 status = IB_SUCCESS;\r
496                 break;\r
497         }\r
498 \r
499         AL_EXIT( AL_DBG_SMI );\r
500         return status;\r
501 }\r
502 \r
503 \r
504 \r
505 /*\r
506  * Create a special QP service.\r
507  */\r
508 ib_api_status_t\r
509 create_spl_qp_svc(\r
510         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
511         IN              const   ib_qp_type_t                            qp_type )\r
512 {\r
513         cl_status_t                             cl_status;\r
514         spl_qp_svc_t*                   p_spl_qp_svc;\r
515         ib_ca_handle_t                  h_ca;\r
516         ib_cq_create_t                  cq_create;\r
517         ib_qp_create_t                  qp_create;\r
518         ib_qp_attr_t                    qp_attr;\r
519         ib_mad_svc_t                    mad_svc;\r
520         ib_api_status_t                 status;\r
521 \r
522         AL_ENTER( AL_DBG_SMI );\r
523 \r
524         CL_ASSERT( p_pnp_rec );\r
525 \r
526         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
527         {\r
528                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
529                 return IB_INVALID_PARAMETER;\r
530         }\r
531 \r
532         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
533         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
534         CL_ASSERT( p_pnp_rec->p_port_attr );\r
535 \r
536         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
537         if( !p_spl_qp_svc )\r
538         {\r
539                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
540                         ("IB_INSUFFICIENT_MEMORY\n") );\r
541                 return IB_INSUFFICIENT_MEMORY;\r
542         }\r
543 \r
544         /* Tie the special QP service to the port by setting the port number. */\r
545         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
546         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
547         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
548 \r
549         /* Initialize the send and receive queues. */\r
550         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
551         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
552 \r
553 #if defined( CL_USE_MUTEX )\r
554         /* Initialize async callbacks and flags for send/receive processing. */\r
555         p_spl_qp_svc->send_async_queued = FALSE;\r
556         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
557         p_spl_qp_svc->recv_async_queued = FALSE;\r
558         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
559 #endif\r
560 \r
561         /* Initialize the async callback function to process local sends. */\r
562         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
563 \r
564         /* Initialize the async callback function to reset the QP on error. */\r
565         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
566 \r
567         /* Construct the special QP service object. */\r
568         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
569 \r
570         /* Initialize the special QP service object. */\r
571         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
572                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
573         if( status != IB_SUCCESS )\r
574         {\r
575                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
576                 return status;\r
577         }\r
578 \r
579         /* Attach the special QP service to the parent object. */\r
580         status = attach_al_obj(\r
581                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
582         if( status != IB_SUCCESS )\r
583         {\r
584                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
585                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
586                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
587                 return status;\r
588         }\r
589 \r
590         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
591         CL_ASSERT( h_ca );\r
592         if( !h_ca )\r
593         {\r
594                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
595                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
596                 return IB_INVALID_GUID;\r
597         }\r
598 \r
599         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
600 \r
601         /* Determine the maximum queue depth of the QP and CQs. */\r
602         p_spl_qp_svc->max_qp_depth =\r
603                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
604                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
605                 p_pnp_rec->p_ca_attr->max_wrs :\r
606                 p_pnp_rec->p_ca_attr->max_cqes;\r
607 \r
608         /* Compare this maximum to the default special queue depth. */\r
609         if( ( qp_type == IB_QPT_QP0 ) &&\r
610                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
611                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
612         if( ( qp_type == IB_QPT_QP1 ) &&\r
613                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
614                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
615 \r
616         /* Create the send CQ. */\r
617         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
618         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
619         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
620 \r
621         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
622                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
623 \r
624         if( status != IB_SUCCESS )\r
625         {\r
626                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
627                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
628                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
629                 return status;\r
630         }\r
631 \r
632         /* Reference the special QP service on behalf of ib_create_cq. */\r
633         ref_al_obj( &p_spl_qp_svc->obj );\r
634 \r
635         /* Check the result of the creation request. */\r
636         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
637         {\r
638                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
639                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
640                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
641                 return IB_INSUFFICIENT_RESOURCES;\r
642         }\r
643 \r
644         /* Create the receive CQ. */\r
645         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
646         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
647         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
648 \r
649         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
650                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
651 \r
652         if( status != IB_SUCCESS )\r
653         {\r
654                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
655                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
656                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
657                 return status;\r
658         }\r
659 \r
660         /* Reference the special QP service on behalf of ib_create_cq. */\r
661         ref_al_obj( &p_spl_qp_svc->obj );\r
662 \r
663         /* Check the result of the creation request. */\r
664         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
665         {\r
666                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
667                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
668                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
669                 return IB_INSUFFICIENT_RESOURCES;\r
670         }\r
671 \r
672         /* Create the special QP. */\r
673         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
674         qp_create.qp_type = qp_type;\r
675         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
676         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
677         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
678         qp_create.rq_sge = 1;\r
679         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
680         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
681         qp_create.sq_signaled = TRUE;\r
682 \r
683         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
684                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
685                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
686 \r
687         if( status != IB_SUCCESS )\r
688         {\r
689                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
690                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
691                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
692                 return status;\r
693         }\r
694 \r
695         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
696         ref_al_obj( &p_spl_qp_svc->obj );\r
697 \r
698         /* Check the result of the creation request. */\r
699         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
700         if( status != IB_SUCCESS )\r
701         {\r
702                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
703                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
704                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
705                 return status;\r
706         }\r
707 \r
708         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
709                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
710                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
711         {\r
712                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
713                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
714                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
715                 return IB_INSUFFICIENT_RESOURCES;\r
716         }\r
717 \r
718         /* Initialize the QP for use. */\r
719         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
720         if( status != IB_SUCCESS )\r
721         {\r
722                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
723                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
724                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
725                 return status;\r
726         }\r
727 \r
728         /* Post receive buffers. */\r
729         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
730         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
731         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
732         if( status != IB_SUCCESS )\r
733         {\r
734                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
735                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
736                         ("spl_qp_svc_post_recvs failed, %s\n",\r
737                         ib_get_err_str( status ) ) );\r
738                 return status;\r
739         }\r
740 \r
741         /* Create the MAD dispatcher. */\r
742         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
743                 &p_spl_qp_svc->h_mad_disp );\r
744         if( status != IB_SUCCESS )\r
745         {\r
746                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
747                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
748                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
749                 return status;\r
750         }\r
751 \r
752         /*\r
753          * Add this service to the special QP manager lookup lists.\r
754          * The service must be added to allow the creation of a QP alias.\r
755          */\r
756         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
757         if( qp_type == IB_QPT_QP0 )\r
758         {\r
759                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
760                         &p_spl_qp_svc->map_item );\r
761         }\r
762         else\r
763         {\r
764                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
765                         &p_spl_qp_svc->map_item );\r
766         }\r
767         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
768 \r
769         /*\r
770          * If the CA does not support HW agents, create a QP alias and register\r
771          * a MAD service for sending responses from the local MAD interface.\r
772          */\r
773         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
774         {\r
775                 /* Create a QP alias. */\r
776                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
777                 qp_create.qp_type =\r
778                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
779                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
780                 qp_create.sq_sge                = 1;\r
781                 qp_create.sq_signaled   = TRUE;\r
782 \r
783                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
784                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
785                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
786                         &p_spl_qp_svc->h_qp_alias );\r
787 \r
788                 if (status != IB_SUCCESS)\r
789                 {\r
790                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
791                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
792                                 ("ib_get_spl_qp alias failed, %s\n",\r
793                                 ib_get_err_str( status ) ) );\r
794                         return status;\r
795                 }\r
796 \r
797                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
798                 ref_al_obj( &p_spl_qp_svc->obj );\r
799 \r
800                 /* Register a MAD service for sends. */\r
801                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
802                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
803                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
804                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
805 \r
806                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
807                         &p_spl_qp_svc->h_mad_svc );\r
808 \r
809                 if( status != IB_SUCCESS )\r
810                 {\r
811                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
812                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
813                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
814                         return status;\r
815                 }\r
816         }\r
817 \r
818         /* Set the context of the PnP event to this child object. */\r
819         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
820 \r
821         /* The QP is ready.  Change the state. */\r
822         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
823 \r
824         /* Force a completion callback to rearm the CQs. */\r
825         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
826         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
827 \r
828         /* Start the polling thread timer. */\r
829         if( g_smi_poll_interval )\r
830         {\r
831                 cl_status =\r
832                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
833 \r
834                 if( cl_status != CL_SUCCESS )\r
835                 {\r
836                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
837                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
838                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
839                         return ib_convert_cl_status( cl_status );\r
840                 }\r
841         }\r
842 \r
843         /* Release the reference taken in init_al_obj. */\r
844         deref_al_obj( &p_spl_qp_svc->obj );\r
845 \r
846         AL_EXIT( AL_DBG_SMI );\r
847         return IB_SUCCESS;\r
848 }\r
849 \r
850 \r
851 \r
852 /*\r
853  * Return a work completion to the MAD dispatcher for the specified MAD.\r
854  */\r
855 static void\r
856 __complete_send_mad(\r
857         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
858         IN                              al_mad_wr_t* const                      p_mad_wr,\r
859         IN              const   ib_wc_status_t                          wc_status )\r
860 {\r
861         ib_wc_t                 wc;\r
862 \r
863         /* Construct a send work completion. */\r
864         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
865         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
866         wc.wc_type      = IB_WC_SEND;\r
867         wc.status       = wc_status;\r
868 \r
869         /* Set the send size if we were successful with the send. */\r
870         if( wc_status == IB_WCS_SUCCESS )\r
871                 wc.length = MAD_BLOCK_SIZE;\r
872 \r
873         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
874 }\r
875 \r
876 \r
877 \r
/*
 * Pre-destroy a special QP service.
 *
 * Moves the service to SPL_QP_DESTROYING so no new sends are accepted,
 * waits for asynchronous users (tracked by in_use_cnt) to drain, removes
 * the service from the SMI/GSI tracking maps, destroys the QP, the QP
 * alias, and both CQs, and flushes every queued MAD send back to the MAD
 * dispatcher with IB_WCS_WR_FLUSHED_ERR status.
 */
void
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_wr_t*			p_mad_wr;

	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Change the state to prevent processing new send requests. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->state = SPL_QP_DESTROYING;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/*
	 * Wait here until the special QP service is no longer in use.
	 * in_use_cnt is incremented by the send path while a local MAD is
	 * outstanding; this loop yields the processor until it reaches zero.
	 */
	while( p_spl_qp_svc->in_use_cnt )
	{
		cl_thread_suspend( 0 );
	}

	/* Destroy the special QP. */
	if( p_spl_qp_svc->h_qp )
	{
		/* If present, remove the special QP service from the tracking map. */
		cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
		if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
		{
			cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
		}
		else
		{
			cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
		}
		cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

		/* The destroy callback releases the reference held on this object. */
		status = ib_destroy_qp( p_spl_qp_svc->h_qp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

		/* Complete any outstanding MAD sends operations as "flushed". */
		for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
			 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
			 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
		{
			/* The lock is released while the dispatcher is called back. */
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );
			p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_WR_FLUSHED_ERR );
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		}

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		/* Receive MAD elements are returned to the pool by the free routine. */
	}

	/* Destroy the special QP alias and CQs. */
	if( p_spl_qp_svc->h_qp_alias )
	{
		status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_send_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_recv_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	AL_EXIT( AL_DBG_SMI );
}
966 \r
967 \r
968 \r
969 /*\r
970  * Free a special QP service.\r
971  */\r
972 void\r
973 free_spl_qp_svc(\r
974         IN                              al_obj_t*                                       p_obj )\r
975 {\r
976         spl_qp_svc_t*                   p_spl_qp_svc;\r
977         cl_list_item_t*                 p_list_item;\r
978         al_mad_element_t*               p_al_mad;\r
979         ib_api_status_t                 status;\r
980 \r
981         AL_ENTER( AL_DBG_SMI );\r
982 \r
983         CL_ASSERT( p_obj );\r
984         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
985 \r
986         /* Dereference the CA. */\r
987         if( p_spl_qp_svc->obj.p_ci_ca )\r
988                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
989 \r
990         /* Return receive MAD elements to the pool. */\r
991         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
992                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
993                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
994         {\r
995                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
996 \r
997                 status = ib_put_mad( &p_al_mad->element );\r
998                 CL_ASSERT( status == IB_SUCCESS );\r
999         }\r
1000 \r
1001         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
1002 \r
1003         destroy_al_obj( &p_spl_qp_svc->obj );\r
1004         cl_free( p_spl_qp_svc );\r
1005 \r
1006         AL_EXIT( AL_DBG_SMI );\r
1007 }\r
1008 \r
1009 \r
1010 \r
1011 /*\r
1012  * Update the base LID of a special QP service.\r
1013  */\r
1014 void\r
1015 spl_qp_svc_lid_change(\r
1016         IN                              al_obj_t*                                       p_obj,\r
1017         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1018 {\r
1019         spl_qp_svc_t*                   p_spl_qp_svc;\r
1020 \r
1021         AL_ENTER( AL_DBG_SMI );\r
1022 \r
1023         CL_ASSERT( p_obj );\r
1024         CL_ASSERT( p_pnp_rec );\r
1025         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1026 \r
1027         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1028 \r
1029         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1030         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1031 \r
1032         AL_EXIT( AL_DBG_SMI );\r
1033 }\r
1034 \r
1035 \r
1036 \r
/*
 * Route a send work request.
 *
 * Classifies an outbound MAD posted on the special QP as ROUTE_REMOTE
 * (post to the wire), ROUTE_LOCAL (process on the local CA),
 * ROUTE_LOOPBACK (locally addressed SM-to-SM heartbeat), or
 * ROUTE_DISCARD (malformed directed-route SMP).
 */
mad_route_t
route_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_send_wr_t* const			p_send_wr )
{
	al_mad_wr_t*			p_mad_wr;
	al_mad_send_t*			p_mad_send;
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	ib_av_handle_t			h_av;
	mad_route_t				route;
	boolean_t				local, loopback, discard;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_send_wr );

	/* Initialize pointers to the MAD work request and the MAD. */
	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Check if the CA has a local MAD interface. */
	local = loopback = discard = FALSE;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * If the MAD is a locally addressed Subnet Management, Performance
		 * Management, or Connection Management datagram, process the work
		 * request locally.
		 */
		h_av = p_send_wr->dgrm.ud.h_av;
		switch( p_mad->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the originator of the response.  Discard
				 * if the hop count or pointer is zero, an intermediate hop,
				 * out of bounds hop, or if the first port of the directed
				 * route return path is not this port.
				 */
				if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt or hop ptr set to 0...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt != (hop ptr - 1)...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt > max hops...discarding\n") );
					discard = TRUE;
				}
				else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
						 ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
							p_spl_qp_svc->port_num ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("return path[hop ptr - 1] != port num...discarding\n") );
					discard = TRUE;
				}
			}
			else
			{
				/* The SMP is a request. */
				if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
					( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
				{
					/* Hop fields out of range - silently discard. */
					discard = TRUE;
				}
				else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
				{
					/* Self Addressed: Sent locally, routed locally. */
					local = TRUE;
					discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
							  ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
				{
					/* End of Path: Sent remotely, routed locally. */
					local = TRUE;
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_ptr	== 0 ) )
				{
					/* Beginning of Path: Sent locally, routed remotely. */
					if( p_smp->dr_slid == IB_LID_PERMISSIVE )
					{
						discard =
							( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
							  p_spl_qp_svc->port_num );
					}
				}
				else
				{
					/* Intermediate hop. */
					discard = TRUE;
				}
			}
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);
			break;

		case IB_MCLASS_SUBN_LID:
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

			/* Fall through to check for a local MAD. */

		case IB_MCLASS_PERF:
		case IB_MCLASS_BM:
			/* Local if the destination LID matches this port's LID. */
			local = ( h_av &&
				( h_av->av_attr.dlid ==
				( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
			{
				local = ( h_av &&
					( h_av->av_attr.dlid ==
					( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			}
			break;
		}
	}

	/* Precedence, lowest to highest: caller's hint, local, loopback, discard. */
	route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
		ROUTE_LOCAL : ROUTE_REMOTE;
	if( local ) route = ROUTE_LOCAL;
	if( loopback && local ) route = ROUTE_LOOPBACK;
	if( discard ) route = ROUTE_DISCARD;

	AL_EXIT( AL_DBG_SMI );
	return route;
}
1189 \r
1190 \r
1191 \r
/*
 * Send a work request on the special QP.
 *
 * Routes the MAD, then either queues a local MAD for processing on this
 * CA or posts a remote MAD to the wire.  Returns IB_RESOURCE_BUSY when
 * the request must be retried after outstanding work completes, so that
 * send/receive completion ordering is preserved.
 */
ib_api_status_t
spl_qp_svc_send(
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_send_wr_t* const			p_send_wr )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	al_mad_wr_t*			p_mad_wr;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( h_qp );
	CL_ASSERT( p_send_wr );

	/* Get the special QP service stored in the QP's context. */
	p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, p_send_wr );

	/*
	 * Check the QP state and guard against error handling.  Also,
	 * to maintain proper order of work completions, delay processing
	 * a local MAD until any remote MAD work requests have completed,
	 * and delay processing a remote MAD until local MAD work requests
	 * have completed.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
		(is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
		( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
			p_spl_qp_svc->max_qp_depth ) )
	{
		/*
		 * Return busy status.
		 * The special QP will resume sends at this point.
		 */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		AL_EXIT( AL_DBG_SMI );
		return IB_RESOURCE_BUSY;
	}

	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

	if( is_local( route ) )
	{
		/* Save the local MAD work request for processing. */
		p_spl_qp_svc->local_mad_wr = p_mad_wr;

		/* Flag the service as in use by the asynchronous processing thread. */
		cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

		/* Local processing runs without the service lock held. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		status = local_mad_send( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/* Process a remote MAD send work request (holding the lock). */
		status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1266 \r
1267 \r
1268 \r
/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 *
 * Performs the directed-route hop pointer adjustment required for
 * outbound SMPs, queues the work request on the service tracking queue,
 * and posts it to the special QP.  On a failed post the queue insertion
 * and hop pointer adjustment are both undone.
 */
ib_api_status_t
remote_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD. */
	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	/* Perform outbound MAD processing. */

	/* Adjust directed route SMPs as required by IBA. */
	if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		if( ib_smp_is_response( p_smp ) )
		{
			/* Responses walk the hop pointer back toward the requester. */
			if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				p_smp->hop_ptr--;
		}
		else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
		{
			/*
			 * Only update the pointer if the hw_agent is not implemented.
			 * Fujitsu implements SMI in hardware, so the following has to
			 * be passed down to the hardware SMI.
			 */
			ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
			if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
				p_smp->hop_ptr++;
			ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
		}
	}

	/* Always generate send completions. */
	p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;

	/* Queue the MAD work request on the service tracking queue. */
	cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

	status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

	if( status != IB_SUCCESS )
	{
		/* Undo the tracking queue insertion on a failed post. */
		cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

		/* Reset directed route SMPs as required by IBA. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
		{
			if( ib_smp_is_response( p_smp ) )
			{
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
					p_smp->hop_ptr++;
			}
			else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
			{
				/* Only update if the hw_agent is not implemented. */
				ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
				if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
					p_smp->hop_ptr--;
				ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
			}
		}
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1346 \r
1347 \r
/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 *
 * Discards invalid MADs, loops back locally addressed SM-to-SM
 * heartbeats, and services subnet management classes directly.  Any MAD
 * that cannot be completed here (IB_NOT_DONE) is queued for the
 * asynchronous processing thread.
 */
static ib_api_status_t
local_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	mad_route_t				route;
	ib_api_status_t			status = IB_SUCCESS;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

	/* Check if this MAD should be discarded. */
	if( is_discard( route ) )
	{
		/* Deliver a "work completion" to the dispatcher. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		status = IB_INVALID_SETTING;
	}
	else if( is_loopback( route ) )
	{
		/* Loopback local SM to SM "heartbeat" messages. */
		status = loopback_mad( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/* Service subnet management classes from cached attributes. */
		switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
		case IB_MCLASS_SUBN_LID:
			status = process_subn_mad( p_spl_qp_svc, p_mad_wr );
			break;

		default:
			/* Not serviceable here - defer to the asynchronous thread. */
			status = IB_NOT_DONE;
		}
	}

	if( status == IB_NOT_DONE )
	{
		/* Queue an asynchronous processing item to process the local MAD. */
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
	}
	else
	{
		/*
		 * Clear the local MAD pointer to allow processing of other MADs.
		 * This is done after polling for attribute changes to ensure that
		 * subsequent MADs pick up any changes performed by this one.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		p_spl_qp_svc->local_mad_wr = NULL;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* No longer in use by the asynchronous processing thread. */
		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

		/* Special QP operations will resume by unwinding. */
	}

	/*
	 * Errors were already reported to the dispatcher as completions
	 * above, so this routine always reports success to its caller.
	 */
	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
1420 \r
1421 \r
1422 static ib_api_status_t\r
1423 get_resp_mad(\r
1424         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1425         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1426                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1427 {\r
1428         ib_api_status_t                 status;\r
1429 \r
1430         AL_ENTER( AL_DBG_SMI );\r
1431 \r
1432         CL_ASSERT( p_spl_qp_svc );\r
1433         CL_ASSERT( p_mad_wr );\r
1434         CL_ASSERT( pp_mad_resp );\r
1435 \r
1436         /* Get a MAD element from the pool for the response. */\r
1437         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1438                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1439         if( status != IB_SUCCESS )\r
1440         {\r
1441                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1442                         IB_WCS_LOCAL_OP_ERR );\r
1443         }\r
1444 \r
1445         AL_EXIT( AL_DBG_SMI );\r
1446         return status;\r
1447 }\r
1448 \r
1449 \r
/*
 * Complete a locally processed MAD: build a receive MAD element that
 * simulates the inbound side of the exchange, hand it to the MAD
 * dispatcher, then report the originating send as successful.
 * Returns the status of the receive-side dispatch.
 */
static ib_api_status_t
complete_local_mad(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr,
	IN				ib_mad_element_t* const		p_mad_resp )
{
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );
	CL_ASSERT( p_mad_resp );

	/* Construct the receive MAD element. */
	p_mad_resp->status		= IB_WCS_SUCCESS;
	p_mad_resp->remote_qp	= p_mad_wr->send_wr.dgrm.ud.remote_qp;
	p_mad_resp->remote_lid	= p_spl_qp_svc->base_lid;
	/*
	 * NOTE(review): this tests the send_opt field against a receive
	 * option mask (IB_RECV_OPT_IMMEDIATE).  It is only correct if the
	 * send and receive immediate flags share the same bit position -
	 * confirm against ib_types.h whether IB_SEND_OPT_IMMEDIATE was
	 * intended here.
	 */
	if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
	{
		p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;
		p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;
	}

	/*
	 * Hand the receive MAD element to the dispatcher before completing
	 * the send.  This guarantees that the send request cannot time out.
	 */
	status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );

	/* Forward the send work completion to the dispatcher. */
	__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1486 \r
1487 \r
1488 static ib_api_status_t\r
1489 loopback_mad(\r
1490         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1491         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1492 {\r
1493         ib_mad_t                                *p_mad;\r
1494         ib_mad_element_t                *p_mad_resp;\r
1495         ib_api_status_t                 status;\r
1496 \r
1497         AL_ENTER( AL_DBG_SMI );\r
1498 \r
1499         CL_ASSERT( p_spl_qp_svc );\r
1500         CL_ASSERT( p_mad_wr );\r
1501 \r
1502         /* Get a MAD element from the pool for the response. */\r
1503         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1504         if( status == IB_SUCCESS )\r
1505         {\r
1506                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1507                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1508 \r
1509                 /* Simulate a send/receive between local managers. */\r
1510                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1511 \r
1512                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1513         }\r
1514 \r
1515         AL_EXIT( AL_DBG_SMI );\r
1516         return status;\r
1517 }\r
1518 \r
1519 \r
/*
 * Answer a NodeInfo query from the cached CA and port attributes,
 * avoiding a round trip to the hardware SM agent.
 */
static ib_api_status_t
process_node_info(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* Node info is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Start the response as a copy of the request, marked GetResp. */
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed route responses carry the direction bit in the status. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		/* TODO: Get some unique identifier for the system */
		p_node_info->sys_guid = p_ca_attr->ca_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		/* Port number occupies the first byte of the combined field. */
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		/* Loop the response back as a receive and complete the send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1593 \r
1594 \r
/*
 * Answer a NodeDescription query using the cached machine name
 * (node_desc), avoiding a round trip to the hardware SM agent.
 */
static ib_api_status_t
process_node_desc(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* Node description is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Start the response as a copy of the request, marked GetResp. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method =
			(IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed route responses carry the direction bit in the status. */
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, 
			node_desc, sizeof(node_desc) );

		/* Loop the response back as a receive and complete the send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1641 \r
1642 \r
1643 /*\r
1644  * Process subnet administration MADs using cached data if possible.\r
1645  */\r
1646 static ib_api_status_t\r
1647 process_subn_mad(\r
1648         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1649         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1650 {\r
1651         ib_api_status_t         status;\r
1652         ib_smp_t                        *p_smp;\r
1653 \r
1654         AL_ENTER( AL_DBG_SMI );\r
1655 \r
1656         CL_ASSERT( p_spl_qp_svc );\r
1657         CL_ASSERT( p_mad_wr );\r
1658 \r
1659         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1660 \r
1661         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1662                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
1663 \r
1664         switch( p_smp->attr_id )\r
1665         {\r
1666         case IB_MAD_ATTR_NODE_INFO:\r
1667                 status = process_node_info( p_spl_qp_svc, p_mad_wr );\r
1668                 break;\r
1669 \r
1670         case IB_MAD_ATTR_NODE_DESC:\r
1671                 status = process_node_desc( p_spl_qp_svc, p_mad_wr );\r
1672                 break;\r
1673 \r
1674         default:\r
1675                 status = IB_NOT_DONE;\r
1676                 break;\r
1677         }\r
1678 \r
1679         AL_EXIT( AL_DBG_SMI );\r
1680         return status;\r
1681 }\r
1682 \r
1683 \r
1684 /*\r
1685  * Process a local MAD send work request.\r
1686  */\r
1687 ib_api_status_t\r
1688 fwd_local_mad(\r
1689         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1690         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1691 {\r
1692         ib_mad_t*                               p_mad;\r
1693         ib_smp_t*                               p_smp;\r
1694         al_mad_send_t*                  p_mad_send;\r
1695         ib_mad_element_t*               p_mad_response;\r
1696         ib_mad_t*                               p_mad_response_buf;\r
1697         ib_api_status_t                 status = IB_SUCCESS;\r
1698         boolean_t                               smp_is_set;\r
1699 \r
1700         AL_ENTER( AL_DBG_SMI );\r
1701 \r
1702         CL_ASSERT( p_spl_qp_svc );\r
1703         CL_ASSERT( p_mad_wr );\r
1704 \r
1705         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1706         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1707         p_smp = (ib_smp_t*)p_mad;\r
1708 \r
1709         smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);\r
1710 \r
1711         /* Get a MAD element from the pool for the response. */\r
1712         p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1713 //*** Commented code to work-around ib_local_mad() requiring a response MAD\r
1714 //*** as input.  Remove comments once the ib_local_mad() implementation allows\r
1715 //*** for a NULL response MAD, when one is not expected.\r
1716 //*** Note that an attempt to route an invalid response MAD in this case\r
1717 //*** will fail harmlessly.\r
1718 //***   if( p_mad_send->p_send_mad->resp_expected )\r
1719 //***   {\r
1720                 status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );\r
1721                 if( status != IB_SUCCESS )\r
1722                 {\r
1723                         AL_EXIT( AL_DBG_SMI );\r
1724                         return status;\r
1725                 }\r
1726                 p_mad_response_buf = p_mad_response->p_mad_buf;\r
1727 //***   }\r
1728 //***   else\r
1729 //***   {\r
1730 //***           p_mad_response_buf = NULL;\r
1731 //***   }\r
1732 \r
1733         /* Adjust directed route SMPs as required by IBA. */\r
1734         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1735         {\r
1736                 CL_ASSERT( !ib_smp_is_response( p_smp ) );\r
1737 \r
1738                 /*\r
1739                  * If this was a self addressed, directed route SMP, increment\r
1740                  * the hop pointer in the request before delivery as required\r
1741                  * by IBA.  Otherwise, adjustment for remote requests occurs\r
1742                  * during inbound processing.\r
1743                  */\r
1744                 if( p_smp->hop_count == 0 )\r
1745                         p_smp->hop_ptr++;\r
1746         }\r
1747 \r
1748         /* Forward the locally addressed MAD to the CA interface. */\r
1749         status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
1750                 p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );\r
1751 \r
1752         /* Reset directed route SMPs as required by IBA. */\r
1753         if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1754         {\r
1755                 /*\r
1756                  * If this was a self addressed, directed route SMP, decrement\r
1757                  * the hop pointer in the response before delivery as required\r
1758                  * by IBA.  Otherwise, adjustment for remote responses occurs\r
1759                  * during outbound processing.\r
1760                  */\r
1761                 if( p_smp->hop_count == 0 )\r
1762                 {\r
1763                         /* Adjust the request SMP. */\r
1764                         p_smp->hop_ptr--;\r
1765 \r
1766                         /* Adjust the response SMP. */\r
1767                         if( p_mad_response_buf )\r
1768                         {\r
1769                                 p_smp = (ib_smp_t*)p_mad_response_buf;\r
1770                                 p_smp->hop_ptr--;\r
1771                         }\r
1772                 }\r
1773         }\r
1774 \r
1775         if( status != IB_SUCCESS )\r
1776         {\r
1777                 if( p_mad_response )\r
1778                         ib_put_mad( p_mad_response );\r
1779 \r
1780                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1781                         IB_WCS_LOCAL_OP_ERR );\r
1782                 AL_EXIT( AL_DBG_SMI );\r
1783                 return status;\r
1784         }\r
1785 \r
1786         /* Check the completion status of this simulated send. */\r
1787         if( p_mad_response_buf )\r
1788         {\r
1789                 /*\r
1790                  * The SMI is uses PnP polling to refresh the base_lid and lmc.\r
1791                  * Polling takes time, so we update the values here to prevent\r
1792                  * the failure of LID routed MADs sent immediately following this\r
1793                  * assignment.  Check the response to see if the port info was set.\r
1794                  */\r
1795                 if( smp_is_set )\r
1796                 {\r
1797                         ib_port_info_t*         p_port_info = NULL;\r
1798 \r
1799                         switch( p_mad_response_buf->mgmt_class )\r
1800                         {\r
1801                         case IB_MCLASS_SUBN_DIR:\r
1802                                 if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1803                                         ( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )\r
1804                                 {\r
1805                                         p_port_info =\r
1806                                                 (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1807                                 }\r
1808                                 break;\r
1809 \r
1810                         case IB_MCLASS_SUBN_LID:\r
1811                                 if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&\r
1812                                         ( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )\r
1813                                 {\r
1814                                         p_port_info =\r
1815                                                 (ib_port_info_t*)( p_mad_response_buf + 1 );\r
1816                                 }\r
1817                                 break;\r
1818 \r
1819                         default:\r
1820                                 break;\r
1821                         }\r
1822 \r
1823                         if( p_port_info )\r
1824                         {\r
1825                                 p_spl_qp_svc->base_lid = p_port_info->base_lid;\r
1826                                 p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );\r
1827                                 if (p_port_info->subnet_timeout & 0x80)\r
1828                                 {\r
1829                                         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
1830                                                 ("Client reregister event, setting sm_lid to 0.\n"));\r
1831                                         ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1832                                         p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->\r
1833                                                 p_port_attr->sm_lid= 0;\r
1834                                         ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
1835                                 }\r
1836                         }\r
1837                 }\r
1838         }\r
1839 \r
1840         status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );\r
1841 \r
1842         /* If the SMP was a Get, no need to trigger a PnP poll. */\r
1843         if( status == IB_SUCCESS && !smp_is_set )\r
1844                 status = IB_NOT_DONE;\r
1845 \r
1846         AL_EXIT( AL_DBG_SMI );\r
1847         return status;\r
1848 }\r
1849 \r
1850 \r
1851 \r
1852 /*\r
1853  * Asynchronous processing thread callback to send a local MAD.\r
1854  */\r
1855 void\r
1856 send_local_mad_cb(\r
1857         IN                              cl_async_proc_item_t*           p_item )\r
1858 {\r
1859         spl_qp_svc_t*                   p_spl_qp_svc;\r
1860         ib_api_status_t                 status;\r
1861 \r
1862         AL_ENTER( AL_DBG_SMI_CB );\r
1863 \r
1864         CL_ASSERT( p_item );\r
1865         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
1866 \r
1867         /* Process a local MAD send work request. */\r
1868         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
1869         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
1870 \r
1871         /*\r
1872          * If we successfully processed a local MAD, which could have changed\r
1873          * something (e.g. the LID) on the HCA.  Scan for changes.\r
1874          */\r
1875         if( status == IB_SUCCESS )\r
1876                 pnp_poll();\r
1877 \r
1878         /*\r
1879          * Clear the local MAD pointer to allow processing of other MADs.\r
1880          * This is done after polling for attribute changes to ensure that\r
1881          * subsequent MADs pick up any changes performed by this one.\r
1882          */\r
1883         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1884         p_spl_qp_svc->local_mad_wr = NULL;\r
1885         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1886 \r
1887         /* Continue processing any queued MADs on the QP. */\r
1888         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1889 \r
1890         /* No longer in use by the asynchronous processing thread. */\r
1891         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
1892 \r
1893         AL_EXIT( AL_DBG_SMI );\r
1894 }\r
1895 \r
1896 \r
1897 \r
1898 /*\r
1899  * Special QP send completion callback.\r
1900  */\r
1901 void\r
1902 spl_qp_send_comp_cb(\r
1903         IN              const   ib_cq_handle_t                          h_cq,\r
1904         IN                              void*                                           cq_context )\r
1905 {\r
1906         spl_qp_svc_t*                   p_spl_qp_svc;\r
1907 \r
1908         AL_ENTER( AL_DBG_SMI_CB );\r
1909 \r
1910         CL_ASSERT( cq_context );\r
1911         p_spl_qp_svc = cq_context;\r
1912 \r
1913 #if defined( CL_USE_MUTEX )\r
1914 \r
1915         /* Queue an asynchronous processing item to process sends. */\r
1916         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1917         if( !p_spl_qp_svc->send_async_queued )\r
1918         {\r
1919                 p_spl_qp_svc->send_async_queued = TRUE;\r
1920                 ref_al_obj( &p_spl_qp_svc->obj );\r
1921                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
1922         }\r
1923         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1924 \r
1925 #else\r
1926 \r
1927         /* Invoke the callback directly. */\r
1928         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
1929         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
1930 \r
1931         /* Continue processing any queued MADs on the QP. */\r
1932         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1933 \r
1934 #endif\r
1935 \r
1936         AL_EXIT( AL_DBG_SMI );\r
1937 }\r
1938 \r
1939 \r
1940 \r
#if defined( CL_USE_MUTEX )
/*
 * Asynchronous processing thread callback to drain send completions
 * when mutex-based serialization is in use.
 */
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;
	ib_api_status_t			resume_status;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Allow another send completion to queue this item again. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain the send CQ. */
	spl_qp_comp( p_svc, p_svc->h_send_cq, IB_WC_SEND );

	/* Continue processing any queued MADs on the QP. */
	resume_status = special_qp_resume_sends( p_svc->h_qp );
	CL_ASSERT( resume_status == IB_SUCCESS );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
1970 \r
1971 \r
1972 \r
1973 /*\r
1974  * Special QP receive completion callback.\r
1975  */\r
1976 void\r
1977 spl_qp_recv_comp_cb(\r
1978         IN              const   ib_cq_handle_t                          h_cq,\r
1979         IN                              void*                                           cq_context )\r
1980 {\r
1981         spl_qp_svc_t*                   p_spl_qp_svc;\r
1982 \r
1983         AL_ENTER( AL_DBG_SMI );\r
1984 \r
1985         CL_ASSERT( cq_context );\r
1986         p_spl_qp_svc = cq_context;\r
1987 \r
1988 #if defined( CL_USE_MUTEX )\r
1989 \r
1990         /* Queue an asynchronous processing item to process receives. */\r
1991         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1992         if( !p_spl_qp_svc->recv_async_queued )\r
1993         {\r
1994                 p_spl_qp_svc->recv_async_queued = TRUE;\r
1995                 ref_al_obj( &p_spl_qp_svc->obj );\r
1996                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
1997         }\r
1998         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1999 \r
2000 #else\r
2001 \r
2002         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
2003         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
2004 \r
2005 #endif\r
2006 \r
2007         AL_EXIT( AL_DBG_SMI );\r
2008 }\r
2009 \r
2010 \r
2011 \r
#if defined( CL_USE_MUTEX )
/*
 * Asynchronous processing thread callback to drain receive completions
 * when mutex-based serialization is in use.
 */
void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_item );
	p_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Allow another receive completion to queue this item again. */
	cl_spinlock_acquire( &p_svc->obj.lock );
	p_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_svc->obj.lock );

	/* Drain the receive CQ. */
	spl_qp_comp( p_svc, p_svc->h_recv_cq, IB_WC_RECV );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_SMI );
}
#endif
2036 \r
2037 \r
2038 \r
2039 /*\r
2040  * Special QP completion handler.\r
2041  */\r
2042 void\r
2043 spl_qp_comp(\r
2044         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2045         IN              const   ib_cq_handle_t                          h_cq,\r
2046         IN                              ib_wc_type_t                            wc_type )\r
2047 {\r
2048         ib_wc_t                                 wc;\r
2049         ib_wc_t*                                p_free_wc = &wc;\r
2050         ib_wc_t*                                p_done_wc;\r
2051         al_mad_wr_t*                    p_mad_wr;\r
2052         al_mad_element_t*               p_al_mad;\r
2053         ib_mad_element_t*               p_mad_element;\r
2054         ib_smp_t*                               p_smp;\r
2055         ib_api_status_t                 status;\r
2056 \r
2057         AL_ENTER( AL_DBG_SMI_CB );\r
2058 \r
2059         CL_ASSERT( p_spl_qp_svc );\r
2060         CL_ASSERT( h_cq );\r
2061 \r
2062         /* Check the QP state and guard against error handling. */\r
2063         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2064         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2065         {\r
2066                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2067                 return;\r
2068         }\r
2069         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2070         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2071 \r
2072         wc.p_next = NULL;\r
2073         /* Process work completions. */\r
2074         while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )\r
2075         {\r
2076                 /* Process completions one at a time. */\r
2077                 CL_ASSERT( p_done_wc );\r
2078 \r
2079                 /* Flushed completions are handled elsewhere. */\r
2080                 if( wc.status == IB_WCS_WR_FLUSHED_ERR )\r
2081                 {\r
2082                         p_free_wc = &wc;\r
2083                         continue;\r
2084                 }\r
2085 \r
2086                 /*\r
2087                  * Process the work completion.  Per IBA specification, the\r
2088                  * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.\r
2089                  * Use the wc_type parameter.\r
2090                  */\r
2091                 switch( wc_type )\r
2092                 {\r
2093                 case IB_WC_SEND:\r
2094                         /* Get a pointer to the MAD work request. */\r
2095                         p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);\r
2096 \r
2097                         /* Remove the MAD work request from the service tracking queue. */\r
2098                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2099                         cl_qlist_remove_item( &p_spl_qp_svc->send_queue,\r
2100                                 &p_mad_wr->list_item );\r
2101                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2102 \r
2103                         /* Reset directed route SMPs as required by IBA. */\r
2104                         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
2105                         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2106                         {\r
2107                                 if( ib_smp_is_response( p_smp ) )\r
2108                                         p_smp->hop_ptr++;\r
2109                                 else\r
2110                                         p_smp->hop_ptr--;\r
2111                         }\r
2112 \r
2113                         /* Report the send completion to the dispatcher. */\r
2114                         mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );\r
2115                         break;\r
2116 \r
2117                 case IB_WC_RECV:\r
2118 \r
2119                         /* Initialize pointers to the MAD element. */\r
2120                         p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);\r
2121                         p_mad_element = &p_al_mad->element;\r
2122 \r
2123                         /* Remove the AL MAD element from the service tracking list. */\r
2124                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2125 \r
2126                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2127                                 &p_al_mad->list_item );\r
2128 \r
2129                         /* Replenish the receive buffer. */\r
2130                         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2131                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2132 \r
2133                         /* Construct the MAD element from the receive work completion. */\r
2134                         build_mad_recv( p_mad_element, &wc );\r
2135 \r
2136                         /* Process the received MAD. */\r
2137                         status = process_mad_recv( p_spl_qp_svc, p_mad_element );\r
2138 \r
2139                         /* Discard this MAD on error. */\r
2140                         if( status != IB_SUCCESS )\r
2141                         {\r
2142                                 status = ib_put_mad( p_mad_element );\r
2143                                 CL_ASSERT( status == IB_SUCCESS );\r
2144                         }\r
2145                         break;\r
2146 \r
2147                 default:\r
2148                         CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );\r
2149                         break;\r
2150                 }\r
2151 \r
2152                 if( wc.status != IB_WCS_SUCCESS )\r
2153                 {\r
2154                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2155                                 ("special QP completion error: %s! internal syndrome 0x%I64x\n",\r
2156                                 ib_get_wc_status_str( wc.status ), wc.vendor_specific) );\r
2157 \r
2158                         /* Reset the special QP service and return. */\r
2159                         spl_qp_svc_reset( p_spl_qp_svc );\r
2160                 }\r
2161                 p_free_wc = &wc;\r
2162         }\r
2163 \r
2164         /* Rearm the CQ. */\r
2165         status = ib_rearm_cq( h_cq, FALSE );\r
2166         CL_ASSERT( status == IB_SUCCESS );\r
2167 \r
2168         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2169         AL_EXIT( AL_DBG_SMI_CB );\r
2170 }\r
2171 \r
2172 \r
2173 \r
/*
 * Process a MAD received on the special QP: validate it, select a route
 * (discard, dispatcher, remote SM, or local HCA), and deliver it.
 *
 * Returns IB_SUCCESS if the MAD was handed off; IB_ERROR (or the
 * delivery status) otherwise, in which case the caller discards it.
 */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_smp_t*				p_smp;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			/* Discard SMPs whose hop fields would overrun the path arrays. */
			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * it if the source LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * it if the destination LID or hop pointer are incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;	/* Set path per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
					route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through to the LID-routed SMP handling */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			/* Process the received GMP: only Gets/Sets go to the HCA. */
			switch( p_mad_element->p_mad_buf->method )
			{
			case IB_MAD_METHOD_GET:
			case IB_MAD_METHOD_SET:
				route = ROUTE_LOCAL;
				break;
			default:
				/* All other methods keep the dispatcher route. */
				break;
			}
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		case IB_MCLASS_SUBN_ADM:
		case IB_MCLASS_DEV_MGMT:
		case IB_MCLASS_COMM_MGMT:
		case IB_MCLASS_SNMP:
			/* These classes always go to the dispatcher. */
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2320 \r
2321 \r
2322 \r
2323 /*\r
2324  * Route a received SMP.\r
2325  */\r
2326 mad_route_t\r
2327 route_recv_smp(\r
2328         IN                              ib_mad_element_t*                       p_mad_element )\r
2329 {\r
2330         mad_route_t                             route;\r
2331 \r
2332         AL_ENTER( AL_DBG_SMI );\r
2333 \r
2334         CL_ASSERT( p_mad_element );\r
2335 \r
2336         /* Process the received SMP. */\r
2337         switch( p_mad_element->p_mad_buf->method )\r
2338         {\r
2339         case IB_MAD_METHOD_GET:\r
2340         case IB_MAD_METHOD_SET:\r
2341                 route = route_recv_smp_attr( p_mad_element );\r
2342                 break;\r
2343 \r
2344         case IB_MAD_METHOD_TRAP:\r
2345                 /*\r
2346                  * Special check to route locally generated traps to the remote SM.\r
2347                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2348                  * IB_RECV_OPT_FORWARD flag.\r
2349                  *\r
2350                  * Note that because forwarded traps use AL MAD services, the upper\r
2351                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2352                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2353                  * TID.\r
2354                  */\r
2355                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2356                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2357                 break;\r
2358 \r
2359         case IB_MAD_METHOD_TRAP_REPRESS:\r
2360                 /*\r
2361                  * Note that because forwarded traps use AL MAD services, the upper\r
2362                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2363                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2364                  * TID.\r
2365                  */\r
2366                 route = ROUTE_LOCAL;\r
2367                 break;\r
2368 \r
2369         default:\r
2370                 route = ROUTE_DISPATCHER;\r
2371                 break;\r
2372         }\r
2373 \r
2374         AL_EXIT( AL_DBG_SMI );\r
2375         return route;\r
2376 }\r
2377 \r
2378 \r
2379 \r
2380 /*\r
2381  * Route received SMP attributes.\r
2382  */\r
2383 mad_route_t\r
2384 route_recv_smp_attr(\r
2385         IN                              ib_mad_element_t*                       p_mad_element )\r
2386 {\r
2387         mad_route_t                             route;\r
2388 \r
2389         AL_ENTER( AL_DBG_SMI );\r
2390 \r
2391         CL_ASSERT( p_mad_element );\r
2392 \r
2393         /* Process the received SMP attributes. */\r
2394         switch( p_mad_element->p_mad_buf->attr_id )\r
2395         {\r
2396         case IB_MAD_ATTR_NODE_DESC:\r
2397         case IB_MAD_ATTR_NODE_INFO:\r
2398         case IB_MAD_ATTR_GUID_INFO:\r
2399         case IB_MAD_ATTR_PORT_INFO:\r
2400         case IB_MAD_ATTR_P_KEY_TABLE:\r
2401         case IB_MAD_ATTR_SLVL_TABLE:\r
2402         case IB_MAD_ATTR_VL_ARBITRATION:\r
2403         case IB_MAD_ATTR_VENDOR_DIAG:\r
2404         case IB_MAD_ATTR_LED_INFO:\r
2405                 route = ROUTE_LOCAL;\r
2406                 break;\r
2407 \r
2408         default:\r
2409                 route = ROUTE_DISPATCHER;\r
2410                 break;\r
2411         }\r
2412 \r
2413         AL_EXIT( AL_DBG_SMI );\r
2414         return route;\r
2415 }\r
2416 \r
2417 \r
2418 /*\r
2419  * Route a received GMP.\r
2420  */\r
2421 mad_route_t\r
2422 route_recv_gmp(\r
2423         IN                              ib_mad_element_t*                       p_mad_element )\r
2424 {\r
2425         mad_route_t                             route;\r
2426 \r
2427         AL_ENTER( AL_DBG_SMI );\r
2428 \r
2429         CL_ASSERT( p_mad_element );\r
2430 \r
2431         /* Process the received GMP. */\r
2432         switch( p_mad_element->p_mad_buf->method )\r
2433         {\r
2434         case IB_MAD_METHOD_GET:\r
2435         case IB_MAD_METHOD_SET:\r
2436                 /* Route vendor specific MADs to the HCA provider. */\r
2437                 if( ib_class_is_vendor_specific(\r
2438                         p_mad_element->p_mad_buf->mgmt_class ) )\r
2439                 {\r
2440                         route = ROUTE_LOCAL;\r
2441                 }\r
2442                 else\r
2443                 {\r
2444                         route = route_recv_gmp_attr( p_mad_element );\r
2445                 }\r
2446                 break;\r
2447 \r
2448         default:\r
2449                 route = ROUTE_DISPATCHER;\r
2450                 break;\r
2451         }\r
2452 \r
2453         AL_EXIT( AL_DBG_SMI );\r
2454         return route;\r
2455 }\r
2456 \r
2457 \r
2458 \r
2459 /*\r
2460  * Route received GMP attributes.\r
2461  */\r
2462 mad_route_t\r
2463 route_recv_gmp_attr(\r
2464         IN                              ib_mad_element_t*                       p_mad_element )\r
2465 {\r
2466         mad_route_t                             route;\r
2467 \r
2468         AL_ENTER( AL_DBG_SMI );\r
2469 \r
2470         CL_ASSERT( p_mad_element );\r
2471 \r
2472         /* Process the received GMP attributes. */\r
2473         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
2474                 route = ROUTE_LOCAL;\r
2475         else\r
2476                 route = ROUTE_DISPATCHER;\r
2477 \r
2478         AL_EXIT( AL_DBG_SMI );\r
2479         return route;\r
2480 }\r
2481 \r
2482 \r
2483 \r
2484 /*\r
2485  * Forward a locally generated Subnet Management trap.\r
2486  */\r
2487 ib_api_status_t\r
2488 forward_sm_trap(\r
2489         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2490         IN                              ib_mad_element_t*                       p_mad_element )\r
2491 {\r
2492         ib_av_attr_t                    av_attr;\r
2493         ib_api_status_t                 status;\r
2494 \r
2495         AL_ENTER( AL_DBG_SMI_CB );\r
2496 \r
2497         CL_ASSERT( p_spl_qp_svc );\r
2498         CL_ASSERT( p_mad_element );\r
2499 \r
2500         /* Check the SMP class. */\r
2501         if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )\r
2502         {\r
2503                 /*\r
2504                  * Per IBA Specification Release 1.1 Section 14.2.2.1,\r
2505                  * "C14-5: Only a SM shall originate a directed route SMP."\r
2506                  * Therefore all traps should be LID routed; drop this one.\r
2507                  */\r
2508                 AL_EXIT( AL_DBG_SMI_CB );\r
2509                 return IB_ERROR;\r
2510         }\r
2511 \r
2512         /* Create an address vector for the SM. */\r
2513         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2514         av_attr.port_num = p_spl_qp_svc->port_num;\r
2515         av_attr.sl = p_mad_element->remote_sl;\r
2516         av_attr.dlid = p_mad_element->remote_lid;\r
2517         if( p_mad_element->grh_valid )\r
2518         {\r
2519                 cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );\r
2520                 av_attr.grh.src_gid      = p_mad_element->p_grh->dest_gid;\r
2521                 av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;\r
2522                 av_attr.grh_valid = TRUE;\r
2523         }\r
2524 \r
2525         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2526                 &av_attr, &p_mad_element->h_av );\r
2527 \r
2528         if( status != IB_SUCCESS )\r
2529         {\r
2530                 AL_EXIT( AL_DBG_SMI_CB );\r
2531                 return status;\r
2532         }\r
2533 \r
2534         /* Complete the initialization of the MAD element. */\r
2535         p_mad_element->p_next = NULL;\r
2536         p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;\r
2537         p_mad_element->resp_expected = FALSE;\r
2538 \r
2539         /* Clear context1 for proper send completion callback processing. */\r
2540         p_mad_element->context1 = NULL;\r
2541 \r
2542         /*\r
2543          * Forward the trap.  Note that because forwarded traps use AL MAD\r
2544          * services, the upper 32-bits of the TID are reserved by the access\r
2545          * layer.  When matching a Trap Repress MAD, the SMA must only use\r
2546          * the lower 32-bits of the TID.\r
2547          */\r
2548         status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );\r
2549 \r
2550         if( status != IB_SUCCESS )\r
2551                 ib_destroy_av( p_mad_element->h_av );\r
2552 \r
2553         AL_EXIT( AL_DBG_SMI_CB );\r
2554         return status;\r
2555 }\r
2556 \r
2557 \r
/*
 * Process a locally routed MAD received from the special QP.
 *
 * The MAD is resent to the local CA via the MAD service with
 * IB_SEND_OPT_LOCAL so the HCA can generate the response (presumably
 * delivered back through spl_qp_alias_recv_cb -- confirm against the
 * MAD service registration).
 */
ib_api_status_t
recv_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request )
{
	ib_mad_t*				p_mad_hdr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_request );

	/* Initialize the MAD element. */
	p_mad_hdr = ib_get_mad_buf( p_mad_request );
	/* Point context1 at the element itself so the completion side can
	 * recover the original request (read as send_context1 there). */
	p_mad_request->context1 = p_mad_request;

	/* Save the TID so it can be restored when the response comes back. */
	p_mad_request->context2 =
		(void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );
/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
	/* Clear the AL-reserved portion of the TID before local processing. */
	al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )

	/*
	 * We need to get a response from the local HCA to this MAD only if this
	 * MAD is not itself a response.
	 */
	p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
		( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
	p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
	p_mad_request->send_opt = IB_SEND_OPT_LOCAL;

	/* Send the locally addressed MAD request to the CA for processing. */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );

	AL_EXIT( AL_DBG_SMI_CB );
	return status;
}
2605 \r
2606 \r
2607 \r
2608 /*\r
2609  * Special QP alias send completion callback.\r
2610  */\r
2611 void\r
2612 spl_qp_alias_send_cb(\r
2613         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2614         IN                              void*                                           mad_svc_context,\r
2615         IN                              ib_mad_element_t*                       p_mad_element )\r
2616 {\r
2617         ib_api_status_t                 status;\r
2618 \r
2619         AL_ENTER( AL_DBG_SMI_CB );\r
2620 \r
2621         UNUSED_PARAM( h_mad_svc );\r
2622         UNUSED_PARAM( mad_svc_context );\r
2623         CL_ASSERT( p_mad_element );\r
2624 \r
2625         if( p_mad_element->h_av )\r
2626         {\r
2627                 status = ib_destroy_av( p_mad_element->h_av );\r
2628                 CL_ASSERT( status == IB_SUCCESS );\r
2629         }\r
2630 \r
2631         status = ib_put_mad( p_mad_element );\r
2632         CL_ASSERT( status == IB_SUCCESS );\r
2633 \r
2634         AL_EXIT( AL_DBG_SMI_CB );\r
2635 }\r
2636 \r
2637 \r
2638 \r
2639 /*\r
2640  * Special QP alias receive completion callback.\r
2641  */\r
2642 void\r
2643 spl_qp_alias_recv_cb(\r
2644         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2645         IN                              void*                                           mad_svc_context,\r
2646         IN                              ib_mad_element_t*                       p_mad_response )\r
2647 {\r
2648         spl_qp_svc_t*                   p_spl_qp_svc;\r
2649         ib_mad_element_t*               p_mad_request;\r
2650         ib_mad_t*                               p_mad_hdr;\r
2651         ib_av_attr_t                    av_attr;\r
2652         ib_api_status_t                 status;\r
2653 \r
2654         AL_ENTER( AL_DBG_SMI_CB );\r
2655 \r
2656         CL_ASSERT( mad_svc_context );\r
2657         CL_ASSERT( p_mad_response );\r
2658         CL_ASSERT( p_mad_response->send_context1 );\r
2659 \r
2660         /* Initialize pointers. */\r
2661         p_spl_qp_svc = mad_svc_context;\r
2662         p_mad_request = p_mad_response->send_context1;\r
2663         p_mad_hdr = ib_get_mad_buf( p_mad_response );\r
2664 \r
2665         /* Restore the TID, so it will match on the remote side. */\r
2666 #pragma warning( push, 3 )\r
2667         al_set_al_tid( &p_mad_hdr->trans_id,\r
2668                 (uint32_t)(uintn_t)p_mad_response->send_context2 );\r
2669 #pragma warning( pop )\r
2670 \r
2671         /* Set the remote QP. */\r
2672         p_mad_response->remote_qp       = p_mad_request->remote_qp;\r
2673         p_mad_response->remote_qkey = p_mad_request->remote_qkey;\r
2674 \r
2675         /* Prepare to create an address vector. */\r
2676         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2677         av_attr.port_num        = p_spl_qp_svc->port_num;\r
2678         av_attr.sl                      = p_mad_request->remote_sl;\r
2679         av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
2680         av_attr.path_bits       = p_mad_request->path_bits;\r
2681         if( p_mad_request->grh_valid )\r
2682         {\r
2683                 cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );\r
2684                 av_attr.grh.src_gid      = p_mad_request->p_grh->dest_gid;\r
2685                 av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;\r
2686                 av_attr.grh_valid = TRUE;\r
2687         }\r
2688         if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&\r
2689                 ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )\r
2690                 av_attr.dlid = IB_LID_PERMISSIVE;\r
2691         else\r
2692                 av_attr.dlid = p_mad_request->remote_lid;\r
2693 \r
2694         /* Create an address vector. */\r
2695         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2696                 &av_attr, &p_mad_response->h_av );\r
2697 \r
2698         if( status != IB_SUCCESS )\r
2699         {\r
2700                 ib_put_mad( p_mad_response );\r
2701 \r
2702                 AL_EXIT( AL_DBG_SMI );\r
2703                 return;\r
2704         }\r
2705 \r
2706         /* Send the response. */\r
2707         status = ib_send_mad( h_mad_svc, p_mad_response, NULL );\r
2708 \r
2709         if( status != IB_SUCCESS )\r
2710         {\r
2711                 ib_destroy_av( p_mad_response->h_av );\r
2712                 ib_put_mad( p_mad_response );\r
2713         }\r
2714 \r
2715         AL_EXIT( AL_DBG_SMI_CB );\r
2716 }\r
2717 \r
2718 \r
2719 \r
/*
 * Post receive buffers to a special QP.
 *
 * Replenishes the receive queue from the MAD pool until max_qp_depth
 * receives are outstanding, or until an allocation or post failure
 * stops the loop.  Both visible callers hold p_spl_qp_svc->obj.lock
 * across this call, which protects recv_queue.
 *
 * Returns the status of the last pool-get or post operation.
 */
static ib_api_status_t
spl_qp_svc_post_recvs(
	IN				spl_qp_svc_t*	const		p_spl_qp_svc )
{
	ib_mad_element_t*		p_mad_element;
	al_mad_element_t*		p_al_element;
	ib_recv_wr_t			recv_wr;
	ib_api_status_t			status = IB_SUCCESS;

	/* Attempt to post receive buffers up to the max_qp_depth limit. */
	while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <
		(int32_t)p_spl_qp_svc->max_qp_depth )
	{
		/* Get a MAD element from the pool. */
		status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,
			MAD_BLOCK_SIZE, &p_mad_element );

		if( status != IB_SUCCESS ) break;

		p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
			element );

		/* Build the receive work request. */
		recv_wr.p_next	 = NULL;
		recv_wr.wr_id	 = (uintn_t)p_al_element;
		recv_wr.num_ds = 1;
		/* Single data segment; grh_ds presumably spans the GRH plus the
		 * MAD buffer -- confirm against al_mad_element_t. */
		recv_wr.ds_array = &p_al_element->grh_ds;

		/* Queue the receive on the service tracking list BEFORE posting,
		 * so a completion can always find the element on the list. */
		cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,
			&p_al_element->list_item );

		/* Post the receive. */
		status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );

		if( status != IB_SUCCESS )
		{
			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("Failed to post receive %016I64x\n",
				(LONG_PTR)p_al_element) );
			/* Roll back the tracking-list insert and free the element. */
			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_element->list_item );

			ib_put_mad( p_mad_element );
			break;
		}
	}

	return status;
}
2773 \r
2774 \r
2775 \r
2776 /*\r
2777  * Special QP service asynchronous event callback.\r
2778  */\r
2779 void\r
2780 spl_qp_svc_event_cb(\r
2781         IN                              ib_async_event_rec_t            *p_event_rec )\r
2782 {\r
2783         spl_qp_svc_t*                   p_spl_qp_svc;\r
2784 \r
2785         AL_ENTER( AL_DBG_SMI_CB );\r
2786 \r
2787         CL_ASSERT( p_event_rec );\r
2788         CL_ASSERT( p_event_rec->context );\r
2789 \r
2790         if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
2791         {\r
2792                 AL_EXIT( AL_DBG_SMI );\r
2793                 return;\r
2794         }\r
2795 \r
2796         p_spl_qp_svc = p_event_rec->context;\r
2797 \r
2798         spl_qp_svc_reset( p_spl_qp_svc );\r
2799 \r
2800         AL_EXIT( AL_DBG_SMI_CB );\r
2801 }\r
2802 \r
2803 \r
2804 \r
/*
 * Special QP service reset.
 *
 * Moves an ACTIVE service into the ERROR state and queues an
 * asynchronous work item (spl_qp_svc_reset_cb) to rebuild the QP.
 * Idempotent under concurrency: only the caller that observes the
 * ACTIVE state queues the reset; all others return immediately.
 */
void
spl_qp_svc_reset(
	IN				spl_qp_svc_t*				p_spl_qp_svc )
{
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

	/* Nothing to do unless the service is currently active. */
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}

	/* Change the special QP service to the error state. */
	p_spl_qp_svc->state = SPL_QP_ERROR;

	/* Flag the service as in use by the asynchronous processing thread. */
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Queue an asynchronous processing item to reset the special QP. */
	cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );
}
2831 \r
2832 \r
2833 \r
/*
 * Asynchronous processing thread callback to reset the special QP service.
 *
 * Drives the recovery sequence: quiesce, move the QP to RESET, reclaim
 * all tracked receive elements, re-initialize the QP, flush stale send
 * completions, re-post receives, re-queue pending sends, and finally
 * return the service to ACTIVE (unless its state changed meanwhile).
 */
void
spl_qp_svc_reset_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_qp_mod_t				qp_mod;
	ib_api_status_t			status;
	cl_qlist_t				mad_wr_list;

	AL_ENTER( AL_DBG_SMI_CB );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );

	/*
	 * Wait here until the special QP service is only in use by this
	 * thread.  NOTE(review): this is a yielding busy-wait; it assumes
	 * other users of in_use_cnt release their references promptly.
	 */
	while( p_spl_qp_svc->in_use_cnt != 1 )
	{
		cl_thread_suspend( 0 );
	}

	/* Change the QP to the RESET state. */
	cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
	qp_mod.req_state = IB_QPS_RESET;

	status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );
	CL_ASSERT( status == IB_SUCCESS );

	/* Return all tracked receive MAD elements to the pool. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
		 p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
		 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
	{
		p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

		status = ib_put_mad( &p_al_mad->element );
		CL_ASSERT( status == IB_SUCCESS );
	}
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Re-initialize the QP. */
	status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
	CL_ASSERT( status == IB_SUCCESS );

	/* Poll to remove any remaining send completions from the CQ. */
	do
	{
		cl_memclr( &wc, sizeof( ib_wc_t ) );
		p_free_wc = &wc;
		status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );

	} while( status == IB_SUCCESS );

	/* Post receive buffers. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	spl_qp_svc_post_recvs( p_spl_qp_svc );

	/* Move outstanding MAD sends to a private list under the lock. */
	cl_qlist_init( &mad_wr_list );
	cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Re-queue the pending sends outside the service lock. */
	for( p_list_item = cl_qlist_remove_head( &mad_wr_list );
		 p_list_item != cl_qlist_end( &mad_wr_list );
		 p_list_item = cl_qlist_remove_head( &mad_wr_list ) )
	{
		p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
		special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );
	}

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state == SPL_QP_ERROR )
	{
		/* The QP is ready.  Change the state. */
		p_spl_qp_svc->state = SPL_QP_ACTIVE;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* Re-arm the CQs. */
		status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );
		status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );

		/* Resume send processing. */
		special_qp_resume_sends( p_spl_qp_svc->h_qp );
	}
	else
	{
		/* The state changed during the reset; do not reactivate. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	AL_EXIT( AL_DBG_SMI_CB );
}
2939 \r
2940 \r
2941 \r
/*
 * Special QP alias asynchronous event callback.
 *
 * Alias QP events are intentionally ignored; no recovery is performed.
 */
void
spl_qp_alias_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec )
{
	UNUSED_PARAM( p_event_rec );
}
2951 \r
2952 \r
2953 \r
2954 /*\r
2955  * Acquire the SMI dispatcher for the given port.\r
2956  */\r
2957 ib_api_status_t\r
2958 acquire_smi_disp(\r
2959         IN              const   ib_net64_t                                      port_guid,\r
2960                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2961 {\r
2962         CL_ASSERT( gp_spl_qp_mgr );\r
2963         return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );\r
2964 }\r
2965 \r
2966 \r
2967 \r
2968 /*\r
2969  * Acquire the GSI dispatcher for the given port.\r
2970  */\r
2971 ib_api_status_t\r
2972 acquire_gsi_disp(\r
2973         IN              const   ib_net64_t                                      port_guid,\r
2974                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2975 {\r
2976         CL_ASSERT( gp_spl_qp_mgr );\r
2977         return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );\r
2978 }\r
2979 \r
2980 \r
2981 \r
2982 /*\r
2983  * Acquire the service dispatcher for the given port.\r
2984  */\r
2985 ib_api_status_t\r
2986 acquire_svc_disp(\r
2987         IN              const   cl_qmap_t* const                        p_svc_map,\r
2988         IN              const   ib_net64_t                                      port_guid,\r
2989                 OUT                     al_mad_disp_handle_t            *ph_mad_disp )\r
2990 {\r
2991         cl_map_item_t*                  p_svc_item;\r
2992         spl_qp_svc_t*                   p_spl_qp_svc;\r
2993 \r
2994         AL_ENTER( AL_DBG_SMI );\r
2995 \r
2996         CL_ASSERT( p_svc_map );\r
2997         CL_ASSERT( gp_spl_qp_mgr );\r
2998 \r
2999         /* Search for the SMI or GSI service for the given port. */\r
3000         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
3001         p_svc_item = cl_qmap_get( p_svc_map, port_guid );\r
3002         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
3003         if( p_svc_item == cl_qmap_end( p_svc_map ) )\r
3004         {\r
3005                 /* The port does not have an active agent. */\r
3006                 AL_EXIT( AL_DBG_SMI );\r
3007                 return IB_INVALID_GUID;\r
3008         }\r
3009 \r
3010         p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );\r
3011 \r
3012         /* Found a match.  Get MAD dispatcher handle. */\r
3013         *ph_mad_disp = p_spl_qp_svc->h_mad_disp;\r
3014 \r
3015         /* Reference the MAD dispatcher on behalf of the client. */\r
3016         ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );\r
3017 \r
3018         AL_EXIT( AL_DBG_SMI );\r
3019         return IB_SUCCESS;\r
3020 }\r
3021 \r
3022 \r
3023 \r
3024 /*\r
3025  * Force a poll for CA attribute changes.\r
3026  */\r
3027 void\r
3028 force_smi_poll(\r
3029         void )\r
3030 {\r
3031         AL_ENTER( AL_DBG_SMI_CB );\r
3032 \r
3033         /*\r
3034          * Stop the poll timer.  Just invoke the timer callback directly to\r
3035          * save the thread context switching.\r
3036          */\r
3037         smi_poll_timer_cb( gp_spl_qp_mgr );\r
3038 \r
3039         AL_EXIT( AL_DBG_SMI_CB );\r
3040 }\r
3041 \r
3042 \r
3043 \r
3044 /*\r
3045  * Poll for CA port attribute changes.\r
3046  */\r
3047 void\r
3048 smi_poll_timer_cb(\r
3049         IN                              void*                                           context )\r
3050 {\r
3051         cl_status_t                     cl_status;\r
3052 \r
3053         AL_ENTER( AL_DBG_SMI_CB );\r
3054 \r
3055         CL_ASSERT( context );\r
3056         CL_ASSERT( gp_spl_qp_mgr == context );\r
3057         UNUSED_PARAM( context );\r
3058 \r
3059         /*\r
3060          * Scan for changes on the local HCAs.  Since the PnP manager has its\r
3061          * own thread for processing changes, we kick off that thread in parallel\r
3062          * reposting receive buffers to the SQP agents.\r
3063          */\r
3064         pnp_poll();\r
3065 \r
3066         /*\r
3067          * To handle the case where force_smi_poll is called at the same time\r
3068          * the timer expires, check if the asynchronous processing item is in\r
3069          * use.  If it is already in use, it means that we're about to poll\r
3070          * anyway, so just ignore this call.\r
3071          */\r
3072         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
3073 \r
3074         /* Perform port processing on the special QP agents. */\r
3075         cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,\r
3076                 gp_spl_qp_mgr );\r
3077 \r
3078         /* Determine if there are any special QP agents to poll. */\r
3079         if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )\r
3080         {\r
3081                 /* Restart the polling timer. */\r
3082                 cl_status =\r
3083                         cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
3084                 CL_ASSERT( cl_status == CL_SUCCESS );\r
3085         }\r
3086         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
3087 \r
3088         AL_EXIT( AL_DBG_SMI_CB );\r
3089 }\r
3090 \r
3091 \r
3092 \r
3093 /*\r
3094  * Post receive buffers to a special QP.\r
3095  */\r
3096 void\r
3097 smi_post_recvs(\r
3098         IN                              cl_list_item_t* const           p_list_item,\r
3099         IN                              void*                                           context )\r
3100 {\r
3101         al_obj_t*                               p_obj;\r
3102         spl_qp_svc_t*                   p_spl_qp_svc;\r
3103 \r
3104         AL_ENTER( AL_DBG_SMI_CB );\r
3105 \r
3106         CL_ASSERT( p_list_item );\r
3107         UNUSED_PARAM( context );\r
3108 \r
3109         p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
3110         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
3111 \r
3112         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
3113         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
3114         {\r
3115                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3116                 return;\r
3117         }\r
3118 \r
3119         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
3120         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3121 \r
3122         AL_EXIT( AL_DBG_SMI );\r
3123 }\r