[IBAL] Print internal syndrome for MAD completion errors.
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include <iba/ib_al.h>\r
35 #include <complib/cl_timer.h>\r
36 \r
37 #include "ib_common.h"\r
38 #include "al_common.h"\r
39 #include "al_debug.h"\r
40 #include "al_verbs.h"\r
41 #include "al_mgr.h"\r
42 #include "al_pnp.h"\r
43 #include "al_qp.h"\r
44 #include "al_smi.h"\r
45 #include "al_av.h"\r
46 \r
47 \r
48 extern char                                             node_desc[IB_NODE_DESCRIPTION_SIZE];\r
49 \r
50 #define SMI_POLL_INTERVAL                       20000           /* Milliseconds */\r
51 #define LOCAL_MAD_TIMEOUT                       50                      /* Milliseconds */\r
52 #define DEFAULT_QP0_DEPTH                       256\r
53 #define DEFAULT_QP1_DEPTH                       1024\r
54 \r
55 uint32_t                                g_smi_poll_interval =   SMI_POLL_INTERVAL;\r
56 spl_qp_mgr_t*                   gp_spl_qp_mgr = NULL;\r
57 \r
58 \r
59 /*\r
60  * Function prototypes.\r
61  */\r
62 void\r
63 destroying_spl_qp_mgr(\r
64         IN                              al_obj_t*                                       p_obj );\r
65 \r
66 void\r
67 free_spl_qp_mgr(\r
68         IN                              al_obj_t*                                       p_obj );\r
69 \r
70 ib_api_status_t\r
71 spl_qp0_agent_pnp_cb(\r
72         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
73 \r
74 ib_api_status_t\r
75 spl_qp1_agent_pnp_cb(\r
76         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
77 \r
78 ib_api_status_t\r
79 spl_qp_agent_pnp(\r
80         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
81         IN                              ib_qp_type_t                            qp_type );\r
82 \r
83 ib_api_status_t\r
84 create_spl_qp_svc(\r
85         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
86         IN              const   ib_qp_type_t                            qp_type );\r
87 \r
88 void\r
89 destroying_spl_qp_svc(\r
90         IN                              al_obj_t*                                       p_obj );\r
91 \r
92 void\r
93 free_spl_qp_svc(\r
94         IN                              al_obj_t*                                       p_obj );\r
95 \r
96 void\r
97 spl_qp_svc_lid_change(\r
98         IN                              al_obj_t*                                       p_obj,\r
99         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
100 \r
101 ib_api_status_t\r
102 remote_mad_send(\r
103         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
104         IN                              al_mad_wr_t* const                      p_mad_wr );\r
105 \r
106 static ib_api_status_t\r
107 local_mad_send(\r
108         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
109         IN                              al_mad_wr_t* const                      p_mad_wr );\r
110 \r
111 static ib_api_status_t\r
112 loopback_mad(\r
113         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
114         IN                              al_mad_wr_t* const                      p_mad_wr );\r
115 \r
116 static ib_api_status_t\r
117 process_subn_mad(\r
118         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
119         IN                              al_mad_wr_t* const                      p_mad_wr );\r
120 \r
121 static ib_api_status_t\r
122 fwd_local_mad(\r
123         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
124         IN                              al_mad_wr_t* const                      p_mad_wr );\r
125 \r
126 void\r
127 send_local_mad_cb(\r
128         IN                              cl_async_proc_item_t*           p_item );\r
129 \r
130 void\r
131 spl_qp_send_comp_cb(\r
132         IN              const   ib_cq_handle_t                          h_cq,\r
133         IN                              void                                            *cq_context );\r
134 \r
135 void\r
136 spl_qp_recv_comp_cb(\r
137         IN              const   ib_cq_handle_t                          h_cq,\r
138         IN                              void                                            *cq_context );\r
139 \r
140 void\r
141 spl_qp_comp(\r
142         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
143         IN              const   ib_cq_handle_t                          h_cq,\r
144         IN                              ib_wc_type_t                            wc_type );\r
145 \r
146 ib_api_status_t\r
147 process_mad_recv(\r
148         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
149         IN                              ib_mad_element_t*                       p_mad_element );\r
150 \r
151 mad_route_t\r
152 route_recv_smp(\r
153         IN                              ib_mad_element_t*                       p_mad_element );\r
154 \r
155 mad_route_t\r
156 route_recv_smp_attr(\r
157         IN                              ib_mad_element_t*                       p_mad_element );\r
158 \r
159 mad_route_t\r
160 route_recv_dm_mad(\r
161         IN                              ib_mad_element_t*                       p_mad_element );\r
162 \r
163 mad_route_t\r
164 route_recv_gmp(\r
165         IN                              ib_mad_element_t*                       p_mad_element );\r
166 \r
167 mad_route_t\r
168 route_recv_gmp_attr(\r
169         IN                              ib_mad_element_t*                       p_mad_element );\r
170 \r
171 ib_api_status_t\r
172 forward_sm_trap(\r
173         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
174         IN                              ib_mad_element_t*                       p_mad_element );\r
175 \r
176 ib_api_status_t\r
177 recv_local_mad(\r
178         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
179         IN                              ib_mad_element_t*                       p_mad_request );\r
180 \r
181 void\r
182 spl_qp_alias_send_cb(\r
183         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
184         IN                              void                                            *mad_svc_context,\r
185         IN                              ib_mad_element_t                        *p_mad_element );\r
186 \r
187 void\r
188 spl_qp_alias_recv_cb(\r
189         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
190         IN                              void                                            *mad_svc_context,\r
191         IN                              ib_mad_element_t                        *p_mad_response );\r
192 \r
193 static ib_api_status_t\r
194 spl_qp_svc_post_recvs(\r
195         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
196 \r
197 void\r
198 spl_qp_svc_event_cb(\r
199         IN                              ib_async_event_rec_t            *p_event_rec );\r
200 \r
201 void\r
202 spl_qp_alias_event_cb(\r
203         IN                              ib_async_event_rec_t            *p_event_rec );\r
204 \r
205 void\r
206 spl_qp_svc_reset(\r
207         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
208 \r
209 void\r
210 spl_qp_svc_reset_cb(\r
211         IN                              cl_async_proc_item_t*           p_item );\r
212 \r
213 ib_api_status_t\r
214 acquire_svc_disp(\r
215         IN              const   cl_qmap_t* const                        p_svc_map,\r
216         IN              const   ib_net64_t                                      port_guid,\r
217                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
218 \r
219 void\r
220 smi_poll_timer_cb(\r
221         IN                              void*                                           context );\r
222 \r
223 void\r
224 smi_post_recvs(\r
225         IN                              cl_list_item_t* const           p_list_item,\r
226         IN                              void*                                           context );\r
227 \r
228 #if defined( CL_USE_MUTEX )\r
229 void\r
230 spl_qp_send_async_cb(\r
231         IN                              cl_async_proc_item_t*           p_item );\r
232 \r
233 void\r
234 spl_qp_recv_async_cb(\r
235         IN                              cl_async_proc_item_t*           p_item );\r
236 #endif\r
237 \r
238 /*
239  * Create the global special QP (SMI/GSI) manager: allocate it, start its poll timer, and register for port PnP events for QP0 and QP1.  Any failure destroys the partially-built object and returns an error status.
240  */
241 ib_api_status_t
242 create_spl_qp_mgr(
243 	IN				al_obj_t*	const			p_parent_obj )
244 {
245 	ib_pnp_req_t			pnp_req;
246 	ib_api_status_t			status;
247 	cl_status_t				cl_status;
248 
249 	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );
250 
251 	CL_ASSERT( p_parent_obj );
252 	CL_ASSERT( !gp_spl_qp_mgr );	/* singleton: only one manager may exist */
253 
254 	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
255 	if( !gp_spl_qp_mgr )
256 	{
257 		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
258 			("IB_INSUFFICIENT_MEMORY\n") );
259 		return IB_INSUFFICIENT_MEMORY;
260 	}
261 
262 	/* Construct the special QP manager. */
263 	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
264 	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );
265 
266 	/* Initialize the lists. */
267 	cl_qmap_init( &gp_spl_qp_mgr->smi_map );	/* QP0 services, keyed by port GUID */
268 	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );	/* QP1 services, keyed by port GUID */
269 
270 	/* Initialize the global SMI/GSI manager object. */
271 	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
272 		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
273 	if( status != IB_SUCCESS )
274 	{
275 		/* init failed, so pfn_destroy is not usable; free directly. */
276 		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
277 		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
278 			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
279 		return status;
280 	}
281 
282 	/* Attach the special QP manager to the parent object. */
283 	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
284 	if( status != IB_SUCCESS )
285 	{
286 		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
287 		AL_TRACE_EXIT( AL_DBG_ERROR,
288 			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
289 		return status;
290 	}
291 
292 	/* Initialize the SMI polling timer. */
293 	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
294 		gp_spl_qp_mgr );
295 	if( cl_status != CL_SUCCESS )
296 	{
297 		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
298 		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
299 			("cl_timer_init failed, status 0x%x\n", cl_status ) );
300 		return ib_convert_cl_status( cl_status );
301 	}
302 
303 	/*
304 	 * Note: PnP registrations for port events must be done
305 	 * when the special QP manager is created.  This ensures that
306 	 * the registrations are listed sequentially and the reporting
307 	 * of PnP events occurs in the proper order.
308 	 */
309 
310 	/*
311 	 * Separate context is needed for each special QP.  Therefore, a
312 	 * separate PnP event registration is performed for QP0 and QP1.
313 	 */
314 
315 	/* Register for port PnP events for QP0. */
316 	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
317 	pnp_req.pnp_class	= IB_PNP_PORT;
318 	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
319 	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;
320 
321 	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );
322 
323 	if( status != IB_SUCCESS )
324 	{
325 		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
326 		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
327 			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
328 		return status;
329 	}
330 
331 	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
332 	ref_al_obj( &gp_spl_qp_mgr->obj );
333 
334 	/* Register for port PnP events for QP1. */
335 	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
336 	pnp_req.pnp_class	= IB_PNP_PORT;
337 	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
338 	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;
339 
340 	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );
341 
342 	if( status != IB_SUCCESS )
343 	{
344 		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
345 		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
346 			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
347 		return status;
348 	}
349 
350 	/*
351 	 * Note that we don't release the reference taken in init_al_obj
352 	 * because we need one on behalf of the ib_reg_pnp call.
353 	 */
354 
355 	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
356 	return IB_SUCCESS;
357 }
357 \r
358 \r
359 \r
360 /*
361  * Pre-destroy the special QP manager: deregister the QP0/QP1 port PnP registrations and destroy the SMI polling timer.
362  */
363 void
364 destroying_spl_qp_mgr(
365 	IN				al_obj_t*					p_obj )
366 {
367 	ib_api_status_t			status;
368 
369 	CL_ASSERT( p_obj );
370 	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
371 	UNUSED_PARAM( p_obj );
372 
373 	/* Deregister for port PnP events for QP0. */
374 	if( gp_spl_qp_mgr->h_qp0_pnp )
375 	{
376 		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,
377 			(ib_pfn_destroy_cb_t)deref_al_obj );	/* releases the ref taken at registration */
378 		CL_ASSERT( status == IB_SUCCESS );
379 	}
380 
381 	/* Deregister for port PnP events for QP1. */
382 	if( gp_spl_qp_mgr->h_qp1_pnp )
383 	{
384 		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,
385 			(ib_pfn_destroy_cb_t)deref_al_obj );	/* releases the ref taken at registration */
386 		CL_ASSERT( status == IB_SUCCESS );
387 	}
388 
389 	/* Destroy the SMI polling timer. */
390 	cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );
391 }
392 \r
393 \r
394 \r
395 /*
396  * Free the special QP manager: destroy its AL object state, release the memory, and clear the global pointer so a new manager may be created later.
397  */
398 void
399 free_spl_qp_mgr(
400 	IN				al_obj_t*					p_obj )
401 {
402 	CL_ASSERT( p_obj );
403 	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
404 	UNUSED_PARAM( p_obj );
405 
406 	destroy_al_obj( &gp_spl_qp_mgr->obj );
407 	cl_free( gp_spl_qp_mgr );
408 	gp_spl_qp_mgr = NULL;	/* allow re-creation (see CL_ASSERT in create_spl_qp_mgr) */
409 }
410 \r
411 \r
412 \r
413 /*
414  * Special QP0 agent PnP event callback: forwards the event to the common special QP agent handler, identifying the QP as QP0 (SMI).
415  */
416 ib_api_status_t
417 spl_qp0_agent_pnp_cb(
418 	IN				ib_pnp_rec_t*				p_pnp_rec )
419 {
420 	ib_api_status_t status;
421 	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );
422 
423 	status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );
424 
425 	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
426 	return status;
427 }
428 \r
429 \r
430 \r
431 /*
432  * Special QP1 agent PnP event callback: forwards the event to the common special QP agent handler, identifying the QP as QP1 (GSI).
433  */
434 ib_api_status_t
435 spl_qp1_agent_pnp_cb(
436 	IN				ib_pnp_rec_t*				p_pnp_rec )
437 {
438 	ib_api_status_t status;
439 	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );
440 
441 	status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );
442 
443 	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );	/* was AL_DBG_SMI: now matches CL_ENTER and the QP0 twin */
444 	return status;
445 }
446 \r
447 \r
448 \r
449 /*
450  * Special QP agent PnP event callback: dispatches port add, port remove, and LID-change events for the given special QP type; all other events are ignored.
451  */
452 ib_api_status_t
453 spl_qp_agent_pnp(
454 	IN				ib_pnp_rec_t*				p_pnp_rec,
455 	IN				ib_qp_type_t				qp_type )
456 {
457 	ib_api_status_t			status;
458 	al_obj_t*				p_obj;
459 
460 	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );
461 
462 	CL_ASSERT( p_pnp_rec );
463 	p_obj = p_pnp_rec->context;	/* special QP service object, set on PORT_ADD */
464 
465 	/* Dispatch based on the PnP event type. */
466 	switch( p_pnp_rec->pnp_event )
467 	{
468 	case IB_PNP_PORT_ADD:
469 		CL_ASSERT( !p_obj );	/* no service may exist yet for a new port */
470 		status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );
471 		break;
472 
473 	case IB_PNP_PORT_REMOVE:
474 		CL_ASSERT( p_obj );
475 		ref_al_obj( p_obj );	/* NOTE(review): presumably consumed by pfn_destroy — confirm */
476 		p_obj->pfn_destroy( p_obj, NULL );
477 		status = IB_SUCCESS;
478 		break;
479 
480 	case IB_PNP_LID_CHANGE:
481 		CL_ASSERT( p_obj );
482 		spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );
483 		status = IB_SUCCESS;
484 		break;
485 
486 	default:
487 		/* All other events are ignored. */
488 		status = IB_SUCCESS;
489 		break;
490 	}
491 
492 	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );	/* was AL_DBG_SMI: now matches CL_ENTER */
493 	return status;
494 }
495 \r
496 \r
497 \r
498 /*\r
499  * Create a special QP service.\r
500  */\r
501 ib_api_status_t\r
502 create_spl_qp_svc(\r
503         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
504         IN              const   ib_qp_type_t                            qp_type )\r
505 {\r
506         cl_status_t                             cl_status;\r
507         spl_qp_svc_t*                   p_spl_qp_svc;\r
508         ib_ca_handle_t                  h_ca;\r
509         ib_cq_create_t                  cq_create;\r
510         ib_qp_create_t                  qp_create;\r
511         ib_qp_attr_t                    qp_attr;\r
512         ib_mad_svc_t                    mad_svc;\r
513         ib_api_status_t                 status;\r
514 \r
515         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
516 \r
517         CL_ASSERT( p_pnp_rec );\r
518 \r
519         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
520         {\r
521                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
522                 return IB_INVALID_PARAMETER;\r
523         }\r
524 \r
525         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
526         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
527         CL_ASSERT( p_pnp_rec->p_port_attr );\r
528 \r
529         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
530         if( !p_spl_qp_svc )\r
531         {\r
532                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
533                         ("IB_INSUFFICIENT_MEMORY\n") );\r
534                 return IB_INSUFFICIENT_MEMORY;\r
535         }\r
536 \r
537         /* Tie the special QP service to the port by setting the port number. */\r
538         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
539         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
540         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
541 \r
542         /* Initialize the send and receive queues. */\r
543         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
544         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
545 \r
546 #if defined( CL_USE_MUTEX )\r
547         /* Initialize async callbacks and flags for send/receive processing. */\r
548         p_spl_qp_svc->send_async_queued = FALSE;\r
549         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
550         p_spl_qp_svc->recv_async_queued = FALSE;\r
551         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
552 #endif\r
553 \r
554         /* Initialize the async callback function to process local sends. */\r
555         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
556 \r
557         /* Initialize the async callback function to reset the QP on error. */\r
558         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
559 \r
560         /* Construct the special QP service object. */\r
561         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
562 \r
563         /* Initialize the special QP service object. */\r
564         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
565                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
566         if( status != IB_SUCCESS )\r
567         {\r
568                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
569                 return status;\r
570         }\r
571 \r
572         /* Attach the special QP service to the parent object. */\r
573         status = attach_al_obj(\r
574                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
575         if( status != IB_SUCCESS )\r
576         {\r
577                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
578                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
579                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
580                 return status;\r
581         }\r
582 \r
583         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
584         CL_ASSERT( h_ca );\r
585         if( !h_ca )\r
586         {\r
587                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
588                 AL_TRACE_EXIT( AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
589                 return IB_INVALID_GUID;\r
590         }\r
591 \r
592         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
593 \r
594         /* Determine the maximum queue depth of the QP and CQs. */\r
595         p_spl_qp_svc->max_qp_depth =\r
596                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
597                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
598                 p_pnp_rec->p_ca_attr->max_wrs :\r
599                 p_pnp_rec->p_ca_attr->max_cqes;\r
600 \r
601         /* Compare this maximum to the default special queue depth. */\r
602         if( ( qp_type == IB_QPT_QP0 ) &&\r
603                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
604                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
605         if( ( qp_type == IB_QPT_QP1 ) &&\r
606                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
607                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
608 \r
609         /* Create the send CQ. */\r
610         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
611         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
612         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
613 \r
614         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
615                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
616 \r
617         if( status != IB_SUCCESS )\r
618         {\r
619                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
620                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
621                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
622                 return status;\r
623         }\r
624 \r
625         /* Reference the special QP service on behalf of ib_create_cq. */\r
626         ref_al_obj( &p_spl_qp_svc->obj );\r
627 \r
628         /* Check the result of the creation request. */\r
629         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
630         {\r
631                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
632                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
633                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
634                 return IB_INSUFFICIENT_RESOURCES;\r
635         }\r
636 \r
637         /* Create the receive CQ. */\r
638         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
639         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
640         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
641 \r
642         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
643                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
644 \r
645         if( status != IB_SUCCESS )\r
646         {\r
647                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
648                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
649                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
650                 return status;\r
651         }\r
652 \r
653         /* Reference the special QP service on behalf of ib_create_cq. */\r
654         ref_al_obj( &p_spl_qp_svc->obj );\r
655 \r
656         /* Check the result of the creation request. */\r
657         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
658         {\r
659                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
660                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
661                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
662                 return IB_INSUFFICIENT_RESOURCES;\r
663         }\r
664 \r
665         /* Create the special QP. */\r
666         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
667         qp_create.qp_type = qp_type;\r
668         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
669         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
670         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
671         qp_create.rq_sge = 1;\r
672         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
673         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
674         qp_create.sq_signaled = TRUE;\r
675 \r
676         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
677                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
678                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
679 \r
680         if( status != IB_SUCCESS )\r
681         {\r
682                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
683                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
684                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
685                 return status;\r
686         }\r
687 \r
688         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
689         ref_al_obj( &p_spl_qp_svc->obj );\r
690 \r
691         /* Check the result of the creation request. */\r
692         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
693         if( status != IB_SUCCESS )\r
694         {\r
695                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
696                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
697                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
698                 return status;\r
699         }\r
700 \r
701         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
702                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
703                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
704         {\r
705                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
706                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
707                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
708                 return IB_INSUFFICIENT_RESOURCES;\r
709         }\r
710 \r
711         /* Initialize the QP for use. */\r
712         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
713         if( status != IB_SUCCESS )\r
714         {\r
715                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
716                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
717                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
718                 return status;\r
719         }\r
720 \r
721         /* Post receive buffers. */\r
722         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
723         if( status != IB_SUCCESS )\r
724         {\r
725                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
726                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
727                         ("spl_qp_svc_post_recvs failed, %s\n",\r
728                         ib_get_err_str( status ) ) );\r
729                 return status;\r
730         }\r
731 \r
732         /* Create the MAD dispatcher. */\r
733         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
734                 &p_spl_qp_svc->h_mad_disp );\r
735         if( status != IB_SUCCESS )\r
736         {\r
737                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
738                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
739                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
740                 return status;\r
741         }\r
742 \r
743         /*\r
744          * Add this service to the special QP manager lookup lists.\r
745          * The service must be added to allow the creation of a QP alias.\r
746          */\r
747         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
748         if( qp_type == IB_QPT_QP0 )\r
749         {\r
750                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
751                         &p_spl_qp_svc->map_item );\r
752         }\r
753         else\r
754         {\r
755                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
756                         &p_spl_qp_svc->map_item );\r
757         }\r
758         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
759 \r
760         /*\r
761          * If the CA does not support HW agents, create a QP alias and register\r
762          * a MAD service for sending responses from the local MAD interface.\r
763          */\r
764         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
765         {\r
766                 /* Create a QP alias. */\r
767                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
768                 qp_create.qp_type =\r
769                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
770                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
771                 qp_create.sq_sge                = 1;\r
772                 qp_create.sq_signaled   = TRUE;\r
773 \r
774                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
775                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
776                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
777                         &p_spl_qp_svc->h_qp_alias );\r
778 \r
779                 if (status != IB_SUCCESS)\r
780                 {\r
781                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
782                         CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
783                                 ("ib_get_spl_qp alias failed, %s\n",\r
784                                 ib_get_err_str( status ) ) );\r
785                         return status;\r
786                 }\r
787 \r
788                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
789                 ref_al_obj( &p_spl_qp_svc->obj );\r
790 \r
791                 /* Register a MAD service for sends. */\r
792                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
793                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
794                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
795                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
796 \r
797                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
798                         &p_spl_qp_svc->h_mad_svc );\r
799 \r
800                 if( status != IB_SUCCESS )\r
801                 {\r
802                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
803                         CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
804                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
805                         return status;\r
806                 }\r
807         }\r
808 \r
809         /* Set the context of the PnP event to this child object. */\r
810         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
811 \r
812         /* The QP is ready.  Change the state. */\r
813         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
814 \r
815         /* Force a completion callback to rearm the CQs. */\r
816         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
817         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
818 \r
819         /* Start the polling thread timer. */\r
820         if( g_smi_poll_interval )\r
821         {\r
822                 cl_status =\r
823                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
824 \r
825                 if( cl_status != CL_SUCCESS )\r
826                 {\r
827                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
828                         CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
829                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
830                         return ib_convert_cl_status( cl_status );\r
831                 }\r
832         }\r
833 \r
834         /* Release the reference taken in init_al_obj. */\r
835         deref_al_obj( &p_spl_qp_svc->obj );\r
836 \r
837         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
838         return IB_SUCCESS;\r
839 }\r
840 \r
841 \r
842 \r
843 /*\r
844  * Return a work completion to the MAD dispatcher for the specified MAD.\r
845  */\r
846 static void\r
847 __complete_send_mad(\r
848         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
849         IN                              al_mad_wr_t* const                      p_mad_wr,\r
850         IN              const   ib_wc_status_t                          wc_status )\r
851 {\r
852         ib_wc_t                 wc;\r
853 \r
854         /* Construct a send work completion. */\r
855         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
856         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
857         wc.wc_type      = IB_WC_SEND;\r
858         wc.status       = wc_status;\r
859 \r
860         /* Set the send size if we were successful with the send. */\r
861         if( wc_status == IB_WCS_SUCCESS )\r
862                 wc.length = MAD_BLOCK_SIZE;\r
863 \r
864         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
865 }\r
866 \r
867 \r
868 \r
/*
 * Pre-destroy a special QP service.
 *
 * Transitions the service to SPL_QP_DESTROYING, waits for asynchronous
 * users to drain, removes the service from the SMI/GSI tracking maps,
 * destroys the QP, QP alias, and CQs, and flushes all queued MAD sends
 * back to the dispatcher as IB_WCS_WR_FLUSHED_ERR completions.
 */
void
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_wr_t*			p_mad_wr;

	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Change the state to prevent processing new send requests. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->state = SPL_QP_DESTROYING;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/*
	 * Wait here until the special QP service is no longer in use.
	 * in_use_cnt is decremented by the local MAD processing path
	 * (see local_mad_send); yield the CPU while it drains.
	 */
	while( p_spl_qp_svc->in_use_cnt )
	{
		cl_thread_suspend( 0 );
	}

	/* Destroy the special QP. */
	if( p_spl_qp_svc->h_qp )
	{
		/* If present, remove the special QP service from the tracking map. */
		cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
		if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
		{
			cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
		}
		else
		{
			cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
		}
		cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

		/* The destroy callback releases the QP's reference on this object. */
		status = ib_destroy_qp( p_spl_qp_svc->h_qp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

		/* Complete any outstanding MAD sends operations as "flushed". */
		for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
			 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
			 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
		{
			p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_WR_FLUSHED_ERR );
		}

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		/* Receive MAD elements are returned to the pool by the free routine. */
	}

	/* Destroy the special QP alias and CQs. */
	if( p_spl_qp_svc->h_qp_alias )
	{
		status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_send_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_recv_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
955 \r
956 \r
957 \r
958 /*\r
959  * Free a special QP service.\r
960  */\r
961 void\r
962 free_spl_qp_svc(\r
963         IN                              al_obj_t*                                       p_obj )\r
964 {\r
965         spl_qp_svc_t*                   p_spl_qp_svc;\r
966         cl_list_item_t*                 p_list_item;\r
967         al_mad_element_t*               p_al_mad;\r
968         ib_api_status_t                 status;\r
969 \r
970         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
971 \r
972         CL_ASSERT( p_obj );\r
973         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
974 \r
975         /* Dereference the CA. */\r
976         if( p_spl_qp_svc->obj.p_ci_ca )\r
977                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
978 \r
979         /* Return receive MAD elements to the pool. */\r
980         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
981                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
982                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
983         {\r
984                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
985 \r
986                 status = ib_put_mad( &p_al_mad->element );\r
987                 CL_ASSERT( status == IB_SUCCESS );\r
988         }\r
989 \r
990         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
991 \r
992         destroy_al_obj( &p_spl_qp_svc->obj );\r
993         cl_free( p_spl_qp_svc );\r
994 \r
995         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
996 }\r
997 \r
998 \r
999 \r
1000 /*\r
1001  * Update the base LID of a special QP service.\r
1002  */\r
1003 void\r
1004 spl_qp_svc_lid_change(\r
1005         IN                              al_obj_t*                                       p_obj,\r
1006         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1007 {\r
1008         spl_qp_svc_t*                   p_spl_qp_svc;\r
1009 \r
1010         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
1011 \r
1012         CL_ASSERT( p_obj );\r
1013         CL_ASSERT( p_pnp_rec );\r
1014         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1015 \r
1016         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1017 \r
1018         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1019         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1020 \r
1021         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1022 }\r
1023 \r
1024 \r
1025 \r
/*
 * Route a send work request.
 *
 * Classifies an outbound MAD as local (handled by this node's local
 * MAD interface), loopback (SM-to-SM SMInfo messages addressed to
 * ourselves), remote (posted to the wire), or discard (malformed
 * directed-route SMPs), following the directed-route SMP rules of the
 * IBA specification.
 */
mad_route_t
route_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_send_wr_t* const			p_send_wr )
{
	al_mad_wr_t*			p_mad_wr;
	al_mad_send_t*			p_mad_send;
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	ib_av_handle_t			h_av;
	mad_route_t				route;
	boolean_t				local, loopback, discard;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_send_wr );

	/* Initialize pointers to the MAD work request and the MAD. */
	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Check if the CA has a local MAD interface. */
	local = loopback = discard = FALSE;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * If the MAD is a locally addressed Subnet Management, Performance
		 * Management, or Connection Management datagram, process the work
		 * request locally.
		 */
		h_av = p_send_wr->dgrm.ud.h_av;
		switch( p_mad->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the originator of the response.  Discard
				 * if the hop count or pointer is zero, an intermediate hop,
				 * out of bounds hop, or if the first port of the directed
				 * route return path is not this port.
				 */
				if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt or hop ptr set to 0...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt != (hop ptr - 1)...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt > max hops...discarding\n") );
					discard = TRUE;
				}
				else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
						 ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
							p_spl_qp_svc->port_num ) )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("return path[hop ptr - 1] != port num...discarding\n") );
					discard = TRUE;
				}
			}
			else
			{
				/* The SMP is a request. */
				if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
					( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
				{
					discard = TRUE;
				}
				else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
				{
					/* Self Addressed: Sent locally, routed locally. */
					local = TRUE;
					/* Both DR LIDs must be permissive for a self-addressed SMP. */
					discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
							  ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
				{
					/* End of Path: Sent remotely, routed locally. */
					local = TRUE;
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_ptr	== 0 ) )
				{
					/* Beginning of Path: Sent locally, routed remotely. */
					if( p_smp->dr_slid == IB_LID_PERMISSIVE )
					{
						/* The first egress port in the path must be this port. */
						discard =
							( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
							  p_spl_qp_svc->port_num );
					}
				}
				else
				{
					/* Intermediate hop. */
					discard = TRUE;
				}
			}
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);
			break;

		case IB_MCLASS_SUBN_LID:
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

			/* Fall through to check for a local MAD. */

		case IB_MCLASS_PERF:
		case IB_MCLASS_BM:
			/* Local if the destination LID matches one of this port's LIDs. */
			local = ( h_av &&
				( h_av->av_attr.dlid ==
				( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
			{
				local = ( h_av &&
					( h_av->av_attr.dlid ==
					( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			}
			break;
		}
	}

	/*
	 * Default routing honors the sender's IB_SEND_OPT_LOCAL hint; the
	 * checks above then override in increasing order of precedence:
	 * local, loopback, discard.
	 */
	route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
		ROUTE_LOCAL : ROUTE_REMOTE;
	if( local ) route = ROUTE_LOCAL;
	if( loopback && local ) route = ROUTE_LOOPBACK;
	if( discard ) route = ROUTE_DISCARD;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return route;
}
1178 \r
1179 \r
1180 \r
/*
 * Send a work request on the special QP.
 *
 * Routes the MAD (local, loopback, remote, or discard) and then either
 * hands it to the local MAD processing path or posts it to the QP.
 * Returns IB_RESOURCE_BUSY when the service cannot accept the send now;
 * the special QP resumes such sends later.
 */
ib_api_status_t
spl_qp_svc_send(
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_send_wr_t* const			p_send_wr )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	al_mad_wr_t*			p_mad_wr;
	mad_route_t				route;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( h_qp );
	CL_ASSERT( p_send_wr );

	/* Get the special QP service. */
	p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, p_send_wr );

	/*
	 * Check the QP state and guard against error handling.  Also,
	 * to maintain proper order of work completions, delay processing
	 * a local MAD until any remote MAD work requests have completed,
	 * and delay processing a remote MAD until local MAD work requests
	 * have completed.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
		(is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
		( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
			p_spl_qp_svc->max_qp_depth ) )
	{
		/*
		 * Return busy status.
		 * The special QP will resume sends at this point.
		 */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return IB_RESOURCE_BUSY;
	}

	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

	if( is_local( route ) )
	{
		/* Save the local MAD work request for processing. */
		p_spl_qp_svc->local_mad_wr = p_mad_wr;

		/* Flag the service as in use by the asynchronous processing thread. */
		cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

		/* The lock is dropped before local processing... */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		status = local_mad_send( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/* ...but remote_mad_send runs with the service lock held. */
		/* Process a remote MAD send work request. */
		status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
1255 \r
1256 \r
1257 \r
/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 *
 * Adjusts directed-route SMP hop pointers as required by IBA before
 * posting, queues the work request on the service tracking queue, and
 * undoes both actions if the post fails.
 */
ib_api_status_t
remote_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD, viewed as an SMP. */
	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	/* Perform outbound MAD processing. */

	/* Adjust directed route SMPs as required by IBA. */
	if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		if( ib_smp_is_response( p_smp ) )
		{
			if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				p_smp->hop_ptr--;
		}
		else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
		{
			/*
			 * Only update the pointer if the hw_agent is not implemented.
			 * Fujitsu implements SMI in hardware, so the following has to
			 * be passed down to the hardware SMI.
			 */
			ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
			if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
				p_smp->hop_ptr++;
			ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
		}
	}

	/* Always generate send completions. */
	p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;

	/* Queue the MAD work request on the service tracking queue. */
	cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

	status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

	if( status != IB_SUCCESS )
	{
		/* The post failed; take the request back off the tracking queue. */
		cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

		/* Reset directed route SMPs as required by IBA. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
		{
			if( ib_smp_is_response( p_smp ) )
			{
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
					p_smp->hop_ptr++;
			}
			else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
			{
				/* Only update if the hw_agent is not implemented. */
				ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
				if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
					p_smp->hop_ptr--;
				ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
			}
		}
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
1335 \r
1336 \r
/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 *
 * Discards invalid MADs (reporting the failure to the dispatcher as a
 * work completion), loops back SM "heartbeat" messages, processes the
 * subnet-management classes inline, and defers everything else to the
 * asynchronous processing thread.  Always returns IB_SUCCESS to the
 * caller; send failures surface through the dispatcher callback.
 */
static ib_api_status_t
local_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	mad_route_t				route;
	ib_api_status_t			status = IB_SUCCESS;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

	/* Check if this MAD should be discarded. */
	if( is_discard( route ) )
	{
		/* Deliver a "work completion" to the dispatcher. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		status = IB_INVALID_SETTING;
	}
	else if( is_loopback( route ) )
	{
		/* Loopback local SM to SM "heartbeat" messages. */
		status = loopback_mad( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/* Dispatch by management class. */
		switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
		case IB_MCLASS_SUBN_LID:
			status = process_subn_mad( p_spl_qp_svc, p_mad_wr );
			break;

		default:
			/* IB_NOT_DONE defers the MAD to asynchronous processing below. */
			status = IB_NOT_DONE;
		}
	}

	if( status == IB_NOT_DONE )
	{
		/* Queue an asynchronous processing item to process the local MAD. */
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
	}
	else
	{
		/*
		 * Clear the local MAD pointer to allow processing of other MADs.
		 * This is done after polling for attribute changes to ensure that
		 * subsequent MADs pick up any changes performed by this one.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		p_spl_qp_svc->local_mad_wr = NULL;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* No longer in use by the asynchronous processing thread. */
		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

		/* Special QP operations will resume by unwinding. */
	}

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
1409 \r
1410 \r
1411 static ib_api_status_t\r
1412 get_resp_mad(\r
1413         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1414         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1415                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1416 {\r
1417         ib_api_status_t                 status;\r
1418 \r
1419         AL_ENTER( AL_DBG_SMI );\r
1420 \r
1421         CL_ASSERT( p_spl_qp_svc );\r
1422         CL_ASSERT( p_mad_wr );\r
1423         CL_ASSERT( pp_mad_resp );\r
1424 \r
1425         /* Get a MAD element from the pool for the response. */\r
1426         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1427                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1428         if( status != IB_SUCCESS )\r
1429         {\r
1430                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1431                         IB_WCS_LOCAL_OP_ERR );\r
1432         }\r
1433 \r
1434         AL_EXIT( AL_DBG_SMI );\r
1435         return status;\r
1436 }\r
1437 \r
1438 \r
1439 static ib_api_status_t\r
1440 complete_local_mad(\r
1441         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1442         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1443         IN                              ib_mad_element_t* const         p_mad_resp )\r
1444 {\r
1445         ib_api_status_t                 status;\r
1446 \r
1447         AL_ENTER( AL_DBG_SMI );\r
1448 \r
1449         CL_ASSERT( p_spl_qp_svc );\r
1450         CL_ASSERT( p_mad_wr );\r
1451         CL_ASSERT( p_mad_resp );\r
1452 \r
1453         /* Construct the receive MAD element. */\r
1454         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1455         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1456         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1457         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1458         {\r
1459                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1460                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1461         }\r
1462 \r
1463         /*\r
1464          * Hand the receive MAD element to the dispatcher before completing\r
1465          * the send.  This guarantees that the send request cannot time out.\r
1466          */\r
1467         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1468 \r
1469         /* Forward the send work completion to the dispatcher. */\r
1470         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1471 \r
1472         AL_EXIT( AL_DBG_SMI );\r
1473         return status;\r
1474 }\r
1475 \r
1476 \r
1477 static ib_api_status_t\r
1478 loopback_mad(\r
1479         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1480         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1481 {\r
1482         ib_mad_t                                *p_mad;\r
1483         ib_mad_element_t                *p_mad_resp;\r
1484         ib_api_status_t                 status;\r
1485 \r
1486         AL_ENTER( AL_DBG_SMI );\r
1487 \r
1488         CL_ASSERT( p_spl_qp_svc );\r
1489         CL_ASSERT( p_mad_wr );\r
1490 \r
1491         /* Get a MAD element from the pool for the response. */\r
1492         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1493         if( status == IB_SUCCESS )\r
1494         {\r
1495                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1496                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1497 \r
1498                 /* Simulate a send/receive between local managers. */\r
1499                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1500 \r
1501                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1502         }\r
1503 \r
1504         AL_EXIT( AL_DBG_SMI );\r
1505         return status;\r
1506 }\r
1507 \r
1508 \r
1509 static ib_api_status_t\r
1510 process_node_info(\r
1511         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1512         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1513 {\r
1514         ib_mad_t                                *p_mad;\r
1515         ib_mad_element_t                *p_mad_resp;\r
1516         ib_smp_t                                *p_smp;\r
1517         ib_node_info_t                  *p_node_info;\r
1518         ib_ca_attr_t                    *p_ca_attr;\r
1519         ib_port_attr_t                  *p_port_attr;\r
1520         ib_api_status_t                 status;\r
1521 \r
1522         AL_ENTER( AL_DBG_SMI );\r
1523 \r
1524         CL_ASSERT( p_spl_qp_svc );\r
1525         CL_ASSERT( p_mad_wr );\r
1526 \r
1527         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1528         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1529         if( p_mad->method != IB_MAD_METHOD_GET )\r
1530         {\r
1531                 /* Node description is a GET-only attribute. */\r
1532                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1533                         IB_WCS_LOCAL_OP_ERR );\r
1534                 AL_EXIT( AL_DBG_SMI );\r
1535                 return IB_INVALID_SETTING;\r
1536         }\r
1537 \r
1538         /* Get a MAD element from the pool for the response. */\r
1539         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1540         if( status == IB_SUCCESS )\r
1541         {\r
1542                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1543                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1544                 p_smp->method |= IB_MAD_METHOD_RESP_MASK;\r
1545                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1546                         p_smp->status = IB_SMP_DIRECTION;\r
1547                 else\r
1548                         p_smp->status = 0;\r
1549 \r
1550                 p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1551 \r
1552                 /*\r
1553                  * Fill in the node info, protecting against the\r
1554                  * attributes being changed by PnP.\r
1555                  */\r
1556                 cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
1557 \r
1558                 p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;\r
1559                 p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];\r
1560 \r
1561                 p_node_info->base_version = 1;\r
1562                 p_node_info->class_version = 1;\r
1563                 p_node_info->node_type = IB_NODE_TYPE_CA;\r
1564                 p_node_info->num_ports = p_ca_attr->num_ports;\r
1565                 /* TODO: Get some unique identifier for the system */\r
1566                 p_node_info->sys_guid = p_ca_attr->ca_guid;\r
1567                 p_node_info->node_guid = p_ca_attr->ca_guid;\r
1568                 p_node_info->port_guid = p_port_attr->port_guid;\r
1569                 p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );\r
1570                 p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );\r
1571                 p_node_info->revision = cl_hton32( p_ca_attr->revision );\r
1572                 p_node_info->port_num_vendor_id =\r
1573                         cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;\r
1574                 cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
1575 \r
1576                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1577         }\r
1578 \r
1579         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1580         return status;\r
1581 }\r
1582 \r
1583 \r
1584 static ib_api_status_t\r
1585 process_node_desc(\r
1586         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1587         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1588 {\r
1589         ib_mad_t                                *p_mad;\r
1590         ib_mad_element_t                *p_mad_resp;\r
1591         ib_api_status_t                 status;\r
1592 \r
1593         AL_ENTER( AL_DBG_SMI );\r
1594 \r
1595         CL_ASSERT( p_spl_qp_svc );\r
1596         CL_ASSERT( p_mad_wr );\r
1597 \r
1598         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1599         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1600         if( p_mad->method != IB_MAD_METHOD_GET )\r
1601         {\r
1602                 /* Node info is a GET-only attribute. */\r
1603                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1604                         IB_WCS_LOCAL_OP_ERR );\r
1605                 AL_EXIT( AL_DBG_SMI );\r
1606                 return IB_INVALID_SETTING;\r
1607         }\r
1608 \r
1609         /* Get a MAD element from the pool for the response. */\r
1610         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1611         if( status == IB_SUCCESS )\r
1612         {\r
1613                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1614                 p_mad_resp->p_mad_buf->method |= IB_MAD_METHOD_RESP_MASK;\r
1615                 if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1616                         p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;\r
1617                 else\r
1618                         p_mad_resp->p_mad_buf->status = 0;\r
1619                 /* Set the node description to the machine name. */\r
1620                 cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, \r
1621                         node_desc, sizeof(node_desc) );\r
1622 \r
1623                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1624         }\r
1625 \r
1626         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1627         return status;\r
1628 }\r
1629 \r
1630 \r
1631 /*\r
1632  * Process subnet administration MADs using cached data if possible.\r
1633  */\r
1634 static ib_api_status_t\r
1635 process_subn_mad(\r
1636         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1637         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1638 {\r
1639         ib_api_status_t         status;\r
1640         ib_smp_t                        *p_smp;\r
1641 \r
1642         AL_ENTER( AL_DBG_SMI );\r
1643 \r
1644         CL_ASSERT( p_spl_qp_svc );\r
1645         CL_ASSERT( p_mad_wr );\r
1646 \r
1647         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1648 \r
1649         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1650                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
1651 \r
1652         switch( p_smp->attr_id )\r
1653         {\r
1654         case IB_MAD_ATTR_NODE_INFO:\r
1655                 status = process_node_info( p_spl_qp_svc, p_mad_wr );\r
1656                 break;\r
1657 \r
1658         case IB_MAD_ATTR_NODE_DESC:\r
1659                 status = process_node_desc( p_spl_qp_svc, p_mad_wr );\r
1660                 break;\r
1661 \r
1662         default:\r
1663                 status = IB_NOT_DONE;\r
1664                 break;\r
1665         }\r
1666 \r
1667         AL_EXIT( AL_DBG_SMI );\r
1668         return status;\r
1669 }\r
1670 \r
1671 \r
1672 /*\r
1673  * Process a local MAD send work request.\r
1674  */\r
ib_api_status_t
fwd_local_mad(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	al_mad_send_t*			p_mad_send;
	ib_mad_element_t*		p_mad_response;
	ib_mad_t*				p_mad_response_buf;
	ib_api_status_t			status = IB_SUCCESS;
	boolean_t				smp_is_set;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Remember whether this is a SET; used below to refresh cached LID/LMC. */
	smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

	/* Get a MAD element from the pool for the response. */
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
//*** Commented code to work-around ib_local_mad() requiring a response MAD
//*** as input.  Remove comments once the ib_local_mad() implementation allows
//*** for a NULL response MAD, when one is not expected.
//*** Note that an attempt to route an invalid response MAD in this case
//*** will fail harmlessly.
//***	if( p_mad_send->p_send_mad->resp_expected )
//***	{
		status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
		if( status != IB_SUCCESS )
		{
			/* get_resp_mad already completed the send with an error status. */
			CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
			return status;
		}
		p_mad_response_buf = p_mad_response->p_mad_buf;
//***	}
//***	else
//***	{
//***		p_mad_response_buf = NULL;
//***	}

	/* Adjust directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		CL_ASSERT( !ib_smp_is_response( p_smp ) );

		/*
		 * If this was a self addressed, directed route SMP, increment
		 * the hop pointer in the request before delivery as required
		 * by IBA.  Otherwise, adjustment for remote requests occurs
		 * during inbound processing.
		 */
		if( p_smp->hop_count == 0 )
			p_smp->hop_ptr++;
	}

	/* Forward the locally addressed MAD to the CA interface. */
	status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
		p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );

	/* Reset directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		/*
		 * If this was a self addressed, directed route SMP, decrement
		 * the hop pointer in the response before delivery as required
		 * by IBA.  Otherwise, adjustment for remote responses occurs
		 * during outbound processing.
		 */
		if( p_smp->hop_count == 0 )
		{
			/* Adjust the request SMP. */
			p_smp->hop_ptr--;

			/* Adjust the response SMP. */
			if( p_mad_response_buf )
			{
				/*
				 * From here on, p_smp references the RESPONSE, not the
				 * request.  NOTE(review): for directed route SMPs with
				 * hop_count != 0, p_smp still references the request in
				 * the SUBN_DIR check below - verify that is intended.
				 */
				p_smp = (ib_smp_t*)p_mad_response_buf;
				p_smp->hop_ptr--;
			}
		}
	}

	if( status != IB_SUCCESS )
	{
		/* The verbs call failed: release the response and fail the send. */
		if( p_mad_response )
			ib_put_mad( p_mad_response );

		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return status;
	}

	/* Check the completion status of this simulated send. */
	if( p_mad_response_buf )
	{
		/*
		 * The SMI uses PnP polling to refresh the base_lid and lmc.
		 * Polling takes time, so we update the values here to prevent
		 * the failure of LID routed MADs sent immediately following this
		 * assignment.  Check the response to see if the port info was set.
		 */
		if( smp_is_set )
		{
			ib_port_info_t*		p_port_info = NULL;

			switch( p_mad_response_buf->mgmt_class )
			{
			case IB_MCLASS_SUBN_DIR:
				if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
					( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )
				{
					p_port_info =
						(ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );
				}
				break;

			case IB_MCLASS_SUBN_LID:
				if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
					( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )
				{
					/* LID routed: payload immediately follows the MAD header. */
					p_port_info =
						(ib_port_info_t*)( p_mad_response_buf + 1 );
				}
				break;

			default:
				break;
			}

			if( p_port_info )
			{
				/* Cache the new LID/LMC so LID routed MADs work immediately. */
				p_spl_qp_svc->base_lid = p_port_info->base_lid;
				p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
				/* High bit of subnet_timeout flags a client reregister event. */
				if (p_port_info->subnet_timeout & 0x80)
				{
					AL_TRACE(AL_DBG_PNP,
						("Client reregister event, setting sm_lid to 0.\n"));
					ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);
					p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->
						p_port_attr->sm_lid= 0;
					ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);
				}
			}
		}
	}

	/* Route the response and complete the send. */
	status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );

	/* If the SMP was a Get, no need to trigger a PnP poll. */
	if( status == IB_SUCCESS && !smp_is_set )
		status = IB_NOT_DONE;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
1837 \r
1838 \r
1839 \r
1840 /*\r
1841  * Asynchronous processing thread callback to send a local MAD.\r
1842  */\r
void
send_local_mad_cb(
	IN				cl_async_proc_item_t*			p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );

	/* Process a local MAD send work request. */
	CL_ASSERT( p_spl_qp_svc->local_mad_wr );
	status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );

	/*
	 * Successfully processing a local MAD may have changed something
	 * (e.g. the LID) on the HCA, so scan for attribute changes.
	 */
	if( status == IB_SUCCESS )
		pnp_poll();

	/*
	 * Clear the local MAD pointer to allow processing of other MADs.
	 * This is done after polling for attribute changes to ensure that
	 * subsequent MADs pick up any changes performed by this one.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->local_mad_wr = NULL;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Continue processing any queued MADs on the QP. */
	special_qp_resume_sends( p_spl_qp_svc->h_qp );

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
1883 \r
1884 \r
1885 \r
1886 /*\r
1887  * Special QP send completion callback.\r
1888  */\r
void
spl_qp_send_comp_cb(
	IN		const	ib_cq_handle_t					h_cq,
	IN				void*							cq_context )
{
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( cq_context );
	p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

	/* Queue an asynchronous processing item to process sends. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( !p_spl_qp_svc->send_async_queued )
	{
		/* Flag prevents duplicate queuing; reference is released by the
		 * async callback (spl_qp_send_async_cb). */
		p_spl_qp_svc->send_async_queued = TRUE;
		ref_al_obj( &p_spl_qp_svc->obj );
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );
	}
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

	/* Invoke the callback directly. */
	CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );
	spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );

	/* Continue processing any queued MADs on the QP. */
	special_qp_resume_sends( p_spl_qp_svc->h_qp );

#endif

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
1926 \r
1927 \r
1928 \r
1929 #if defined( CL_USE_MUTEX )\r
void
spl_qp_send_async_cb(
	IN				cl_async_proc_item_t*			p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );

	/* Reset asynchronous queue flag. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->send_async_queued = FALSE;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Drain send completions deferred by spl_qp_send_comp_cb. */
	spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );

	/* Continue processing any queued MADs on the QP. */
	status = special_qp_resume_sends( p_spl_qp_svc->h_qp );
	CL_ASSERT( status == IB_SUCCESS );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_spl_qp_svc->obj );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
1957 #endif\r
1958 \r
1959 \r
1960 \r
1961 /*\r
1962  * Special QP receive completion callback.\r
1963  */\r
void
spl_qp_recv_comp_cb(
	IN		const	ib_cq_handle_t					h_cq,
	IN				void*							cq_context )
{
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( cq_context );
	p_spl_qp_svc = cq_context;

#if defined( CL_USE_MUTEX )

	/* Queue an asynchronous processing item to process receives. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( !p_spl_qp_svc->recv_async_queued )
	{
		/* Flag prevents duplicate queuing; reference is released by the
		 * async callback (spl_qp_recv_async_cb). */
		p_spl_qp_svc->recv_async_queued = TRUE;
		ref_al_obj( &p_spl_qp_svc->obj );
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );
	}
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

#else

	/* Invoke the completion handler directly. */
	CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );
	spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );

#endif

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
1997 \r
1998 \r
1999 \r
2000 #if defined( CL_USE_MUTEX )\r
void
spl_qp_recv_async_cb(
	IN				cl_async_proc_item_t*			p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );

	/* Reset asynchronous queue flag. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->recv_async_queued = FALSE;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Drain receive completions deferred by spl_qp_recv_comp_cb. */
	spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );

	/* Release the reference taken when this item was queued. */
	deref_al_obj( &p_spl_qp_svc->obj );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
2023 #endif\r
2024 \r
2025 \r
2026 \r
2027 /*\r
2028  * Special QP completion handler.\r
2029  */\r
void
spl_qp_comp(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN		const	ib_cq_handle_t					h_cq,
	IN				ib_wc_type_t					wc_type )
{
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc = &wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_mad_element_t*		p_mad_element;
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( h_cq );

	/* Check the QP state and guard against error handling. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		/* Service is not active (resetting/destroying); leave the CQ alone. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}
	/* in_use_cnt keeps the service alive while we process completions. */
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	wc.p_next = NULL;
	/* Process work completions, one at a time via the single stack WC. */
	while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
	{
		/* Process completions one at a time. */
		CL_ASSERT( p_done_wc );

		/* Flushed completions are handled elsewhere. */
		if( wc.status == IB_WCS_WR_FLUSHED_ERR )
		{
			/* Return the WC to the free list and poll again. */
			p_free_wc = &wc;
			continue;
		}

		/*
		 * Process the work completion.  Per IBA specification, the
		 * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
		 * Use the wc_type parameter.
		 */
		switch( wc_type )
		{
		case IB_WC_SEND:
			/* Get a pointer to the MAD work request. */
			p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);

			/* Remove the MAD work request from the service tracking queue. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
			cl_qlist_remove_item( &p_spl_qp_svc->send_queue,
				&p_mad_wr->list_item );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Reset directed route SMPs as required by IBA. */
			p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );
			if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			{
				/* Undo the hop pointer adjustment made before posting. */
				if( ib_smp_is_response( p_smp ) )
					p_smp->hop_ptr++;
				else
					p_smp->hop_ptr--;
			}

			/* Report the send completion to the dispatcher. */
			mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );
			break;

		case IB_WC_RECV:

			/* Initialize pointers to the MAD element. */
			p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
			p_mad_element = &p_al_mad->element;

			/* Remove the AL MAD element from the service tracking list. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_mad->list_item );

			/* Replenish the receive buffer. */
			spl_qp_svc_post_recvs( p_spl_qp_svc );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Construct the MAD element from the receive work completion. */
			build_mad_recv( p_mad_element, &wc );

			/* Process the received MAD. */
			status = process_mad_recv( p_spl_qp_svc, p_mad_element );

			/* Discard this MAD on error. */
			if( status != IB_SUCCESS )
			{
				status = ib_put_mad( p_mad_element );
				CL_ASSERT( status == IB_SUCCESS );
			}
			break;

		default:
			CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );
			break;
		}

		if( wc.status != IB_WCS_SUCCESS )
		{
			/* Log the failure, including the vendor's internal syndrome. */
			CL_TRACE( CL_DBG_ERROR, g_al_dbg_lvl,
				("special QP completion error: %s! internal syndrome 0x%x\n",
				ib_get_wc_status_str( wc.status ), wc.vendor_specific) );

			/* Reset the special QP service and return. */
			spl_qp_svc_reset( p_spl_qp_svc );
		}
		/* Return the WC to the free list for the next poll. */
		p_free_wc = &wc;
	}

	/* Rearm the CQ. */
	status = ib_rearm_cq( h_cq, FALSE );
	CL_ASSERT( status == IB_SUCCESS );

	/* Drop the in-use reference taken at entry. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
2159 \r
2160 \r
2161 \r
/*
 * Process a received MAD.
 *
 * Determines how the MAD should be routed (local agent, dispatcher,
 * remote SM, or discard) and delivers it accordingly.  Returns the
 * delivery status; a non-IB_SUCCESS return means the MAD was not
 * consumed and the caller releases the element (see the receive
 * completion handler, which calls ib_put_mad on error).
 */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_smp_t*				p_smp;
	mad_route_t				route;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			/* Discard SMPs whose hop fields exceed the architected maximum. */
			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * it if the source LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						/* dr_slid outside this port's LID range (base + LMC). */
						route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * it if the destination LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;	/* Set path per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						/* dr_dlid outside this port's LID range (base + LMC). */
						route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through next case */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			/* Performance management MADs are handled by the local agent. */
			route = ROUTE_LOCAL;
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if ( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
2293 \r
2294 \r
2295 \r
2296 /*\r
2297  * Route a received SMP.\r
2298  */\r
2299 mad_route_t\r
2300 route_recv_smp(\r
2301         IN                              ib_mad_element_t*                       p_mad_element )\r
2302 {\r
2303         mad_route_t                             route;\r
2304 \r
2305         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2306 \r
2307         CL_ASSERT( p_mad_element );\r
2308 \r
2309         /* Process the received SMP. */\r
2310         switch( p_mad_element->p_mad_buf->method )\r
2311         {\r
2312         case IB_MAD_METHOD_GET:\r
2313         case IB_MAD_METHOD_SET:\r
2314                 route = route_recv_smp_attr( p_mad_element );\r
2315                 break;\r
2316 \r
2317         case IB_MAD_METHOD_TRAP:\r
2318                 /*\r
2319                  * Special check to route locally generated traps to the remote SM.\r
2320                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2321                  * IB_RECV_OPT_FORWARD flag.\r
2322                  *\r
2323                  * Note that because forwarded traps use AL MAD services, the upper\r
2324                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2325                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2326                  * TID.\r
2327                  */\r
2328                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2329                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2330                 break;\r
2331 \r
2332         case IB_MAD_METHOD_TRAP_REPRESS:\r
2333                 /*\r
2334                  * Note that because forwarded traps use AL MAD services, the upper\r
2335                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2336                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2337                  * TID.\r
2338                  */\r
2339                 route = ROUTE_LOCAL;\r
2340                 break;\r
2341 \r
2342         default:\r
2343                 route = ROUTE_DISPATCHER;\r
2344                 break;\r
2345         }\r
2346 \r
2347         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2348         return route;\r
2349 }\r
2350 \r
2351 \r
2352 \r
2353 /*\r
2354  * Route received SMP attributes.\r
2355  */\r
2356 mad_route_t\r
2357 route_recv_smp_attr(\r
2358         IN                              ib_mad_element_t*                       p_mad_element )\r
2359 {\r
2360         mad_route_t                             route;\r
2361 \r
2362         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2363 \r
2364         CL_ASSERT( p_mad_element );\r
2365 \r
2366         /* Process the received SMP attributes. */\r
2367         switch( p_mad_element->p_mad_buf->attr_id )\r
2368         {\r
2369         case IB_MAD_ATTR_NODE_DESC:\r
2370         case IB_MAD_ATTR_NODE_INFO:\r
2371         case IB_MAD_ATTR_GUID_INFO:\r
2372         case IB_MAD_ATTR_PORT_INFO:\r
2373         case IB_MAD_ATTR_P_KEY_TABLE:\r
2374         case IB_MAD_ATTR_SLVL_TABLE:\r
2375         case IB_MAD_ATTR_VL_ARBITRATION:\r
2376         case IB_MAD_ATTR_VENDOR_DIAG:\r
2377         case IB_MAD_ATTR_LED_INFO:\r
2378                 route = ROUTE_LOCAL;\r
2379                 break;\r
2380 \r
2381         default:\r
2382                 route = ROUTE_DISPATCHER;\r
2383                 break;\r
2384         }\r
2385 \r
2386         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2387         return route;\r
2388 }\r
2389 \r
2390 \r
2391 /*\r
2392  * Route a received GMP.\r
2393  */\r
2394 mad_route_t\r
2395 route_recv_gmp(\r
2396         IN                              ib_mad_element_t*                       p_mad_element )\r
2397 {\r
2398         mad_route_t                             route;\r
2399 \r
2400         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2401 \r
2402         CL_ASSERT( p_mad_element );\r
2403 \r
2404         /* Process the received GMP. */\r
2405         switch( p_mad_element->p_mad_buf->method )\r
2406         {\r
2407         case IB_MAD_METHOD_GET:\r
2408         case IB_MAD_METHOD_SET:\r
2409                 /* Route vendor specific MADs to the HCA provider. */\r
2410                 if( ib_class_is_vendor_specific(\r
2411                         p_mad_element->p_mad_buf->mgmt_class ) )\r
2412                 {\r
2413                         route = ROUTE_LOCAL;\r
2414                 }\r
2415                 else\r
2416                 {\r
2417                         route = route_recv_gmp_attr( p_mad_element );\r
2418                 }\r
2419                 break;\r
2420 \r
2421         default:\r
2422                 route = ROUTE_DISPATCHER;\r
2423                 break;\r
2424         }\r
2425 \r
2426         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2427         return route;\r
2428 }\r
2429 \r
2430 \r
2431 \r
2432 /*\r
2433  * Route received GMP attributes.\r
2434  */\r
2435 mad_route_t\r
2436 route_recv_gmp_attr(\r
2437         IN                              ib_mad_element_t*                       p_mad_element )\r
2438 {\r
2439         mad_route_t                             route;\r
2440 \r
2441         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2442 \r
2443         CL_ASSERT( p_mad_element );\r
2444 \r
2445         /* Process the received GMP attributes. */\r
2446         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
2447                 route = ROUTE_LOCAL;\r
2448         else\r
2449                 route = ROUTE_DISPATCHER;\r
2450 \r
2451         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2452         return route;\r
2453 }\r
2454 \r
2455 \r
2456 \r
/*
 * Forward a locally generated Subnet Management trap.
 *
 * Builds an address vector back to the trap's sender (the SM) from the
 * received MAD's addressing information, then re-sends the trap via the
 * AL MAD service.  Returns IB_ERROR for non-LID-routed SMPs; otherwise
 * returns the status of the AV creation or the send.
 */
ib_api_status_t
forward_sm_trap(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_av_attr_t			av_attr;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/* Check the SMP class. */
	if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )
	{
		/*
		 * Per IBA Specification Release 1.1 Section 14.2.2.1,
		 * "C14-5: Only a SM shall originate a directed route SMP."
		 * Therefore all traps should be LID routed; drop this one.
		 */
		CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
		return IB_ERROR;
	}

	/* Create an address vector for the SM. */
	cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
	av_attr.port_num = p_spl_qp_svc->port_num;
	av_attr.sl = p_mad_element->remote_sl;
	av_attr.dlid = p_mad_element->remote_lid;
	if( p_mad_element->grh_valid )
	{
		/* Reverse the GRH direction: swap the source and destination GIDs. */
		cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );
		av_attr.grh.src_gid	 = p_mad_element->p_grh->dest_gid;
		av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;
		av_attr.grh_valid = TRUE;
	}

	status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,
		&av_attr, &p_mad_element->h_av );

	if( status != IB_SUCCESS )
	{
		CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
		return status;
	}

	/* Complete the initialization of the MAD element. */
	p_mad_element->p_next = NULL;
	p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
	p_mad_element->resp_expected = FALSE;

	/* Clear context1 for proper send completion callback processing. */
	p_mad_element->context1 = NULL;

	/*
	 * Forward the trap.  Note that because forwarded traps use AL MAD
	 * services, the upper 32-bits of the TID are reserved by the access
	 * layer.  When matching a Trap Repress MAD, the SMA must only use
	 * the lower 32-bits of the TID.
	 */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );

	/* If the send failed, release the AV created above to avoid a leak. */
	if( status != IB_SUCCESS )
		ib_destroy_av( p_mad_element->h_av );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
	return status;
}
2529 \r
2530 \r
/*
 * Process a locally routed MAD received from the special QP.
 *
 * Saves the AL portion of the transaction ID, clears it in the wire
 * buffer, and sends the MAD to the local CA for processing via the MAD
 * service.  The original request element is stashed in context1 and
 * the saved TID in context2 so the alias receive callback can restore
 * them when the CA's response arrives.  Returns the send status.
 */
ib_api_status_t
recv_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_request )
{
	ib_mad_t*				p_mad_hdr;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_request );

	/* Initialize the MAD element. */
	p_mad_hdr = ib_get_mad_buf( p_mad_request );
	p_mad_request->context1 = p_mad_request;

	/* Save the TID. */
	p_mad_request->context2 =
		(void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );
/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
	al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )

	/*
	 * We need to get a response from the local HCA to this MAD only if this
	 * MAD is not itself a response.
	 */
	p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||
		( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );
	p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;
	p_mad_request->send_opt = IB_SEND_OPT_LOCAL;

	/* Send the locally addressed MAD request to the CA for processing. */
	status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
	return status;
}
2578 \r
2579 \r
2580 \r
2581 /*\r
2582  * Special QP alias send completion callback.\r
2583  */\r
2584 void\r
2585 spl_qp_alias_send_cb(\r
2586         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2587         IN                              void*                                           mad_svc_context,\r
2588         IN                              ib_mad_element_t*                       p_mad_element )\r
2589 {\r
2590         ib_api_status_t                 status;\r
2591 \r
2592         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2593 \r
2594         UNUSED_PARAM( h_mad_svc );\r
2595         UNUSED_PARAM( mad_svc_context );\r
2596         CL_ASSERT( p_mad_element );\r
2597 \r
2598         if( p_mad_element->h_av )\r
2599         {\r
2600                 status = ib_destroy_av( p_mad_element->h_av );\r
2601                 CL_ASSERT( status == IB_SUCCESS );\r
2602         }\r
2603 \r
2604         status = ib_put_mad( p_mad_element );\r
2605         CL_ASSERT( status == IB_SUCCESS );\r
2606 \r
2607         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2608 }\r
2609 \r
2610 \r
2611 \r
2612 /*\r
2613  * Special QP alias receive completion callback.\r
2614  */\r
2615 void\r
2616 spl_qp_alias_recv_cb(\r
2617         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2618         IN                              void*                                           mad_svc_context,\r
2619         IN                              ib_mad_element_t*                       p_mad_response )\r
2620 {\r
2621         spl_qp_svc_t*                   p_spl_qp_svc;\r
2622         ib_mad_element_t*               p_mad_request;\r
2623         ib_mad_t*                               p_mad_hdr;\r
2624         ib_av_attr_t                    av_attr;\r
2625         ib_api_status_t                 status;\r
2626 \r
2627         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2628 \r
2629         CL_ASSERT( mad_svc_context );\r
2630         CL_ASSERT( p_mad_response );\r
2631         CL_ASSERT( p_mad_response->send_context1 );\r
2632 \r
2633         /* Initialize pointers. */\r
2634         p_spl_qp_svc = mad_svc_context;\r
2635         p_mad_request = p_mad_response->send_context1;\r
2636         p_mad_hdr = ib_get_mad_buf( p_mad_response );\r
2637 \r
2638         /* Restore the TID, so it will match on the remote side. */\r
2639 #pragma warning( push, 3 )\r
2640         al_set_al_tid( &p_mad_hdr->trans_id,\r
2641                 (uint32_t)(uintn_t)p_mad_response->send_context2 );\r
2642 #pragma warning( pop )\r
2643 \r
2644         /* Set the remote QP. */\r
2645         p_mad_response->remote_qp       = p_mad_request->remote_qp;\r
2646         p_mad_response->remote_qkey = p_mad_request->remote_qkey;\r
2647 \r
2648         /* Prepare to create an address vector. */\r
2649         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2650         av_attr.port_num        = p_spl_qp_svc->port_num;\r
2651         av_attr.sl                      = p_mad_request->remote_sl;\r
2652         av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
2653         av_attr.path_bits       = p_mad_request->path_bits;\r
2654         if( p_mad_request->grh_valid )\r
2655         {\r
2656                 cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );\r
2657                 av_attr.grh.src_gid      = p_mad_request->p_grh->dest_gid;\r
2658                 av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;\r
2659                 av_attr.grh_valid = TRUE;\r
2660         }\r
2661         if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&\r
2662                 ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )\r
2663                 av_attr.dlid = IB_LID_PERMISSIVE;\r
2664         else\r
2665                 av_attr.dlid = p_mad_request->remote_lid;\r
2666 \r
2667         /* Create an address vector. */\r
2668         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2669                 &av_attr, &p_mad_response->h_av );\r
2670 \r
2671         if( status != IB_SUCCESS )\r
2672         {\r
2673                 ib_put_mad( p_mad_response );\r
2674 \r
2675                 CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2676                 return;\r
2677         }\r
2678 \r
2679         /* Send the response. */\r
2680         status = ib_send_mad( h_mad_svc, p_mad_response, NULL );\r
2681 \r
2682         if( status != IB_SUCCESS )\r
2683         {\r
2684                 ib_destroy_av( p_mad_response->h_av );\r
2685                 ib_put_mad( p_mad_response );\r
2686         }\r
2687 \r
2688         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2689 }\r
2690 \r
2691 \r
2692 \r
/*
 * Post receive buffers to a special QP.
 *
 * Replenishes the QP's receive queue up to max_qp_depth, pulling MAD
 * elements from the CA's pool.  Each element is tracked on the service
 * receive list before posting so a completion can always find it;
 * the tracking entry is removed again if the post fails.  Returns the
 * status of the last get/post attempt (IB_SUCCESS when the queue is
 * full).
 */
static ib_api_status_t
spl_qp_svc_post_recvs(
	IN				spl_qp_svc_t*	const		p_spl_qp_svc )
{
	ib_mad_element_t*		p_mad_element;
	al_mad_element_t*		p_al_element;
	ib_recv_wr_t			recv_wr;
	ib_api_status_t			status = IB_SUCCESS;

	/* Attempt to post receive buffers up to the max_qp_depth limit. */
	while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <
		(int32_t)p_spl_qp_svc->max_qp_depth )
	{
		/* Get a MAD element from the pool. */
		status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,
			MAD_BLOCK_SIZE, &p_mad_element );

		if( status != IB_SUCCESS ) break;

		p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,
			element );

		/* Build the receive work request. */
		recv_wr.p_next	 = NULL;
		recv_wr.wr_id	 = (uintn_t)p_al_element;
		/* Single data segment covering the GRH plus MAD buffer. */
		recv_wr.num_ds = 1;
		recv_wr.ds_array = &p_al_element->grh_ds;

		/* Queue the receive on the service tracking list. */
		cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,
			&p_al_element->list_item );

		/* Post the receive. */
		status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );

		if( status != IB_SUCCESS )
		{
			/* Undo the tracking insert and return the element on failure. */
			AL_TRACE( AL_DBG_ERROR,
				("Failed to post receive %p\n", p_al_element) );
			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_element->list_item );

			ib_put_mad( p_mad_element );
			break;
		}
	}

	return status;
}
2745 \r
2746 \r
2747 \r
2748 /*\r
2749  * Special QP service asynchronous event callback.\r
2750  */\r
2751 void\r
2752 spl_qp_svc_event_cb(\r
2753         IN                              ib_async_event_rec_t            *p_event_rec )\r
2754 {\r
2755         spl_qp_svc_t*                   p_spl_qp_svc;\r
2756 \r
2757         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2758 \r
2759         CL_ASSERT( p_event_rec );\r
2760         CL_ASSERT( p_event_rec->context );\r
2761 \r
2762         if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
2763         {\r
2764                 CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2765                 return;\r
2766         }\r
2767 \r
2768         p_spl_qp_svc = p_event_rec->context;\r
2769 \r
2770         spl_qp_svc_reset( p_spl_qp_svc );\r
2771 \r
2772         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2773 }\r
2774 \r
2775 \r
2776 \r
/*
 * Special QP service reset.
 *
 * Transitions the service from ACTIVE to ERROR under the object lock
 * and queues the actual QP reset work to the asynchronous processing
 * thread.  A no-op if the service is not currently ACTIVE, so
 * concurrent reset requests collapse into a single queued reset.
 */
void
spl_qp_svc_reset(
	IN				spl_qp_svc_t*				p_spl_qp_svc )
{
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

	/* Only an ACTIVE service can start a reset; otherwise nothing to do. */
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}

	/* Change the special QP service to the error state. */
	p_spl_qp_svc->state = SPL_QP_ERROR;

	/* Flag the service as in use by the asynchronous processing thread. */
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Queue an asynchronous processing item to reset the special QP. */
	cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );
}
2803 \r
2804 \r
2805 \r
/*
 * Asynchronous processing thread callback to reset the special QP service.
 *
 * Performs the full QP recovery sequence: waits for exclusive use of
 * the service, moves the QP to RESET, returns queued receive elements
 * to the pool, re-initializes the QP, drains stale send completions,
 * reposts receives, requeues outstanding sends, and finally restores
 * the ACTIVE state and re-arms the CQs.
 */
void
spl_qp_svc_reset_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_qp_mod_t				qp_mod;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );

	/* Wait here until the special QP service is only in use by this thread. */
	/* NOTE(review): busy-wait via thread yield; relies on other users
	 * decrementing in_use_cnt promptly. */
	while( p_spl_qp_svc->in_use_cnt != 1 )
	{
		cl_thread_suspend( 0 );
	}

	/* Change the QP to the RESET state. */
	cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
	qp_mod.req_state = IB_QPS_RESET;

	status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );
	CL_ASSERT( status == IB_SUCCESS );

	/* Return receive MAD elements to the pool. */
	for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );
		 p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );
		 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )
	{
		p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );

		status = ib_put_mad( &p_al_mad->element );
		CL_ASSERT( status == IB_SUCCESS );
	}

	/* Re-initialize the QP. */
	status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );
	CL_ASSERT( status == IB_SUCCESS );

	/* Poll to remove any remaining send completions from the CQ. */
	do
	{
		cl_memclr( &wc, sizeof( ib_wc_t ) );
		p_free_wc = &wc;
		status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );

	} while( status == IB_SUCCESS );

	/* Post receive buffers. */
	spl_qp_svc_post_recvs( p_spl_qp_svc );

	/*
	 * Re-queue any outstanding MAD send operations.
	 * Work from tail to head to maintain the request order.
	 */
	for( p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue );
		 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
		 p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue ) )
	{
		p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
		special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );
	}

	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state == SPL_QP_ERROR )
	{
		/* The QP is ready.  Change the state. */
		p_spl_qp_svc->state = SPL_QP_ACTIVE;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* Re-arm the CQs. */
		status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );
		status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );
		CL_ASSERT( status == IB_SUCCESS );

		/* Resume send processing. */
		special_qp_resume_sends( p_spl_qp_svc->h_qp );
	}
	else
	{
		/* The service left the ERROR state while we worked; do not resume. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
2906 \r
2907 \r
2908 \r
/*
 * Special QP alias asynchronous event callback.
 *
 * No action is taken for alias QP events; the event record is
 * intentionally ignored.
 */
void
spl_qp_alias_event_cb(
	IN				ib_async_event_rec_t		*p_event_rec )
{
	UNUSED_PARAM( p_event_rec );
}
2918 \r
2919 \r
2920 \r
2921 /*\r
2922  * Acquire the SMI dispatcher for the given port.\r
2923  */\r
2924 ib_api_status_t\r
2925 acquire_smi_disp(\r
2926         IN              const   ib_net64_t                                      port_guid,\r
2927                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2928 {\r
2929         CL_ASSERT( gp_spl_qp_mgr );\r
2930         return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );\r
2931 }\r
2932 \r
2933 \r
2934 \r
2935 /*\r
2936  * Acquire the GSI dispatcher for the given port.\r
2937  */\r
2938 ib_api_status_t\r
2939 acquire_gsi_disp(\r
2940         IN              const   ib_net64_t                                      port_guid,\r
2941                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2942 {\r
2943         CL_ASSERT( gp_spl_qp_mgr );\r
2944         return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );\r
2945 }\r
2946 \r
2947 \r
2948 \r
2949 /*\r
2950  * Acquire the service dispatcher for the given port.\r
2951  */\r
2952 ib_api_status_t\r
2953 acquire_svc_disp(\r
2954         IN              const   cl_qmap_t* const                        p_svc_map,\r
2955         IN              const   ib_net64_t                                      port_guid,\r
2956                 OUT                     al_mad_disp_handle_t            *ph_mad_disp )\r
2957 {\r
2958         cl_map_item_t*                  p_svc_item;\r
2959         spl_qp_svc_t*                   p_spl_qp_svc;\r
2960 \r
2961         AL_ENTER( AL_DBG_SMI );\r
2962 \r
2963         CL_ASSERT( p_svc_map );\r
2964         CL_ASSERT( gp_spl_qp_mgr );\r
2965 \r
2966         /* Search for the SMI or GSI service for the given port. */\r
2967         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
2968         p_svc_item = cl_qmap_get( p_svc_map, port_guid );\r
2969         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
2970         if( p_svc_item == cl_qmap_end( p_svc_map ) )\r
2971         {\r
2972                 /* The port does not have an active agent. */\r
2973                 AL_EXIT( AL_DBG_SMI );\r
2974                 return IB_INVALID_GUID;\r
2975         }\r
2976 \r
2977         p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );\r
2978 \r
2979         /* Found a match.  Get MAD dispatcher handle. */\r
2980         *ph_mad_disp = p_spl_qp_svc->h_mad_disp;\r
2981 \r
2982         /* Reference the MAD dispatcher on behalf of the client. */\r
2983         ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );\r
2984 \r
2985         AL_EXIT( AL_DBG_SMI );\r
2986         return IB_SUCCESS;\r
2987 }\r
2988 \r
2989 \r
2990 \r
2991 /*\r
2992  * Force a poll for CA attribute changes.\r
2993  */\r
2994 void\r
2995 force_smi_poll(\r
2996         void )\r
2997 {\r
2998         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2999 \r
3000         /*\r
3001          * Stop the poll timer.  Just invoke the timer callback directly to\r
3002          * save the thread context switching.\r
3003          */\r
3004         smi_poll_timer_cb( gp_spl_qp_mgr );\r
3005 \r
3006         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
3007 }\r
3008 \r
3009 \r
3010 \r
/*
 * Poll for CA port attribute changes.
 *
 * Timer callback (also invoked directly by force_smi_poll).  Kicks the
 * PnP manager to scan the local HCAs, reposts receive buffers to every
 * registered special QP service, and restarts the poll timer if any
 * agents remain and polling is enabled.
 *
 * context must be gp_spl_qp_mgr (asserted below).
 */
void
smi_poll_timer_cb(
	IN				void*						context )
{
	cl_status_t			cl_status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( context );
	CL_ASSERT( gp_spl_qp_mgr == context );
	UNUSED_PARAM( context );

	/*
	 * Scan for changes on the local HCAs.  The PnP manager has its own
	 * thread for processing changes, so this runs in parallel with the
	 * receive-buffer reposting below.
	 */
	pnp_poll();

	/*
	 * NOTE(review): the historical comment here described checking an
	 * "asynchronous processing item in use" flag to coalesce a
	 * force_smi_poll racing with timer expiry; no such check appears in
	 * this code path.  What actually happens: the manager lock is held
	 * while reposting receives and (conditionally) restarting the timer.
	 */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );

	/* Perform port processing on the special QP agents. */
	cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,
		gp_spl_qp_mgr );

	/* Determine if there are any special QP agents to poll. */
	if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )
	{
		/* Restart the polling timer. */
		cl_status =
			cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );
		CL_ASSERT( cl_status == CL_SUCCESS );
	}
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
3057 \r
3058 \r
3059 \r
3060 /*\r
3061  * Post receive buffers to a special QP.\r
3062  */\r
3063 void\r
3064 smi_post_recvs(\r
3065         IN                              cl_list_item_t* const           p_list_item,\r
3066         IN                              void*                                           context )\r
3067 {\r
3068         al_obj_t*                               p_obj;\r
3069         spl_qp_svc_t*                   p_spl_qp_svc;\r
3070 \r
3071         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
3072 \r
3073         CL_ASSERT( p_list_item );\r
3074         UNUSED_PARAM( context );\r
3075 \r
3076         p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
3077         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
3078 \r
3079         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
3080         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
3081         {\r
3082                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3083                 return;\r
3084         }\r
3085 \r
3086         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
3087         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3088 \r
3089         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
3090 }\r