9f9d5dfca4ae9b09f2eb436b447d6834091a8091
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include <iba/ib_al.h>\r
35 #include <complib/cl_timer.h>\r
36 \r
37 #include "ib_common.h"\r
38 #include "al_common.h"\r
39 #include "al_debug.h"\r
40 #include "al_verbs.h"\r
41 #include "al_mgr.h"\r
42 #include "al_pnp.h"\r
43 #include "al_qp.h"\r
44 #include "al_smi.h"\r
45 #include "al_av.h"\r
46 \r
47 \r
/* Node description string, maintained by the CA manager. */
extern char						node_desc[IB_NODE_DESCRIPTION_SIZE];

/* Interval at which queued SMI work is polled for progress. */
#define SMI_POLL_INTERVAL			20000		/* Milliseconds */
/* Maximum time to wait for a locally-processed MAD to complete. */
#define LOCAL_MAD_TIMEOUT			50			/* Milliseconds */
/* Default work-queue depths for the special QPs (may be clamped by CA limits). */
#define DEFAULT_QP0_DEPTH			256
#define DEFAULT_QP1_DEPTH			1024

/* Current SMI poll interval; initialized to the default above. */
uint32_t				g_smi_poll_interval =	SMI_POLL_INTERVAL;
/* Global singleton special QP manager; NULL until create_spl_qp_mgr runs. */
spl_qp_mgr_t*			gp_spl_qp_mgr = NULL;
58 \r
59 /*\r
60  * Function prototypes.\r
61  */\r
62 void\r
63 destroying_spl_qp_mgr(\r
64         IN                              al_obj_t*                                       p_obj );\r
65 \r
66 void\r
67 free_spl_qp_mgr(\r
68         IN                              al_obj_t*                                       p_obj );\r
69 \r
70 ib_api_status_t\r
71 spl_qp0_agent_pnp_cb(\r
72         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
73 \r
74 ib_api_status_t\r
75 spl_qp1_agent_pnp_cb(\r
76         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
77 \r
78 ib_api_status_t\r
79 spl_qp_agent_pnp(\r
80         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
81         IN                              ib_qp_type_t                            qp_type );\r
82 \r
83 ib_api_status_t\r
84 create_spl_qp_svc(\r
85         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
86         IN              const   ib_qp_type_t                            qp_type );\r
87 \r
88 void\r
89 destroying_spl_qp_svc(\r
90         IN                              al_obj_t*                                       p_obj );\r
91 \r
92 void\r
93 free_spl_qp_svc(\r
94         IN                              al_obj_t*                                       p_obj );\r
95 \r
96 void\r
97 spl_qp_svc_lid_change(\r
98         IN                              al_obj_t*                                       p_obj,\r
99         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
100 \r
101 ib_api_status_t\r
102 remote_mad_send(\r
103         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
104         IN                              al_mad_wr_t* const                      p_mad_wr );\r
105 \r
106 static ib_api_status_t\r
107 local_mad_send(\r
108         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
109         IN                              al_mad_wr_t* const                      p_mad_wr );\r
110 \r
111 static ib_api_status_t\r
112 loopback_mad(\r
113         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
114         IN                              al_mad_wr_t* const                      p_mad_wr );\r
115 \r
116 static ib_api_status_t\r
117 process_subn_mad(\r
118         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
119         IN                              al_mad_wr_t* const                      p_mad_wr );\r
120 \r
121 static ib_api_status_t\r
122 fwd_local_mad(\r
123         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
124         IN                              al_mad_wr_t* const                      p_mad_wr );\r
125 \r
126 void\r
127 send_local_mad_cb(\r
128         IN                              cl_async_proc_item_t*           p_item );\r
129 \r
130 void\r
131 spl_qp_send_comp_cb(\r
132         IN              const   ib_cq_handle_t                          h_cq,\r
133         IN                              void                                            *cq_context );\r
134 \r
135 void\r
136 spl_qp_recv_comp_cb(\r
137         IN              const   ib_cq_handle_t                          h_cq,\r
138         IN                              void                                            *cq_context );\r
139 \r
140 void\r
141 spl_qp_comp(\r
142         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
143         IN              const   ib_cq_handle_t                          h_cq,\r
144         IN                              ib_wc_type_t                            wc_type );\r
145 \r
146 ib_api_status_t\r
147 process_mad_recv(\r
148         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
149         IN                              ib_mad_element_t*                       p_mad_element );\r
150 \r
151 mad_route_t\r
152 route_recv_smp(\r
153         IN                              ib_mad_element_t*                       p_mad_element );\r
154 \r
155 mad_route_t\r
156 route_recv_smp_attr(\r
157         IN                              ib_mad_element_t*                       p_mad_element );\r
158 \r
159 mad_route_t\r
160 route_recv_dm_mad(\r
161         IN                              ib_mad_element_t*                       p_mad_element );\r
162 \r
163 mad_route_t\r
164 route_recv_gmp(\r
165         IN                              ib_mad_element_t*                       p_mad_element );\r
166 \r
167 mad_route_t\r
168 route_recv_gmp_attr(\r
169         IN                              ib_mad_element_t*                       p_mad_element );\r
170 \r
171 ib_api_status_t\r
172 forward_sm_trap(\r
173         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
174         IN                              ib_mad_element_t*                       p_mad_element );\r
175 \r
176 ib_api_status_t\r
177 recv_local_mad(\r
178         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
179         IN                              ib_mad_element_t*                       p_mad_request );\r
180 \r
181 void\r
182 spl_qp_alias_send_cb(\r
183         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
184         IN                              void                                            *mad_svc_context,\r
185         IN                              ib_mad_element_t                        *p_mad_element );\r
186 \r
187 void\r
188 spl_qp_alias_recv_cb(\r
189         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
190         IN                              void                                            *mad_svc_context,\r
191         IN                              ib_mad_element_t                        *p_mad_response );\r
192 \r
193 static ib_api_status_t\r
194 spl_qp_svc_post_recvs(\r
195         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
196 \r
197 void\r
198 spl_qp_svc_event_cb(\r
199         IN                              ib_async_event_rec_t            *p_event_rec );\r
200 \r
201 void\r
202 spl_qp_alias_event_cb(\r
203         IN                              ib_async_event_rec_t            *p_event_rec );\r
204 \r
205 void\r
206 spl_qp_svc_reset(\r
207         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
208 \r
209 void\r
210 spl_qp_svc_reset_cb(\r
211         IN                              cl_async_proc_item_t*           p_item );\r
212 \r
213 ib_api_status_t\r
214 acquire_svc_disp(\r
215         IN              const   cl_qmap_t* const                        p_svc_map,\r
216         IN              const   ib_net64_t                                      port_guid,\r
217                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
218 \r
219 void\r
220 smi_poll_timer_cb(\r
221         IN                              void*                                           context );\r
222 \r
223 void\r
224 smi_post_recvs(\r
225         IN                              cl_list_item_t* const           p_list_item,\r
226         IN                              void*                                           context );\r
227 \r
228 #if defined( CL_USE_MUTEX )\r
229 void\r
230 spl_qp_send_async_cb(\r
231         IN                              cl_async_proc_item_t*           p_item );\r
232 \r
233 void\r
234 spl_qp_recv_async_cb(\r
235         IN                              cl_async_proc_item_t*           p_item );\r
236 #endif\r
237 \r
/*
 * Create the special QP manager.
 *
 * Allocates and initializes the global SMI/GSI special QP manager
 * (gp_spl_qp_mgr), attaches it to the given parent AL object, starts
 * the SMI polling timer, and registers for port PnP events once for
 * QP0 and once for QP1.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY on allocation failure, or
 * the status of the failing initialization step.  On any failure after
 * init_al_obj succeeds, cleanup goes through the object's destroy
 * callback chain (destroying_spl_qp_mgr / free_spl_qp_mgr).
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the per-port SMI and GSI service lookup maps. */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* Object was never fully initialized - free it directly. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return IB_SUCCESS;
}
357 \r
358 \r
359 \r
/*
 * Pre-destroy the special QP manager.
 *
 * Destroy callback for the manager's AL object: deregisters the QP0 and
 * QP1 PnP registrations (each deregistration releases the reference it
 * held via deref_al_obj) and destroys the SMI polling timer.  Runs
 * before free_spl_qp_mgr in the AL object destroy sequence.
 */
void
destroying_spl_qp_mgr(
	IN				al_obj_t*					p_obj )
{
	ib_api_status_t			status;

	CL_ASSERT( p_obj );
	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
	UNUSED_PARAM( p_obj );

	/* Deregister for port PnP events for QP0. */
	if( gp_spl_qp_mgr->h_qp0_pnp )
	{
		/* deref_al_obj releases the reference taken at registration time. */
		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	/* Deregister for port PnP events for QP1. */
	if( gp_spl_qp_mgr->h_qp1_pnp )
	{
		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	/* Destroy the SMI polling timer. */
	cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );
}
392 \r
393 \r
394 \r
/*
 * Free the special QP manager.
 *
 * Final cleanup callback for the manager's AL object: tears down the
 * embedded AL object, releases the manager's memory, and clears the
 * global pointer so a subsequent create_spl_qp_mgr can succeed.
 */
void
free_spl_qp_mgr(
	IN				al_obj_t*					p_obj )
{
	CL_ASSERT( p_obj );
	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
	UNUSED_PARAM( p_obj );

	destroy_al_obj( &gp_spl_qp_mgr->obj );
	cl_free( gp_spl_qp_mgr );
	gp_spl_qp_mgr = NULL;
}
410 \r
411 \r
412 \r
413 /*\r
414  * Special QP0 agent PnP event callback.\r
415  */\r
416 ib_api_status_t\r
417 spl_qp0_agent_pnp_cb(\r
418         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
419 {\r
420         ib_api_status_t status;\r
421         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
422 \r
423         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
424 \r
425         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
426         return status;\r
427 }\r
428 \r
429 \r
430 \r
431 /*\r
432  * Special QP1 agent PnP event callback.\r
433  */\r
434 ib_api_status_t\r
435 spl_qp1_agent_pnp_cb(\r
436         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
437 {\r
438         ib_api_status_t status;\r
439         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
440 \r
441         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
442 \r
443         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
444         return status;\r
445 }\r
446 \r
447 \r
448 \r
449 /*\r
450  * Special QP agent PnP event callback.\r
451  */\r
452 ib_api_status_t\r
453 spl_qp_agent_pnp(\r
454         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
455         IN                              ib_qp_type_t                            qp_type )\r
456 {\r
457         ib_api_status_t                 status;\r
458         al_obj_t*                               p_obj;\r
459 \r
460         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
461 \r
462         CL_ASSERT( p_pnp_rec );\r
463         p_obj = p_pnp_rec->context;\r
464 \r
465         /* Dispatch based on the PnP event type. */\r
466         switch( p_pnp_rec->pnp_event )\r
467         {\r
468         case IB_PNP_PORT_ADD:\r
469                 CL_ASSERT( !p_obj );\r
470                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
471                 break;\r
472 \r
473         case IB_PNP_PORT_REMOVE:\r
474                 CL_ASSERT( p_obj );\r
475                 ref_al_obj( p_obj );\r
476                 p_obj->pfn_destroy( p_obj, NULL );\r
477                 status = IB_SUCCESS;\r
478                 break;\r
479 \r
480         case IB_PNP_LID_CHANGE:\r
481                 CL_ASSERT( p_obj );\r
482                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
483                 status = IB_SUCCESS;\r
484                 break;\r
485 \r
486         default:\r
487                 /* All other events are ignored. */\r
488                 status = IB_SUCCESS;\r
489                 break;\r
490         }\r
491 \r
492         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
493         return status;\r
494 }\r
495 \r
496 \r
497 \r
498 /*\r
499  * Create a special QP service.\r
500  */\r
501 ib_api_status_t\r
502 create_spl_qp_svc(\r
503         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
504         IN              const   ib_qp_type_t                            qp_type )\r
505 {\r
506         cl_status_t                             cl_status;\r
507         spl_qp_svc_t*                   p_spl_qp_svc;\r
508         ib_ca_handle_t                  h_ca;\r
509         ib_cq_create_t                  cq_create;\r
510         ib_qp_create_t                  qp_create;\r
511         ib_qp_attr_t                    qp_attr;\r
512         ib_mad_svc_t                    mad_svc;\r
513         ib_api_status_t                 status;\r
514 \r
515         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
516 \r
517         CL_ASSERT( p_pnp_rec );\r
518 \r
519         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
520         {\r
521                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
522                 return IB_INVALID_PARAMETER;\r
523         }\r
524 \r
525         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
526         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
527         CL_ASSERT( p_pnp_rec->p_port_attr );\r
528 \r
529         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
530         if( !p_spl_qp_svc )\r
531         {\r
532                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
533                         ("IB_INSUFFICIENT_MEMORY\n") );\r
534                 return IB_INSUFFICIENT_MEMORY;\r
535         }\r
536 \r
537         /* Tie the special QP service to the port by setting the port number. */\r
538         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
539         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
540         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
541 \r
542         /* Initialize the send and receive queues. */\r
543         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
544         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
545 \r
546 #if defined( CL_USE_MUTEX )\r
547         /* Initialize async callbacks and flags for send/receive processing. */\r
548         p_spl_qp_svc->send_async_queued = FALSE;\r
549         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
550         p_spl_qp_svc->recv_async_queued = FALSE;\r
551         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
552 #endif\r
553 \r
554         /* Initialize the async callback function to process local sends. */\r
555         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
556 \r
557         /* Initialize the async callback function to reset the QP on error. */\r
558         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
559 \r
560         /* Construct the special QP service object. */\r
561         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
562 \r
563         /* Initialize the special QP service object. */\r
564         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
565                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
566         if( status != IB_SUCCESS )\r
567         {\r
568                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
569                 return status;\r
570         }\r
571 \r
572         /* Attach the special QP service to the parent object. */\r
573         status = attach_al_obj(\r
574                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
575         if( status != IB_SUCCESS )\r
576         {\r
577                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
578                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
579                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
580                 return status;\r
581         }\r
582 \r
583         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
584         CL_ASSERT( h_ca );\r
585         if( !h_ca )\r
586         {\r
587                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
588                 AL_TRACE_EXIT( AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
589                 return IB_INVALID_GUID;\r
590         }\r
591 \r
592         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
593 \r
594         /* Determine the maximum queue depth of the QP and CQs. */\r
595         p_spl_qp_svc->max_qp_depth =\r
596                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
597                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
598                 p_pnp_rec->p_ca_attr->max_wrs :\r
599                 p_pnp_rec->p_ca_attr->max_cqes;\r
600 \r
601         /* Compare this maximum to the default special queue depth. */\r
602         if( ( qp_type == IB_QPT_QP0 ) &&\r
603                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
604                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
605         if( ( qp_type == IB_QPT_QP1 ) &&\r
606                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
607                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
608 \r
609         /* Create the send CQ. */\r
610         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
611         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
612         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
613 \r
614         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
615                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
616 \r
617         if( status != IB_SUCCESS )\r
618         {\r
619                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
620                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
621                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
622                 return status;\r
623         }\r
624 \r
625         /* Reference the special QP service on behalf of ib_create_cq. */\r
626         ref_al_obj( &p_spl_qp_svc->obj );\r
627 \r
628         /* Check the result of the creation request. */\r
629         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
630         {\r
631                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
632                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
633                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
634                 return IB_INSUFFICIENT_RESOURCES;\r
635         }\r
636 \r
637         /* Create the receive CQ. */\r
638         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
639         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
640         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
641 \r
642         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
643                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
644 \r
645         if( status != IB_SUCCESS )\r
646         {\r
647                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
648                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
649                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
650                 return status;\r
651         }\r
652 \r
653         /* Reference the special QP service on behalf of ib_create_cq. */\r
654         ref_al_obj( &p_spl_qp_svc->obj );\r
655 \r
656         /* Check the result of the creation request. */\r
657         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
658         {\r
659                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
660                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
661                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
662                 return IB_INSUFFICIENT_RESOURCES;\r
663         }\r
664 \r
665         /* Create the special QP. */\r
666         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
667         qp_create.qp_type = qp_type;\r
668         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
669         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
670         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
671         qp_create.rq_sge = 1;\r
672         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
673         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
674         qp_create.sq_signaled = TRUE;\r
675 \r
676         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
677                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
678                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
679 \r
680         if( status != IB_SUCCESS )\r
681         {\r
682                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
683                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
684                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
685                 return status;\r
686         }\r
687 \r
688         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
689         ref_al_obj( &p_spl_qp_svc->obj );\r
690 \r
691         /* Check the result of the creation request. */\r
692         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
693         if( status != IB_SUCCESS )\r
694         {\r
695                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
696                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
697                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
698                 return status;\r
699         }\r
700 \r
701         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
702                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
703                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
704         {\r
705                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
706                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
707                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
708                 return IB_INSUFFICIENT_RESOURCES;\r
709         }\r
710 \r
711         /* Initialize the QP for use. */\r
712         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
713         if( status != IB_SUCCESS )\r
714         {\r
715                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
716                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
717                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
718                 return status;\r
719         }\r
720 \r
721         /* Post receive buffers. */\r
722         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
723         if( status != IB_SUCCESS )\r
724         {\r
725                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
726                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
727                         ("spl_qp_svc_post_recvs failed, %s\n",\r
728                         ib_get_err_str( status ) ) );\r
729                 return status;\r
730         }\r
731 \r
732         /* Create the MAD dispatcher. */\r
733         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
734                 &p_spl_qp_svc->h_mad_disp );\r
735         if( status != IB_SUCCESS )\r
736         {\r
737                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
738                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
739                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
740                 return status;\r
741         }\r
742 \r
743         /*\r
744          * Add this service to the special QP manager lookup lists.\r
745          * The service must be added to allow the creation of a QP alias.\r
746          */\r
747         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
748         if( qp_type == IB_QPT_QP0 )\r
749         {\r
750                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
751                         &p_spl_qp_svc->map_item );\r
752         }\r
753         else\r
754         {\r
755                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
756                         &p_spl_qp_svc->map_item );\r
757         }\r
758         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
759 \r
760         /*\r
761          * If the CA does not support HW agents, create a QP alias and register\r
762          * a MAD service for sending responses from the local MAD interface.\r
763          */\r
764         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
765         {\r
766                 /* Create a QP alias. */\r
767                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
768                 qp_create.qp_type =\r
769                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
770                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
771                 qp_create.sq_sge                = 1;\r
772                 qp_create.sq_signaled   = TRUE;\r
773 \r
774                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
775                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
776                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
777                         &p_spl_qp_svc->h_qp_alias );\r
778 \r
779                 if (status != IB_SUCCESS)\r
780                 {\r
781                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
782                         CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
783                                 ("ib_get_spl_qp alias failed, %s\n",\r
784                                 ib_get_err_str( status ) ) );\r
785                         return status;\r
786                 }\r
787 \r
788                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
789                 ref_al_obj( &p_spl_qp_svc->obj );\r
790 \r
791                 /* Register a MAD service for sends. */\r
792                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
793                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
794                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
795                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
796 \r
797                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
798                         &p_spl_qp_svc->h_mad_svc );\r
799 \r
800                 if( status != IB_SUCCESS )\r
801                 {\r
802                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
803                         CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
804                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
805                         return status;\r
806                 }\r
807         }\r
808 \r
809         /* Set the context of the PnP event to this child object. */\r
810         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
811 \r
812         /* The QP is ready.  Change the state. */\r
813         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
814 \r
815         /* Force a completion callback to rearm the CQs. */\r
816         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
817         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
818 \r
819         /* Start the polling thread timer. */\r
820         if( g_smi_poll_interval )\r
821         {\r
822                 cl_status =\r
823                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
824 \r
825                 if( cl_status != CL_SUCCESS )\r
826                 {\r
827                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
828                         CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
829                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
830                         return ib_convert_cl_status( cl_status );\r
831                 }\r
832         }\r
833 \r
834         /* Release the reference taken in init_al_obj. */\r
835         deref_al_obj( &p_spl_qp_svc->obj );\r
836 \r
837         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
838         return IB_SUCCESS;\r
839 }\r
840 \r
841 \r
842 \r
843 /*\r
844  * Return a work completion to the MAD dispatcher for the specified MAD.\r
845  */\r
846 static void\r
847 __complete_send_mad(\r
848         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
849         IN                              al_mad_wr_t* const                      p_mad_wr,\r
850         IN              const   ib_wc_status_t                          wc_status )\r
851 {\r
852         ib_wc_t                 wc;\r
853 \r
854         /* Construct a send work completion. */\r
855         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
856         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
857         wc.wc_type      = IB_WC_SEND;\r
858         wc.status       = wc_status;\r
859 \r
860         /* Set the send size if we were successful with the send. */\r
861         if( wc_status == IB_WCS_SUCCESS )\r
862                 wc.length = MAD_BLOCK_SIZE;\r
863 \r
864         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
865 }\r
866 \r
867 \r
868 \r
/*
 * Pre-destroy a special QP service.
 *
 * Runs as the AL object "destroying" callback: blocks new send requests,
 * waits for in-flight users of the service to drain, detaches the service
 * from the global SMI/GSI lookup maps, then destroys the QP, the QP alias,
 * and both CQs.  Any sends still queued in software are flushed back to
 * the MAD dispatcher with IB_WCS_WR_FLUSHED_ERR.
 */
void
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_wr_t*			p_mad_wr;

	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Change the state to prevent processing new send requests. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->state = SPL_QP_DESTROYING;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/*
	 * Wait here until the special QP service is no longer in use.
	 * in_use_cnt is raised while a local MAD is being processed (see
	 * spl_qp_svc_send/local_mad_send); yield the CPU until it drains.
	 */
	while( p_spl_qp_svc->in_use_cnt )
	{
		cl_thread_suspend( 0 );
	}

	/* Destroy the special QP. */
	if( p_spl_qp_svc->h_qp )
	{
		/* If present, remove the special QP service from the tracking map. */
		cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
		if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
		{
			cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
		}
		else
		{
			cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
		}
		cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

		/*
		 * deref_al_obj is installed as the destroy callback so the
		 * reference held on this service is released once the QP's
		 * asynchronous teardown completes.
		 */
		status = ib_destroy_qp( p_spl_qp_svc->h_qp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

		/* Complete any outstanding MAD send operations as "flushed". */
		for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
			 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
			 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
		{
			p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_WR_FLUSHED_ERR );
		}

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		/* Receive MAD elements are returned to the pool by the free routine. */
	}

	/* Destroy the special QP alias and CQs. */
	if( p_spl_qp_svc->h_qp_alias )
	{
		status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_send_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_recv_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
955 \r
956 \r
957 \r
958 /*\r
959  * Free a special QP service.\r
960  */\r
961 void\r
962 free_spl_qp_svc(\r
963         IN                              al_obj_t*                                       p_obj )\r
964 {\r
965         spl_qp_svc_t*                   p_spl_qp_svc;\r
966         cl_list_item_t*                 p_list_item;\r
967         al_mad_element_t*               p_al_mad;\r
968         ib_api_status_t                 status;\r
969 \r
970         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
971 \r
972         CL_ASSERT( p_obj );\r
973         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
974 \r
975         /* Dereference the CA. */\r
976         if( p_spl_qp_svc->obj.p_ci_ca )\r
977                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
978 \r
979         /* Return receive MAD elements to the pool. */\r
980         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
981                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
982                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
983         {\r
984                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
985 \r
986                 status = ib_put_mad( &p_al_mad->element );\r
987                 CL_ASSERT( status == IB_SUCCESS );\r
988         }\r
989 \r
990         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
991 \r
992         destroy_al_obj( &p_spl_qp_svc->obj );\r
993         cl_free( p_spl_qp_svc );\r
994 \r
995         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
996 }\r
997 \r
998 \r
999 \r
1000 /*\r
1001  * Update the base LID of a special QP service.\r
1002  */\r
1003 void\r
1004 spl_qp_svc_lid_change(\r
1005         IN                              al_obj_t*                                       p_obj,\r
1006         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1007 {\r
1008         spl_qp_svc_t*                   p_spl_qp_svc;\r
1009 \r
1010         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
1011 \r
1012         CL_ASSERT( p_obj );\r
1013         CL_ASSERT( p_pnp_rec );\r
1014         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1015 \r
1016         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1017 \r
1018         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1019         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1020 \r
1021         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1022 }\r
1023 \r
1024 \r
1025 \r
/*
 * Route a send work request.
 *
 * Classifies an outbound MAD as local (processed on this host), loopback
 * (locally addressed SM-to-SM traffic), remote (posted to the wire), or
 * discard (invalid directed-route SMPs).  The software routing checks
 * only apply when check_local_mad() reports the QP needs local MAD
 * handling; otherwise routing falls through to the caller's send option.
 */
mad_route_t
route_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_send_wr_t* const			p_send_wr )
{
	al_mad_wr_t*			p_mad_wr;
	al_mad_send_t*			p_mad_send;
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	ib_av_handle_t			h_av;
	mad_route_t				route;
	boolean_t				local, loopback, discard;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_send_wr );

	/* Initialize pointers to the MAD work request and the MAD. */
	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Check if the CA has a local MAD interface. */
	local = loopback = discard = FALSE;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * If the MAD is a locally addressed Subnet Management, Performance
		 * Management, or Connection Management datagram, process the work
		 * request locally.
		 */
		h_av = p_send_wr->dgrm.ud.h_av;
		switch( p_mad->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the originator of the response.  Discard
				 * if the hop count or pointer is zero, an intermediate hop,
				 * out of bounds hop, or if the first port of the directed
				 * route return path is not this port.
				 */
				if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt or hop ptr set to 0...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt != (hop ptr - 1)...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("hop cnt > max hops...discarding\n") );
					discard = TRUE;
				}
				else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
						 ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
							p_spl_qp_svc->port_num ) )
				{
					CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
						("return path[hop ptr - 1] != port num...discarding\n") );
					discard = TRUE;
				}
			}
			else
			{
				/* The SMP is a request. */
				if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
					( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
				{
					/* Out-of-bounds hop count or pointer. */
					discard = TRUE;
				}
				else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
				{
					/* Self Addressed: Sent locally, routed locally. */
					local = TRUE;
					/* Both DR LIDs must be permissive for a self-addressed SMP. */
					discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
							  ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
				{
					/* End of Path: Sent remotely, routed locally. */
					local = TRUE;
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_ptr	== 0 ) )
				{
					/* Beginning of Path: Sent locally, routed remotely. */
					if( p_smp->dr_slid == IB_LID_PERMISSIVE )
					{
						/* The first outbound hop must leave via this port. */
						discard =
							( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
							  p_spl_qp_svc->port_num );
					}
				}
				else
				{
					/* Intermediate hop. */
					discard = TRUE;
				}
			}
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);
			break;

		case IB_MCLASS_SUBN_LID:
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

			/* Fall through to check for a local MAD. */

		case IB_MCLASS_PERF:
		case IB_MCLASS_BM:
			/* Local if the destination LID targets one of this port's LIDs. */
			local = ( h_av &&
				( h_av->av_attr.dlid ==
				( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
			{
				local = ( h_av &&
					( h_av->av_attr.dlid ==
					( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			}
			break;
		}
	}

	/* Start from the caller's request, then let the checks above override. */
	route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
		ROUTE_LOCAL : ROUTE_REMOTE;
	if( local ) route = ROUTE_LOCAL;
	if( loopback && local ) route = ROUTE_LOOPBACK;
	if( discard ) route = ROUTE_DISCARD;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return route;
}
1178 \r
1179 \r
1180 \r
/*
 * Send a work request on the special QP.
 *
 * Software send entry point for the special QP: determines how the MAD
 * is routed, then either processes it locally or posts it remotely.
 * Returns IB_RESOURCE_BUSY when the request must be retried later; the
 * special QP resumes deferred sends at that point.
 */
ib_api_status_t
spl_qp_svc_send(
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_send_wr_t* const			p_send_wr )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	al_mad_wr_t*			p_mad_wr;
	mad_route_t				route;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( h_qp );
	CL_ASSERT( p_send_wr );

	/* Get the special QP service stored as the QP's context. */
	p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, p_send_wr );

	/*
	 * Check the QP state and guard against error handling.  Also,
	 * to maintain proper order of work completions, delay processing
	 * a local MAD until any remote MAD work requests have completed,
	 * and delay processing a remote MAD until local MAD work requests
	 * have completed.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
		(is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
		( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
			p_spl_qp_svc->max_qp_depth ) )
	{
		/*
		 * Return busy status.
		 * The special QP will resume sends at this point.
		 */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return IB_RESOURCE_BUSY;
	}

	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

	if( is_local( route ) )
	{
		/* Save the local MAD work request for processing. */
		p_spl_qp_svc->local_mad_wr = p_mad_wr;

		/* Flag the service as in use by the asynchronous processing thread. */
		cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

		/* The lock is dropped before local processing; local_mad_send
		 * clears local_mad_wr and decrements in_use_cnt when done. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		status = local_mad_send( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/* Process a remote MAD send work request (runs with the lock held). */
		status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
1255 \r
1256 \r
1257 \r
/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 *
 * Applies the outbound directed-route SMP hop pointer adjustments
 * required by IBA, queues the request on the service tracking queue, and
 * posts it to the special QP.  If the post fails, the queue insertion and
 * the hop pointer adjustments are both undone so the MAD can be retried.
 */
ib_api_status_t
remote_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD held by the work request. */
	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	/* Perform outbound MAD processing. */

	/* Adjust directed route SMPs as required by IBA. */
	if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		if( ib_smp_is_response( p_smp ) )
		{
			/* Responses returning on a DR path step the pointer back. */
			if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				p_smp->hop_ptr--;
		}
		else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
		{
			/*
			 * Only update the pointer if the hw_agent is not implemented.
			 * Fujitsu implements SMI in hardware, so the following has to
			 * be passed down to the hardware SMI.
			 */
			ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
			if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
				p_smp->hop_ptr++;
			ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
		}
	}

	/* Always generate send completions. */
	p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;

	/* Queue the MAD work request on the service tracking queue. */
	cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

	status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

	if( status != IB_SUCCESS )
	{
		/* The post never reached the QP; untrack the request. */
		cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

		/* Reset directed route SMPs as required by IBA. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
		{
			if( ib_smp_is_response( p_smp ) )
			{
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
					p_smp->hop_ptr++;
			}
			else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
			{
				/* Only update if the hw_agent is not implemented. */
				ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
				if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
					p_smp->hop_ptr--;
				ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
			}
		}
	}

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
1335 \r
1336 \r
/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 *
 * Always returns IB_SUCCESS: failures are reported back to the MAD
 * dispatcher through __complete_send_mad, not via the return value.
 * MADs that cannot be completed from cached data (IB_NOT_DONE) are
 * queued for asynchronous processing; otherwise the service's
 * local_mad_wr slot and in_use_cnt are released here.
 */
static ib_api_status_t
local_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	mad_route_t				route;
	ib_api_status_t			status = IB_SUCCESS;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

	/* Check if this MAD should be discarded. */
	if( is_discard( route ) )
	{
		/* Deliver a "work completion" to the dispatcher. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		status = IB_INVALID_SETTING;
	}
	else if( is_loopback( route ) )
	{
		/* Loopback local SM to SM "heartbeat" messages. */
		status = loopback_mad( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/* Try to satisfy subnet management classes from cached data. */
		switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
		case IB_MCLASS_SUBN_LID:
			status = process_subn_mad( p_spl_qp_svc, p_mad_wr );
			break;

		default:
			/* Other classes always require asynchronous processing. */
			status = IB_NOT_DONE;
		}
	}

	if( status == IB_NOT_DONE )
	{
		/* Queue an asynchronous processing item to process the local MAD. */
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
	}
	else
	{
		/*
		 * Clear the local MAD pointer to allow processing of other MADs.
		 * This is done after polling for attribute changes to ensure that
		 * subsequent MADs pick up any changes performed by this one.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		p_spl_qp_svc->local_mad_wr = NULL;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* No longer in use by the asynchronous processing thread. */
		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

		/* Special QP operations will resume by unwinding. */
	}

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
1409 \r
1410 \r
1411 static ib_api_status_t\r
1412 get_resp_mad(\r
1413         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1414         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1415                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1416 {\r
1417         ib_api_status_t                 status;\r
1418 \r
1419         AL_ENTER( AL_DBG_SMI );\r
1420 \r
1421         CL_ASSERT( p_spl_qp_svc );\r
1422         CL_ASSERT( p_mad_wr );\r
1423         CL_ASSERT( pp_mad_resp );\r
1424 \r
1425         /* Get a MAD element from the pool for the response. */\r
1426         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1427                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1428         if( status != IB_SUCCESS )\r
1429         {\r
1430                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1431                         IB_WCS_LOCAL_OP_ERR );\r
1432         }\r
1433 \r
1434         AL_EXIT( AL_DBG_SMI );\r
1435         return status;\r
1436 }\r
1437 \r
1438 \r
1439 static ib_api_status_t\r
1440 complete_local_mad(\r
1441         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1442         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1443         IN                              ib_mad_element_t* const         p_mad_resp )\r
1444 {\r
1445         ib_api_status_t                 status;\r
1446 \r
1447         AL_ENTER( AL_DBG_SMI );\r
1448 \r
1449         CL_ASSERT( p_spl_qp_svc );\r
1450         CL_ASSERT( p_mad_wr );\r
1451         CL_ASSERT( p_mad_resp );\r
1452 \r
1453         /* Construct the receive MAD element. */\r
1454         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1455         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1456         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1457         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1458         {\r
1459                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1460                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1461         }\r
1462 \r
1463         /*\r
1464          * Hand the receive MAD element to the dispatcher before completing\r
1465          * the send.  This guarantees that the send request cannot time out.\r
1466          */\r
1467         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1468 \r
1469         /* Forward the send work completion to the dispatcher. */\r
1470         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1471 \r
1472         AL_EXIT( AL_DBG_SMI );\r
1473         return status;\r
1474 }\r
1475 \r
1476 \r
1477 static ib_api_status_t\r
1478 loopback_mad(\r
1479         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1480         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1481 {\r
1482         ib_mad_t                                *p_mad;\r
1483         ib_mad_element_t                *p_mad_resp;\r
1484         ib_api_status_t                 status;\r
1485 \r
1486         AL_ENTER( AL_DBG_SMI );\r
1487 \r
1488         CL_ASSERT( p_spl_qp_svc );\r
1489         CL_ASSERT( p_mad_wr );\r
1490 \r
1491         /* Get a MAD element from the pool for the response. */\r
1492         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1493         if( status == IB_SUCCESS )\r
1494         {\r
1495                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1496                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1497 \r
1498                 /* Simulate a send/receive between local managers. */\r
1499                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1500 \r
1501                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1502         }\r
1503 \r
1504         AL_EXIT( AL_DBG_SMI );\r
1505         return status;\r
1506 }\r
1507 \r
1508 \r
1509 static ib_api_status_t\r
1510 process_node_info(\r
1511         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1512         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1513 {\r
1514         ib_mad_t                                *p_mad;\r
1515         ib_mad_element_t                *p_mad_resp;\r
1516         ib_smp_t                                *p_smp;\r
1517         ib_node_info_t                  *p_node_info;\r
1518         ib_ca_attr_t                    *p_ca_attr;\r
1519         ib_port_attr_t                  *p_port_attr;\r
1520         ib_api_status_t                 status;\r
1521 \r
1522         AL_ENTER( AL_DBG_SMI );\r
1523 \r
1524         CL_ASSERT( p_spl_qp_svc );\r
1525         CL_ASSERT( p_mad_wr );\r
1526 \r
1527         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1528         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1529         if( p_mad->method != IB_MAD_METHOD_GET )\r
1530         {\r
1531                 /* Node description is a GET-only attribute. */\r
1532                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1533                         IB_WCS_LOCAL_OP_ERR );\r
1534                 AL_EXIT( AL_DBG_SMI );\r
1535                 return IB_INVALID_SETTING;\r
1536         }\r
1537 \r
1538         /* Get a MAD element from the pool for the response. */\r
1539         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1540         if( status == IB_SUCCESS )\r
1541         {\r
1542                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1543                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1544                 p_smp->method |= IB_MAD_METHOD_RESP_MASK;\r
1545                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1546                         p_smp->status = IB_SMP_DIRECTION;\r
1547                 else\r
1548                         p_smp->status = 0;\r
1549 \r
1550                 p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1551 \r
1552                 /*\r
1553                  * Fill in the node info, protecting against the\r
1554                  * attributes being changed by PnP.\r
1555                  */\r
1556                 cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
1557 \r
1558                 p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;\r
1559                 p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];\r
1560 \r
1561                 p_node_info->base_version = 1;\r
1562                 p_node_info->class_version = 1;\r
1563                 p_node_info->node_type = IB_NODE_TYPE_CA;\r
1564                 p_node_info->num_ports = p_ca_attr->num_ports;\r
1565                 /* TODO: Get some unique identifier for the system */\r
1566                 p_node_info->sys_guid = p_ca_attr->ca_guid;\r
1567                 p_node_info->node_guid = p_ca_attr->ca_guid;\r
1568                 p_node_info->port_guid = p_port_attr->port_guid;\r
1569                 p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );\r
1570                 p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );\r
1571                 p_node_info->revision = cl_hton32( p_ca_attr->revision );\r
1572                 p_node_info->port_num_vendor_id =\r
1573                         cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;\r
1574                 cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
1575 \r
1576                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1577         }\r
1578 \r
1579         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1580         return status;\r
1581 }\r
1582 \r
1583 \r
1584 static ib_api_status_t\r
1585 process_node_desc(\r
1586         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1587         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1588 {\r
1589         ib_mad_t                                *p_mad;\r
1590         ib_mad_element_t                *p_mad_resp;\r
1591         ib_api_status_t                 status;\r
1592 \r
1593         AL_ENTER( AL_DBG_SMI );\r
1594 \r
1595         CL_ASSERT( p_spl_qp_svc );\r
1596         CL_ASSERT( p_mad_wr );\r
1597 \r
1598         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1599         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1600         if( p_mad->method != IB_MAD_METHOD_GET )\r
1601         {\r
1602                 /* Node info is a GET-only attribute. */\r
1603                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1604                         IB_WCS_LOCAL_OP_ERR );\r
1605                 AL_EXIT( AL_DBG_SMI );\r
1606                 return IB_INVALID_SETTING;\r
1607         }\r
1608 \r
1609         /* Get a MAD element from the pool for the response. */\r
1610         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1611         if( status == IB_SUCCESS )\r
1612         {\r
1613                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1614                 p_mad_resp->p_mad_buf->method |= IB_MAD_METHOD_RESP_MASK;\r
1615                 if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1616                         p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;\r
1617                 else\r
1618                         p_mad_resp->p_mad_buf->status = 0;\r
1619                 /* Set the node description to the machine name. */\r
1620                 cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, \r
1621                         node_desc, sizeof(node_desc) );\r
1622 \r
1623                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1624         }\r
1625 \r
1626         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1627         return status;\r
1628 }\r
1629 \r
1630 \r
1631 /*\r
1632  * Process subnet administration MADs using cached data if possible.\r
1633  */\r
1634 static ib_api_status_t\r
1635 process_subn_mad(\r
1636         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1637         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1638 {\r
1639         ib_api_status_t         status;\r
1640         ib_smp_t                        *p_smp;\r
1641 \r
1642         AL_ENTER( AL_DBG_SMI );\r
1643 \r
1644         CL_ASSERT( p_spl_qp_svc );\r
1645         CL_ASSERT( p_mad_wr );\r
1646 \r
1647         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
1648 \r
1649         CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1650                 p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
1651 \r
1652         switch( p_smp->attr_id )\r
1653         {\r
1654         case IB_MAD_ATTR_NODE_INFO:\r
1655                 status = process_node_info( p_spl_qp_svc, p_mad_wr );\r
1656                 break;\r
1657 \r
1658         case IB_MAD_ATTR_NODE_DESC:\r
1659                 status = process_node_desc( p_spl_qp_svc, p_mad_wr );\r
1660                 break;\r
1661 \r
1662         default:\r
1663                 status = IB_NOT_DONE;\r
1664                 break;\r
1665         }\r
1666 \r
1667         AL_EXIT( AL_DBG_SMI );\r
1668         return status;\r
1669 }\r
1670 \r
1671 \r
/*
 * Process a local MAD send work request.  The MAD is addressed to this
 * node, so it is delivered to the CA interface via ib_local_mad()
 * rather than posted to the QP, and the reply is synthesized as a
 * simulated receive.  Returns IB_NOT_DONE for a successful GET so the
 * caller can skip the PnP poll that a SET requires.
 */
ib_api_status_t
fwd_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	al_mad_send_t*			p_mad_send;
	ib_mad_element_t*		p_mad_response;
	ib_mad_t*				p_mad_response_buf;
	ib_api_status_t			status = IB_SUCCESS;
	boolean_t				smp_is_set;

	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Remember whether this is a SET before the MAD is modified below. */
	smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

	/*
	 * Get a MAD element from the pool for the response.  p_mad_send is
	 * currently unused; it feeds the commented-out resp_expected check.
	 */
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
//*** Commented code to work-around ib_local_mad() requiring a response MAD
//*** as input.  Remove comments once the ib_local_mad() implementation allows
//*** for a NULL response MAD, when one is not expected.
//*** Note that an attempt to route an invalid response MAD in this case
//*** will fail harmlessly.
//***	if( p_mad_send->p_send_mad->resp_expected )
//***	{
		status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
		if( status != IB_SUCCESS )
		{
			/* get_resp_mad already completed the send in error. */
			CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
			return status;
		}
		p_mad_response_buf = p_mad_response->p_mad_buf;
//***	}
//***	else
//***	{
//***		p_mad_response_buf = NULL;
//***	}

	/* Adjust directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		CL_ASSERT( !ib_smp_is_response( p_smp ) );

		/*
		 * If this was a self addressed, directed route SMP, increment
		 * the hop pointer in the request before delivery as required
		 * by IBA.  Otherwise, adjustment for remote requests occurs
		 * during inbound processing.
		 */
		if( p_smp->hop_count == 0 )
			p_smp->hop_ptr++;
	}

	/* Forward the locally addressed MAD to the CA interface. */
	status = ib_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
		p_spl_qp_svc->port_num, p_mad, p_mad_response_buf );

	/* Reset directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		/*
		 * If this was a self addressed, directed route SMP, decrement
		 * the hop pointer in the response before delivery as required
		 * by IBA.  Otherwise, adjustment for remote responses occurs
		 * during outbound processing.
		 */
		if( p_smp->hop_count == 0 )
		{
			/* Adjust the request SMP. */
			p_smp->hop_ptr--;

			/*
			 * Adjust the response SMP.  Note that p_smp is repointed
			 * at the response buffer here, and stays so below.
			 */
			if( p_mad_response_buf )
			{
				p_smp = (ib_smp_t*)p_mad_response_buf;
				p_smp->hop_ptr--;
			}
		}
	}

	if( status != IB_SUCCESS )
	{
		/* The verb failed: release the response and fail the send. */
		if( p_mad_response )
			ib_put_mad( p_mad_response );

		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
		return status;
	}

	/* Check the completion status of this simulated send. */
	if( p_mad_response_buf )
	{
		/*
		 * The SMI uses PnP polling to refresh the base_lid and lmc.
		 * Polling takes time, so we update the values here to prevent
		 * the failure of LID routed MADs sent immediately following this
		 * assignment.  Check the response to see if the port info was set.
		 */
		if( smp_is_set )
		{
			ib_port_info_t*		p_port_info = NULL;

			switch( p_mad_response_buf->mgmt_class )
			{
			case IB_MCLASS_SUBN_DIR:
				/*
				 * NOTE(review): p_smp points at the response buffer only
				 * for self addressed SMPs (hop_count == 0, repointed
				 * above); otherwise it still references the request —
				 * confirm this check targets the intended MAD.
				 */
				if( ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
					( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) )
				{
					p_port_info =
						(ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );
				}
				break;

			case IB_MCLASS_SUBN_LID:
				/* LID routed MADs carry the payload right after the header. */
				if( ( p_mad_response_buf->attr_id == IB_MAD_ATTR_PORT_INFO ) &&
					( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) )
				{
					p_port_info =
						(ib_port_info_t*)( p_mad_response_buf + 1 );
				}
				break;

			default:
				break;
			}

			/* Cache the new base LID and LMC for immediate use. */
			if( p_port_info )
			{
				p_spl_qp_svc->base_lid = p_port_info->base_lid;
				p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
			}
		}
	}

	/* Deliver the response and complete the send request. */
	status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_response );

	/* If the SMP was a Get, no need to trigger a PnP poll. */
	if( status == IB_SUCCESS && !smp_is_set )
		status = IB_NOT_DONE;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
	return status;
}
1828 \r
1829 \r
1830 \r
/*
 * Asynchronous processing thread callback to send a local MAD.
 * Forwards the queued work request to fwd_local_mad(), triggers a PnP
 * poll if the MAD may have changed HCA attributes, then releases the
 * service for further MAD processing.
 */
void
send_local_mad_cb(
	IN				cl_async_proc_item_t*		p_item )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_item );
	p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );

	/* Process a local MAD send work request. */
	CL_ASSERT( p_spl_qp_svc->local_mad_wr );
	status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );

	/*
	 * A successfully processed local MAD could have changed something
	 * (e.g. the LID) on the HCA.  Scan for changes.  fwd_local_mad
	 * returns IB_NOT_DONE for GETs, which skips this poll.
	 */
	if( status == IB_SUCCESS )
		pnp_poll();

	/*
	 * Clear the local MAD pointer to allow processing of other MADs.
	 * This is done after polling for attribute changes to ensure that
	 * subsequent MADs pick up any changes performed by this one.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->local_mad_wr = NULL;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/* Continue processing any queued MADs on the QP. */
	special_qp_resume_sends( p_spl_qp_svc->h_qp );

	/* No longer in use by the asynchronous processing thread. */
	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
1874 \r
1875 \r
1876 \r
1877 /*\r
1878  * Special QP send completion callback.\r
1879  */\r
1880 void\r
1881 spl_qp_send_comp_cb(\r
1882         IN              const   ib_cq_handle_t                          h_cq,\r
1883         IN                              void*                                           cq_context )\r
1884 {\r
1885         spl_qp_svc_t*                   p_spl_qp_svc;\r
1886 \r
1887         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
1888 \r
1889         CL_ASSERT( cq_context );\r
1890         p_spl_qp_svc = cq_context;\r
1891 \r
1892 #if defined( CL_USE_MUTEX )\r
1893 \r
1894         /* Queue an asynchronous processing item to process sends. */\r
1895         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1896         if( !p_spl_qp_svc->send_async_queued )\r
1897         {\r
1898                 p_spl_qp_svc->send_async_queued = TRUE;\r
1899                 ref_al_obj( &p_spl_qp_svc->obj );\r
1900                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
1901         }\r
1902         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1903 \r
1904 #else\r
1905 \r
1906         /* Invoke the callback directly. */\r
1907         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
1908         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
1909 \r
1910         /* Continue processing any queued MADs on the QP. */\r
1911         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1912 \r
1913 #endif\r
1914 \r
1915         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1916 }\r
1917 \r
1918 \r
1919 \r
1920 #if defined( CL_USE_MUTEX )\r
1921 void\r
1922 spl_qp_send_async_cb(\r
1923         IN                              cl_async_proc_item_t*           p_item )\r
1924 {\r
1925         spl_qp_svc_t*                   p_spl_qp_svc;\r
1926         ib_api_status_t                 status;\r
1927 \r
1928         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
1929 \r
1930         CL_ASSERT( p_item );\r
1931         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );\r
1932 \r
1933         /* Reset asynchronous queue flag. */\r
1934         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1935         p_spl_qp_svc->send_async_queued = FALSE;\r
1936         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1937 \r
1938         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );\r
1939 \r
1940         /* Continue processing any queued MADs on the QP. */\r
1941         status = special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
1942         CL_ASSERT( status == IB_SUCCESS );\r
1943 \r
1944         deref_al_obj( &p_spl_qp_svc->obj );\r
1945 \r
1946         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1947 }\r
1948 #endif\r
1949 \r
1950 \r
1951 \r
1952 /*\r
1953  * Special QP receive completion callback.\r
1954  */\r
1955 void\r
1956 spl_qp_recv_comp_cb(\r
1957         IN              const   ib_cq_handle_t                          h_cq,\r
1958         IN                              void*                                           cq_context )\r
1959 {\r
1960         spl_qp_svc_t*                   p_spl_qp_svc;\r
1961 \r
1962         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
1963 \r
1964         CL_ASSERT( cq_context );\r
1965         p_spl_qp_svc = cq_context;\r
1966 \r
1967 #if defined( CL_USE_MUTEX )\r
1968 \r
1969         /* Queue an asynchronous processing item to process receives. */\r
1970         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
1971         if( !p_spl_qp_svc->recv_async_queued )\r
1972         {\r
1973                 p_spl_qp_svc->recv_async_queued = TRUE;\r
1974                 ref_al_obj( &p_spl_qp_svc->obj );\r
1975                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
1976         }\r
1977         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
1978 \r
1979 #else\r
1980 \r
1981         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
1982         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
1983 \r
1984 #endif\r
1985 \r
1986         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
1987 }\r
1988 \r
1989 \r
1990 \r
1991 #if defined( CL_USE_MUTEX )\r
1992 void\r
1993 spl_qp_recv_async_cb(\r
1994         IN                              cl_async_proc_item_t*           p_item )\r
1995 {\r
1996         spl_qp_svc_t*                   p_spl_qp_svc;\r
1997 \r
1998         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
1999 \r
2000         CL_ASSERT( p_item );\r
2001         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );\r
2002 \r
2003         /* Reset asynchronous queue flag. */\r
2004         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2005         p_spl_qp_svc->recv_async_queued = FALSE;\r
2006         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2007 \r
2008         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );\r
2009 \r
2010         deref_al_obj( &p_spl_qp_svc->obj );\r
2011 \r
2012         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2013 }\r
2014 #endif\r
2015 \r
2016 \r
2017 \r
/*
 * Special QP completion handler.  Drains send or receive completions
 * from the given CQ, dispatches them, and rearms the CQ.  Returns
 * immediately without processing if the service is not active.
 */
void
spl_qp_comp(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN		const	ib_cq_handle_t				h_cq,
	IN				ib_wc_type_t				wc_type )
{
	ib_wc_t					wc;
	ib_wc_t*				p_free_wc = &wc;
	ib_wc_t*				p_done_wc;
	al_mad_wr_t*			p_mad_wr;
	al_mad_element_t*		p_al_mad;
	ib_mad_element_t*		p_mad_element;
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( h_cq );

	/* Check the QP state and guard against error handling. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( p_spl_qp_svc->state != SPL_QP_ACTIVE )
	{
		/* Early out: the service is resetting or shutting down. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		return;
	}
	cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	wc.p_next = NULL;
	/*
	 * Process work completions.  The single stack wc is handed to
	 * ib_poll_cq as the free list and recycled each iteration, so at
	 * most one completion is returned per poll and the wc.* reads
	 * below refer to the just-polled completion.
	 */
	while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
	{
		/* Process completions one at a time. */
		CL_ASSERT( p_done_wc );

		/* Flushed completions are handled elsewhere. */
		if( wc.status == IB_WCS_WR_FLUSHED_ERR )
		{
			p_free_wc = &wc;
			continue;
		}

		/*
		 * Process the work completion.  Per IBA specification, the
		 * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.
		 * Use the wc_type parameter.
		 */
		switch( wc_type )
		{
		case IB_WC_SEND:
			/* Get a pointer to the MAD work request. */
			p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);

			/* Remove the MAD work request from the service tracking queue. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
			cl_qlist_remove_item( &p_spl_qp_svc->send_queue,
				&p_mad_wr->list_item );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Reset directed route SMPs as required by IBA. */
			p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );
			if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			{
				if( ib_smp_is_response( p_smp ) )
					p_smp->hop_ptr++;
				else
					p_smp->hop_ptr--;
			}

			/* Report the send completion to the dispatcher. */
			mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );
			break;

		case IB_WC_RECV:

			/* Initialize pointers to the MAD element. */
			p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
			p_mad_element = &p_al_mad->element;

			/* Remove the AL MAD element from the service tracking list. */
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

			cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,
				&p_al_mad->list_item );

			/* Replenish the receive buffer. */
			spl_qp_svc_post_recvs( p_spl_qp_svc );
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );

			/* Construct the MAD element from the receive work completion. */
			build_mad_recv( p_mad_element, &wc );

			/* Process the received MAD. */
			status = process_mad_recv( p_spl_qp_svc, p_mad_element );

			/* Discard this MAD on error. */
			if( status != IB_SUCCESS )
			{
				status = ib_put_mad( p_mad_element );
				CL_ASSERT( status == IB_SUCCESS );
			}
			break;

		default:
			CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );
			break;
		}

		if( wc.status != IB_WCS_SUCCESS )
		{
			CL_TRACE( CL_DBG_ERROR, g_al_dbg_lvl,
				("special QP completion error: %s!\n",
				ib_get_wc_status_str( wc.status )) );

			/* Reset the special QP service and return. */
			spl_qp_svc_reset( p_spl_qp_svc );
		}
		/* Return the work completion to the poll loop's free list. */
		p_free_wc = &wc;
	}

	/* Rearm the CQ. */
	status = ib_rearm_cq( h_cq, FALSE );
	CL_ASSERT( status == IB_SUCCESS );

	cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );
	CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );
}
2150 \r
2151 \r
2152 \r
2153 /*\r
2154  * Process a received MAD.\r
2155  */\r
2156 ib_api_status_t\r
2157 process_mad_recv(\r
2158         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2159         IN                              ib_mad_element_t*                       p_mad_element )\r
2160 {\r
2161         ib_smp_t*                               p_smp;\r
2162         mad_route_t                             route;\r
2163         ib_api_status_t                 status;\r
2164 \r
2165         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2166 \r
2167         CL_ASSERT( p_spl_qp_svc );\r
2168         CL_ASSERT( p_mad_element );\r
2169 \r
2170         /*\r
2171          * If the CA has a HW agent then this MAD should have been\r
2172          * consumed below verbs.  The fact that it was received here\r
2173          * indicates that it should be forwarded to the dispatcher\r
2174          * for delivery to a class manager.  Otherwise, determine how\r
2175          * the MAD should be routed.\r
2176          */\r
2177         route = ROUTE_DISPATCHER;\r
2178         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
2179         {\r
2180                 /*\r
2181                  * SMP and GMP processing is branched here to handle overlaps\r
2182                  * between class methods and attributes.\r
2183                  */\r
2184                 switch( p_mad_element->p_mad_buf->mgmt_class )\r
2185                 {\r
2186                 case IB_MCLASS_SUBN_DIR:\r
2187                         /* Perform special checks on directed route SMPs. */\r
2188                         p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;\r
2189 \r
2190                         if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
2191                                 ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
2192                         {\r
2193                                 route = ROUTE_DISCARD;\r
2194                         }\r
2195                         else if( ib_smp_is_response( p_smp ) )\r
2196                         {\r
2197                                 /*\r
2198                                  * This node is the destination of the response.  Discard\r
2199                                  * the source LID or hop pointer are incorrect.\r
2200                                  */\r
2201                                 if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
2202                                 {\r
2203                                         if( p_smp->hop_ptr == 1 )\r
2204                                         {\r
2205                                                 p_smp->hop_ptr--;               /* Adjust ptr per IBA spec. */\r
2206                                         }\r
2207                                         else\r
2208                                         {\r
2209                                                 route = ROUTE_DISCARD;\r
2210                                         }\r
2211                                 }\r
2212                                 else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||\r
2213                                                  ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +\r
2214                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2215                                 {\r
2216                                                 route = ROUTE_DISCARD;\r
2217                                 }\r
2218                         }\r
2219                         else\r
2220                         {\r
2221                                 /*\r
2222                                  * This node is the destination of the request.  Discard\r
2223                                  * the destination LID or hop pointer are incorrect.\r
2224                                  */\r
2225                                 if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
2226                                 {\r
2227                                         if( p_smp->hop_count == p_smp->hop_ptr )\r
2228                                         {\r
2229                                                 p_smp->return_path[ p_smp->hop_ptr++ ] =\r
2230                                                         p_spl_qp_svc->port_num; /* Set path per IBA spec. */\r
2231                                         }\r
2232                                         else\r
2233                                         {\r
2234                                                 route = ROUTE_DISCARD;\r
2235                                         }\r
2236                                 }\r
2237                                 else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||\r
2238                                                  ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +\r
2239                                                         ( 1 << p_spl_qp_svc->lmc ) ) )\r
2240                                 {\r
2241                                                 route = ROUTE_DISCARD;\r
2242                                 }\r
2243                         }\r
2244 \r
2245                         if( route == ROUTE_DISCARD ) break;\r
2246                         /* else fall through next case */\r
2247 \r
2248                 case IB_MCLASS_SUBN_LID:\r
2249                         route = route_recv_smp( p_mad_element );\r
2250                         break;\r
2251 \r
2252                 case IB_MCLASS_PERF:\r
2253                         route = ROUTE_LOCAL;\r
2254                         break;\r
2255 \r
2256                 case IB_MCLASS_BM:\r
2257                         route = route_recv_gmp( p_mad_element );\r
2258                         break;\r
2259 \r
2260                 default:\r
2261                         /* Route vendor specific MADs to the HCA provider. */\r
2262                         if( ib_class_is_vendor_specific(\r
2263                                 p_mad_element->p_mad_buf->mgmt_class ) )\r
2264                         {\r
2265                                 route = route_recv_gmp( p_mad_element );\r
2266                         }\r
2267                         break;\r
2268                 }\r
2269         }\r
2270 \r
2271         /* Route the MAD. */\r
2272         if ( is_discard( route ) )\r
2273                 status = IB_ERROR;\r
2274         else if( is_dispatcher( route ) )\r
2275                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );\r
2276         else if( is_remote( route ) )\r
2277                 status = forward_sm_trap( p_spl_qp_svc, p_mad_element );\r
2278         else\r
2279                 status = recv_local_mad( p_spl_qp_svc, p_mad_element );\r
2280 \r
2281         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2282         return status;\r
2283 }\r
2284 \r
2285 \r
2286 \r
2287 /*\r
2288  * Route a received SMP.\r
2289  */\r
2290 mad_route_t\r
2291 route_recv_smp(\r
2292         IN                              ib_mad_element_t*                       p_mad_element )\r
2293 {\r
2294         mad_route_t                             route;\r
2295 \r
2296         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2297 \r
2298         CL_ASSERT( p_mad_element );\r
2299 \r
2300         /* Process the received SMP. */\r
2301         switch( p_mad_element->p_mad_buf->method )\r
2302         {\r
2303         case IB_MAD_METHOD_GET:\r
2304         case IB_MAD_METHOD_SET:\r
2305                 route = route_recv_smp_attr( p_mad_element );\r
2306                 break;\r
2307 \r
2308         case IB_MAD_METHOD_TRAP:\r
2309                 /*\r
2310                  * Special check to route locally generated traps to the remote SM.\r
2311                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2312                  * IB_RECV_OPT_FORWARD flag.\r
2313                  *\r
2314                  * Note that because forwarded traps use AL MAD services, the upper\r
2315                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2316                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2317                  * TID.\r
2318                  */\r
2319                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2320                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2321                 break;\r
2322 \r
2323         case IB_MAD_METHOD_TRAP_REPRESS:\r
2324                 /*\r
2325                  * Note that because forwarded traps use AL MAD services, the upper\r
2326                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2327                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2328                  * TID.\r
2329                  */\r
2330                 route = ROUTE_LOCAL;\r
2331                 break;\r
2332 \r
2333         default:\r
2334                 route = ROUTE_DISPATCHER;\r
2335                 break;\r
2336         }\r
2337 \r
2338         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2339         return route;\r
2340 }\r
2341 \r
2342 \r
2343 \r
2344 /*\r
2345  * Route received SMP attributes.\r
2346  */\r
2347 mad_route_t\r
2348 route_recv_smp_attr(\r
2349         IN                              ib_mad_element_t*                       p_mad_element )\r
2350 {\r
2351         mad_route_t                             route;\r
2352 \r
2353         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2354 \r
2355         CL_ASSERT( p_mad_element );\r
2356 \r
2357         /* Process the received SMP attributes. */\r
2358         switch( p_mad_element->p_mad_buf->attr_id )\r
2359         {\r
2360         case IB_MAD_ATTR_NODE_DESC:\r
2361         case IB_MAD_ATTR_NODE_INFO:\r
2362         case IB_MAD_ATTR_GUID_INFO:\r
2363         case IB_MAD_ATTR_PORT_INFO:\r
2364         case IB_MAD_ATTR_P_KEY_TABLE:\r
2365         case IB_MAD_ATTR_SLVL_TABLE:\r
2366         case IB_MAD_ATTR_VL_ARBITRATION:\r
2367         case IB_MAD_ATTR_VENDOR_DIAG:\r
2368         case IB_MAD_ATTR_LED_INFO:\r
2369                 route = ROUTE_LOCAL;\r
2370                 break;\r
2371 \r
2372         default:\r
2373                 route = ROUTE_DISPATCHER;\r
2374                 break;\r
2375         }\r
2376 \r
2377         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2378         return route;\r
2379 }\r
2380 \r
2381 \r
2382 /*\r
2383  * Route a received GMP.\r
2384  */\r
2385 mad_route_t\r
2386 route_recv_gmp(\r
2387         IN                              ib_mad_element_t*                       p_mad_element )\r
2388 {\r
2389         mad_route_t                             route;\r
2390 \r
2391         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2392 \r
2393         CL_ASSERT( p_mad_element );\r
2394 \r
2395         /* Process the received GMP. */\r
2396         switch( p_mad_element->p_mad_buf->method )\r
2397         {\r
2398         case IB_MAD_METHOD_GET:\r
2399         case IB_MAD_METHOD_SET:\r
2400                 /* Route vendor specific MADs to the HCA provider. */\r
2401                 if( ib_class_is_vendor_specific(\r
2402                         p_mad_element->p_mad_buf->mgmt_class ) )\r
2403                 {\r
2404                         route = ROUTE_LOCAL;\r
2405                 }\r
2406                 else\r
2407                 {\r
2408                         route = route_recv_gmp_attr( p_mad_element );\r
2409                 }\r
2410                 break;\r
2411 \r
2412         default:\r
2413                 route = ROUTE_DISPATCHER;\r
2414                 break;\r
2415         }\r
2416 \r
2417         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2418         return route;\r
2419 }\r
2420 \r
2421 \r
2422 \r
2423 /*\r
2424  * Route received GMP attributes.\r
2425  */\r
2426 mad_route_t\r
2427 route_recv_gmp_attr(\r
2428         IN                              ib_mad_element_t*                       p_mad_element )\r
2429 {\r
2430         mad_route_t                             route;\r
2431 \r
2432         CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );\r
2433 \r
2434         CL_ASSERT( p_mad_element );\r
2435 \r
2436         /* Process the received GMP attributes. */\r
2437         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
2438                 route = ROUTE_LOCAL;\r
2439         else\r
2440                 route = ROUTE_DISPATCHER;\r
2441 \r
2442         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2443         return route;\r
2444 }\r
2445 \r
2446 \r
2447 \r
2448 /*\r
2449  * Forward a locally generated Subnet Management trap.\r
2450  */\r
2451 ib_api_status_t\r
2452 forward_sm_trap(\r
2453         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2454         IN                              ib_mad_element_t*                       p_mad_element )\r
2455 {\r
2456         ib_av_attr_t                    av_attr;\r
2457         ib_api_status_t                 status;\r
2458 \r
2459         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2460 \r
2461         CL_ASSERT( p_spl_qp_svc );\r
2462         CL_ASSERT( p_mad_element );\r
2463 \r
2464         /* Check the SMP class. */\r
2465         if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )\r
2466         {\r
2467                 /*\r
2468                  * Per IBA Specification Release 1.1 Section 14.2.2.1,\r
2469                  * "C14-5: Only a SM shall originate a directed route SMP."\r
2470                  * Therefore all traps should be LID routed; drop this one.\r
2471                  */\r
2472                 CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2473                 return IB_ERROR;\r
2474         }\r
2475 \r
2476         /* Create an address vector for the SM. */\r
2477         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2478         av_attr.port_num = p_spl_qp_svc->port_num;\r
2479         av_attr.sl = p_mad_element->remote_sl;\r
2480         av_attr.dlid = p_mad_element->remote_lid;\r
2481         if( p_mad_element->grh_valid )\r
2482         {\r
2483                 cl_memcpy( &av_attr.grh, p_mad_element->p_grh, sizeof( ib_grh_t ) );\r
2484                 av_attr.grh.src_gid      = p_mad_element->p_grh->dest_gid;\r
2485                 av_attr.grh.dest_gid = p_mad_element->p_grh->src_gid;\r
2486                 av_attr.grh_valid = TRUE;\r
2487         }\r
2488 \r
2489         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2490                 &av_attr, &p_mad_element->h_av );\r
2491 \r
2492         if( status != IB_SUCCESS )\r
2493         {\r
2494                 CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2495                 return status;\r
2496         }\r
2497 \r
2498         /* Complete the initialization of the MAD element. */\r
2499         p_mad_element->p_next = NULL;\r
2500         p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;\r
2501         p_mad_element->resp_expected = FALSE;\r
2502 \r
2503         /* Clear context1 for proper send completion callback processing. */\r
2504         p_mad_element->context1 = NULL;\r
2505 \r
2506         /*\r
2507          * Forward the trap.  Note that because forwarded traps use AL MAD\r
2508          * services, the upper 32-bits of the TID are reserved by the access\r
2509          * layer.  When matching a Trap Repress MAD, the SMA must only use\r
2510          * the lower 32-bits of the TID.\r
2511          */\r
2512         status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );\r
2513 \r
2514         if( status != IB_SUCCESS )\r
2515                 ib_destroy_av( p_mad_element->h_av );\r
2516 \r
2517         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2518         return status;\r
2519 }\r
2520 \r
2521 \r
2522 /*\r
2523  * Process a locally routed MAD received from the special QP.\r
2524  */\r
2525 ib_api_status_t\r
2526 recv_local_mad(\r
2527         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2528         IN                              ib_mad_element_t*                       p_mad_request )\r
2529 {\r
2530         ib_mad_t*                               p_mad_hdr;\r
2531         ib_api_status_t                 status;\r
2532 \r
2533         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2534 \r
2535         CL_ASSERT( p_spl_qp_svc );\r
2536         CL_ASSERT( p_mad_request );\r
2537 \r
2538         /* Initialize the MAD element. */\r
2539         p_mad_hdr = ib_get_mad_buf( p_mad_request );\r
2540         p_mad_request->context1 = p_mad_request;\r
2541 \r
2542         /* Save the TID. */\r
2543         p_mad_request->context2 =\r
2544                 (void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );\r
2545 /*\r
2546  * Disable warning about passing unaligned 64-bit value.\r
2547  * The value is always aligned given how buffers are allocated\r
2548  * and given the layout of a MAD.\r
2549  */\r
2550 #pragma warning( push, 3 )\r
2551         al_set_al_tid( &p_mad_hdr->trans_id, 0 );\r
2552 #pragma warning( pop )\r
2553 \r
2554         /*\r
2555          * We need to get a response from the local HCA to this MAD only if this\r
2556          * MAD is not itself a response.\r
2557          */\r
2558         p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||\r
2559                 ( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );\r
2560         p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;\r
2561         p_mad_request->send_opt = IB_SEND_OPT_LOCAL;\r
2562 \r
2563         /* Send the locally addressed MAD request to the CA for processing. */\r
2564         status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );\r
2565 \r
2566         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2567         return status;\r
2568 }\r
2569 \r
2570 \r
2571 \r
2572 /*\r
2573  * Special QP alias send completion callback.\r
2574  */\r
2575 void\r
2576 spl_qp_alias_send_cb(\r
2577         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2578         IN                              void*                                           mad_svc_context,\r
2579         IN                              ib_mad_element_t*                       p_mad_element )\r
2580 {\r
2581         ib_api_status_t                 status;\r
2582 \r
2583         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2584 \r
2585         UNUSED_PARAM( h_mad_svc );\r
2586         UNUSED_PARAM( mad_svc_context );\r
2587         CL_ASSERT( p_mad_element );\r
2588 \r
2589         if( p_mad_element->h_av )\r
2590         {\r
2591                 status = ib_destroy_av( p_mad_element->h_av );\r
2592                 CL_ASSERT( status == IB_SUCCESS );\r
2593         }\r
2594 \r
2595         status = ib_put_mad( p_mad_element );\r
2596         CL_ASSERT( status == IB_SUCCESS );\r
2597 \r
2598         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2599 }\r
2600 \r
2601 \r
2602 \r
2603 /*\r
2604  * Special QP alias receive completion callback.\r
2605  */\r
2606 void\r
2607 spl_qp_alias_recv_cb(\r
2608         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2609         IN                              void*                                           mad_svc_context,\r
2610         IN                              ib_mad_element_t*                       p_mad_response )\r
2611 {\r
2612         spl_qp_svc_t*                   p_spl_qp_svc;\r
2613         ib_mad_element_t*               p_mad_request;\r
2614         ib_mad_t*                               p_mad_hdr;\r
2615         ib_av_attr_t                    av_attr;\r
2616         ib_api_status_t                 status;\r
2617 \r
2618         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2619 \r
2620         CL_ASSERT( mad_svc_context );\r
2621         CL_ASSERT( p_mad_response );\r
2622         CL_ASSERT( p_mad_response->send_context1 );\r
2623 \r
2624         /* Initialize pointers. */\r
2625         p_spl_qp_svc = mad_svc_context;\r
2626         p_mad_request = p_mad_response->send_context1;\r
2627         p_mad_hdr = ib_get_mad_buf( p_mad_response );\r
2628 \r
2629         /* Restore the TID, so it will match on the remote side. */\r
2630 #pragma warning( push, 3 )\r
2631         al_set_al_tid( &p_mad_hdr->trans_id,\r
2632                 (uint32_t)(uintn_t)p_mad_response->send_context2 );\r
2633 #pragma warning( pop )\r
2634 \r
2635         /* Set the remote QP. */\r
2636         p_mad_response->remote_qp       = p_mad_request->remote_qp;\r
2637         p_mad_response->remote_qkey = p_mad_request->remote_qkey;\r
2638 \r
2639         /* Prepare to create an address vector. */\r
2640         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
2641         av_attr.port_num        = p_spl_qp_svc->port_num;\r
2642         av_attr.sl                      = p_mad_request->remote_sl;\r
2643         av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
2644         av_attr.path_bits       = p_mad_request->path_bits;\r
2645         if( p_mad_request->grh_valid )\r
2646         {\r
2647                 cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );\r
2648                 av_attr.grh.src_gid      = p_mad_request->p_grh->dest_gid;\r
2649                 av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;\r
2650                 av_attr.grh_valid = TRUE;\r
2651         }\r
2652         if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&\r
2653                 ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )\r
2654                 av_attr.dlid = IB_LID_PERMISSIVE;\r
2655         else\r
2656                 av_attr.dlid = p_mad_request->remote_lid;\r
2657 \r
2658         /* Create an address vector. */\r
2659         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
2660                 &av_attr, &p_mad_response->h_av );\r
2661 \r
2662         if( status != IB_SUCCESS )\r
2663         {\r
2664                 ib_put_mad( p_mad_response );\r
2665 \r
2666                 CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2667                 return;\r
2668         }\r
2669 \r
2670         /* Send the response. */\r
2671         status = ib_send_mad( h_mad_svc, p_mad_response, NULL );\r
2672 \r
2673         if( status != IB_SUCCESS )\r
2674         {\r
2675                 ib_destroy_av( p_mad_response->h_av );\r
2676                 ib_put_mad( p_mad_response );\r
2677         }\r
2678 \r
2679         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2680 }\r
2681 \r
2682 \r
2683 \r
2684 /*\r
2685  * Post receive buffers to a special QP.\r
2686  */\r
2687 static ib_api_status_t\r
2688 spl_qp_svc_post_recvs(\r
2689         IN                              spl_qp_svc_t*   const           p_spl_qp_svc )\r
2690 {\r
2691         ib_mad_element_t*               p_mad_element;\r
2692         al_mad_element_t*               p_al_element;\r
2693         ib_recv_wr_t                    recv_wr;\r
2694         ib_api_status_t                 status = IB_SUCCESS;\r
2695 \r
2696         /* Attempt to post receive buffers up to the max_qp_depth limit. */\r
2697         while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <\r
2698                 (int32_t)p_spl_qp_svc->max_qp_depth )\r
2699         {\r
2700                 /* Get a MAD element from the pool. */\r
2701                 status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,\r
2702                         MAD_BLOCK_SIZE, &p_mad_element );\r
2703 \r
2704                 if( status != IB_SUCCESS ) break;\r
2705 \r
2706                 p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,\r
2707                         element );\r
2708 \r
2709                 /* Build the receive work request. */\r
2710                 recv_wr.p_next   = NULL;\r
2711                 recv_wr.wr_id    = (uintn_t)p_al_element;\r
2712                 recv_wr.num_ds = 1;\r
2713                 recv_wr.ds_array = &p_al_element->grh_ds;\r
2714 \r
2715                 /* Queue the receive on the service tracking list. */\r
2716                 cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,\r
2717                         &p_al_element->list_item );\r
2718 \r
2719                 /* Post the receive. */\r
2720                 status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );\r
2721 \r
2722                 if( status != IB_SUCCESS )\r
2723                 {\r
2724                         AL_TRACE( AL_DBG_ERROR,\r
2725                                 ("Failed to post receive %p\n", p_al_element) );\r
2726                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2727                                 &p_al_element->list_item );\r
2728 \r
2729                         ib_put_mad( p_mad_element );\r
2730                         break;\r
2731                 }\r
2732         }\r
2733 \r
2734         return status;\r
2735 }\r
2736 \r
2737 \r
2738 \r
2739 /*\r
2740  * Special QP service asynchronous event callback.\r
2741  */\r
2742 void\r
2743 spl_qp_svc_event_cb(\r
2744         IN                              ib_async_event_rec_t            *p_event_rec )\r
2745 {\r
2746         spl_qp_svc_t*                   p_spl_qp_svc;\r
2747 \r
2748         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2749 \r
2750         CL_ASSERT( p_event_rec );\r
2751         CL_ASSERT( p_event_rec->context );\r
2752 \r
2753         if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
2754         {\r
2755                 CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
2756                 return;\r
2757         }\r
2758 \r
2759         p_spl_qp_svc = p_event_rec->context;\r
2760 \r
2761         spl_qp_svc_reset( p_spl_qp_svc );\r
2762 \r
2763         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2764 }\r
2765 \r
2766 \r
2767 \r
2768 /*\r
2769  * Special QP service reset.\r
2770  */\r
2771 void\r
2772 spl_qp_svc_reset(\r
2773         IN                              spl_qp_svc_t*                           p_spl_qp_svc )\r
2774 {\r
2775         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2776 \r
2777         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2778         {\r
2779                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2780                 return;\r
2781         }\r
2782 \r
2783         /* Change the special QP service to the error state. */\r
2784         p_spl_qp_svc->state = SPL_QP_ERROR;\r
2785 \r
2786         /* Flag the service as in use by the asynchronous processing thread. */\r
2787         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2788 \r
2789         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2790 \r
2791         /* Queue an asynchronous processing item to reset the special QP. */\r
2792         cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );\r
2793 }\r
2794 \r
2795 \r
2796 \r
2797 /*\r
2798  * Asynchronous processing thread callback to reset the special QP service.\r
2799  */\r
2800 void\r
2801 spl_qp_svc_reset_cb(\r
2802         IN                              cl_async_proc_item_t*           p_item )\r
2803 {\r
2804         spl_qp_svc_t*                   p_spl_qp_svc;\r
2805         cl_list_item_t*                 p_list_item;\r
2806         ib_wc_t                                 wc;\r
2807         ib_wc_t*                                p_free_wc;\r
2808         ib_wc_t*                                p_done_wc;\r
2809         al_mad_wr_t*                    p_mad_wr;\r
2810         al_mad_element_t*               p_al_mad;\r
2811         ib_qp_mod_t                             qp_mod;\r
2812         ib_api_status_t                 status;\r
2813 \r
2814         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2815 \r
2816         CL_ASSERT( p_item );\r
2817         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );\r
2818 \r
2819         /* Wait here until the special QP service is only in use by this thread. */\r
2820         while( p_spl_qp_svc->in_use_cnt != 1 )\r
2821         {\r
2822                 cl_thread_suspend( 0 );\r
2823         }\r
2824 \r
2825         /* Change the QP to the RESET state. */\r
2826         cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );\r
2827         qp_mod.req_state = IB_QPS_RESET;\r
2828 \r
2829         status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );\r
2830         CL_ASSERT( status == IB_SUCCESS );\r
2831 \r
2832         /* Return receive MAD elements to the pool. */\r
2833         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
2834                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
2835                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
2836         {\r
2837                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
2838 \r
2839                 status = ib_put_mad( &p_al_mad->element );\r
2840                 CL_ASSERT( status == IB_SUCCESS );\r
2841         }\r
2842 \r
2843         /* Re-initialize the QP. */\r
2844         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
2845         CL_ASSERT( status == IB_SUCCESS );\r
2846 \r
2847         /* Poll to remove any remaining send completions from the CQ. */\r
2848         do\r
2849         {\r
2850                 cl_memclr( &wc, sizeof( ib_wc_t ) );\r
2851                 p_free_wc = &wc;\r
2852                 status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );\r
2853 \r
2854         } while( status == IB_SUCCESS );\r
2855 \r
2856         /* Post receive buffers. */\r
2857         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2858 \r
2859         /*\r
2860          * Re-queue any outstanding MAD send operations.\r
2861          * Work from tail to head to maintain the request order.\r
2862          */\r
2863         for( p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue );\r
2864                  p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );\r
2865                  p_list_item = cl_qlist_remove_tail( &p_spl_qp_svc->send_queue ) )\r
2866         {\r
2867                 p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
2868                 special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );\r
2869         }\r
2870 \r
2871         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2872         if( p_spl_qp_svc->state == SPL_QP_ERROR )\r
2873         {\r
2874                 /* The QP is ready.  Change the state. */\r
2875                 p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
2876                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2877 \r
2878                 /* Re-arm the CQs. */\r
2879                 status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );\r
2880                 CL_ASSERT( status == IB_SUCCESS );\r
2881                 status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );\r
2882                 CL_ASSERT( status == IB_SUCCESS );\r
2883 \r
2884                 /* Resume send processing. */\r
2885                 special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2886         }\r
2887         else\r
2888         {\r
2889                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2890         }\r
2891 \r
2892         /* No longer in use by the asynchronous processing thread. */\r
2893         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2894 \r
2895         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2896 }\r
2897 \r
2898 \r
2899 \r
2900 /*\r
2901  * Special QP alias asynchronous event callback.\r
2902  */\r
2903 void\r
2904 spl_qp_alias_event_cb(\r
2905         IN                              ib_async_event_rec_t            *p_event_rec )\r
2906 {\r
2907         UNUSED_PARAM( p_event_rec );\r
2908 }\r
2909 \r
2910 \r
2911 \r
2912 /*\r
2913  * Acquire the SMI dispatcher for the given port.\r
2914  */\r
2915 ib_api_status_t\r
2916 acquire_smi_disp(\r
2917         IN              const   ib_net64_t                                      port_guid,\r
2918                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2919 {\r
2920         CL_ASSERT( gp_spl_qp_mgr );\r
2921         return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );\r
2922 }\r
2923 \r
2924 \r
2925 \r
2926 /*\r
2927  * Acquire the GSI dispatcher for the given port.\r
2928  */\r
2929 ib_api_status_t\r
2930 acquire_gsi_disp(\r
2931         IN              const   ib_net64_t                                      port_guid,\r
2932                 OUT                     al_mad_disp_handle_t* const     ph_mad_disp )\r
2933 {\r
2934         CL_ASSERT( gp_spl_qp_mgr );\r
2935         return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );\r
2936 }\r
2937 \r
2938 \r
2939 \r
/*
 * Acquire the service dispatcher for the given port.
 *
 * Looks up the special QP service registered under port_guid in
 * p_svc_map (the manager's SMI or GSI map) and, on success, stores the
 * service's MAD dispatcher handle in *ph_mad_disp with a reference
 * taken on the client's behalf.
 *
 * Returns IB_SUCCESS on success, or IB_INVALID_GUID if no agent is
 * registered for the port.
 */
ib_api_status_t
acquire_svc_disp(
	IN		const	cl_qmap_t* const			p_svc_map,
	IN		const	ib_net64_t					port_guid,
		OUT			al_mad_disp_handle_t		*ph_mad_disp )
{
	cl_map_item_t*			p_svc_item;
	spl_qp_svc_t*			p_spl_qp_svc;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_svc_map );
	CL_ASSERT( gp_spl_qp_mgr );

	/* Search for the SMI or GSI service for the given port. */
	cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
	p_svc_item = cl_qmap_get( p_svc_map, port_guid );
	cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );
	if( p_svc_item == cl_qmap_end( p_svc_map ) )
	{
		/* The port does not have an active agent. */
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_GUID;
	}

	p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );

	/* Found a match.  Get MAD dispatcher handle. */
	*ph_mad_disp = p_spl_qp_svc->h_mad_disp;

	/*
	 * Reference the MAD dispatcher on behalf of the client.
	 * NOTE(review): the manager lock is dropped before this reference is
	 * taken; if a service can be removed from the map concurrently, this
	 * is a potential use-after-free window — confirm the destruction
	 * ordering guarantees for spl_qp_svc_t before relying on this.
	 */
	ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
2979 \r
2980 \r
2981 \r
2982 /*\r
2983  * Force a poll for CA attribute changes.\r
2984  */\r
2985 void\r
2986 force_smi_poll(\r
2987         void )\r
2988 {\r
2989         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2990 \r
2991         /*\r
2992          * Stop the poll timer.  Just invoke the timer callback directly to\r
2993          * save the thread context switching.\r
2994          */\r
2995         smi_poll_timer_cb( gp_spl_qp_mgr );\r
2996 \r
2997         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
2998 }\r
2999 \r
3000 \r
3001 \r
3002 /*\r
3003  * Poll for CA port attribute changes.\r
3004  */\r
3005 void\r
3006 smi_poll_timer_cb(\r
3007         IN                              void*                                           context )\r
3008 {\r
3009         cl_status_t                     cl_status;\r
3010 \r
3011         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
3012 \r
3013         CL_ASSERT( context );\r
3014         CL_ASSERT( gp_spl_qp_mgr == context );\r
3015         UNUSED_PARAM( context );\r
3016 \r
3017         /*\r
3018          * Scan for changes on the local HCAs.  Since the PnP manager has its\r
3019          * own thread for processing changes, we kick off that thread in parallel\r
3020          * reposting receive buffers to the SQP agents.\r
3021          */\r
3022         pnp_poll();\r
3023 \r
3024         /*\r
3025          * To handle the case where force_smi_poll is called at the same time\r
3026          * the timer expires, check if the asynchronous processing item is in\r
3027          * use.  If it is already in use, it means that we're about to poll\r
3028          * anyway, so just ignore this call.\r
3029          */\r
3030         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
3031 \r
3032         /* Perform port processing on the special QP agents. */\r
3033         cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,\r
3034                 gp_spl_qp_mgr );\r
3035 \r
3036         /* Determine if there are any special QP agents to poll. */\r
3037         if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )\r
3038         {\r
3039                 /* Restart the polling timer. */\r
3040                 cl_status =\r
3041                         cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
3042                 CL_ASSERT( cl_status == CL_SUCCESS );\r
3043         }\r
3044         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
3045 \r
3046         CL_EXIT( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
3047 }\r
3048 \r
3049 \r
3050 \r
3051 /*\r
3052  * Post receive buffers to a special QP.\r
3053  */\r
3054 void\r
3055 smi_post_recvs(\r
3056         IN                              cl_list_item_t* const           p_list_item,\r
3057         IN                              void*                                           context )\r
3058 {\r
3059         al_obj_t*                               p_obj;\r
3060         spl_qp_svc_t*                   p_spl_qp_svc;\r
3061 \r
3062         CL_ENTER( AL_DBG_SMI_CB, g_al_dbg_lvl );\r
3063 \r
3064         CL_ASSERT( p_list_item );\r
3065         UNUSED_PARAM( context );\r
3066 \r
3067         p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
3068         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
3069 \r
3070         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
3071         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
3072         {\r
3073                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3074                 return;\r
3075         }\r
3076 \r
3077         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
3078         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
3079 \r
3080         CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );\r
3081 }\r