[IBAL] Fix routing of traps destined for the local SM.
[mirror/winof/.git] / core / al / kernel / al_smi.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  * Copyright (c) 2006 Voltaire Corporation.  All rights reserved.\r
5  *\r
6  * This software is available to you under the OpenIB.org BSD license\r
7  * below:\r
8  *\r
9  *     Redistribution and use in source and binary forms, with or\r
10  *     without modification, are permitted provided that the following\r
11  *     conditions are met:\r
12  *\r
13  *      - Redistributions of source code must retain the above\r
14  *        copyright notice, this list of conditions and the following\r
15  *        disclaimer.\r
16  *\r
17  *      - Redistributions in binary form must reproduce the above\r
18  *        copyright notice, this list of conditions and the following\r
19  *        disclaimer in the documentation and/or other materials\r
20  *        provided with the distribution.\r
21  *\r
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
29  * SOFTWARE.\r
30  *\r
31  * $Id$\r
32  */\r
33 \r
34 \r
35 #include <iba/ib_al.h>\r
36 #include <complib/cl_timer.h>\r
37 \r
38 #include "ib_common.h"\r
39 #include "al_common.h"\r
40 #include "al_debug.h"\r
41 #if defined(EVENT_TRACING)\r
42 #ifdef offsetof\r
43 #undef offsetof\r
44 #endif\r
45 #include "al_smi.tmh"\r
46 #endif\r
47 #include "al_verbs.h"\r
48 #include "al_mgr.h"\r
49 #include "al_pnp.h"\r
50 #include "al_qp.h"\r
51 #include "al_smi.h"\r
52 #include "al_av.h"\r
53 \r
54 \r
55 extern char                                             node_desc[IB_NODE_DESCRIPTION_SIZE];\r
56 \r
57 #define SMI_POLL_INTERVAL                       20000           /* Milliseconds */\r
58 #define LOCAL_MAD_TIMEOUT                       50                      /* Milliseconds */\r
59 #define DEFAULT_QP0_DEPTH                       256\r
60 #define DEFAULT_QP1_DEPTH                       1024\r
61 \r
62 uint32_t                                g_smi_poll_interval =   SMI_POLL_INTERVAL;\r
63 spl_qp_mgr_t*                   gp_spl_qp_mgr = NULL;\r
64 \r
65 \r
66 /*\r
67  * Function prototypes.\r
68  */\r
69 void\r
70 destroying_spl_qp_mgr(\r
71         IN                              al_obj_t*                                       p_obj );\r
72 \r
73 void\r
74 free_spl_qp_mgr(\r
75         IN                              al_obj_t*                                       p_obj );\r
76 \r
77 ib_api_status_t\r
78 spl_qp0_agent_pnp_cb(\r
79         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
80 \r
81 ib_api_status_t\r
82 spl_qp1_agent_pnp_cb(\r
83         IN                              ib_pnp_rec_t*                           p_pnp_rec );\r
84 \r
85 ib_api_status_t\r
86 spl_qp_agent_pnp(\r
87         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
88         IN                              ib_qp_type_t                            qp_type );\r
89 \r
90 ib_api_status_t\r
91 create_spl_qp_svc(\r
92         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
93         IN              const   ib_qp_type_t                            qp_type );\r
94 \r
95 void\r
96 destroying_spl_qp_svc(\r
97         IN                              al_obj_t*                                       p_obj );\r
98 \r
99 void\r
100 free_spl_qp_svc(\r
101         IN                              al_obj_t*                                       p_obj );\r
102 \r
103 void\r
104 spl_qp_svc_lid_change(\r
105         IN                              al_obj_t*                                       p_obj,\r
106         IN                              ib_pnp_port_rec_t*                      p_pnp_rec );\r
107 \r
108 ib_api_status_t\r
109 remote_mad_send(\r
110         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
111         IN                              al_mad_wr_t* const                      p_mad_wr );\r
112 \r
113 static ib_api_status_t\r
114 local_mad_send(\r
115         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
116         IN                              al_mad_wr_t* const                      p_mad_wr );\r
117 \r
118 static ib_api_status_t\r
119 loopback_mad(\r
120         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
121         IN                              al_mad_wr_t* const                      p_mad_wr );\r
122 \r
123 static ib_api_status_t\r
124 __process_subn_mad(\r
125         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
126         IN                              al_mad_wr_t* const                      p_mad_wr );\r
127 \r
128 static ib_api_status_t\r
129 fwd_local_mad(\r
130         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
131         IN                              al_mad_wr_t* const                      p_mad_wr );\r
132 \r
133 void\r
134 send_local_mad_cb(\r
135         IN                              cl_async_proc_item_t*           p_item );\r
136 \r
137 void\r
138 spl_qp_send_comp_cb(\r
139         IN              const   ib_cq_handle_t                          h_cq,\r
140         IN                              void                                            *cq_context );\r
141 \r
142 void\r
143 spl_qp_recv_comp_cb(\r
144         IN              const   ib_cq_handle_t                          h_cq,\r
145         IN                              void                                            *cq_context );\r
146 \r
147 void\r
148 spl_qp_comp(\r
149         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
150         IN              const   ib_cq_handle_t                          h_cq,\r
151         IN                              ib_wc_type_t                            wc_type );\r
152 \r
153 ib_api_status_t\r
154 process_mad_recv(\r
155         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
156         IN                              ib_mad_element_t*                       p_mad_element );\r
157 \r
158 mad_route_t\r
159 route_recv_smp(\r
160         IN                              ib_mad_element_t*                       p_mad_element );\r
161 \r
162 mad_route_t\r
163 route_recv_smp_attr(\r
164         IN                              ib_mad_element_t*                       p_mad_element );\r
165 \r
166 mad_route_t\r
167 route_recv_dm_mad(\r
168         IN                              ib_mad_element_t*                       p_mad_element );\r
169 \r
170 mad_route_t\r
171 route_recv_gmp(\r
172         IN                              ib_mad_element_t*                       p_mad_element );\r
173 \r
174 mad_route_t\r
175 route_recv_gmp_attr(\r
176         IN                              ib_mad_element_t*                       p_mad_element );\r
177 \r
178 ib_api_status_t\r
179 forward_sm_trap(\r
180         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
181         IN                              ib_mad_element_t*                       p_mad_element );\r
182 \r
183 ib_api_status_t\r
184 recv_local_mad(\r
185         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
186         IN                              ib_mad_element_t*                       p_mad_request );\r
187 \r
188 void\r
189 spl_qp_alias_send_cb(\r
190         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
191         IN                              void                                            *mad_svc_context,\r
192         IN                              ib_mad_element_t                        *p_mad_element );\r
193 \r
194 void\r
195 spl_qp_alias_recv_cb(\r
196         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
197         IN                              void                                            *mad_svc_context,\r
198         IN                              ib_mad_element_t                        *p_mad_response );\r
199 \r
200 static ib_api_status_t\r
201 spl_qp_svc_post_recvs(\r
202         IN                              spl_qp_svc_t*   const           p_spl_qp_svc );\r
203 \r
204 void\r
205 spl_qp_svc_event_cb(\r
206         IN                              ib_async_event_rec_t            *p_event_rec );\r
207 \r
208 void\r
209 spl_qp_alias_event_cb(\r
210         IN                              ib_async_event_rec_t            *p_event_rec );\r
211 \r
212 void\r
213 spl_qp_svc_reset(\r
214         IN                              spl_qp_svc_t*                           p_spl_qp_svc );\r
215 \r
216 void\r
217 spl_qp_svc_reset_cb(\r
218         IN                              cl_async_proc_item_t*           p_item );\r
219 \r
220 ib_api_status_t\r
221 acquire_svc_disp(\r
222         IN              const   cl_qmap_t* const                        p_svc_map,\r
223         IN              const   ib_net64_t                                      port_guid,\r
224                 OUT                     al_mad_disp_handle_t            *ph_mad_disp );\r
225 \r
226 void\r
227 smi_poll_timer_cb(\r
228         IN                              void*                                           context );\r
229 \r
230 void\r
231 smi_post_recvs(\r
232         IN                              cl_list_item_t* const           p_list_item,\r
233         IN                              void*                                           context );\r
234 \r
235 #if defined( CL_USE_MUTEX )\r
236 void\r
237 spl_qp_send_async_cb(\r
238         IN                              cl_async_proc_item_t*           p_item );\r
239 \r
240 void\r
241 spl_qp_recv_async_cb(\r
242         IN                              cl_async_proc_item_t*           p_item );\r
243 #endif\r
244 \r
/*
 * Create the global special QP (SMI/GSI) manager.
 *
 * Allocates and initializes the singleton gp_spl_qp_mgr, attaches it to
 * p_parent_obj, arms the SMI polling timer, and registers for port PnP
 * events twice -- once for QP0 and once for QP1 -- so each special QP
 * gets its own PnP context.
 *
 * Returns IB_SUCCESS on success.  On any failure the partially
 * constructed manager is torn down before the error is returned.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN				al_obj_t*	const			p_parent_obj )
{
	ib_pnp_req_t			pnp_req;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_parent_obj );
	/* Singleton: must not already exist. */
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the per-port SMI and GSI service lookup maps. */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* init_al_obj failed, so pfn_destroy is not usable; free directly. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created.  This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP.  Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class	= IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb	= spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
364 \r
365 \r
366 \r
/*
 * Pre-destroy the special QP manager.
 *
 * al_obj "destroying" callback for gp_spl_qp_mgr.  Deregisters the QP0
 * and QP1 PnP registrations -- each deregistration releases, via
 * deref_al_obj, the reference taken on behalf of the corresponding
 * ib_reg_pnp call -- and destroys the SMI polling timer.
 */
void
destroying_spl_qp_mgr(
	IN				al_obj_t*					p_obj )
{
	ib_api_status_t			status;

	CL_ASSERT( p_obj );
	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
	/* p_obj is only consumed by the asserts above in checked builds. */
	UNUSED_PARAM( p_obj );

	/* Deregister for port PnP events for QP0. */
	if( gp_spl_qp_mgr->h_qp0_pnp )
	{
		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	/* Deregister for port PnP events for QP1. */
	if( gp_spl_qp_mgr->h_qp1_pnp )
	{
		status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	/* Destroy the SMI polling timer. */
	cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );
}
399 \r
400 \r
401 \r
/*
 * Free the special QP manager.
 *
 * al_obj "free" callback for gp_spl_qp_mgr, invoked once all references
 * on the manager object have been released.  Destroys the embedded
 * al_obj, releases the manager's memory, and clears the global pointer
 * so a later create_spl_qp_mgr() can run again.
 */
void
free_spl_qp_mgr(
	IN				al_obj_t*					p_obj )
{
	CL_ASSERT( p_obj );
	CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );
	/* p_obj is only consumed by the asserts above in checked builds. */
	UNUSED_PARAM( p_obj );

	destroy_al_obj( &gp_spl_qp_mgr->obj );
	cl_free( gp_spl_qp_mgr );
	gp_spl_qp_mgr = NULL;
}
417 \r
418 \r
419 \r
420 /*\r
421  * Special QP0 agent PnP event callback.\r
422  */\r
423 ib_api_status_t\r
424 spl_qp0_agent_pnp_cb(\r
425         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
426 {\r
427         ib_api_status_t status;\r
428         AL_ENTER( AL_DBG_SMI );\r
429 \r
430         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
431 \r
432         AL_EXIT( AL_DBG_SMI );\r
433         return status;\r
434 }\r
435 \r
436 \r
437 \r
438 /*\r
439  * Special QP1 agent PnP event callback.\r
440  */\r
441 ib_api_status_t\r
442 spl_qp1_agent_pnp_cb(\r
443         IN                              ib_pnp_rec_t*                           p_pnp_rec )\r
444 {\r
445         ib_api_status_t status;\r
446         AL_ENTER( AL_DBG_SMI );\r
447 \r
448         status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
449 \r
450         AL_EXIT( AL_DBG_SMI );\r
451         return status;\r
452 }\r
453 \r
454 \r
455 \r
456 /*\r
457  * Special QP agent PnP event callback.\r
458  */\r
459 ib_api_status_t\r
460 spl_qp_agent_pnp(\r
461         IN                              ib_pnp_rec_t*                           p_pnp_rec,\r
462         IN                              ib_qp_type_t                            qp_type )\r
463 {\r
464         ib_api_status_t                 status;\r
465         al_obj_t*                               p_obj;\r
466 \r
467         AL_ENTER( AL_DBG_SMI );\r
468 \r
469         CL_ASSERT( p_pnp_rec );\r
470         p_obj = p_pnp_rec->context;\r
471 \r
472         AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI,\r
473                 ("p_pnp_rec->pnp_event = 0x%x (%s)\n",\r
474                 p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );\r
475         /* Dispatch based on the PnP event type. */\r
476         switch( p_pnp_rec->pnp_event )\r
477         {\r
478         case IB_PNP_PORT_ADD:\r
479                 CL_ASSERT( !p_obj );\r
480                 status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
481                 break;\r
482 \r
483         case IB_PNP_PORT_REMOVE:\r
484                 CL_ASSERT( p_obj );\r
485                 ref_al_obj( p_obj );\r
486                 p_obj->pfn_destroy( p_obj, NULL );\r
487                 status = IB_SUCCESS;\r
488                 break;\r
489 \r
490         case IB_PNP_LID_CHANGE:\r
491                 CL_ASSERT( p_obj );\r
492                 spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
493                 status = IB_SUCCESS;\r
494                 break;\r
495 \r
496         default:\r
497                 /* All other events are ignored. */\r
498                 status = IB_SUCCESS;\r
499                 break;\r
500         }\r
501 \r
502         AL_EXIT( AL_DBG_SMI );\r
503         return status;\r
504 }\r
505 \r
506 \r
507 \r
508 /*\r
509  * Create a special QP service.\r
510  */\r
511 ib_api_status_t\r
512 create_spl_qp_svc(\r
513         IN                              ib_pnp_port_rec_t*                      p_pnp_rec,\r
514         IN              const   ib_qp_type_t                            qp_type )\r
515 {\r
516         cl_status_t                             cl_status;\r
517         spl_qp_svc_t*                   p_spl_qp_svc;\r
518         ib_ca_handle_t                  h_ca;\r
519         ib_cq_create_t                  cq_create;\r
520         ib_qp_create_t                  qp_create;\r
521         ib_qp_attr_t                    qp_attr;\r
522         ib_mad_svc_t                    mad_svc;\r
523         ib_api_status_t                 status;\r
524 \r
525         AL_ENTER( AL_DBG_SMI );\r
526 \r
527         CL_ASSERT( p_pnp_rec );\r
528 \r
529         if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
530         {\r
531                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
532                 return IB_INVALID_PARAMETER;\r
533         }\r
534 \r
535         CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
536         CL_ASSERT( p_pnp_rec->p_ca_attr );\r
537         CL_ASSERT( p_pnp_rec->p_port_attr );\r
538 \r
539         p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
540         if( !p_spl_qp_svc )\r
541         {\r
542                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
543                         ("IB_INSUFFICIENT_MEMORY\n") );\r
544                 return IB_INSUFFICIENT_MEMORY;\r
545         }\r
546 \r
547         /* Tie the special QP service to the port by setting the port number. */\r
548         p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
549         /* Store the port GUID to allow faster lookups of the dispatchers. */\r
550         p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
551 \r
552         /* Initialize the send and receive queues. */\r
553         cl_qlist_init( &p_spl_qp_svc->send_queue );\r
554         cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
555         cl_spinlock_init(&p_spl_qp_svc->cache_lock);\r
556         \r
557 #if defined( CL_USE_MUTEX )\r
558         /* Initialize async callbacks and flags for send/receive processing. */\r
559         p_spl_qp_svc->send_async_queued = FALSE;\r
560         p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
561         p_spl_qp_svc->recv_async_queued = FALSE;\r
562         p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
563 #endif\r
564 \r
565         /* Initialize the async callback function to process local sends. */\r
566         p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
567 \r
568         /* Initialize the async callback function to reset the QP on error. */\r
569         p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
570 \r
571         /* Construct the special QP service object. */\r
572         construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
573 \r
574         /* Initialize the special QP service object. */\r
575         status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
576                 destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
577         if( status != IB_SUCCESS )\r
578         {\r
579                 free_spl_qp_svc( &p_spl_qp_svc->obj );\r
580                 return status;\r
581         }\r
582 \r
583         /* Attach the special QP service to the parent object. */\r
584         status = attach_al_obj(\r
585                 (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
586         if( status != IB_SUCCESS )\r
587         {\r
588                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
589                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
590                         ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
591                 return status;\r
592         }\r
593 \r
594         h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
595         CL_ASSERT( h_ca );\r
596         if( !h_ca )\r
597         {\r
598                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
599                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
600                 return IB_INVALID_GUID;\r
601         }\r
602 \r
603         p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
604 \r
605         /* Determine the maximum queue depth of the QP and CQs. */\r
606         p_spl_qp_svc->max_qp_depth =\r
607                 ( p_pnp_rec->p_ca_attr->max_wrs <\r
608                 p_pnp_rec->p_ca_attr->max_cqes ) ?\r
609                 p_pnp_rec->p_ca_attr->max_wrs :\r
610                 p_pnp_rec->p_ca_attr->max_cqes;\r
611 \r
612         /* Compare this maximum to the default special queue depth. */\r
613         if( ( qp_type == IB_QPT_QP0 ) &&\r
614                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
615                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
616         if( ( qp_type == IB_QPT_QP1 ) &&\r
617                 ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
618                   p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
619 \r
620         /* Create the send CQ. */\r
621         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
622         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
623         cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
624 \r
625         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
626                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
627 \r
628         if( status != IB_SUCCESS )\r
629         {\r
630                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
631                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
632                         ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
633                 return status;\r
634         }\r
635 \r
636         /* Reference the special QP service on behalf of ib_create_cq. */\r
637         ref_al_obj( &p_spl_qp_svc->obj );\r
638 \r
639         /* Check the result of the creation request. */\r
640         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
641         {\r
642                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
643                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
644                         ("ib_create_cq allocated insufficient send CQ size\n") );\r
645                 return IB_INSUFFICIENT_RESOURCES;\r
646         }\r
647 \r
648         /* Create the receive CQ. */\r
649         cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
650         cq_create.size = p_spl_qp_svc->max_qp_depth;\r
651         cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
652 \r
653         status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
654                 p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
655 \r
656         if( status != IB_SUCCESS )\r
657         {\r
658                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
659                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
660                         ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
661                 return status;\r
662         }\r
663 \r
664         /* Reference the special QP service on behalf of ib_create_cq. */\r
665         ref_al_obj( &p_spl_qp_svc->obj );\r
666 \r
667         /* Check the result of the creation request. */\r
668         if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
669         {\r
670                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
671                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
672                         ("ib_create_cq allocated insufficient recv CQ size\n") );\r
673                 return IB_INSUFFICIENT_RESOURCES;\r
674         }\r
675 \r
676         /* Create the special QP. */\r
677         cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
678         qp_create.qp_type = qp_type;\r
679         qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
680         qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
681         qp_create.sq_sge = 3;   /* Three entries are required for segmentation. */\r
682         qp_create.rq_sge = 1;\r
683         qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
684         qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
685         qp_create.sq_signaled = TRUE;\r
686 \r
687         status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
688                 p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
689                 p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
690 \r
691         if( status != IB_SUCCESS )\r
692         {\r
693                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
694                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
695                         ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
696                 return status;\r
697         }\r
698 \r
699         /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
700         ref_al_obj( &p_spl_qp_svc->obj );\r
701 \r
702         /* Check the result of the creation request. */\r
703         status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
704         if( status != IB_SUCCESS )\r
705         {\r
706                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
707                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
708                         ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
709                 return status;\r
710         }\r
711 \r
712         if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
713                 ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
714                 ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
715         {\r
716                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
717                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
718                         ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
719                 return IB_INSUFFICIENT_RESOURCES;\r
720         }\r
721 \r
722         /* Initialize the QP for use. */\r
723         status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
724         if( status != IB_SUCCESS )\r
725         {\r
726                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
727                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
728                         ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
729                 return status;\r
730         }\r
731 \r
732         /* Post receive buffers. */\r
733         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
734         status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
735         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
736         if( status != IB_SUCCESS )\r
737         {\r
738                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
739                 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
740                         ("spl_qp_svc_post_recvs failed, %s\n",\r
741                         ib_get_err_str( status ) ) );\r
742                 return status;\r
743         }\r
744 \r
745         /* Create the MAD dispatcher. */\r
746         status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
747                 &p_spl_qp_svc->h_mad_disp );\r
748         if( status != IB_SUCCESS )\r
749         {\r
750                 p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
751                 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
752                         ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
753                 return status;\r
754         }\r
755 \r
756         /*\r
757          * Add this service to the special QP manager lookup lists.\r
758          * The service must be added to allow the creation of a QP alias.\r
759          */\r
760         cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
761         if( qp_type == IB_QPT_QP0 )\r
762         {\r
763                 cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
764                         &p_spl_qp_svc->map_item );\r
765         }\r
766         else\r
767         {\r
768                 cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
769                         &p_spl_qp_svc->map_item );\r
770         }\r
771         cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
772 \r
773         /*\r
774          * If the CA does not support HW agents, create a QP alias and register\r
775          * a MAD service for sending responses from the local MAD interface.\r
776          */\r
777         if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
778         {\r
779                 /* Create a QP alias. */\r
780                 cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
781                 qp_create.qp_type =\r
782                         ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
783                 qp_create.sq_depth              = p_spl_qp_svc->max_qp_depth;\r
784                 qp_create.sq_sge                = 1;\r
785                 qp_create.sq_signaled   = TRUE;\r
786 \r
787                 status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
788                         p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
789                         p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
790                         &p_spl_qp_svc->h_qp_alias );\r
791 \r
792                 if (status != IB_SUCCESS)\r
793                 {\r
794                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
795                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
796                                 ("ib_get_spl_qp alias failed, %s\n",\r
797                                 ib_get_err_str( status ) ) );\r
798                         return status;\r
799                 }\r
800 \r
801                 /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
802                 ref_al_obj( &p_spl_qp_svc->obj );\r
803 \r
804                 /* Register a MAD service for sends. */\r
805                 cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
806                 mad_svc.mad_svc_context = p_spl_qp_svc;\r
807                 mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
808                 mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
809 \r
810                 status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
811                         &p_spl_qp_svc->h_mad_svc );\r
812 \r
813                 if( status != IB_SUCCESS )\r
814                 {\r
815                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
816                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
817                                 ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
818                         return status;\r
819                 }\r
820         }\r
821 \r
822         /* Set the context of the PnP event to this child object. */\r
823         p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
824 \r
825         /* The QP is ready.  Change the state. */\r
826         p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
827 \r
828         /* Force a completion callback to rearm the CQs. */\r
829         spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
830         spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
831 \r
832         /* Start the polling thread timer. */\r
833         if( g_smi_poll_interval )\r
834         {\r
835                 cl_status =\r
836                         cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
837 \r
838                 if( cl_status != CL_SUCCESS )\r
839                 {\r
840                         p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
841                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
842                                 ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
843                         return ib_convert_cl_status( cl_status );\r
844                 }\r
845         }\r
846 \r
847         /* Release the reference taken in init_al_obj. */\r
848         deref_al_obj( &p_spl_qp_svc->obj );\r
849 \r
850         AL_EXIT( AL_DBG_SMI );\r
851         return IB_SUCCESS;\r
852 }\r
853 \r
854 \r
855 \r
856 /*\r
857  * Return a work completion to the MAD dispatcher for the specified MAD.\r
858  */\r
859 static void\r
860 __complete_send_mad(\r
861         IN              const   al_mad_disp_handle_t            h_mad_disp,\r
862         IN                              al_mad_wr_t* const                      p_mad_wr,\r
863         IN              const   ib_wc_status_t                          wc_status )\r
864 {\r
865         ib_wc_t                 wc;\r
866 \r
867         /* Construct a send work completion. */\r
868         cl_memclr( &wc, sizeof( ib_wc_t ) );\r
869         wc.wr_id        = p_mad_wr->send_wr.wr_id;\r
870         wc.wc_type      = IB_WC_SEND;\r
871         wc.status       = wc_status;\r
872 \r
873         /* Set the send size if we were successful with the send. */\r
874         if( wc_status == IB_WCS_SUCCESS )\r
875                 wc.length = MAD_BLOCK_SIZE;\r
876 \r
877         mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
878 }\r
879 \r
880 \r
881 \r
/*
 * Pre-destroy a special QP service.
 *
 * Quiesces the service: blocks new sends, waits for in-flight local MAD
 * processing to drain, removes the service from the SMI/GSI lookup maps,
 * destroys the QP, QP alias, and CQs, and flushes any queued sends back
 * to the MAD dispatcher.
 */
void
destroying_spl_qp_svc(
	IN				al_obj_t*					p_obj )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	cl_list_item_t*			p_list_item;
	al_mad_wr_t*			p_mad_wr;

	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_obj );
	p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );

	/* Change the state to prevent processing new send requests. */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	p_spl_qp_svc->state = SPL_QP_DESTROYING;
	cl_spinlock_release( &p_spl_qp_svc->obj.lock );

	/*
	 * Wait here until the special QP service is no longer in use.
	 * in_use_cnt is incremented while the asynchronous thread processes
	 * a local MAD; busy-wait (yielding) until that processing completes.
	 */
	while( p_spl_qp_svc->in_use_cnt )
	{
		cl_thread_suspend( 0 );
	}

	/* Destroy the special QP. */
	if( p_spl_qp_svc->h_qp )
	{
		/* If present, remove the special QP service from the tracking map. */
		cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );
		if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )
		{
			cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );
		}
		else
		{
			cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );
		}
		cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );

		/* deref_al_obj releases the reference taken when the QP was created. */
		status = ib_destroy_qp( p_spl_qp_svc->h_qp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );

		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );

		/* Complete any outstanding MAD sends operations as "flushed". */
		for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );
			 p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );
			 p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )
		{
			/*
			 * Drop the lock around the dispatcher callback to avoid
			 * holding it across client code; reacquire before touching
			 * the send queue again.
			 */
			cl_spinlock_release( &p_spl_qp_svc->obj.lock );
			p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_WR_FLUSHED_ERR );
			cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		}

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
		/* Receive MAD elements are returned to the pool by the free routine. */
	}

	/* Destroy the special QP alias and CQs. */
	if( p_spl_qp_svc->h_qp_alias )
	{
		status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_send_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
	if( p_spl_qp_svc->h_recv_cq )
	{
		status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	AL_EXIT( AL_DBG_SMI );
}
970 \r
971 \r
972 \r
973 /*\r
974  * Free a special QP service.\r
975  */\r
976 void\r
977 free_spl_qp_svc(\r
978         IN                              al_obj_t*                                       p_obj )\r
979 {\r
980         spl_qp_svc_t*                   p_spl_qp_svc;\r
981         cl_list_item_t*                 p_list_item;\r
982         al_mad_element_t*               p_al_mad;\r
983         ib_api_status_t                 status;\r
984 \r
985         AL_ENTER( AL_DBG_SMI );\r
986 \r
987         CL_ASSERT( p_obj );\r
988         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
989 \r
990         /* Dereference the CA. */\r
991         if( p_spl_qp_svc->obj.p_ci_ca )\r
992                 deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
993 \r
994         /* Return receive MAD elements to the pool. */\r
995         for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
996                  p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
997                  p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
998         {\r
999                 p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
1000 \r
1001                 status = ib_put_mad( &p_al_mad->element );\r
1002                 CL_ASSERT( status == IB_SUCCESS );\r
1003         }\r
1004 \r
1005         CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
1006 \r
1007         destroy_al_obj( &p_spl_qp_svc->obj );\r
1008         cl_free( p_spl_qp_svc );\r
1009 \r
1010         AL_EXIT( AL_DBG_SMI );\r
1011 }\r
1012 \r
1013 \r
1014 \r
1015 /*\r
1016  * Update the base LID of a special QP service.\r
1017  */\r
1018 void\r
1019 spl_qp_svc_lid_change(\r
1020         IN                              al_obj_t*                                       p_obj,\r
1021         IN                              ib_pnp_port_rec_t*                      p_pnp_rec )\r
1022 {\r
1023         spl_qp_svc_t*                   p_spl_qp_svc;\r
1024 \r
1025         AL_ENTER( AL_DBG_SMI );\r
1026 \r
1027         CL_ASSERT( p_obj );\r
1028         CL_ASSERT( p_pnp_rec );\r
1029         CL_ASSERT( p_pnp_rec->p_port_attr );\r
1030 \r
1031         p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
1032 \r
1033         p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
1034         p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
1035 \r
1036         AL_EXIT( AL_DBG_SMI );\r
1037 }\r
1038 \r
1039 \r
1040 \r
/*
 * Route a send work request.
 *
 * Decides whether an outbound MAD should be processed by the local MAD
 * interface (ROUTE_LOCAL), looped back to this port (ROUTE_LOOPBACK),
 * dropped (ROUTE_DISCARD), or posted to the wire (ROUTE_REMOTE).  The
 * directed-route checks follow the IBA outgoing-SMP validation rules
 * (hop count / hop pointer handling).
 */
mad_route_t
route_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_send_wr_t* const			p_send_wr )
{
	al_mad_wr_t*			p_mad_wr;
	al_mad_send_t*			p_mad_send;
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	ib_av_handle_t			h_av;
	mad_route_t				route;
	boolean_t				local, loopback, discard;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_send_wr );

	/* Initialize pointers to the MAD work request and the MAD. */
	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Check if the CA has a local MAD interface. */
	local = loopback = discard = FALSE;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * If the MAD is a locally addressed Subnet Management, Performance
		 * Management, or Connection Management datagram, process the work
		 * request locally.
		 */
		h_av = p_send_wr->dgrm.ud.h_av;
		switch( p_mad->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the originator of the response.  Discard
				 * if the hop count or pointer is zero, an intermediate hop,
				 * out of bounds hop, or if the first port of the directed
				 * route return path is not this port.
				 */
				if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt or hop ptr set to 0...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )
				{
					/* Responses originate with hop_ptr == hop_count + 1. */
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt != (hop ptr - 1)...discarding\n") );
					discard = TRUE;
				}
				else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("hop cnt > max hops...discarding\n") );
					discard = TRUE;
				}
				else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&
						 ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=
							p_spl_qp_svc->port_num ) )
				{
					AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
						("return path[hop ptr - 1] != port num...discarding\n") );
					discard = TRUE;
				}
			}
			else
			{
				/* The SMP is a request. */
				if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
					( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
				{
					discard = TRUE;
				}
				else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )
				{
					/* Self Addressed: Sent locally, routed locally. */
					local = TRUE;
					/* Self-addressed SMPs must use permissive LIDs both ways. */
					discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||
							  ( p_smp->dr_dlid != IB_LID_PERMISSIVE );
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )
				{
					/* End of Path: Sent remotely, routed locally. */
					local = TRUE;
				}
				else if( ( p_smp->hop_count != 0 ) &&
						 ( p_smp->hop_ptr	== 0 ) )
				{
					/* Beginning of Path: Sent locally, routed remotely. */
					if( p_smp->dr_slid == IB_LID_PERMISSIVE )
					{
						/* The first outbound hop must exit through this port. */
						discard =
							( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=
							  p_spl_qp_svc->port_num );
					}
				}
				else
				{
					/* Intermediate hop. */
					discard = TRUE;
				}
			}
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);
			break;

		case IB_MCLASS_SUBN_LID:
			/* Loopback locally addressed SM to SM "heartbeat" messages. */
			loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);

			/* Fall through to check for a local MAD. */

		case IB_MCLASS_PERF:
		case IB_MCLASS_BM:
			/* Local if the destination LID matches one of this port's LIDs. */
			local = ( h_av &&
				( h_av->av_attr.dlid ==
				( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )
			{
				local = ( h_av &&
					( h_av->av_attr.dlid ==
					( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );
			}
			break;
		}
	}

	/*
	 * Precedence (lowest to highest): client's IB_SEND_OPT_LOCAL hint,
	 * then local, then loopback (only when also local), then discard.
	 */
	route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?
		ROUTE_LOCAL : ROUTE_REMOTE;
	if( local ) route = ROUTE_LOCAL;
	if( loopback && local ) route = ROUTE_LOOPBACK;
	if( discard ) route = ROUTE_DISCARD;

	AL_EXIT( AL_DBG_SMI );
	return route;
}
1193 \r
1194 \r
1195 \r
/*
 * Send a work request on the special QP.
 *
 * Routes the MAD (local / loopback / remote / discard) and dispatches it
 * accordingly.  Returns IB_RESOURCE_BUSY when ordering constraints or
 * queue depth prevent accepting the request now; the special QP resumes
 * sends later.
 */
ib_api_status_t
spl_qp_svc_send(
	IN		const	ib_qp_handle_t				h_qp,
	IN				ib_send_wr_t* const			p_send_wr )
{
	spl_qp_svc_t*			p_spl_qp_svc;
	al_mad_wr_t*			p_mad_wr;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( h_qp );
	CL_ASSERT( p_send_wr );

	/* Get the special QP service. */
	p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;
	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, p_send_wr );

	/*
	 * Check the QP state and guard against error handling.  Also,
	 * to maintain proper order of work completions, delay processing
	 * a local MAD until any remote MAD work requests have completed,
	 * and delay processing a remote MAD until local MAD work requests
	 * have completed.
	 */
	cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
	if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||
		(is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||
		( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=
			p_spl_qp_svc->max_qp_depth ) )
	{
		/*
		 * Return busy status.
		 * The special QP will resume sends at this point.
		 */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		AL_EXIT( AL_DBG_SMI );
		return IB_RESOURCE_BUSY;
	}

	p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );

	if( is_local( route ) )
	{
		/* Save the local MAD work request for processing. */
		p_spl_qp_svc->local_mad_wr = p_mad_wr;

		/* Flag the service as in use by the asynchronous processing thread. */
		cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );

		/* Drop the lock before local processing; local_mad_wr blocks others. */
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		status = local_mad_send( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		/* Process a remote MAD send work request (called holding the lock). */
		status = remote_mad_send( p_spl_qp_svc, p_mad_wr );

		cl_spinlock_release( &p_spl_qp_svc->obj.lock );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1270 \r
1271 \r
1272 \r
/*
 * Process a remote MAD send work request.  Called holding the spl_qp_svc lock.
 *
 * Adjusts directed-route SMP hop pointers as required for outbound SMPs,
 * queues the work request on the tracking queue, and posts it to the QP.
 * On post failure the queue insertion and hop pointer adjustments are
 * rolled back so the MAD can be retried or completed in error.
 */
ib_api_status_t
remote_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_smp_t*				p_smp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize a pointer to the outbound MAD (viewed as an SMP). */
	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	/* Perform outbound MAD processing. */

	/* Adjust directed route SMPs as required by IBA. */
	if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		if( ib_smp_is_response( p_smp ) )
		{
			if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				p_smp->hop_ptr--;
		}
		else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
		{
			/*
			 * Only update the pointer if the hw_agent is not implemented.
			 * Fujitsu implements SMI in hardware, so the following has to
			 * be passed down to the hardware SMI.
			 */
			ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
			if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )
				p_smp->hop_ptr++;
			ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
		}
	}

	/* Always generate send completions. */
	p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;

	/* Queue the MAD work request on the service tracking queue. */
	cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

	status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );

	if( status != IB_SUCCESS )
	{
		/* Undo the queue insertion performed above. */
		cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );

		/* Reset directed route SMPs as required by IBA. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
		{
			if( ib_smp_is_response( p_smp ) )
			{
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
					p_smp->hop_ptr++;
			}
			else if( p_smp->dr_slid == IB_LID_PERMISSIVE )
			{
				/* Only update if the hw_agent is not implemented. */
				ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );
				if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )
					p_smp->hop_ptr--;
				ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );
			}
		}
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1350 \r
1351 \r
/*
 * Handle a MAD destined for the local CA, using cached data
 * as much as possible.
 *
 * Discards, loops back, or processes the MAD; anything it cannot finish
 * synchronously (status IB_NOT_DONE) is queued for the asynchronous
 * processing thread.  NOTE(review): this routine returns IB_SUCCESS
 * unconditionally — failures appear to be reported to the client via
 * __complete_send_mad / the completion path rather than the return
 * value; confirm callers rely on that before changing it.
 */
static ib_api_status_t
local_mad_send(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	mad_route_t				route;
	ib_api_status_t			status = IB_SUCCESS;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Determine how to route the MAD. */
	route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );

	/* Check if this MAD should be discarded. */
	if( is_discard( route ) )
	{
		/* Deliver a "work completion" to the dispatcher. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		status = IB_INVALID_SETTING;
	}
	else if( is_loopback( route ) )
	{
		/* Loopback local SM to SM "heartbeat" messages. */
		status = loopback_mad( p_spl_qp_svc, p_mad_wr );
	}
	else
	{
		switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
		case IB_MCLASS_SUBN_LID:
			/* Do not use the cache, in order to force the M_Key check. */
			status = __process_subn_mad( p_spl_qp_svc, p_mad_wr );
			//status = IB_NOT_DONE;
			break;

		default:
			/* Defer all other classes to the asynchronous thread. */
			status = IB_NOT_DONE;
		}
	}

	if( status == IB_NOT_DONE )
	{
		/* Queue an asynchronous processing item to process the local MAD. */
		cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );
	}
	else
	{
		/*
		 * Clear the local MAD pointer to allow processing of other MADs.
		 * This is done after polling for attribute changes to ensure that
		 * subsequent MADs pick up any changes performed by this one.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );
		p_spl_qp_svc->local_mad_wr = NULL;
		cl_spinlock_release( &p_spl_qp_svc->obj.lock );

		/* No longer in use by the asynchronous processing thread. */
		cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );

		/* Special QP operations will resume by unwinding. */
	}

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
1426 \r
1427 \r
1428 static ib_api_status_t\r
1429 get_resp_mad(\r
1430         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1431         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1432                 OUT                     ib_mad_element_t** const        pp_mad_resp )\r
1433 {\r
1434         ib_api_status_t                 status;\r
1435 \r
1436         AL_ENTER( AL_DBG_SMI );\r
1437 \r
1438         CL_ASSERT( p_spl_qp_svc );\r
1439         CL_ASSERT( p_mad_wr );\r
1440         CL_ASSERT( pp_mad_resp );\r
1441 \r
1442         /* Get a MAD element from the pool for the response. */\r
1443         status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
1444                 MAD_BLOCK_SIZE, pp_mad_resp );\r
1445         if( status != IB_SUCCESS )\r
1446         {\r
1447                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
1448                         IB_WCS_LOCAL_OP_ERR );\r
1449         }\r
1450 \r
1451         AL_EXIT( AL_DBG_SMI );\r
1452         return status;\r
1453 }\r
1454 \r
1455 \r
1456 static ib_api_status_t\r
1457 complete_local_mad(\r
1458         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1459         IN                              al_mad_wr_t* const                      p_mad_wr,\r
1460         IN                              ib_mad_element_t* const         p_mad_resp )\r
1461 {\r
1462         ib_api_status_t                 status;\r
1463 \r
1464         AL_ENTER( AL_DBG_SMI );\r
1465 \r
1466         CL_ASSERT( p_spl_qp_svc );\r
1467         CL_ASSERT( p_mad_wr );\r
1468         CL_ASSERT( p_mad_resp );\r
1469 \r
1470         /* Construct the receive MAD element. */\r
1471         p_mad_resp->status              = IB_WCS_SUCCESS;\r
1472         p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1473         p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1474         if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1475         {\r
1476                 p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1477                 p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1478         }\r
1479 \r
1480         /*\r
1481          * Hand the receive MAD element to the dispatcher before completing\r
1482          * the send.  This guarantees that the send request cannot time out.\r
1483          */\r
1484         status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1485 \r
1486         /* Forward the send work completion to the dispatcher. */\r
1487         __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1488 \r
1489         AL_EXIT( AL_DBG_SMI );\r
1490         return status;\r
1491 }\r
1492 \r
1493 \r
1494 static ib_api_status_t\r
1495 loopback_mad(\r
1496         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1497         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1498 {\r
1499         ib_mad_t                                *p_mad;\r
1500         ib_mad_element_t                *p_mad_resp;\r
1501         ib_api_status_t                 status;\r
1502 \r
1503         AL_ENTER( AL_DBG_SMI );\r
1504 \r
1505         CL_ASSERT( p_spl_qp_svc );\r
1506         CL_ASSERT( p_mad_wr );\r
1507 \r
1508         /* Get a MAD element from the pool for the response. */\r
1509         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1510         if( status == IB_SUCCESS )\r
1511         {\r
1512                 /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1513                 p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1514 \r
1515                 /* Simulate a send/receive between local managers. */\r
1516                 cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
1517 \r
1518                 /* Construct the receive MAD element. */\r
1519                 p_mad_resp->status              = IB_WCS_SUCCESS;\r
1520                 p_mad_resp->remote_qp   = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
1521                 p_mad_resp->remote_lid  = p_spl_qp_svc->base_lid;\r
1522                 if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
1523                 {\r
1524                         p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
1525                         p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
1526                 }\r
1527 \r
1528                 /*\r
1529                  * Hand the receive MAD element to the dispatcher before completing\r
1530                  * the send.  This guarantees that the send request cannot time out.\r
1531                  */\r
1532                 status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
1533 \r
1534                 /* Forward the send work completion to the dispatcher. */\r
1535                 __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
1536 \r
1537         }\r
1538 \r
1539         AL_EXIT( AL_DBG_SMI );\r
1540         return status;\r
1541 }\r
1542 \r
1543 \r
1544 static void\r
1545 __update_guid_info(\r
1546         IN                              spl_qp_cache_t* const                   p_cache,\r
1547         IN              const   ib_smp_t* const                         p_mad )\r
1548 {\r
1549         uint32_t                        idx;\r
1550 \r
1551         /* Get the table selector from the attribute */\r
1552         idx = cl_ntoh32( p_mad->attr_mod );\r
1553 \r
1554         /*\r
1555          * We only get successful MADs here, so invalid settings\r
1556          * shouldn't happen.\r
1557          */\r
1558         CL_ASSERT( idx <= 31 );\r
1559 \r
1560         cl_memcpy( &p_cache->guid_block[idx].tbl,\r
1561                 ib_smp_get_payload_ptr( p_mad ),\r
1562                 sizeof(ib_guid_info_t) );\r
1563         p_cache->guid_block[idx].valid = TRUE;\r
1564 }\r
1565 \r
1566 \r
1567 static  void\r
1568 __update_pkey_table(\r
1569         IN                              spl_qp_cache_t* const                   p_cache,\r
1570         IN              const   ib_smp_t* const                         p_mad )\r
1571 {\r
1572         uint16_t                        idx;\r
1573 \r
1574         /* Get the table selector from the attribute */\r
1575         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1576 \r
1577         CL_ASSERT( idx <= 2047 );\r
1578 \r
1579         cl_memcpy( &p_cache->pkey_tbl[idx].tbl,\r
1580                 ib_smp_get_payload_ptr( p_mad ),\r
1581                 sizeof(ib_pkey_table_info_t) );\r
1582         p_cache->pkey_tbl[idx].valid = TRUE;\r
1583 }\r
1584 \r
1585 \r
1586 static void\r
1587 __update_sl_vl_table(\r
1588         IN                              spl_qp_cache_t* const                   p_cache,\r
1589         IN              const   ib_smp_t* const                         p_mad )\r
1590 {\r
1591         cl_memcpy( &p_cache->sl_vl.tbl,\r
1592                 ib_smp_get_payload_ptr( p_mad ),\r
1593                 sizeof(ib_slvl_table_t) );\r
1594         p_cache->sl_vl.valid = TRUE;\r
1595 }\r
1596 \r
1597 \r
1598 static void\r
1599 __update_vl_arb_table(\r
1600         IN                              spl_qp_cache_t* const                   p_cache,\r
1601         IN              const   ib_smp_t* const                         p_mad )\r
1602 {\r
1603         uint16_t                        idx;\r
1604 \r
1605         /* Get the table selector from the attribute */\r
1606         idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
1607 \r
1608         CL_ASSERT( idx <= 3 );\r
1609 \r
1610         cl_memcpy( &p_cache->vl_arb[idx].tbl,\r
1611                 ib_smp_get_payload_ptr( p_mad ),\r
1612                 sizeof(ib_vl_arb_table_t) );\r
1613         p_cache->vl_arb[idx].valid = TRUE;\r
1614 }\r
1615 \r
1616 \r
1617 \r
1618 void\r
1619 spl_qp_svc_update_cache(\r
1620         IN                              spl_qp_svc_t                            *p_spl_qp_svc,\r
1621         IN                              ib_smp_t                                        *p_mad )\r
1622 {\r
1623 \r
1624 \r
1625 \r
1626         CL_ASSERT( p_spl_qp_svc );\r
1627         CL_ASSERT( p_mad );\r
1628         CL_ASSERT( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1629                                  p_mad->mgmt_class == IB_MCLASS_SUBN_LID);\r
1630         CL_ASSERT(!p_mad->status);\r
1631 \r
1632         cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);\r
1633         \r
1634         switch( p_mad->attr_id )\r
1635         {\r
1636         case IB_MAD_ATTR_GUID_INFO:\r
1637                 __update_guid_info(\r
1638                         &p_spl_qp_svc->cache, p_mad );\r
1639                 break;\r
1640 \r
1641         case IB_MAD_ATTR_P_KEY_TABLE:\r
1642                 __update_pkey_table(\r
1643                         &p_spl_qp_svc->cache, p_mad );\r
1644                 break;\r
1645 \r
1646         case IB_MAD_ATTR_SLVL_TABLE:\r
1647                 __update_sl_vl_table(\r
1648                         &p_spl_qp_svc->cache, p_mad );\r
1649                 break;\r
1650 \r
1651         case IB_MAD_ATTR_VL_ARBITRATION:\r
1652                 __update_vl_arb_table(\r
1653                         &p_spl_qp_svc->cache, p_mad );\r
1654                 break;\r
1655 \r
1656         default:\r
1657                 break;\r
1658         }\r
1659         \r
1660         cl_spinlock_release(&p_spl_qp_svc->cache_lock);\r
1661 }\r
1662 \r
1663 \r
1664 \r
/*
 * Answer a SubnGet(NodeInfo) locally from the CA/port attributes
 * instead of forwarding it to the HCA.  Non-GET methods are failed
 * with a local operation error.
 */
static ib_api_status_t
__process_node_info(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_node_info_t			*p_node_info;
	ib_ca_attr_t			*p_ca_attr;
	ib_port_attr_t			*p_port_attr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeInfo is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Start from a copy of the request and convert it to a response. */
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );

		/*
		 * Fill in the node info, protecting against the
		 * attributes being changed by PnP.
		 */
		cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;
		p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];

		p_node_info->base_version = 1;
		p_node_info->class_version = 1;
		p_node_info->node_type = IB_NODE_TYPE_CA;
		p_node_info->num_ports = p_ca_attr->num_ports;
		p_node_info->sys_guid = p_ca_attr->system_image_guid;
		p_node_info->node_guid = p_ca_attr->ca_guid;
		p_node_info->port_guid = p_port_attr->port_guid;
		p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );
		p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );
		p_node_info->revision = cl_hton32( p_ca_attr->revision );
		/* Low 24 bits: vendor ID; high byte: responding port number. */
		p_node_info->port_num_vendor_id =
			cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;
		cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );

		/* Dispatch the receive, then complete the original send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1737 \r
1738 \r
/*
 * Answer a SubnGet(NodeDescription) locally using the machine's node
 * description string.  Non-GET methods are failed with a local
 * operation error.
 */
static ib_api_status_t
__process_node_desc(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{
	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	if( p_mad->method != IB_MAD_METHOD_GET )
	{
		/* NodeDescription is a GET-only attribute. */
		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return IB_INVALID_SETTING;
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		/* Start from a copy of the request and convert it to a response. */
		cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );
		p_mad_resp->p_mad_buf->method =
			(IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;
		else
			p_mad_resp->p_mad_buf->status = 0;
		/* Set the node description to the machine name. */
		cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, 
			node_desc, sizeof(node_desc) );

		/* Dispatch the receive, then complete the original send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1785 \r
1786 static ib_api_status_t\r
1787 __process_guid_info(\r
1788         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
1789         IN                              al_mad_wr_t* const                      p_mad_wr )\r
1790 {\r
1791         \r
1792         ib_mad_t                                *p_mad;\r
1793         ib_mad_element_t                *p_mad_resp;\r
1794         ib_smp_t                                *p_smp;\r
1795         ib_guid_info_t                  *p_guid_info;\r
1796         uint16_t                                idx;\r
1797         ib_api_status_t         status;\r
1798 \r
1799 \r
1800         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
1801         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
1802 \r
1803         /* Get the table selector from the attribute */\r
1804         idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
1805         \r
1806         /*\r
1807          * TODO : Setup the response to fail the MAD instead of sending\r
1808          * it down to the HCA.\r
1809          */\r
1810         if( idx > 31 )\r
1811         {\r
1812                 AL_EXIT( AL_DBG_SMI );\r
1813                 return IB_NOT_DONE;\r
1814         }\r
1815         if( !p_spl_qp_svc->cache.guid_block[idx].valid )\r
1816         {\r
1817                 AL_EXIT( AL_DBG_SMI );\r
1818                 return IB_NOT_DONE;\r
1819         }\r
1820 \r
1821         /*\r
1822          * If a SET, see if the set is identical to the cache,\r
1823          * in which case it's a no-op.\r
1824          */\r
1825         if( p_mad->method == IB_MAD_METHOD_SET )\r
1826         {\r
1827                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
1828                         &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_pkey_table_info_t) ) )\r
1829                 {\r
1830                         /* The set is requesting a change. */\r
1831                         return IB_NOT_DONE;\r
1832                 }\r
1833         }\r
1834         \r
1835         /* Get a MAD element from the pool for the response. */\r
1836         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
1837         if( status == IB_SUCCESS )\r
1838         {\r
1839                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
1840 \r
1841                 /* Setup the response mad. */\r
1842                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
1843                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
1844                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1845                         p_smp->status = IB_SMP_DIRECTION;\r
1846                 else\r
1847                         p_smp->status = 0;\r
1848 \r
1849                 p_guid_info = (ib_guid_info_t*)ib_smp_get_payload_ptr( p_smp );\r
1850 \r
1851                 // TODO: do we need lock on the cache ?????\r
1852 \r
1853                 \r
1854                 /* Copy the cached data. */\r
1855                 cl_memcpy( p_guid_info,\r
1856                         &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) );\r
1857 \r
1858                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
1859         }\r
1860 \r
1861         AL_EXIT( AL_DBG_SMI );\r
1862         return status;\r
1863 }\r
1864 \r
1865 \r
/*
 * Answer a P_KeyTable query from the cached copy when possible.
 *
 * Returns IB_NOT_DONE when the MAD must still be sent to the HCA:
 * the block selector is out of range, the cached block is invalid,
 * or the MAD is a SET that actually changes the table.  A SET that
 * matches the cache exactly is a no-op and is answered locally.
 *
 * Caller holds cache_lock (acquired in __process_subn_mad).
 */
static ib_api_status_t
__process_pkey_table(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{

	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_pkey_table_info_t	*p_pkey_table_info;
	uint16_t				idx;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );

	/* Get the block selector from the attribute modifier. */
	idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));

	/*
	 * TODO : Setup the response to fail the MAD instead of sending
	 * it down to the HCA.
	 */
	if( idx > 2047 )
	{
		AL_EXIT( AL_DBG_SMI );
		return IB_NOT_DONE;
	}


	if( !p_spl_qp_svc->cache.pkey_tbl[idx].valid )
	{
		AL_EXIT( AL_DBG_SMI );
		return IB_NOT_DONE;
	}

	/*
	 * If a SET, see if the set is identical to the cache,
	 * in which case it's a no-op.
	 */
	if( p_mad->method == IB_MAD_METHOD_SET )
	{
		if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),
			&p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) ) )
		{
			/* The set is requesting a change - let the HCA handle it. */
			AL_EXIT( AL_DBG_SMI );
			return IB_NOT_DONE;
		}
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;

		/* Setup the response mad. */
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_pkey_table_info = (ib_pkey_table_info_t*)ib_smp_get_payload_ptr( p_smp );

		/* Copy the cached data (cache_lock is held by the caller). */
		cl_memcpy( p_pkey_table_info,
			&p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) );

		/* Dispatch the receive, then complete the original send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
1951 \r
1952 \r
/*
 * Answer an SLtoVL mapping table query from the cached copy when
 * possible.
 *
 * Returns IB_NOT_DONE when the MAD must still be sent to the HCA:
 * the cached table is invalid, or the MAD is a SET that actually
 * changes the table.  A SET that matches the cache exactly is a
 * no-op and is answered locally.
 *
 * Caller holds cache_lock (acquired in __process_subn_mad).
 */
static ib_api_status_t
__process_slvl_table(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{


	ib_mad_t				*p_mad;
	ib_mad_element_t		*p_mad_resp;
	ib_smp_t				*p_smp;
	ib_slvl_table_t			*p_slvl_table;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );

	if( !p_spl_qp_svc->cache.sl_vl.valid )
	{
		AL_EXIT( AL_DBG_SMI );
		return IB_NOT_DONE;
	}

	/*
	 * If a SET, see if the set is identical to the cache,
	 * in which case it's a no-op.
	 */
	if( p_mad->method == IB_MAD_METHOD_SET )
	{
		if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),
			&p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) ) )
		{
			/* The set is requesting a change - let the HCA handle it. */
			AL_EXIT( AL_DBG_SMI );
			return IB_NOT_DONE;
		}
	}

	/* Get a MAD element from the pool for the response. */
	status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );
	if( status == IB_SUCCESS )
	{
		p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;

		/* Setup the response mad. */
		cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );
		p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);
		/* Directed-route responses carry the direction bit in status. */
		if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )
			p_smp->status = IB_SMP_DIRECTION;
		else
			p_smp->status = 0;

		p_slvl_table = (ib_slvl_table_t*)ib_smp_get_payload_ptr( p_smp );

		/* Copy the cached data (cache_lock is held by the caller). */
		cl_memcpy( p_slvl_table,
			&p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) );

		/* Dispatch the receive, then complete the original send. */
		status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );
	}

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2024 \r
2025 \r
2026 \r
2027 static ib_api_status_t\r
2028 __process_vl_arb_table(\r
2029         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2030         IN                              al_mad_wr_t* const                      p_mad_wr )\r
2031 {\r
2032 \r
2033         ib_mad_t                                *p_mad;\r
2034         ib_mad_element_t                *p_mad_resp;\r
2035         ib_smp_t                                *p_smp;\r
2036         ib_vl_arb_table_t               *p_vl_arb_table;\r
2037         uint16_t                                idx;\r
2038         ib_api_status_t         status;\r
2039 \r
2040         AL_ENTER( AL_DBG_SMI );\r
2041 \r
2042         CL_ASSERT( p_spl_qp_svc );\r
2043         CL_ASSERT( p_mad_wr );\r
2044 \r
2045         /* Initialize a pointers to the MAD work request and outbound MAD. */\r
2046         p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
2047 \r
2048         /* Get the table selector from the attribute */\r
2049         idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
2050         \r
2051         /*\r
2052          * TODO : Setup the response to fail the MAD instead of sending\r
2053          * it down to the HCA.\r
2054          */\r
2055         if( idx > 3 )\r
2056         {\r
2057                 AL_EXIT( AL_DBG_SMI );\r
2058                 return IB_NOT_DONE;\r
2059         }\r
2060 \r
2061 \r
2062         if( !p_spl_qp_svc->cache.vl_arb[idx].valid )\r
2063         {\r
2064                 AL_EXIT( AL_DBG_SMI );\r
2065                 return IB_NOT_DONE;\r
2066         }\r
2067 \r
2068         /*\r
2069          * If a SET, see if the set is identical to the cache,\r
2070          * in which case it's a no-op.\r
2071          */\r
2072         if( p_mad->method == IB_MAD_METHOD_SET )\r
2073         {\r
2074                 if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
2075                         &p_spl_qp_svc->cache.vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) )\r
2076                 {\r
2077                         /* The set is requesting a change. */\r
2078                         AL_EXIT( AL_DBG_SMI );\r
2079                         return IB_NOT_DONE;\r
2080                 }\r
2081         }\r
2082         \r
2083         /* Get a MAD element from the pool for the response. */\r
2084         status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
2085         if( status == IB_SUCCESS )\r
2086         {\r
2087                 p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
2088 \r
2089                 /* Setup the response mad. */\r
2090                 cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
2091                 p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
2092                 if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2093                         p_smp->status = IB_SMP_DIRECTION;\r
2094                 else\r
2095                         p_smp->status = 0;\r
2096 \r
2097                 p_vl_arb_table = (ib_vl_arb_table_t*)ib_smp_get_payload_ptr( p_smp );\r
2098 \r
2099                 // TODO: do we need lock on the cache ?????\r
2100 \r
2101                 \r
2102                 /* Copy the cached data. */\r
2103                 cl_memcpy( p_vl_arb_table,\r
2104                         &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_vl_arb_table_t) );\r
2105 \r
2106                 status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
2107         }\r
2108 \r
2109         AL_EXIT( AL_DBG_SMI );\r
2110         return status;\r
2111 }\r
2112 \r
2113 \r
2114 \r
2115 \r
/*
 * Process subnet management MADs using cached data if possible.
 *
 * Returns IB_NOT_DONE when the MAD should be sent down to the HCA
 * instead of being answered from the cache; any other status means
 * the MAD was handled (or failed) locally.
 */
static ib_api_status_t
__process_subn_mad(
	IN				spl_qp_svc_t*					p_spl_qp_svc,
	IN				al_mad_wr_t* const				p_mad_wr )
{
	ib_api_status_t		status;
	ib_smp_t			*p_smp;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );

	CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||
		p_smp->mgmt_class == IB_MCLASS_SUBN_LID );

	/*
	 * Simple M_Key check: only answer from the cache when the SMP's
	 * M_Key matches the (non-zero) key recorded for this service.
	 */
	if( p_spl_qp_svc->m_key && p_smp->m_key == p_spl_qp_svc->m_key )
	{
		if(!p_spl_qp_svc->cache_en )
		{
			/*
			 * First matching MAD since caching was disabled: re-enable
			 * caching but still forward this MAD to the HCA —
			 * presumably so its response repopulates the cache via
			 * spl_qp_svc_update_cache (TODO confirm).
			 */
			p_spl_qp_svc->cache_en = TRUE;
			AL_EXIT( AL_DBG_SMI );
			return IB_NOT_DONE;
		}
	}
	else
	{
		/* M_Key mismatch (or no key captured yet): disable the cache
		 * and forward the MAD to the HCA. */
		AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check failed \n"));
		AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check SMP= 0x%08x:%08x  SVC = 0x%08x:%08x \n",
									((uint32_t*)&p_smp->m_key)[0],((uint32_t*)&p_smp->m_key)[1],
									((uint32_t*)&p_spl_qp_svc->m_key)[0],((uint32_t*)&p_spl_qp_svc->m_key)[1]));

		p_spl_qp_svc->cache_en = FALSE;
		AL_EXIT( AL_DBG_SMI );
		return IB_NOT_DONE;
	}

	/* Hold the cache lock across the attribute handlers below. */
	cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);
	
	switch( p_smp->attr_id )
	{
	case IB_MAD_ATTR_NODE_INFO:
		status = __process_node_info( p_spl_qp_svc, p_mad_wr );
		break;

	case IB_MAD_ATTR_NODE_DESC:
		status = __process_node_desc( p_spl_qp_svc, p_mad_wr );
		break;

	case IB_MAD_ATTR_GUID_INFO:
		status = __process_guid_info( p_spl_qp_svc, p_mad_wr );
		break;

	case IB_MAD_ATTR_P_KEY_TABLE:
		status = __process_pkey_table( p_spl_qp_svc, p_mad_wr );
		break;
		
	case IB_MAD_ATTR_SLVL_TABLE:
		status = __process_slvl_table( p_spl_qp_svc, p_mad_wr );
		break;
		
	case IB_MAD_ATTR_VL_ARBITRATION:
		status = __process_vl_arb_table( p_spl_qp_svc, p_mad_wr );
		break;
		
	default:
		/* Attribute not cached - let the HCA answer. */
		status = IB_NOT_DONE;
		break;
	}

	cl_spinlock_release(&p_spl_qp_svc->cache_lock);

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2197 \r
2198 \r
/*
 * Process a local MAD send work request.
 *
 * The MAD is addressed to this node, so rather than posting it on the
 * special QP it is handed straight to the CA interface (al_local_mad),
 * and the matching receive completion for the response is synthesized
 * and delivered through the MAD dispatcher.
 *
 * Returns IB_SUCCESS for a successfully processed Set (caller triggers
 * a PnP poll), IB_NOT_DONE for a successful Get (no poll needed), or an
 * error status after completing the send with IB_WCS_LOCAL_OP_ERR.
 */
static ib_api_status_t
fwd_local_mad(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t*				p_mad;
	ib_smp_t*				p_smp;
	al_mad_send_t*			p_mad_send;
	ib_mad_element_t*		p_mad_response = NULL;
	ib_mad_t*				p_mad_response_buf;
	ib_api_status_t			status = IB_SUCCESS;
	boolean_t				smp_is_set;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_wr );

	/* Initialize pointers to the MAD work request and outbound MAD. */
	p_mad = get_mad_hdr_from_wr( p_mad_wr );
	p_smp = (ib_smp_t*)p_mad;

	/* Remember whether this is a Set; checked again after local delivery. */
	smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);

	/* Get a MAD element from the pool for the response, if one is expected. */
	p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	if( p_mad_send->p_send_mad->resp_expected )
	{
		status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );
		if( status != IB_SUCCESS )
		{
			/* No response buffer available - fail the send locally. */
			__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
				IB_WCS_LOCAL_OP_ERR );
			AL_EXIT( AL_DBG_SMI );
			return status;
		}
		p_mad_response_buf = p_mad_response->p_mad_buf;
	}
	else
	{
			p_mad_response_buf = NULL;
	}

	/* Adjust directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		CL_ASSERT( !ib_smp_is_response( p_smp ) );

		/*
		 * If this was a self addressed, directed route SMP, increment
		 * the hop pointer in the request before delivery as required
		 * by IBA.  Otherwise, adjustment for remote requests occurs
		 * during inbound processing.
		 */
		if( p_smp->hop_count == 0 )
			p_smp->hop_ptr++;
	}

	/* Forward the locally addressed MAD to the CA interface. */
	status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,
		p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf );

	/* Reset directed route SMPs as required by IBA. */
	if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
	{
		/*
		 * If this was a self addressed, directed route SMP, decrement
		 * the hop pointer in the response before delivery as required
		 * by IBA.  Otherwise, adjustment for remote responses occurs
		 * during outbound processing.
		 */
		if( p_smp->hop_count == 0 )
		{
			/* Adjust the request SMP. */
			p_smp->hop_ptr--;

			/*
			 * Adjust the response SMP.  Note that p_smp is re-pointed at
			 * the response buffer here; below this point (for hop_count==0)
			 * it no longer refers to the request.
			 */
			if( p_mad_response_buf )
			{
				p_smp = (ib_smp_t*)p_mad_response_buf;
				p_smp->hop_ptr--;
			}
		}
	}

	if( status != IB_SUCCESS )
	{
		/* Local delivery failed - release the response and fail the send. */
		if( p_mad_response )
			ib_put_mad( p_mad_response );

		__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,
			IB_WCS_LOCAL_OP_ERR );
		AL_EXIT( AL_DBG_SMI );
		return status;
	}

	/* Check the completion status of this simulated send. */
	if( p_mad_send->p_send_mad->resp_expected )
	{
		/*
		 * The SMI uses PnP polling to refresh the base_lid and lmc.
		 * Polling takes time, so we update the values here to prevent
		 * the failure of LID routed MADs sent immediately following this
		 * assignment.  Check the response to see if the port info was set.
		 */
		if( smp_is_set )
		{
			ib_smp_t*		p_smp_response = NULL;

			switch( p_mad_response_buf->mgmt_class )
			{
			case IB_MCLASS_SUBN_DIR:
				/*
				 * NOTE(review): when hop_count != 0, p_smp still points at
				 * the request SMP rather than the response buffer (the
				 * re-point above only happens for hop_count == 0) - confirm
				 * that checking/using the request here is intended.
				 */
				if( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) 
				{
					p_smp_response = p_smp;
					//p_port_info =
					//	(ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );
				}
				break;

			case IB_MCLASS_SUBN_LID:
				if( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS )
				{
					p_smp_response = (ib_smp_t*)p_mad_response_buf;
					//p_port_info =
					//	(ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf);
				}
				break;

			default:
				break;
			}

			if( p_smp_response )
			{
				switch( p_smp_response->attr_id )
				{
					case IB_MAD_ATTR_PORT_INFO:
						{
							/* Cache the freshly assigned port attributes. */
							ib_port_info_t		*p_port_info =
								(ib_port_info_t*)ib_smp_get_payload_ptr(p_smp_response);
							p_spl_qp_svc->base_lid = p_port_info->base_lid;
							p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );
							p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid;
							p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info );

							/* Only a non-zero M_Key replaces the cached one. */
							if(p_port_info->m_key)
								p_spl_qp_svc->m_key = p_port_info->m_key;
							/* Bit 7 of subnet_timeout carries the client-reregister flag. */
							if (p_port_info->subnet_timeout & 0x80)
							{
								AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
									("Client reregister event, setting sm_lid to 0.\n"));
								ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);
								p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->
									p_port_attr->sm_lid= 0;
								ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);
							}
						}
						break;
					case IB_MAD_ATTR_P_KEY_TABLE:
					case IB_MAD_ATTR_GUID_INFO:
					case IB_MAD_ATTR_SLVL_TABLE:
					case IB_MAD_ATTR_VL_ARBITRATION:
						/* Refresh the local attribute cache with the new values. */
						spl_qp_svc_update_cache( p_spl_qp_svc, p_smp_response);
						break;
					default :
						break;
				}
			}
		}
		

		/* Construct the receive MAD element. */
		p_mad_response->status		= IB_WCS_SUCCESS;
		p_mad_response->remote_qp	= p_mad_wr->send_wr.dgrm.ud.remote_qp;
		p_mad_response->remote_lid	= p_spl_qp_svc->base_lid;
		if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )
		{
			p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data;
			p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE;
		}

		/*
		 * Hand the receive MAD element to the dispatcher before completing
		 * the send.  This guarantees that the send request cannot time out.
		 */
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response );
	}
	
	__complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,IB_WCS_SUCCESS);

	
	
	/* If the SMP was a Get, no need to trigger a PnP poll. */
	if( status == IB_SUCCESS && !smp_is_set )
		status = IB_NOT_DONE;

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2402 \r
2403 \r
2404 \r
2405 /*\r
2406  * Asynchronous processing thread callback to send a local MAD.\r
2407  */\r
2408 void\r
2409 send_local_mad_cb(\r
2410         IN                              cl_async_proc_item_t*           p_item )\r
2411 {\r
2412         spl_qp_svc_t*                   p_spl_qp_svc;\r
2413         ib_api_status_t                 status;\r
2414 \r
2415         AL_ENTER( AL_DBG_SMI );\r
2416 \r
2417         CL_ASSERT( p_item );\r
2418         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
2419 \r
2420         /* Process a local MAD send work request. */\r
2421         CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
2422         status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
2423 \r
2424         /*\r
2425          * If we successfully processed a local MAD, which could have changed\r
2426          * something (e.g. the LID) on the HCA.  Scan for changes.\r
2427          */\r
2428         if( status == IB_SUCCESS )\r
2429                 pnp_poll();\r
2430 \r
2431         /*\r
2432          * Clear the local MAD pointer to allow processing of other MADs.\r
2433          * This is done after polling for attribute changes to ensure that\r
2434          * subsequent MADs pick up any changes performed by this one.\r
2435          */\r
2436         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2437         p_spl_qp_svc->local_mad_wr = NULL;\r
2438         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2439 \r
2440         /* Continue processing any queued MADs on the QP. */\r
2441         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2442 \r
2443         /* No longer in use by the asynchronous processing thread. */\r
2444         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2445 \r
2446         AL_EXIT( AL_DBG_SMI );\r
2447 }\r
2448 \r
2449 \r
2450 \r
2451 /*\r
2452  * Special QP send completion callback.\r
2453  */\r
2454 void\r
2455 spl_qp_send_comp_cb(\r
2456         IN              const   ib_cq_handle_t                          h_cq,\r
2457         IN                              void*                                           cq_context )\r
2458 {\r
2459         spl_qp_svc_t*                   p_spl_qp_svc;\r
2460 \r
2461         AL_ENTER( AL_DBG_SMI );\r
2462 \r
2463         CL_ASSERT( cq_context );\r
2464         p_spl_qp_svc = cq_context;\r
2465 \r
2466 #if defined( CL_USE_MUTEX )\r
2467 \r
2468         /* Queue an asynchronous processing item to process sends. */\r
2469         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2470         if( !p_spl_qp_svc->send_async_queued )\r
2471         {\r
2472                 p_spl_qp_svc->send_async_queued = TRUE;\r
2473                 ref_al_obj( &p_spl_qp_svc->obj );\r
2474                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
2475         }\r
2476         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2477 \r
2478 #else\r
2479 \r
2480         /* Invoke the callback directly. */\r
2481         CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
2482         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
2483 \r
2484         /* Continue processing any queued MADs on the QP. */\r
2485         special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2486 \r
2487 #endif\r
2488 \r
2489         AL_EXIT( AL_DBG_SMI );\r
2490 }\r
2491 \r
2492 \r
2493 \r
2494 #if defined( CL_USE_MUTEX )\r
2495 void\r
2496 spl_qp_send_async_cb(\r
2497         IN                              cl_async_proc_item_t*           p_item )\r
2498 {\r
2499         spl_qp_svc_t*                   p_spl_qp_svc;\r
2500         ib_api_status_t                 status;\r
2501 \r
2502         AL_ENTER( AL_DBG_SMI );\r
2503 \r
2504         CL_ASSERT( p_item );\r
2505         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );\r
2506 \r
2507         /* Reset asynchronous queue flag. */\r
2508         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2509         p_spl_qp_svc->send_async_queued = FALSE;\r
2510         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2511 \r
2512         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );\r
2513 \r
2514         /* Continue processing any queued MADs on the QP. */\r
2515         status = special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
2516         CL_ASSERT( status == IB_SUCCESS );\r
2517 \r
2518         deref_al_obj( &p_spl_qp_svc->obj );\r
2519 \r
2520         AL_EXIT( AL_DBG_SMI );\r
2521 }\r
2522 #endif\r
2523 \r
2524 \r
2525 \r
2526 /*\r
2527  * Special QP receive completion callback.\r
2528  */\r
2529 void\r
2530 spl_qp_recv_comp_cb(\r
2531         IN              const   ib_cq_handle_t                          h_cq,\r
2532         IN                              void*                                           cq_context )\r
2533 {\r
2534         spl_qp_svc_t*                   p_spl_qp_svc;\r
2535 \r
2536         AL_ENTER( AL_DBG_SMI );\r
2537 \r
2538         CL_ASSERT( cq_context );\r
2539         p_spl_qp_svc = cq_context;\r
2540 \r
2541 #if defined( CL_USE_MUTEX )\r
2542 \r
2543         /* Queue an asynchronous processing item to process receives. */\r
2544         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2545         if( !p_spl_qp_svc->recv_async_queued )\r
2546         {\r
2547                 p_spl_qp_svc->recv_async_queued = TRUE;\r
2548                 ref_al_obj( &p_spl_qp_svc->obj );\r
2549                 cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
2550         }\r
2551         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2552 \r
2553 #else\r
2554 \r
2555         CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
2556         spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
2557 \r
2558 #endif\r
2559 \r
2560         AL_EXIT( AL_DBG_SMI );\r
2561 }\r
2562 \r
2563 \r
2564 \r
2565 #if defined( CL_USE_MUTEX )\r
2566 void\r
2567 spl_qp_recv_async_cb(\r
2568         IN                              cl_async_proc_item_t*           p_item )\r
2569 {\r
2570         spl_qp_svc_t*                   p_spl_qp_svc;\r
2571 \r
2572         AL_ENTER( AL_DBG_SMI );\r
2573 \r
2574         CL_ASSERT( p_item );\r
2575         p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );\r
2576 \r
2577         /* Reset asynchronous queue flag. */\r
2578         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2579         p_spl_qp_svc->recv_async_queued = FALSE;\r
2580         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2581 \r
2582         spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );\r
2583 \r
2584         deref_al_obj( &p_spl_qp_svc->obj );\r
2585 \r
2586         AL_EXIT( AL_DBG_SMI );\r
2587 }\r
2588 #endif\r
2589 \r
2590 \r
2591 \r
2592 /*\r
2593  * Special QP completion handler.\r
2594  */\r
2595 void\r
2596 spl_qp_comp(\r
2597         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
2598         IN              const   ib_cq_handle_t                          h_cq,\r
2599         IN                              ib_wc_type_t                            wc_type )\r
2600 {\r
2601         ib_wc_t                                 wc;\r
2602         ib_wc_t*                                p_free_wc = &wc;\r
2603         ib_wc_t*                                p_done_wc;\r
2604         al_mad_wr_t*                    p_mad_wr;\r
2605         al_mad_element_t*               p_al_mad;\r
2606         ib_mad_element_t*               p_mad_element;\r
2607         ib_smp_t*                               p_smp;\r
2608         ib_api_status_t                 status;\r
2609 \r
2610         AL_ENTER( AL_DBG_SMI_CB );\r
2611 \r
2612         CL_ASSERT( p_spl_qp_svc );\r
2613         CL_ASSERT( h_cq );\r
2614 \r
2615         /* Check the QP state and guard against error handling. */\r
2616         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2617         if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
2618         {\r
2619                 cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2620                 return;\r
2621         }\r
2622         cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
2623         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2624 \r
2625         wc.p_next = NULL;\r
2626         /* Process work completions. */\r
2627         while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )\r
2628         {\r
2629                 /* Process completions one at a time. */\r
2630                 CL_ASSERT( p_done_wc );\r
2631 \r
2632                 /* Flushed completions are handled elsewhere. */\r
2633                 if( wc.status == IB_WCS_WR_FLUSHED_ERR )\r
2634                 {\r
2635                         p_free_wc = &wc;\r
2636                         continue;\r
2637                 }\r
2638 \r
2639                 /*\r
2640                  * Process the work completion.  Per IBA specification, the\r
2641                  * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.\r
2642                  * Use the wc_type parameter.\r
2643                  */\r
2644                 switch( wc_type )\r
2645                 {\r
2646                 case IB_WC_SEND:\r
2647                         /* Get a pointer to the MAD work request. */\r
2648                         p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);\r
2649 \r
2650                         /* Remove the MAD work request from the service tracking queue. */\r
2651                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2652                         cl_qlist_remove_item( &p_spl_qp_svc->send_queue,\r
2653                                 &p_mad_wr->list_item );\r
2654                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2655 \r
2656                         /* Reset directed route SMPs as required by IBA. */\r
2657                         p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
2658                         if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
2659                         {\r
2660                                 if( ib_smp_is_response( p_smp ) )\r
2661                                         p_smp->hop_ptr++;\r
2662                                 else\r
2663                                         p_smp->hop_ptr--;\r
2664                         }\r
2665 \r
2666                         /* Report the send completion to the dispatcher. */\r
2667                         mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );\r
2668                         break;\r
2669 \r
2670                 case IB_WC_RECV:\r
2671 \r
2672                         /* Initialize pointers to the MAD element. */\r
2673                         p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);\r
2674                         p_mad_element = &p_al_mad->element;\r
2675 \r
2676                         /* Remove the AL MAD element from the service tracking list. */\r
2677                         cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
2678 \r
2679                         cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
2680                                 &p_al_mad->list_item );\r
2681 \r
2682                         /* Replenish the receive buffer. */\r
2683                         spl_qp_svc_post_recvs( p_spl_qp_svc );\r
2684                         cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
2685 \r
2686                         /* Construct the MAD element from the receive work completion. */\r
2687                         build_mad_recv( p_mad_element, &wc );\r
2688 \r
2689                         /* Process the received MAD. */\r
2690                         status = process_mad_recv( p_spl_qp_svc, p_mad_element );\r
2691 \r
2692                         /* Discard this MAD on error. */\r
2693                         if( status != IB_SUCCESS )\r
2694                         {\r
2695                                 status = ib_put_mad( p_mad_element );\r
2696                                 CL_ASSERT( status == IB_SUCCESS );\r
2697                         }\r
2698                         break;\r
2699 \r
2700                 default:\r
2701                         CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );\r
2702                         break;\r
2703                 }\r
2704 \r
2705                 if( wc.status != IB_WCS_SUCCESS )\r
2706                 {\r
2707                         AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
2708                                 ("special QP completion error: %s! internal syndrome 0x%I64x\n",\r
2709                                 ib_get_wc_status_str( wc.status ), wc.vendor_specific) );\r
2710 \r
2711                         /* Reset the special QP service and return. */\r
2712                         spl_qp_svc_reset( p_spl_qp_svc );\r
2713                 }\r
2714                 p_free_wc = &wc;\r
2715         }\r
2716 \r
2717         /* Rearm the CQ. */\r
2718         status = ib_rearm_cq( h_cq, FALSE );\r
2719         CL_ASSERT( status == IB_SUCCESS );\r
2720 \r
2721         cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
2722         AL_EXIT( AL_DBG_SMI_CB );\r
2723 }\r
2724 \r
2725 \r
2726 \r
/*
 * Process a received MAD.
 *
 * Validates the MAD (including the IBA directed-route SMP checks) and
 * routes it: to the MAD dispatcher, to the local CA interface, to the
 * remote SM (forwarded traps), or discards it.  On discard, IB_ERROR is
 * returned and the caller releases the MAD element.
 */
ib_api_status_t
process_mad_recv(
	IN				spl_qp_svc_t*				p_spl_qp_svc,
	IN				ib_mad_element_t*			p_mad_element )
{
	ib_smp_t*				p_smp;
	mad_route_t				route;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_spl_qp_svc );
	CL_ASSERT( p_mad_element );

	/*
	 * If the CA has a HW agent then this MAD should have been
	 * consumed below verbs.  The fact that it was received here
	 * indicates that it should be forwarded to the dispatcher
	 * for delivery to a class manager.  Otherwise, determine how
	 * the MAD should be routed.
	 */
	route = ROUTE_DISPATCHER;
	if( check_local_mad( p_spl_qp_svc->h_qp ) )
	{
		/*
		 * SMP and GMP processing is branched here to handle overlaps
		 * between class methods and attributes.
		 */
		switch( p_mad_element->p_mad_buf->mgmt_class )
		{
		case IB_MCLASS_SUBN_DIR:
			/* Perform special checks on directed route SMPs. */
			p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;

			/* Discard SMPs whose hop fields exceed the IBA maximum. */
			if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||
				( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )
			{
				route = ROUTE_DISCARD;
			}
			else if( ib_smp_is_response( p_smp ) )
			{
				/*
				 * This node is the destination of the response.  Discard
				 * it if the source LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_slid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_ptr == 1 )
					{
						p_smp->hop_ptr--;		/* Adjust ptr per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_slid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
						/* dr_slid is outside this port's LID range. */
						route = ROUTE_DISCARD;
				}
			}
			else
			{
				/*
				 * This node is the destination of the request.  Discard
				 * it if the destination LID or hop pointer is incorrect.
				 */
				if( p_smp->dr_dlid == IB_LID_PERMISSIVE )
				{
					if( p_smp->hop_count == p_smp->hop_ptr )
					{
						p_smp->return_path[ p_smp->hop_ptr++ ] =
							p_spl_qp_svc->port_num;	/* Set path per IBA spec. */
					}
					else
					{
						route = ROUTE_DISCARD;
					}
				}
				else if( ( p_smp->dr_dlid <  p_spl_qp_svc->base_lid ) ||
						 ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +
							( 1 << p_spl_qp_svc->lmc ) ) )
				{
					/* dr_dlid is outside this port's LID range. */
					route = ROUTE_DISCARD;
				}
			}

			if( route == ROUTE_DISCARD ) break;
			/* else fall through next case - valid DR SMPs share LID routing. */

		case IB_MCLASS_SUBN_LID:
			route = route_recv_smp( p_mad_element );
			break;

		case IB_MCLASS_PERF:
			/* Process the received GMP. */
			switch( p_mad_element->p_mad_buf->method )
			{
			case IB_MAD_METHOD_GET:
			case IB_MAD_METHOD_SET:
				/* Get/Set requests are answered by the local PMA. */
				route = ROUTE_LOCAL;
				break;
			default:
				break;
			}
			break;

		case IB_MCLASS_BM:
			route = route_recv_gmp( p_mad_element );
			break;

		case IB_MCLASS_SUBN_ADM:
		case IB_MCLASS_DEV_MGMT:
		case IB_MCLASS_COMM_MGMT:
		case IB_MCLASS_SNMP:
			/* These classes go to the dispatcher (route default). */
			break;

		default:
			/* Route vendor specific MADs to the HCA provider. */
			if( ib_class_is_vendor_specific(
				p_mad_element->p_mad_buf->mgmt_class ) )
			{
				route = route_recv_gmp( p_mad_element );
			}
			break;
		}
	}

	/* Route the MAD. */
	if( is_discard( route ) )
		status = IB_ERROR;
	else if( is_dispatcher( route ) )
		status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );
	else if( is_remote( route ) )
		status = forward_sm_trap( p_spl_qp_svc, p_mad_element );
	else
		status = recv_local_mad( p_spl_qp_svc, p_mad_element );

	AL_EXIT( AL_DBG_SMI );
	return status;
}
2873 \r
2874 \r
2875 \r
2876 /*\r
2877  * Route a received SMP.\r
2878  */\r
2879 mad_route_t\r
2880 route_recv_smp(\r
2881         IN                              ib_mad_element_t*                       p_mad_element )\r
2882 {\r
2883         mad_route_t                             route;\r
2884 \r
2885         AL_ENTER( AL_DBG_SMI );\r
2886 \r
2887         CL_ASSERT( p_mad_element );\r
2888 \r
2889         /* Process the received SMP. */\r
2890         switch( p_mad_element->p_mad_buf->method )\r
2891         {\r
2892         case IB_MAD_METHOD_GET:\r
2893         case IB_MAD_METHOD_SET:\r
2894                 route = route_recv_smp_attr( p_mad_element );\r
2895                 break;\r
2896 \r
2897         case IB_MAD_METHOD_TRAP:\r
2898                 /*\r
2899                  * Special check to route locally generated traps to the remote SM.\r
2900                  * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
2901                  * IB_RECV_OPT_FORWARD flag.\r
2902                  *\r
2903                  * Note that because forwarded traps use AL MAD services, the upper\r
2904                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2905                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2906                  * TID.\r
2907                  */\r
2908                 AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("Trap TID = 0x%08x:%08x \n",\r
2909                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
2910                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
2911 \r
2912                 route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
2913                         ROUTE_REMOTE : ROUTE_DISPATCHER;\r
2914                 break;\r
2915 \r
2916         case IB_MAD_METHOD_TRAP_REPRESS:\r
2917                 /*\r
2918                  * Note that because forwarded traps use AL MAD services, the upper\r
2919                  * 32-bits of the TID are reserved by the access layer.  When matching\r
2920                  * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
2921                  * TID.\r
2922                  */\r
2923                 AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("TrapRepress TID = 0x%08x:%08x \n",\r
2924                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
2925                         ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
2926 \r
2927                 route = ROUTE_LOCAL;\r
2928                 break;\r
2929 \r
2930         default:\r
2931                 route = ROUTE_DISPATCHER;\r
2932                 break;\r
2933         }\r
2934 \r
2935         AL_EXIT( AL_DBG_SMI );\r
2936         return route;\r
2937 }\r
2938 \r
2939 \r
2940 \r
2941 /*\r
2942  * Route received SMP attributes.\r
2943  */\r
2944 mad_route_t\r
2945 route_recv_smp_attr(\r
2946         IN                              ib_mad_element_t*                       p_mad_element )\r
2947 {\r
2948         mad_route_t                             route;\r
2949 \r
2950         AL_ENTER( AL_DBG_SMI );\r
2951 \r
2952         CL_ASSERT( p_mad_element );\r
2953 \r
2954         /* Process the received SMP attributes. */\r
2955         switch( p_mad_element->p_mad_buf->attr_id )\r
2956         {\r
2957         case IB_MAD_ATTR_NODE_DESC:\r
2958         case IB_MAD_ATTR_NODE_INFO:\r
2959         case IB_MAD_ATTR_GUID_INFO:\r
2960         case IB_MAD_ATTR_PORT_INFO:\r
2961         case IB_MAD_ATTR_P_KEY_TABLE:\r
2962         case IB_MAD_ATTR_SLVL_TABLE:\r
2963         case IB_MAD_ATTR_VL_ARBITRATION:\r
2964         case IB_MAD_ATTR_VENDOR_DIAG:\r
2965         case IB_MAD_ATTR_LED_INFO:\r
2966         case IB_MAD_ATTR_SWITCH_INFO:\r
2967                 route = ROUTE_LOCAL;\r
2968                 break;\r
2969 \r
2970         default:\r
2971                 route = ROUTE_DISPATCHER;\r
2972                 break;\r
2973         }\r
2974 \r
2975         AL_EXIT( AL_DBG_SMI );\r
2976         return route;\r
2977 }\r
2978 \r
2979 \r
2980 /*\r
2981  * Route a received GMP.\r
2982  */\r
2983 mad_route_t\r
2984 route_recv_gmp(\r
2985         IN                              ib_mad_element_t*                       p_mad_element )\r
2986 {\r
2987         mad_route_t                             route;\r
2988 \r
2989         AL_ENTER( AL_DBG_SMI );\r
2990 \r
2991         CL_ASSERT( p_mad_element );\r
2992 \r
2993         /* Process the received GMP. */\r
2994         switch( p_mad_element->p_mad_buf->method )\r
2995         {\r
2996         case IB_MAD_METHOD_GET:\r
2997         case IB_MAD_METHOD_SET:\r
2998                 /* Route vendor specific MADs to the HCA provider. */\r
2999                 if( ib_class_is_vendor_specific(\r
3000                         p_mad_element->p_mad_buf->mgmt_class ) )\r
3001                 {\r
3002                         route = ROUTE_LOCAL;\r
3003                 }\r
3004                 else\r
3005                 {\r
3006                         route = route_recv_gmp_attr( p_mad_element );\r
3007                 }\r
3008                 break;\r
3009 \r
3010         default:\r
3011                 route = ROUTE_DISPATCHER;\r
3012                 break;\r
3013         }\r
3014 \r
3015         AL_EXIT( AL_DBG_SMI );\r
3016         return route;\r
3017 }\r
3018 \r
3019 \r
3020 \r
3021 /*\r
3022  * Route received GMP attributes.\r
3023  */\r
3024 mad_route_t\r
3025 route_recv_gmp_attr(\r
3026         IN                              ib_mad_element_t*                       p_mad_element )\r
3027 {\r
3028         mad_route_t                             route;\r
3029 \r
3030         AL_ENTER( AL_DBG_SMI );\r
3031 \r
3032         CL_ASSERT( p_mad_element );\r
3033 \r
3034         /* Process the received GMP attributes. */\r
3035         if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
3036                 route = ROUTE_LOCAL;\r
3037         else\r
3038                 route = ROUTE_DISPATCHER;\r
3039 \r
3040         AL_EXIT( AL_DBG_SMI );\r
3041         return route;\r
3042 }\r
3043 \r
3044 \r
3045 \r
3046 /*\r
3047  * Forward a locally generated Subnet Management trap.\r
3048  */\r
3049 ib_api_status_t\r
3050 forward_sm_trap(\r
3051         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
3052         IN                              ib_mad_element_t*                       p_mad_element )\r
3053 {\r
3054         ib_av_attr_t                    av_attr;\r
3055         ib_api_status_t                 status;\r
3056 \r
3057         AL_ENTER( AL_DBG_SMI );\r
3058 \r
3059         CL_ASSERT( p_spl_qp_svc );\r
3060         CL_ASSERT( p_mad_element );\r
3061 \r
3062         /* Check the SMP class. */\r
3063         if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )\r
3064         {\r
3065                 /*\r
3066                  * Per IBA Specification Release 1.1 Section 14.2.2.1,\r
3067                  * "C14-5: Only a SM shall originate a directed route SMP."\r
3068                  * Therefore all traps should be LID routed; drop this one.\r
3069                  */\r
3070                 AL_EXIT( AL_DBG_SMI );\r
3071                 return IB_ERROR;\r
3072         }\r
3073 \r
3074         if(p_spl_qp_svc->sm_lid == p_spl_qp_svc->base_lid)\r
3075                 return mad_disp_recv_done(p_spl_qp_svc->h_mad_disp,p_mad_element);\r
3076         \r
3077         /* Create an address vector for the SM. */\r
3078         cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
3079         av_attr.port_num = p_spl_qp_svc->port_num;\r
3080         av_attr.sl = p_spl_qp_svc->sm_sl;\r
3081         av_attr.dlid = p_spl_qp_svc->sm_lid;\r
3082         av_attr.grh_valid = FALSE;\r
3083 \r
3084         status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
3085                 &av_attr, &p_mad_element->h_av );\r
3086 \r
3087         if( status != IB_SUCCESS )\r
3088         {\r
3089                 AL_EXIT( AL_DBG_SMI );\r
3090                 return status;\r
3091         }\r
3092 \r
3093         /* Complete the initialization of the MAD element. */\r
3094         p_mad_element->p_next = NULL;\r
3095         p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;\r
3096         p_mad_element->resp_expected = FALSE;\r
3097 \r
3098         /* Clear context1 for proper send completion callback processing. */\r
3099         p_mad_element->context1 = NULL;\r
3100 \r
3101         /*\r
3102          * Forward the trap.  Note that because forwarded traps use AL MAD\r
3103          * services, the upper 32-bits of the TID are reserved by the access\r
3104          * layer.  When matching a Trap Repress MAD, the SMA must only use\r
3105          * the lower 32-bits of the TID.\r
3106          */\r
3107         status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );\r
3108 \r
3109         if( status != IB_SUCCESS )\r
3110                 ib_destroy_av( p_mad_element->h_av );\r
3111 \r
3112         AL_EXIT( AL_DBG_SMI );\r
3113         return status;\r
3114 }\r
3115 \r
3116 \r
3117 /*\r
3118  * Process a locally routed MAD received from the special QP.\r
3119  */\r
3120 ib_api_status_t\r
3121 recv_local_mad(\r
3122         IN                              spl_qp_svc_t*                           p_spl_qp_svc,\r
3123         IN                              ib_mad_element_t*                       p_mad_request )\r
3124 {\r
3125         ib_mad_t*                               p_mad_hdr;\r
3126         ib_api_status_t                 status;\r
3127 \r
3128         AL_ENTER( AL_DBG_SMI );\r
3129 \r
3130         CL_ASSERT( p_spl_qp_svc );\r
3131         CL_ASSERT( p_mad_request );\r
3132 \r
3133         /* Initialize the MAD element. */\r
3134         p_mad_hdr = ib_get_mad_buf( p_mad_request );\r
3135         p_mad_request->context1 = p_mad_request;\r
3136 \r
3137         /* Save the TID. */\r
3138         p_mad_request->context2 =\r
3139                 (void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );\r
3140 /*\r
3141  * Disable warning about passing unaligned 64-bit value.\r