/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <iba/ib_al.h>
#include <complib/cl_byteswap.h>
#include <complib/cl_timer.h>

#include "al.h"
#include "al_debug.h"
#include "al_cq.h"
#include "al_mad.h"
#include "al_qp.h"
#include "al_res_mgr.h"
#include "al_verbs.h"

#include "ib_common.h"


#define MAX_TIME                CL_CONST64(0xFFFFFFFFFFFFFFFF)
#define MAD_VECTOR_SIZE         8
#define MAX_METHOD              127
#define DEFAULT_RMPP_VERSION    1

#define AL_RMPP_WINDOW          16          /* Max size of RMPP window */
#define AL_REASSEMBLY_TIMEOUT   5000        /* 5 seconds */
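
/*
 * Note on MAX_METHOD: the registration and lookup code below only ever
 * indexes the per-class method tables with the 7-bit method value; the
 * response bit is masked off (IB_MAD_METHOD_RESP_MASK) in
 * mad_disp_recv_done() before the table is consulted.
 */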

static void
__cleanup_mad_disp(
    IN              al_obj_t                    *p_obj );

static void
__free_mad_disp(
    IN              al_obj_t                    *p_obj );

static cl_status_t
__init_mad_reg(
    IN              void* const                 p_element,
    IN              void*                       context );

static cl_status_t
__init_version_entry(
    IN              void* const                 p_element,
    IN              void*                       context );

static void
__destroy_version_entry(
    IN              void* const                 p_element,
    IN              void*                       context );

static cl_status_t
__init_class_entry(
    IN              void* const                 p_element,
    IN              void*                       context );

static void
__destroy_class_entry(
    IN              void* const                 p_element,
    IN              void*                       context );

static __inline uint8_t
__mgmt_class_index(
    IN      const   uint8_t                     mgmt_class );

static __inline uint8_t
__mgmt_version_index(
    IN      const   uint8_t                     mgmt_version );

static boolean_t
__mad_disp_reg_unsol(
    IN      const   al_mad_disp_handle_t        h_mad_disp,
    IN      const   al_mad_reg_handle_t         h_mad_reg,
    IN      const   ib_mad_svc_t                *p_mad_svc );

static boolean_t
__use_tid_routing(
    IN      const   ib_mad_t* const             p_mad_hdr,
    IN      const   boolean_t                   are_we_sender );

/*
 * Issue a send request to the MAD dispatcher.
 */
static void
__mad_disp_queue_send(
    IN      const   al_mad_reg_handle_t         h_mad_reg,
    IN              al_mad_wr_t* const          p_mad_wr );

static inline void
__mad_disp_resume_send(
    IN      const   al_mad_reg_handle_t         h_mad_reg );

static void
__destroying_mad_svc(
    IN              struct _al_obj              *p_obj );

static void
__cleanup_mad_svc(
    IN              struct _al_obj              *p_obj );

static void
__send_timer_cb(
    IN              void                        *context );

static void
__check_send_queue(
    IN              ib_mad_svc_handle_t         h_mad_svc );

static void
__recv_timer_cb(
    IN              void                        *context );

static ib_api_status_t
__init_send_mad(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN      const   ib_mad_send_handle_t        h_send,
    IN              ib_mad_element_t* const     p_mad_element );

static boolean_t
__does_send_req_rmpp(
    IN      const   ib_mad_svc_type_t           mad_svc_type,
    IN      const   ib_mad_element_t* const     p_mad_element,
        OUT         uint8_t                     *p_rmpp_version );

static void
__queue_mad_wr(
    IN      const   al_mad_reg_handle_t         h_mad_reg,
    IN      const   ib_mad_send_handle_t        h_send );

static void
__queue_rmpp_seg(
    IN      const   al_mad_reg_handle_t         h_mad_reg,
    IN              ib_mad_send_handle_t        h_send );

static ib_api_status_t
__create_send_av(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_send_handle_t        h_send );

static void
__cleanup_mad_send(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_send_handle_t        h_send );

static __inline void
__set_retry_time(
    IN              ib_mad_send_handle_t        h_send );

static void
__mad_svc_send_done(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              al_mad_wr_t                 *p_mad_wr,
    IN              ib_wc_t                     *p_wc );

static boolean_t
__is_send_mad_done(
    IN              ib_mad_send_handle_t        h_send,
    IN              ib_wc_t                     *p_wc );

static void
__notify_send_comp(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_send_handle_t        h_send,
    IN              ib_wc_status_t              wc_status );

static void
__mad_svc_recv_done(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_element_t            *p_mad_element );

static void
__process_recv_resp(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_element_t            *p_mad_element );

static cl_status_t
__do_rmpp_recv(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN  OUT         ib_mad_element_t            **pp_mad_element );

static __inline boolean_t
__recv_requires_rmpp(
    IN      const   ib_mad_svc_type_t           mad_svc_type,
    IN      const   ib_mad_element_t* const     p_mad_element );

static __inline boolean_t
__is_internal_send(
    IN      const   ib_mad_svc_type_t           mad_svc_type,
    IN      const   ib_mad_element_t* const     p_mad_element );

static cl_status_t
__process_rmpp_data(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN  OUT         ib_mad_element_t            **pp_mad_element );

static void
__process_rmpp_ack(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_element_t            *p_mad_element );

static void
__process_rmpp_nack(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_element_t            *p_mad_element );

static cl_status_t
__process_segment(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              al_mad_rmpp_t               *p_rmpp,
    IN  OUT         ib_mad_element_t            **pp_mad_element,
        OUT         ib_mad_element_t            **pp_rmpp_resp_mad );

static al_mad_rmpp_t*
__find_rmpp(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN  OUT         ib_mad_element_t            *p_mad_element );

static al_mad_rmpp_t*
__get_mad_rmpp(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              ib_mad_element_t            *p_mad_element );

static void
__put_mad_rmpp(
    IN              ib_mad_svc_handle_t         h_mad_svc,
    IN              al_mad_rmpp_t               *p_rmpp );

static void
__init_reply_element(
    IN              ib_mad_element_t            *p_dst_element,
    IN              ib_mad_element_t            *p_src_element );

static ib_mad_element_t*
__get_rmpp_ack(
    IN              al_mad_rmpp_t               *p_rmpp );

ib_net64_t
__get_send_tid(
    IN              ib_mad_send_handle_t        h_send )
{
    return ((ib_mad_t*)ib_get_mad_buf( h_send->p_send_mad ))->trans_id;
}


ib_mad_t*
get_mad_hdr_from_wr(
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_mad_send_handle_t    h_send;

    CL_ASSERT( p_mad_wr );

    h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
    return h_send->p_send_mad->p_mad_buf;
}


/*
 * Construct a MAD element from a receive work completion.
 */
void
build_mad_recv(
    IN              ib_mad_element_t*           p_mad_element,
    IN              ib_wc_t*                    p_wc )
{
    CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

    CL_ASSERT( p_mad_element );
    CL_ASSERT( p_wc );

    /* Build the MAD element from the work completion. */
    p_mad_element->status       = p_wc->status;
    p_mad_element->remote_qp    = p_wc->recv.ud.remote_qp;

    /*
     * We assume all communicating managers using MAD services use
     * the same QKEY.
     */

    /*
     * Mellanox workaround:
     * The Q_KEY from the QP context must be used if the high bit is
     * set in the Q_KEY of the work request.  See section 10.2.5 on
     * Q_KEYs, compliance statement C10-15.  Enabling this would allow
     * future non-special QPs to provide MAD-level service capability
     * and to use SAR in a generic way.
     */

    /*
     * p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
     */

    p_mad_element->remote_qkey  = IB_QP1_WELL_KNOWN_Q_KEY;
    p_mad_element->remote_lid   = p_wc->recv.ud.remote_lid;
    p_mad_element->remote_sl    = p_wc->recv.ud.remote_sl;
    p_mad_element->pkey_index   = p_wc->recv.ud.pkey_index;
    p_mad_element->path_bits    = p_wc->recv.ud.path_bits;
    p_mad_element->recv_opt     = p_wc->recv.ud.recv_opt;
    p_mad_element->grh_valid    = p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID;

    if( p_wc->recv.ud.recv_opt & IB_RECV_OPT_IMMEDIATE )
        p_mad_element->immediate_data = p_wc->recv.ud.immediate_data;

    CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}


/*
 *
 * MAD Dispatcher.
 *
 */


ib_api_status_t
create_mad_disp(
    IN              al_obj_t* const             p_parent_obj,
    IN      const   ib_qp_handle_t              h_qp,
    IN              al_mad_disp_handle_t* const ph_mad_disp )
{
    al_mad_disp_handle_t    h_mad_disp;
    ib_api_status_t         status;
    cl_status_t             cl_status;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    h_mad_disp = cl_zalloc( sizeof( al_mad_disp_t ) );
    if( !h_mad_disp )
    {
        CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("insufficient memory\n") );
        return IB_INSUFFICIENT_MEMORY;
    }

    /* Initialize the MAD dispatcher. */
    cl_vector_construct( &h_mad_disp->client_vector );
    cl_vector_construct( &h_mad_disp->version_vector );
    construct_al_obj( &h_mad_disp->obj, AL_OBJ_TYPE_MAD_DISP );
    status = init_al_obj( &h_mad_disp->obj, h_mad_disp, TRUE,
        NULL, __cleanup_mad_disp, __free_mad_disp );
    if( status != IB_SUCCESS )
    {
        CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("init obj: %s\n",
            ib_get_err_str(status)) );
        __free_mad_disp( &h_mad_disp->obj );
        return status;
    }
    status = attach_al_obj( p_parent_obj, &h_mad_disp->obj );
    if( status != IB_SUCCESS )
    {
        h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
        AL_TRACE_EXIT( AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
        return status;
    }

    /* Obtain a reference to the QP to post sends to. */
    h_mad_disp->h_qp = h_qp;
    ref_al_obj( &h_qp->obj );

    /* Create the client vector. */
    cl_status = cl_vector_init( &h_mad_disp->client_vector, 1, MAD_VECTOR_SIZE,
        sizeof( al_mad_disp_reg_t ), __init_mad_reg, NULL, h_mad_disp );
    if( cl_status != CL_SUCCESS )
    {
        h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
        return ib_convert_cl_status( cl_status );
    }

    /* Create the version vector. */
    cl_status = cl_vector_init( &h_mad_disp->version_vector,
        1, 1, sizeof( cl_vector_t ), __init_version_entry,
        __destroy_version_entry, &h_mad_disp->version_vector );
    if( cl_status != CL_SUCCESS )
    {
        h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
        return ib_convert_cl_status( cl_status );
    }

    *ph_mad_disp = h_mad_disp;

    /* Release the reference taken in init_al_obj. */
    deref_al_obj( &h_mad_disp->obj );

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    return IB_SUCCESS;
}


static void
__cleanup_mad_disp(
    IN              al_obj_t                    *p_obj )
{
    al_mad_disp_handle_t    h_mad_disp;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    CL_ASSERT( p_obj );
    h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );

    /* Detach from the QP that we were using. */
    if( h_mad_disp->h_qp )
        deref_al_obj( &h_mad_disp->h_qp->obj );

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}


static void
__free_mad_disp(
    IN              al_obj_t                    *p_obj )
{
    al_mad_disp_handle_t    h_mad_disp;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    CL_ASSERT( p_obj );
    h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );

    cl_vector_destroy( &h_mad_disp->client_vector );
    cl_vector_destroy( &h_mad_disp->version_vector );
    destroy_al_obj( p_obj );
    cl_free( h_mad_disp );
    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}


static al_mad_reg_handle_t
__mad_disp_reg(
    IN      const   al_mad_disp_handle_t        h_mad_disp,
    IN      const   ib_mad_svc_handle_t         h_mad_svc,
    IN      const   ib_mad_svc_t                *p_mad_svc,
    IN      const   pfn_mad_svc_send_done_t     pfn_send_done,
    IN      const   pfn_mad_svc_recv_done_t     pfn_recv_done )
{
    al_mad_reg_handle_t     h_mad_reg;
    size_t                  i;
    cl_status_t             cl_status;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    cl_spinlock_acquire( &h_mad_disp->obj.lock );

    /* Find an empty slot in the client vector for the registration. */
    for( i = 0; i < cl_vector_get_size( &h_mad_disp->client_vector ); i++ )
    {
        h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );
        if( !h_mad_reg->ref_cnt )
            break;
    }
    /* Trap for ClientID overflow. */
    if( i >= 0xFFFFFFFF )
    {
        cl_spinlock_release( &h_mad_disp->obj.lock );
        return NULL;
    }
    cl_status = cl_vector_set_min_size( &h_mad_disp->client_vector, i+1 );
    if( cl_status != CL_SUCCESS )
    {
        cl_spinlock_release( &h_mad_disp->obj.lock );
        return NULL;
    }
    h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );

    /* Record the registration. */
    h_mad_reg->client_id = (uint32_t)i;
    h_mad_reg->support_unsol = p_mad_svc->support_unsol;
    h_mad_reg->mgmt_class = p_mad_svc->mgmt_class;
    h_mad_reg->mgmt_version = p_mad_svc->mgmt_version;
    h_mad_reg->pfn_recv_done = pfn_recv_done;
    h_mad_reg->pfn_send_done = pfn_send_done;

    /* If the client requires support for unsolicited MADs, add tracking. */
    if( p_mad_svc->support_unsol )
    {
        if( !__mad_disp_reg_unsol( h_mad_disp, h_mad_reg, p_mad_svc ) )
        {
            cl_spinlock_release( &h_mad_disp->obj.lock );
            CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("reg unsol failed\n") );
            return NULL;
        }
    }

    /* Record that the registration was successful. */
    h_mad_reg->h_mad_svc = h_mad_svc;
    h_mad_reg->ref_cnt = 1;
    cl_spinlock_release( &h_mad_disp->obj.lock );

    /* The MAD service needs to take a reference on the dispatcher. */
    ref_al_obj( &h_mad_disp->obj );

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    return h_mad_reg;
}


static cl_status_t
__init_mad_reg(
    IN              void* const                 p_element,
    IN              void*                       context )
{
    al_mad_reg_handle_t     h_mad_reg;

    /* Record the MAD dispatcher for the registration structure. */
    h_mad_reg = p_element;
    h_mad_reg->h_mad_disp = context;
    h_mad_reg->ref_cnt = 0;

    return CL_SUCCESS;
}


/*
 * Initialize an entry in the version vector.  Each entry is a vector of
 * classes.
 */
static cl_status_t
__init_version_entry(
    IN              void* const                 p_element,
    IN              void*                       context )
{
    cl_vector_t     *p_vector;

    p_vector = p_element;
    UNUSED_PARAM( context );

    cl_vector_construct( p_vector );
    return cl_vector_init( p_vector, MAD_VECTOR_SIZE, MAD_VECTOR_SIZE,
        sizeof( cl_ptr_vector_t ), __init_class_entry, __destroy_class_entry,
        p_vector );
}


static void
__destroy_version_entry(
    IN              void* const                 p_element,
    IN              void*                       context )
{
    cl_vector_t     *p_vector;

    p_vector = p_element;
    UNUSED_PARAM( context );

    cl_vector_destroy( p_vector );
}


/*
 * Initialize an entry in the class vector.  Each entry is a pointer vector
 * of methods.
 */
static cl_status_t
__init_class_entry(
    IN              void* const                 p_element,
    IN              void*                       context )
{
    cl_ptr_vector_t     *p_ptr_vector;

    p_ptr_vector = p_element;
    UNUSED_PARAM( context );

    cl_ptr_vector_construct( p_ptr_vector );
    return cl_ptr_vector_init( p_ptr_vector,
        MAD_VECTOR_SIZE, MAD_VECTOR_SIZE );
}


static void
__destroy_class_entry(
    IN              void* const                 p_element,
    IN              void*                       context )
{
    cl_ptr_vector_t     *p_ptr_vector;

    p_ptr_vector = p_element;
    UNUSED_PARAM( context );

    cl_ptr_vector_destroy( p_ptr_vector );
}
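
/*
 * Taken together, the vectors above form the dispatcher's routing table for
 * unsolicited MADs:
 *
 *   version_vector[ __mgmt_version_index( ver ) ]  - a cl_vector_t of classes
 *     class vector[ __mgmt_class_index( class ) ]  - a cl_ptr_vector_t of methods
 *       method pointer vector[ method ]            - the registered al_mad_reg_handle_t
 *
 * mad_disp_recv_done() walks version -> class -> method and either finds the
 * single client registered for that method or returns IB_NOT_FOUND.
 */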


/*
 * Add support for unsolicited MADs for the given MAD service.
 */
static boolean_t
__mad_disp_reg_unsol(
    IN      const   al_mad_disp_handle_t        h_mad_disp,
    IN      const   al_mad_reg_handle_t         h_mad_reg,
    IN      const   ib_mad_svc_t                *p_mad_svc )
{
    cl_status_t         cl_status;
    cl_vector_t         *p_class_vector;
    cl_ptr_vector_t     *p_method_ptr_vector;
    uint8_t             i;

    /* Ensure that we are ready to handle this version number. */
    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    cl_status = cl_vector_set_min_size( &h_mad_disp->version_vector,
        __mgmt_version_index( p_mad_svc->mgmt_version ) + 1 );
    if( cl_status != CL_SUCCESS )
        return FALSE;

    /* Get the list of classes in use for this version. */
    p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
        __mgmt_version_index( p_mad_svc->mgmt_version ) );

    /* Ensure that we are ready to handle the specified class. */
    cl_status = cl_vector_set_min_size( p_class_vector,
        __mgmt_class_index( p_mad_svc->mgmt_class ) + 1 );
    if( cl_status != CL_SUCCESS )
        return FALSE;

    /* Get the list of methods in use for this class. */
    p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
        __mgmt_class_index( p_mad_svc->mgmt_class ) );

    /* Ensure that we can handle all requested methods. */
    for( i = MAX_METHOD - 1; i > 0; i-- )
    {
        if( p_mad_svc->method_array[i] )
        {
            cl_status = cl_ptr_vector_set_min_size( p_method_ptr_vector, i+1 );
            if( cl_status != CL_SUCCESS )
                return FALSE;

            /* No one else can be registered for this method. */
            if( cl_ptr_vector_get( p_method_ptr_vector, i ) )
            {
                CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
                    ("Another client is already registered for unsolicited method "
                    "%u, version %u, class %u.\n", i, p_mad_svc->mgmt_version,
                    p_mad_svc->mgmt_class ) );
                return FALSE;
            }
        }
    }

    /* We can support the request.  Record the methods. */
    for( i = 0; i < MAX_METHOD; i++ )
    {
        if( p_mad_svc->method_array[i] )
        {
            cl_ptr_vector_set( p_method_ptr_vector, i, h_mad_reg );

            CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
                ("Register version:%u (%u) class:0x%02X(%u) method:0x%02X Hdl:%p\n",
                p_mad_svc->mgmt_version,
                __mgmt_version_index( p_mad_svc->mgmt_version ),
                p_mad_svc->mgmt_class,
                __mgmt_class_index( p_mad_svc->mgmt_class ),
                i,
                h_mad_reg) );
        }
    }

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    return TRUE;
}
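
/*
 * Note that the unsolicited registration above runs in two passes while the
 * dispatcher lock is held: the first loop grows the method table as needed
 * and checks that none of the requested methods is owned by another client;
 * the second loop then records them all, so a failure never leaves a partial
 * registration to undo.
 */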


static __inline uint8_t
__mgmt_version_index(
    IN      const   uint8_t                     mgmt_version )
{
    return (uint8_t)(mgmt_version - 1);
}


static __inline uint8_t
__mgmt_class_index(
    IN      const   uint8_t                     mgmt_class )
{
    /* Map class 0x81 (IB_MCLASS_SUBN_DIR) to 0x01 (IB_MCLASS_SUBN_LID) to remove empty class values. */
    if( mgmt_class == IB_MCLASS_SUBN_DIR )
        return IB_MCLASS_SUBN_LID;
    else
        return mgmt_class;
}
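
/*
 * Folding the directed-route SMP class onto the LID-routed class keeps the
 * class vector compact: without the mapping, registering for class 0x81
 * would force the vector to hold 0x82 entries, most of them unused.
 */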


/*
 * Deregister a MAD service from the dispatcher.
 */
static void
__mad_disp_dereg(
    IN      const   al_mad_reg_handle_t         h_mad_reg )
{
    al_mad_disp_handle_t    h_mad_disp;
    cl_vector_t             *p_class_vector;
    cl_ptr_vector_t         *p_method_ptr_vector;
    size_t                  i;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    h_mad_disp = h_mad_reg->h_mad_disp;

    cl_spinlock_acquire( &h_mad_disp->obj.lock );

    if( h_mad_reg->support_unsol )
    {
        /* Deregister the service from receiving unsolicited MADs. */
        p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
            __mgmt_version_index( h_mad_reg->mgmt_version ) );

        p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
            __mgmt_class_index( h_mad_reg->mgmt_class ) );

        /* Deregister all methods registered to the client. */
        for( i = 0; i < cl_ptr_vector_get_size( p_method_ptr_vector ); i++ )
        {
            if( cl_ptr_vector_get( p_method_ptr_vector, i ) == h_mad_reg )
            {
                cl_ptr_vector_set( p_method_ptr_vector, i, NULL );
            }
        }
    }

    cl_spinlock_release( &h_mad_disp->obj.lock );

    /* Decrement the reference count in the registration table. */
    cl_atomic_dec( &h_mad_reg->ref_cnt );

    /* The MAD service no longer requires access to the MAD dispatcher. */
    deref_al_obj( &h_mad_disp->obj );
    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}


static void
__mad_disp_queue_send(
    IN      const   al_mad_reg_handle_t         h_mad_reg,
    IN              al_mad_wr_t* const          p_mad_wr )
{
    ib_mad_t                *p_mad_hdr;

    /*
     * Increment the reference count on the registration to ensure that
     * the MAD service does not go away until the send completes.
     */
    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    cl_atomic_inc( &h_mad_reg->ref_cnt );
    ref_al_obj( &h_mad_reg->h_mad_svc->obj );

    /* Get the MAD header. */
    p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );
    CL_ASSERT( !p_mad_wr->send_wr.wr_id );
    p_mad_wr->send_wr.wr_id = (uintn_t)p_mad_wr;

    /*
     * If we are the originator of the transaction, we need to modify the
     * TID to ensure that duplicate TIDs are not used by multiple clients.
     */
    CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("dispatching TID: 0x%0"PRIx64"\n",
        p_mad_hdr->trans_id) );
    p_mad_wr->client_tid = p_mad_hdr->trans_id;
    if( __use_tid_routing( p_mad_hdr, TRUE ) )
    {
        /* Clear the AL portion of the TID before setting. */
        ((al_tid_t*)&p_mad_hdr->trans_id)->tid32.al_tid = 0;

#pragma warning( push, 3 )
        al_set_al_tid( &p_mad_hdr->trans_id, h_mad_reg->client_id );
#pragma warning( pop )

        CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
            ("modified TID to: 0x%0"PRIx64"\n", p_mad_hdr->trans_id) );
    }

    /* Post the work request to the QP. */
    p_mad_wr->client_id = h_mad_reg->client_id;
    h_mad_reg->h_mad_disp->h_qp->pfn_queue_mad(
        h_mad_reg->h_mad_disp->h_qp, p_mad_wr );

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
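
/*
 * To summarize the send-side TID handling above: the client's original TID is
 * preserved in p_mad_wr->client_tid, and for TID-routed transactions the
 * AL-owned portion of the TID is overwritten with the client_id of the
 * registration.  mad_disp_recv_done() later uses that value to route the
 * matching response back to this client, and mad_disp_send_done() restores
 * the original TID before completing the send to the MAD service.
 */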


static inline void
__mad_disp_resume_send(
    IN      const   al_mad_reg_handle_t         h_mad_reg )
{
    AL_ENTER( AL_DBG_MAD_SVC );

    h_mad_reg->h_mad_disp->h_qp->pfn_resume_mad(
        h_mad_reg->h_mad_disp->h_qp );

    AL_EXIT( AL_DBG_MAD_SVC );
}


/*
 * Complete a sent MAD.  Route the completion to the correct MAD service.
 */
void
mad_disp_send_done(
    IN              al_mad_disp_handle_t        h_mad_disp,
    IN              al_mad_wr_t                 *p_mad_wr,
    IN              ib_wc_t                     *p_wc )
{
    al_mad_reg_handle_t     h_mad_reg;
    ib_mad_t                *p_mad_hdr;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

    CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("p_mad_wr 0x%p\n", p_mad_wr ) );

    /* Get the MAD header. */
    p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );

    /* Get the MAD service that issued the send. */
    cl_spinlock_acquire( &h_mad_disp->obj.lock );
    h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
        p_mad_wr->client_id );
    cl_spinlock_release( &h_mad_disp->obj.lock );
    CL_ASSERT( h_mad_reg && (h_mad_reg->client_id == p_mad_wr->client_id) );

    /* Reset the TID and WR ID. */
    CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send done TID: 0x%"PRIx64"\n",
        p_mad_hdr->trans_id) );
    p_mad_hdr->trans_id = p_mad_wr->client_tid;
    p_mad_wr->send_wr.wr_id = 0;

    /* Return the completed request to the MAD service. */
    CL_ASSERT( h_mad_reg->h_mad_svc );
    h_mad_reg->pfn_send_done( h_mad_reg->h_mad_svc, p_mad_wr, p_wc );

    /* The MAD service is no longer referenced once the send completes. */
    deref_al_obj( &h_mad_reg->h_mad_svc->obj );
    cl_atomic_dec( &h_mad_reg->ref_cnt );

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}


/*
 * Process a received MAD.  Route the completion to the correct MAD service.
 */
ib_api_status_t
mad_disp_recv_done(
    IN              al_mad_disp_handle_t        h_mad_disp,
    IN              ib_mad_element_t            *p_mad_element )
{
    ib_mad_t                *p_mad_hdr;
    al_mad_reg_handle_t     h_mad_reg;
    ib_al_handle_t          h_al;
    ib_mad_svc_handle_t     h_mad_svc;

    cl_vector_t             *p_class_vector;
    cl_ptr_vector_t         *p_method_ptr_vector;
    uint8_t                 method;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    p_mad_hdr = ib_get_mad_buf( p_mad_element );

    CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
        ("TID = 0x%"PRIx64 "\n"
         "class = 0x%x.\n"
         "version = 0x%x.\n"
         "method = 0x%x.\n",
        p_mad_hdr->trans_id,
        p_mad_hdr->mgmt_class,
        p_mad_hdr->class_ver,
        p_mad_hdr->method) );

    /* Get the client to route the receive to. */
    cl_spinlock_acquire( &h_mad_disp->obj.lock );
    if( __use_tid_routing( p_mad_hdr, FALSE ) )
    {
        /* The MAD was received in response to a send. */
        CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("routing based on TID\n"));

        /* Verify that we have a registration entry. */
        if( al_get_al_tid( p_mad_hdr->trans_id ) >=
            cl_vector_get_size( &h_mad_disp->client_vector ) )
        {
            /* No clients for this version-class-method. */
            cl_spinlock_release( &h_mad_disp->obj.lock );
            CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
                ("invalid client ID\n") );
            return IB_NOT_FOUND;
        }

        h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
            al_get_al_tid( p_mad_hdr->trans_id ) );

/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
        al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )
    }
    else
    {
        CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
            ("routing based on version, class, method\n"));

        /* The receive is unsolicited.  Find the client. */
        if( __mgmt_version_index( p_mad_hdr->class_ver ) >=
            cl_vector_get_size( &h_mad_disp->version_vector ) )
        {
            /* No clients for this version of MADs. */
            cl_spinlock_release( &h_mad_disp->obj.lock );
            CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
                ("no clients registered for this class version\n") );
            return IB_NOT_FOUND;
        }

        /* See if we have a client for this class of MADs. */
        p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
            __mgmt_version_index( p_mad_hdr->class_ver ) );

        if( __mgmt_class_index( p_mad_hdr->mgmt_class ) >=
            cl_vector_get_size( p_class_vector ) )
        {
            /* No clients for this version-class. */
            cl_spinlock_release( &h_mad_disp->obj.lock );
            CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
                ("no clients registered for this class\n") );
            return IB_NOT_FOUND;
        }

        /* See if we have a client for this method. */
        p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
            __mgmt_class_index( p_mad_hdr->mgmt_class ) );
        method = (uint8_t)(p_mad_hdr->method & (~IB_MAD_METHOD_RESP_MASK));

        if( method >= cl_ptr_vector_get_size( p_method_ptr_vector ) )
        {
            /* No clients for this version-class-method. */
            cl_spinlock_release( &h_mad_disp->obj.lock );
            CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
                ("no clients registered for this method-out of range\n") );
            return IB_NOT_FOUND;
        }

        h_mad_reg = cl_ptr_vector_get( p_method_ptr_vector, method );
        if( !h_mad_reg )
        {
            /* No clients for this version-class-method. */
            cl_spinlock_release( &h_mad_disp->obj.lock );
            CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
                ("no clients registered for method %u of class %u(%u) version %u(%u)\n",
                 method,
                 p_mad_hdr->mgmt_class,
                 __mgmt_class_index( p_mad_hdr->mgmt_class ),
                 p_mad_hdr->class_ver,
                 __mgmt_version_index( p_mad_hdr->class_ver )
                 ) );
            return IB_NOT_FOUND;
        }
    }

    /* Verify that the registration is still valid. */
    if( !h_mad_reg->ref_cnt )
    {
        cl_spinlock_release( &h_mad_disp->obj.lock );
        CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
            ("no client registered\n") );
        return IB_NOT_FOUND;
    }

    /* Take a reference on the MAD service in case it deregisters. */
    h_mad_svc = h_mad_reg->h_mad_svc;
    ref_al_obj( &h_mad_svc->obj );
    cl_spinlock_release( &h_mad_disp->obj.lock );

    /* Hand off the MAD to the correct AL instance. */
    h_al = qp_get_al( (ib_qp_handle_t)(h_mad_svc->obj.p_parent_obj) );
    al_handoff_mad( h_al, p_mad_element );

    h_mad_reg->pfn_recv_done( h_mad_svc, p_mad_element );
    deref_al_obj( &h_mad_svc->obj );
    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    return IB_SUCCESS;
}


/*
 * Return TRUE if we should route the MAD to the recipient based on the TID.
 */
static boolean_t
__use_tid_routing(
    IN      const   ib_mad_t* const             p_mad_hdr,
    IN      const   boolean_t                   are_we_sender )
{
    ib_rmpp_mad_t       *p_rmpp_mad;
    boolean_t           is_orig;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

    /* CM MADs are never TID routed. */
    if( p_mad_hdr->mgmt_class == IB_MCLASS_COMM_MGMT )
    {
        CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
        return FALSE;
    }

    /*
     * Determine the originator for a sent MAD.  Received MADs are just the
     * opposite.
     */

    /* Non-DATA RMPP MADs are handled differently. */
    p_rmpp_mad = (ib_rmpp_mad_t*)p_mad_hdr;
    if( (p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_ADM) &&
        ( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) &&
        (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) )
    {
        /*
         * We need to distinguish between ACKs sent after receiving
         * a request, versus ACKs sent after receiving a response.  ACKs
         * to a request are from the responder.  ACKs to a response are
         * from the originator.
         *
         * Note that we assume STOP and ABORT packets are initiated by
         * receivers.  If both senders and receivers can
         * initiate STOP and ABORT MADs, then we can't distinguish which
         * transaction is associated with the MAD.  The TID for a
         * send and receive can be the same.
         */
        is_orig = !ib_mad_is_response( p_mad_hdr );
    }
    else
    {
        /*
         * See if the MAD is being sent in response to a previous MAD.  If
         * it is, then we're NOT the originator.  Note that trap repress
         * MADs are responses, even though the response bit isn't set.
         */
        is_orig = !( ib_mad_is_response( p_mad_hdr ) ||
            (p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) );
    }

    /* If we're the receiver, toggle the result. */
    if( !are_we_sender )
        is_orig = !is_orig;

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    return is_orig;
}
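
/*
 * Putting the two routines together: a received MAD is TID-routed back to the
 * client that originated the transaction whenever the sender of that MAD was
 * acting as the responder in the transaction (e.g. a GetResp or a trap
 * repress).  Everything else - unsolicited requests, traps, and all CM MADs,
 * which are never TID-routed - falls through to the version/class/method
 * table lookup in mad_disp_recv_done().
 */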


/*
 *
 * MAD Service.
 *
 */


/*
 * Create and initialize a MAD service for use.
 */
ib_api_status_t
reg_mad_svc(
    IN      const   ib_qp_handle_t              h_qp,
    IN      const   ib_mad_svc_t* const         p_mad_svc,
        OUT         ib_mad_svc_handle_t* const  ph_mad_svc )
{
    ib_api_status_t     status;
    cl_status_t         cl_status;
    ib_mad_svc_handle_t h_mad_svc;
    al_qp_alias_t       *p_qp_alias;
    ib_qp_attr_t        qp_attr;

    CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    CL_ASSERT( h_qp );

    switch( h_qp->type )
    {
    case IB_QPT_QP0:
    case IB_QPT_QP1:
    case IB_QPT_QP0_ALIAS:
    case IB_QPT_QP1_ALIAS:
    case IB_QPT_MAD:
        break;

    default:
        CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
        return IB_INVALID_PARAMETER;
    }

    if( !p_mad_svc || !ph_mad_svc )
    {
        CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
        return IB_INVALID_PARAMETER;
    }

    h_mad_svc = cl_zalloc( sizeof( al_mad_svc_t ) );
    if( !h_mad_svc )
    {
        return IB_INSUFFICIENT_MEMORY;
    }

    /* Construct the MAD service. */
    construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC );
    cl_timer_construct( &h_mad_svc->send_timer );
    cl_timer_construct( &h_mad_svc->recv_timer );
    cl_qlist_init( &h_mad_svc->send_list );
    cl_qlist_init( &h_mad_svc->recv_list );

    p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp );
    h_mad_svc->svc_type = p_mad_svc->svc_type;
    h_mad_svc->obj.context = p_mad_svc->mad_svc_context;
    h_mad_svc->pfn_user_recv_cb = p_mad_svc->pfn_mad_recv_cb;
    h_mad_svc->pfn_user_send_cb = p_mad_svc->pfn_mad_send_cb;

    /* Initialize the MAD service. */
    status = init_al_obj( &h_mad_svc->obj, p_mad_svc->mad_svc_context,
        TRUE, __destroying_mad_svc, __cleanup_mad_svc, free_mad_svc );
    if( status != IB_SUCCESS )
    {
        free_mad_svc( &h_mad_svc->obj );
        return status;
    }
    status = attach_al_obj( &h_qp->obj, &h_mad_svc->obj );
    if( status != IB_SUCCESS )
    {
        h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
        AL_TRACE_EXIT( AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
        return status;
    }

    h_mad_svc->h_mad_reg = __mad_disp_reg( p_qp_alias->h_mad_disp,
        h_mad_svc, p_mad_svc, __mad_svc_send_done, __mad_svc_recv_done );
    if( !h_mad_svc->h_mad_reg )
    {
        h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
        return IB_INSUFFICIENT_MEMORY;
    }

    /* Record which port this MAD service uses, to use when creating AVs. */
    status = ib_query_qp( h_qp, &qp_attr );
    if( status != IB_SUCCESS )
    {
        h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
        return status;
    }
    h_mad_svc->h_pd = qp_attr.h_pd;
    h_mad_svc->port_num = qp_attr.primary_port;

    cl_status = cl_timer_init( &h_mad_svc->send_timer,
        __send_timer_cb, h_mad_svc );
    if( cl_status != CL_SUCCESS )
    {
        h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
        return ib_convert_cl_status( cl_status );
    }

    cl_status = cl_timer_init( &h_mad_svc->recv_timer,
        __recv_timer_cb, h_mad_svc );
    if( cl_status != CL_SUCCESS )
    {
        h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
        return ib_convert_cl_status( cl_status );
    }

    *ph_mad_svc = h_mad_svc;

    CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
    return IB_SUCCESS;
}
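
/*
 * For reference, a client would typically drive this as follows (illustrative
 * sketch only - the IB_MCLASS_PERF, IB_MAD_METHOD_GET and IB_MAD_SVC_DEFAULT
 * values are assumptions and are not taken from this file):
 *
 *      ib_mad_svc_t        mad_svc;
 *      ib_mad_svc_handle_t h_mad_svc;
 *
 *      cl_memclr( &mad_svc, sizeof( mad_svc ) );
 *      mad_svc.mad_svc_context = my_context;
 *      mad_svc.pfn_mad_send_cb = my_send_cb;
 *      mad_svc.pfn_mad_recv_cb = my_recv_cb;
 *      mad_svc.svc_type        = IB_MAD_SVC_DEFAULT;
 *      mad_svc.support_unsol   = TRUE;
 *      mad_svc.mgmt_version    = 1;
 *      mad_svc.mgmt_class      = IB_MCLASS_PERF;
 *      mad_svc.method_array[IB_MAD_METHOD_GET] = TRUE;
 *
 *      status = reg_mad_svc( h_qp, &mad_svc, &h_mad_svc );
 */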
1198 \r
1199 \r
1200 \r
1201 static void\r
1202 __destroying_mad_svc(\r
1203         IN                              struct _al_obj                          *p_obj )\r
1204 {\r
1205         ib_qp_handle_t                  h_qp;\r
1206         ib_mad_svc_handle_t             h_mad_svc;\r
1207         ib_mad_send_handle_t    h_send;\r
1208         cl_list_item_t                  *p_list_item;\r
1209         int32_t                                 timeout_ms;\r
1210 #ifdef CL_KERNEL\r
1211         KIRQL                                   old_irql;\r
1212 #endif\r
1213 \r
1214         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1215         CL_ASSERT( p_obj );\r
1216         h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );\r
1217 \r
1218         /* Deregister the MAD service. */\r
1219         h_qp = (ib_qp_handle_t)p_obj->p_parent_obj;\r
1220         if( h_qp->pfn_dereg_mad_svc )\r
1221                 h_qp->pfn_dereg_mad_svc( h_mad_svc );\r
1222 \r
1223         /* Wait here until the MAD service is no longer in use. */\r
1224         timeout_ms = (int32_t)h_mad_svc->obj.timeout_ms;\r
1225         while( h_mad_svc->ref_cnt && timeout_ms > 0 )\r
1226         {\r
1227                 /* Use a timeout to avoid waiting forever - just in case. */\r
1228                 cl_thread_suspend( 10 );\r
1229                 timeout_ms -= 10;\r
1230         }\r
1231 \r
1232         /*\r
1233          * Deregister from the MAD dispatcher.  The MAD dispatcher holds\r
1234          * a reference on the MAD service when invoking callbacks.  Since we\r
1235          * issue sends, we know how many callbacks are expected for send\r
1236          * completions.  With receive completions, we need to wait until all\r
1237          * receive callbacks have completed before cleaning up receives.\r
1238          */\r
1239         if( h_mad_svc->h_mad_reg )\r
1240                 __mad_disp_dereg( h_mad_svc->h_mad_reg );\r
1241 \r
1242         /* Cancel all outstanding send requests. */\r
1243         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1244         for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
1245                  p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
1246                  p_list_item = cl_qlist_next( p_list_item ) )\r
1247         {\r
1248                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("canceling MAD\n") );\r
1249                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1250                 h_send->canceled = TRUE;\r
1251         }\r
1252         cl_spinlock_release( &h_mad_svc->obj.lock );\r
1253 \r
1254         /*\r
1255          * Invoke the timer callback to return the canceled MADs to the user.\r
1256          * Since the MAD service is being destroyed, the user cannot be issuing\r
1257          * sends.\r
1258          */\r
1259 #ifdef CL_KERNEL\r
1260         old_irql = KeRaiseIrqlToDpcLevel();\r
1261 #endif\r
1262         __check_send_queue( h_mad_svc );\r
1263 #ifdef CL_KERNEL\r
1264         KeLowerIrql( old_irql );\r
1265 #endif\r
1266 \r
1267         cl_timer_destroy( &h_mad_svc->send_timer );\r
1268 \r
1269 #ifdef CL_KERNEL\r
1270         /*\r
1271          * Reclaim any pending receives sent to the proxy for UAL.\r
1272          */\r
1273         if( h_mad_svc->obj.h_al->p_context )\r
1274         {\r
1275                 cl_qlist_t                                      *p_cblist;\r
1276                 al_proxy_cb_info_t                      *p_cb_info;\r
1277 \r
1278                 cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock );\r
1279                 p_cblist = &h_mad_svc->obj.h_al->p_context->misc_cb_list;\r
1280                 p_list_item = cl_qlist_head( p_cblist );\r
1281                 while( p_list_item != cl_qlist_end( p_cblist ) )\r
1282                 {\r
1283                         p_cb_info = (al_proxy_cb_info_t*)p_list_item;\r
1284                         p_list_item = cl_qlist_next( p_list_item );\r
1285 \r
1286                         if( p_cb_info->p_al_obj && p_cb_info->p_al_obj == &h_mad_svc->obj )\r
1287                         {\r
1288                                 cl_qlist_remove_item( p_cblist, &p_cb_info->pool_item.list_item );\r
1289                                 deref_al_obj( p_cb_info->p_al_obj );\r
1290                                 proxy_cb_put( p_cb_info );\r
1291                         }\r
1292                 }\r
1293                 cl_spinlock_release( &h_mad_svc->obj.h_al->p_context->cb_lock );\r
1294         }\r
1295 #endif\r
1296 \r
1297         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1298 }\r
1299 \r
1300 \r
1301 \r
1302 static void\r
1303 __cleanup_mad_svc(\r
1304         IN                              struct _al_obj                          *p_obj )\r
1305 {\r
1306         ib_mad_svc_handle_t             h_mad_svc;\r
1307         al_mad_rmpp_t                   *p_rmpp;\r
1308         cl_list_item_t                  *p_list_item;\r
1309 \r
1310         CL_ASSERT( p_obj );\r
1311         h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );\r
1312 \r
1313         /*\r
1314          * No callbacks from the MAD dispatcher are active at this point.  Clean\r
1315          * up any receives that are still being reassembled.  Destroy the receive\r
1316          * timer first so that we do not have to synchronize with it.\r
1317          */\r
1318         cl_timer_destroy( &h_mad_svc->recv_timer );\r
1319         for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );\r
1320                  p_list_item != cl_qlist_end( &h_mad_svc->recv_list );\r
1321                  p_list_item = cl_qlist_next( p_list_item ) )\r
1322         {\r
1323                 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );\r
1324                 p_rmpp->inactive = TRUE;\r
1325         }\r
1326         __recv_timer_cb( h_mad_svc );\r
1327 \r
1328         CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->send_list ) );\r
1329         CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->recv_list ) );\r
1330 }\r
1331 \r
1332 \r
1333 \r
1334 void\r
1335 free_mad_svc(\r
1336         IN                              al_obj_t                                        *p_obj )\r
1337 {\r
1338         ib_mad_svc_handle_t     h_mad_svc;\r
1339 \r
1340         CL_ASSERT( p_obj );\r
1341         h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );\r
1342 \r
1343         destroy_al_obj( p_obj );\r
1344         cl_free( h_mad_svc );\r
1345 }\r
1346 \r
1347 \r
1348 \r
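/*\r
 * Usage sketch (hypothetical caller, not taken from this file): MAD elements\r
 * are chained through p_next and posted with a single call.  On failure, the\r
 * element that could not be posted is returned through pp_mad_failure and,\r
 * along with any elements that were never posted, remains owned by the caller.\r
 *\r
 *      ib_mad_element_t        *p_failed;\r
 *\r
 *      p_mad->p_next = NULL;\r
 *      if( ib_send_mad( h_mad_svc, p_mad, &p_failed ) != IB_SUCCESS )\r
 *              ib_put_mad( p_failed );  // or other application-specific handling\r
 */\r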
1349 ib_api_status_t\r
1350 ib_send_mad(\r
1351         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
1352         IN                              ib_mad_element_t* const         p_mad_element_list,\r
1353                 OUT                     ib_mad_element_t                        **pp_mad_failure OPTIONAL )\r
1354 {\r
1355         ib_api_status_t                         status = IB_SUCCESS;\r
1356 #ifdef CL_KERNEL\r
1357         ib_mad_send_handle_t            h_send;\r
1358         ib_mad_element_t                        *p_cur_mad, *p_next_mad;\r
1359 #endif\r
1360 \r
1361         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1362 \r
1363         if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
1364         {\r
1365                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );\r
1366                 return IB_INVALID_HANDLE;\r
1367         }\r
1368         if( !p_mad_element_list ||\r
1369                 ( p_mad_element_list->p_next && !pp_mad_failure ) )\r
1370         {\r
1371                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
1372                 return IB_INVALID_PARAMETER;\r
1373         }\r
1374 \r
1375 #ifndef CL_KERNEL\r
1376         /* This is a send from user mode using the special QP alias. */\r
1377         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
1378                 ("ib_send_mad: ual_context non-zero, TID = 0x%"PRIx64 ".\n",\r
1379                 ((ib_mad_t*)(ib_get_mad_buf( p_mad_element_list )))->trans_id ));\r
1380         status = spl_qp_mad_send( h_mad_svc, p_mad_element_list,\r
1381                 pp_mad_failure );\r
1382         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1383         return status;\r
1384 #else\r
1385         /* Post each send on the list. */\r
1386         p_cur_mad = p_mad_element_list;\r
1387         while( p_cur_mad )\r
1388         {\r
1389                 p_next_mad = p_cur_mad->p_next;\r
1390 \r
1391                 /* Get an element to track the send. */\r
1392                 h_send = get_mad_send( PARENT_STRUCT( p_cur_mad,\r
1393                         al_mad_element_t, element ) );\r
1394                 if( !h_send )\r
1395                 {\r
1396                         CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("unable to get mad_send\n") );\r
1397                         if( pp_mad_failure )\r
1398                                 *pp_mad_failure = p_cur_mad;\r
1399                         return IB_INSUFFICIENT_RESOURCES;\r
1400                 }\r
1401 \r
1402                 /* Initialize the MAD for sending. */\r
1403                 status = __init_send_mad( h_mad_svc, h_send, p_cur_mad );\r
1404                 if( status != IB_SUCCESS )\r
1405                 {\r
1406                         CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("init_send_mad failed: %s\n",\r
1407                                 ib_get_err_str(status)) );\r
1408                         put_mad_send( h_send );\r
1409                         if( pp_mad_failure )\r
1410                                 *pp_mad_failure = p_cur_mad;\r
1411                         return status;\r
1412                 }\r
1413 \r
1414                 /* Add the MADs to our list. */\r
1415                 cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1416                 cl_qlist_insert_tail( &h_mad_svc->send_list,\r
1417                         (cl_list_item_t*)&h_send->pool_item );\r
1418 \r
1419                 /* Post the MAD to the dispatcher, and check for failures. */\r
1420                 ref_al_obj( &h_mad_svc->obj );\r
1421                 p_cur_mad->p_next = NULL;\r
1422                 if( h_send->uses_rmpp )\r
1423                         __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
1424                 else\r
1425                         __queue_mad_wr( h_mad_svc->h_mad_reg, h_send );\r
1426                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1427 \r
1428                 p_cur_mad = p_next_mad;\r
1429         }\r
1430 \r
1431         /*\r
1432          * Resume any sends that can now be sent without holding\r
1433          * the mad service lock.\r
1434          */\r
1435         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
1436 \r
1437         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1438         return status;\r
1439 #endif\r
1440 }\r
1441 \r
1442 \r
1443 \r
1444 static ib_api_status_t\r
1445 __init_send_mad(\r
1446         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1447         IN              const   ib_mad_send_handle_t            h_send,\r
1448         IN                              ib_mad_element_t* const         p_mad_element )\r
1449 {\r
1450         ib_rmpp_mad_t           *p_rmpp_hdr;\r
1451         uint8_t                         rmpp_version;\r
1452         ib_api_status_t         status;\r
1453 \r
1454         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1455 \r
1456         /* Initialize tracking the send. */\r
1457         h_send->p_send_mad = p_mad_element;\r
1458         h_send->retry_time = MAX_TIME;\r
1459         h_send->retry_cnt = p_mad_element->retry_cnt;\r
1460 \r
1461         /* See if the send uses RMPP. */\r
1462         h_send->uses_rmpp = __does_send_req_rmpp( h_mad_svc->svc_type,\r
1463                 p_mad_element, &rmpp_version );\r
1464         if( h_send->uses_rmpp )\r
1465         {\r
1466                 /* The RMPP header is present. */\r
1467                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("RMPP is activated\n") );\r
1468                 p_rmpp_hdr = (ib_rmpp_mad_t*)p_mad_element->p_mad_buf;\r
1469 \r
1470                 /* We only support version 1. */\r
1471                 if( rmpp_version != DEFAULT_RMPP_VERSION )\r
1472                 {\r
1473                         CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("unsupported version\n") );\r
1474                         return IB_INVALID_SETTING;\r
1475                 }\r
1476 \r
1477                 p_rmpp_hdr->rmpp_version = rmpp_version;\r
1478                 p_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_DATA;\r
1479                 ib_rmpp_set_resp_time( p_rmpp_hdr, IB_RMPP_NO_RESP_TIME );\r
1480                 p_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;\r
1481                 /*\r
1482                  * The segment number, flags, and payload size are set when\r
1483                  * sending, so that they are set correctly when issuing retries.\r
1484                  */\r
1485 \r
1486                 h_send->ack_seg = 0;\r
1487                 h_send->seg_limit = 1;\r
1488                 h_send->cur_seg = 1;\r
1489                 /* SA RMPP MADs use a different header size and data segment size. */\r
1490                 if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1491                 {\r
1492                         h_send->total_seg = ( (p_mad_element->size - IB_SA_MAD_HDR_SIZE) +\r
1493                                 (IB_SA_DATA_SIZE - 1) ) / IB_SA_DATA_SIZE;\r
1494                 } \r
1495                 else \r
1496                 {\r
1497                         h_send->total_seg = ( (p_mad_element->size - MAD_RMPP_HDR_SIZE) +\r
1498                                 (MAD_RMPP_DATA_SIZE - 1) ) / MAD_RMPP_DATA_SIZE;\r
1499                 }\r
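                /*\r
                 * Hypothetical example (assuming the usual 56-byte SA MAD header\r
                 * and 200-byte SA data segments): a 456-byte SA MAD yields\r
                 * total_seg = ((456 - 56) + 199) / 200 = 2 segments.\r
                 */\r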
1500         }\r
1501 \r
1502         /* See if we need to create the address vector for the user. */\r
1503         if( !p_mad_element->h_av &&\r
1504                 !( p_mad_element->send_opt & IB_SEND_OPT_LOCAL ) )\r
1505         {\r
1506                 status = __create_send_av( h_mad_svc, h_send );\r
1507                 if( status != IB_SUCCESS )\r
1508                 {\r
1509                         return status;\r
1510                 }\r
1511         }\r
1512 \r
1513         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1514         return IB_SUCCESS;\r
1515 }\r
1516 \r
1517 \r
1518 \r
1519 static ib_api_status_t\r
1520 __create_send_av(\r
1521         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1522         IN                              ib_mad_send_handle_t            h_send )\r
1523 {\r
1524         ib_av_attr_t            av_attr;\r
1525         ib_mad_element_t        *p_mad_element;\r
1526 \r
1527         p_mad_element = h_send->p_send_mad;\r
1528 \r
1529         av_attr.port_num = h_mad_svc->port_num;\r
1530 \r
1531         av_attr.sl = p_mad_element->remote_sl;\r
1532         av_attr.dlid = p_mad_element->remote_lid;\r
1533 \r
1534         av_attr.grh_valid = p_mad_element->grh_valid;\r
1535         if( av_attr.grh_valid )\r
1536                 av_attr.grh = *p_mad_element->p_grh;\r
1537 \r
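        /* Note: a fixed static rate is used here rather than a per-MAD value. */\r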
1538         av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
1539         av_attr.path_bits = p_mad_element->path_bits;\r
1540 \r
1541         return ib_create_av( h_mad_svc->h_pd, &av_attr, &h_send->h_av );\r
1542 }\r
1543 \r
1544 \r
1545 \r
1546 static boolean_t\r
1547 __does_send_req_rmpp(\r
1548         IN              const   ib_mad_svc_type_t                       mad_svc_type,\r
1549         IN              const   ib_mad_element_t* const         p_mad_element,\r
1550                 OUT                     uint8_t                                         *p_rmpp_version )\r
1551 {\r
1552         switch( mad_svc_type )\r
1553         {\r
1554         case IB_MAD_SVC_DEFAULT:\r
1555         case IB_MAD_SVC_RMPP:\r
1556                 /* Internally generated MADs do not use RMPP. */\r
1557                 if( __is_internal_send( mad_svc_type, p_mad_element ) )\r
1558                         return FALSE;\r
1559 \r
1560                 /* If the MAD has the version number set, just return it. */\r
1561                 if( p_mad_element->rmpp_version )\r
1562                 {\r
1563                         *p_rmpp_version = p_mad_element->rmpp_version;\r
1564                         return TRUE;\r
1565                 }\r
1566 \r
1567                 /* If the class is well known and uses RMPP, use the default version. */\r
1568                 if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1569                 {\r
1570                         switch( p_mad_element->p_mad_buf->method )\r
1571                         {\r
1572                         case IB_MAD_METHOD_GETTABLE_RESP:\r
1573                         case IB_MAD_METHOD_GETMULTI:\r
1574                         case IB_MAD_METHOD_GETMULTI_RESP:\r
1575                                 *p_rmpp_version = DEFAULT_RMPP_VERSION;\r
1576                                 return TRUE;\r
1577 \r
1578                         default:\r
1579                                 return FALSE;\r
1580                         }\r
1581                 }\r
1582 \r
1583                 /* The RMPP is not active. */\r
1584                 return FALSE;\r
1585 \r
1586         default:\r
1587                 return FALSE;\r
1588         }\r
1589 }\r
1590 \r
1591 \r
1592 \r
1593 /*\r
1594  * Sends the next RMPP segment of an RMPP transfer.\r
1595  */\r
1596 static void\r
1597 __queue_rmpp_seg(\r
1598         IN              const   al_mad_reg_handle_t                     h_mad_reg,\r
1599         IN                              ib_mad_send_handle_t            h_send )\r
1600 {\r
1601         ib_rmpp_mad_t           *p_rmpp_hdr;\r
1602 \r
1603         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1604 \r
1605         CL_ASSERT( h_mad_reg && h_send );\r
1606         CL_ASSERT( h_send->cur_seg <= h_send->seg_limit );\r
1607 \r
1608         /* Reset information to track the send. */\r
1609         h_send->retry_time = MAX_TIME;\r
1610 \r
1611         /* Set the RMPP header information. */\r
1612         p_rmpp_hdr = (ib_rmpp_mad_t*)h_send->p_send_mad->p_mad_buf;\r
1613         p_rmpp_hdr->seg_num = cl_hton32( h_send->cur_seg );\r
1614         p_rmpp_hdr->rmpp_flags = IB_RMPP_FLAG_ACTIVE;\r
1615         p_rmpp_hdr->paylen_newwin = 0;\r
1616 \r
1617         /* See if this is the first segment that needs to be sent. */\r
1618         if( h_send->cur_seg == 1 )\r
1619         {\r
1620                 p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_FIRST;\r
1621 \r
1622                 /*\r
1623          * Since the RMPP layer supports SA MADs by duplicating the SA header in\r
1624          * each segment, the Payload Length reported here covers the original MAD\r
1625          * size plus the extra SA header bytes carried by the additional segments.\r
1626                  */\r
1627                 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1628                 {\r
1629                         /* Add sa_ext_hdr to each segment over the first one. */\r
1630                         p_rmpp_hdr->paylen_newwin = cl_hton32(\r
1631                                 h_send->p_send_mad->size - MAD_RMPP_HDR_SIZE +\r
1632                                 (h_send->total_seg - 1) * \r
1633                                 (IB_SA_MAD_HDR_SIZE - MAD_RMPP_HDR_SIZE) );\r
1634                 }\r
1635                 else \r
1636                 {\r
1637                         /* For other RMPP packets we simply use the given MAD */\r
1638                         p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -\r
1639                                 MAD_RMPP_HDR_SIZE );\r
1640                 }\r
1641         }\r
1642 \r
1643         /* See if this is the last segment that needs to be sent. */\r
1644         if( h_send->cur_seg == h_send->total_seg )\r
1645         {\r
1646                 p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_LAST;\r
1647 \r
1648                 /* But for SA MADs we need extra header size */\r
1649                 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1650                 {\r
1651                         p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -\r
1652                                 (h_send->cur_seg -1)*IB_SA_DATA_SIZE - MAD_RMPP_HDR_SIZE );\r
1653                 }\r
1654                 else\r
1655                 {\r
1656                         p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -\r
1657                                 (h_send->cur_seg -1)*MAD_RMPP_DATA_SIZE );\r
1658                 }\r
1659         }\r
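        /*\r
         * Worked example with assumed sizes (36-byte RMPP header, 200-byte SA\r
         * data segments): the last segment (2 of 2) of a 456-byte SA MAD would\r
         * report paylen_newwin = 456 - 1 * 200 - 36 = 220 here.\r
         */\r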
1660 \r
1661         /* Set the current segment to the next one. */\r
1662         h_send->cur_seg++;\r
1663 \r
1664         /* Send the MAD. */\r
1665         __queue_mad_wr( h_mad_reg, h_send );\r
1666 \r
1667         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1668 }\r
1669 \r
1670 \r
1671 \r
1672 /*\r
1673  * Posts a send work request to the dispatcher for a MAD send.\r
1674  */\r
1675 static void\r
1676 __queue_mad_wr(\r
1677         IN              const   al_mad_reg_handle_t                     h_mad_reg,\r
1678         IN              const   ib_mad_send_handle_t            h_send )\r
1679 {\r
1680         ib_send_wr_t            *p_send_wr;\r
1681         al_mad_element_t        *p_al_element;\r
1682         ib_rmpp_mad_t           *p_rmpp_hdr;\r
1683         uint8_t                         *p_rmpp_src, *p_rmpp_dst;\r
1684         uintn_t                         hdr_len, offset, max_len;\r
1685 \r
1686         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1687         p_send_wr = &h_send->mad_wr.send_wr;\r
1688 \r
1689         cl_memclr( p_send_wr, sizeof( ib_send_wr_t ) );\r
1690 \r
1691         p_send_wr->wr_type = WR_SEND;\r
1692         p_send_wr->send_opt = h_send->p_send_mad->send_opt;\r
1693 \r
1694         p_al_element = PARENT_STRUCT( h_send->p_send_mad,\r
1695                 al_mad_element_t, element );\r
1696 \r
1697         /* See if the MAD requires RMPP support. */\r
1698         if( h_send->uses_rmpp && p_al_element->p_al_mad_buf )\r
1699         {\r
1700 #if defined( CL_KERNEL )\r
1701                 p_rmpp_dst = p_al_element->mad_buf + sizeof(ib_grh_t);\r
1702 #else\r
1703                 p_rmpp_dst = (uint8_t*)(uintn_t)p_al_element->mad_ds.vaddr;\r
1704 #endif\r
1705                 p_rmpp_src = (uint8_t* __ptr64)h_send->p_send_mad->p_mad_buf;\r
1706                 p_rmpp_hdr = (ib_rmpp_mad_t*)p_rmpp_src;\r
1707 \r
1708                 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1709                         hdr_len = IB_SA_MAD_HDR_SIZE;\r
1710                 else\r
1711                         hdr_len = MAD_RMPP_HDR_SIZE;\r
1712 \r
1713                 max_len = MAD_BLOCK_SIZE - hdr_len;\r
1714 \r
1715                 offset = hdr_len + (max_len * (cl_ntoh32( p_rmpp_hdr->seg_num ) - 1));\r
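                /*\r
                 * For example, for an SA MAD (hdr_len presumably 56, max_len 200),\r
                 * segment 3 copies user-buffer bytes starting at offset\r
                 * 56 + 200 * 2 = 456.\r
                 */\r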
1716 \r
1717                 /* Copy the header into the registered send buffer. */\r
1718                 cl_memcpy( p_rmpp_dst, p_rmpp_src, hdr_len );\r
1719 \r
1720                 /* Copy this segment's payload into the registered send buffer. */\r
1721                 CL_ASSERT( h_send->p_send_mad->size != offset );\r
1722                 if( (h_send->p_send_mad->size - offset) < max_len )\r
1723                 {\r
1724                         max_len = h_send->p_send_mad->size - offset;\r
1725                         /* Clear unused payload. */\r
1726                         cl_memclr( p_rmpp_dst + hdr_len + max_len,\r
1727                                 MAD_BLOCK_SIZE - hdr_len - max_len );\r
1728                 }\r
1729 \r
1730                 cl_memcpy(\r
1731                         p_rmpp_dst + hdr_len, p_rmpp_src + offset, max_len );\r
1732         }\r
1733 \r
1734         p_send_wr->num_ds = 1;\r
1735         p_send_wr->ds_array = &p_al_element->mad_ds;\r
1736 \r
1737         p_send_wr->dgrm.ud.remote_qp = h_send->p_send_mad->remote_qp;\r
1738         p_send_wr->dgrm.ud.remote_qkey = h_send->p_send_mad->remote_qkey;\r
1739         p_send_wr->dgrm.ud.pkey_index = h_send->p_send_mad->pkey_index;\r
1740 \r
1741         /* See if we created the address vector on behalf of the user. */\r
1742         if( h_send->p_send_mad->h_av )\r
1743                 p_send_wr->dgrm.ud.h_av = h_send->p_send_mad->h_av;\r
1744         else\r
1745                 p_send_wr->dgrm.ud.h_av = h_send->h_av;\r
1746 \r
1747         __mad_disp_queue_send( h_mad_reg, &h_send->mad_wr );\r
1748 \r
1749         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1750 }\r
1751 \r
1752 \r
1753 \r
1754 static cl_status_t\r
1755 __mad_svc_find_send(\r
1756         IN              const   cl_list_item_t* const           p_list_item,\r
1757         IN                              void*                                           context )\r
1758 {\r
1759         ib_mad_send_handle_t    h_send;\r
1760 \r
1761         h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1762 \r
1763         if( h_send->p_send_mad == context )\r
1764                 return CL_SUCCESS;\r
1765         else\r
1766                 return CL_NOT_FOUND;\r
1767 }\r
1768 \r
1769 \r
1770 \r
1771 ib_api_status_t\r
1772 ib_cancel_mad(\r
1773         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
1774         IN                              ib_mad_element_t* const         p_mad_element )\r
1775 {\r
1776 #ifdef CL_KERNEL\r
1777         cl_list_item_t                  *p_list_item;\r
1778         ib_mad_send_handle_t    h_send;\r
1779 #else\r
1780         ib_api_status_t                 status;\r
1781 #endif\r
1782 \r
1783         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1784 \r
1785         if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
1786         {\r
1787                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );\r
1788                 return IB_INVALID_HANDLE;\r
1789         }\r
1790         if( !p_mad_element )\r
1791         {\r
1792                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
1793                 return IB_INVALID_PARAMETER;\r
1794         }\r
1795 \r
1796 #ifndef CL_KERNEL\r
1797         /* This cancels a MAD sent from user mode via the special QP alias. */\r
1798         status = spl_qp_cancel_mad( h_mad_svc, p_mad_element );\r
1799         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1800         return status;\r
1801 #else\r
1802         /* Search for the MAD in our MAD list.  It may have already completed. */\r
1803         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1804         p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,\r
1805                 __mad_svc_find_send, p_mad_element );\r
1806 \r
1807         if( !p_list_item )\r
1808         {\r
1809                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1810                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("mad not found\n") );\r
1811                 return IB_NOT_FOUND;\r
1812         }\r
1813 \r
1814         /* Mark the MAD as having been canceled. */\r
1815         h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1816         h_send->canceled = TRUE;\r
1817 \r
1818         /* If the send is waiting on a response or retry, have the timer thread report the cancellation; an active send is reported from its completion callback. */\r
1819         if( h_send->retry_time != MAX_TIME )\r
1820         {\r
1821                 /* Process the canceled MAD using the timer thread. */\r
1822                 cl_timer_trim( &h_mad_svc->send_timer, 0 );\r
1823         }\r
1824 \r
1825         cl_spinlock_release( &h_mad_svc->obj.lock );\r
1826         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1827         return IB_SUCCESS;\r
1828 #endif\r
1829 }\r
1830 \r
1831 \r
1832 ib_api_status_t\r
1833 ib_delay_mad(\r
1834         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
1835         IN                              ib_mad_element_t* const         p_mad_element,\r
1836         IN              const   uint32_t                                        delay_ms )\r
1837 {\r
1838 #ifdef CL_KERNEL\r
1839         cl_list_item_t                  *p_list_item;\r
1840         ib_mad_send_handle_t    h_send;\r
1841 #endif\r
1842 \r
1843         AL_ENTER( AL_DBG_MAD_SVC );\r
1844 \r
1845         if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
1846         {\r
1847                 AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
1848                 return IB_INVALID_HANDLE;\r
1849         }\r
1850         if( !p_mad_element )\r
1851         {\r
1852                 AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
1853                 return IB_INVALID_PARAMETER;\r
1854         }\r
1855 \r
1856 #ifndef CL_KERNEL\r
1857         UNUSED_PARAM( p_mad_element );\r
1858         UNUSED_PARAM( delay_ms );\r
1859         /* TODO: support for user-mode MAD QP's. */\r
1860         AL_EXIT( AL_DBG_MAD_SVC );\r
1861         return IB_UNSUPPORTED;\r
1862 #else\r
1863         /* Search for the MAD in our MAD list.  It may have already completed. */\r
1864         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1865         p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,\r
1866                 __mad_svc_find_send, p_mad_element );\r
1867 \r
1868         if( !p_list_item )\r
1869         {\r
1870                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1871                 AL_TRACE( AL_DBG_MAD_SVC, ("MAD not found\n") );\r
1872                 return IB_NOT_FOUND;\r
1873         }\r
1874 \r
1875         /* Found the send.  Apply the requested delay to it. */\r
1876         h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1877 \r
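        /*\r
         * If the send is still active on the QP (no retry time set yet), record\r
         * the delay so that it is applied when the retry is scheduled; otherwise\r
         * push out the pending retry time directly (retry_time appears to be\r
         * kept in microseconds, hence the ms-to-us conversion below).\r
         */\r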
1878         if( h_send->retry_time == MAX_TIME )\r
1879                 h_send->delay = delay_ms;\r
1880         else\r
1881                 h_send->retry_time += ((uint64_t)delay_ms * 1000ULL);\r
1882 \r
1883         cl_spinlock_release( &h_mad_svc->obj.lock );\r
1884         AL_EXIT( AL_DBG_MAD_SVC );\r
1885         return IB_SUCCESS;\r
1886 #endif\r
1887 }\r
1888 \r
1889 \r
1890 /*\r
1891  * Process a send completion.\r
1892  */\r
1893 static void\r
1894 __mad_svc_send_done(\r
1895         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1896         IN                              al_mad_wr_t                                     *p_mad_wr,\r
1897         IN                              ib_wc_t                                         *p_wc )\r
1898 {\r
1899         ib_mad_send_handle_t    h_send;\r
1900 \r
1901         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1902         CL_ASSERT( h_mad_svc && p_mad_wr && !p_wc->p_next );\r
1903 \r
1904         h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1905         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send callback TID:0x%"PRIx64"\n",\r
1906                 __get_send_tid( h_send )) );\r
1907 \r
1908         /* We need to synchronize access to the list as well as the MAD request. */\r
1909         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1910 \r
1911         /* Complete internally sent MADs. */\r
1912         if( __is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )\r
1913         {\r
1914                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("internal send\n") );\r
1915                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
1916                         (cl_list_item_t*)&h_send->pool_item );\r
1917                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1918                 ib_put_mad( h_send->p_send_mad );\r
1919                 __cleanup_mad_send( h_mad_svc, h_send );\r
1920                 return;\r
1921         }\r
1922 \r
1923         /* See if the send request has completed. */\r
1924         if( __is_send_mad_done( h_send, p_wc ) )\r
1925         {\r
1926                 /* The send has completed. */\r
1927                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
1928                         (cl_list_item_t*)&h_send->pool_item );\r
1929                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1930 \r
1931                 /* Report the send as canceled only if we don't have the response. */\r
1932                 if( h_send->canceled && !h_send->p_resp_mad )\r
1933                         __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );\r
1934                 else\r
1935                         __notify_send_comp( h_mad_svc, h_send, p_wc->status );\r
1936         }\r
1937         else\r
1938         {\r
1939                 /* See if this is an RMPP MAD, and we should send more segments. */\r
1940                 if( h_send->uses_rmpp && (h_send->cur_seg <= h_send->seg_limit) )\r
1941                 {\r
1942                         /* Send the next segment. */\r
1943                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
1944                                 ("sending next RMPP segment for TID:0x%"PRIx64"\n",\r
1945                                 __get_send_tid( h_send )) );\r
1946 \r
1947                         __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
1948                 }\r
1949                 else\r
1950                 {\r
1951                         /* Continue waiting for a response or ACK. */\r
1952                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
1953                                 ("waiting for response for TID:0x%"PRIx64"\n",\r
1954                                 __get_send_tid( h_send )) );\r
1955 \r
1956                         __set_retry_time( h_send );\r
1957                         cl_timer_trim( &h_mad_svc->send_timer,\r
1958                                 h_send->p_send_mad->timeout_ms );\r
1959                 }\r
1960                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1961         }\r
1962 \r
1963         /*\r
1964          * Resume any sends that can now be sent without holding\r
1965          * the mad service lock.\r
1966          */\r
1967         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
1968 \r
1969         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1970 }\r
1971 \r
1972 \r
1973 \r
1974 /*\r
1975  * Notify the user of a completed send operation.\r
1976  */\r
1977 static void\r
1978 __notify_send_comp(\r
1979         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1980         IN                              ib_mad_send_handle_t            h_send,\r
1981         IN                              ib_wc_status_t                          wc_status )\r
1982 {\r
1983         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1984 \r
1985         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("completing TID:0x%"PRIx64"\n",\r
1986                 __get_send_tid( h_send )) );\r
1987 \r
1988         h_send->p_send_mad->status = wc_status;\r
1989 \r
1990         /* Notify the user of a received response, if one exists. */\r
1991         if( h_send->p_resp_mad )\r
1992         {\r
1993                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
1994                         h_send->p_resp_mad );\r
1995         }\r
1996 \r
1997         /* The transaction has completed, return the send MADs. */\r
1998         h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
1999                 h_send->p_send_mad );\r
2000 \r
2001         __cleanup_mad_send( h_mad_svc, h_send );\r
2002 \r
2003         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2004 }\r
2005 \r
2006 \r
2007 \r
2008 /*\r
2009  * Return a send MAD tracking structure to its pool and cleanup any resources\r
2010  * it may have allocated.\r
2011  */\r
2012 static void\r
2013 __cleanup_mad_send(\r
2014         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2015         IN                              ib_mad_send_handle_t            h_send )\r
2016 {\r
2017         /* Release any address vectors that we may have created. */\r
2018         if( h_send->h_av )\r
2019         {\r
2020                 ib_destroy_av( h_send->h_av );\r
2021         }\r
2022 \r
2023         /* Return the send MAD tracking structure to its pool. */\r
2024         put_mad_send( h_send );\r
2025 \r
2026         /* We no longer need to reference the MAD service. */\r
2027         deref_al_obj( &h_mad_svc->obj );\r
2028 }\r
2029 \r
2030 \r
2031 \r
2032 static boolean_t\r
2033 __is_send_mad_done(\r
2034         IN                              ib_mad_send_handle_t            h_send,\r
2035         IN                              ib_wc_t                                         *p_wc )\r
2036 {\r
2037         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2038 \r
2039         /* Complete the send if the request failed. */\r
2040         if( p_wc->status != IB_WCS_SUCCESS )\r
2041         {\r
2042                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("y-send failed\n" ) );\r
2043                 return TRUE;\r
2044         }\r
2045 \r
2046         /* Complete the send if it has been canceled. */\r
2047         if( h_send->canceled )\r
2048         {\r
2049                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2050                         ("y-send was canceled\n") );\r
2051                 return TRUE;\r
2052         }\r
2053 \r
2054         /* Complete the send if we have its response. */\r
2055         if( h_send->p_resp_mad )\r
2056         {\r
2057                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2058                         ("y-response received\n") );\r
2059                 return TRUE;\r
2060         }\r
2061 \r
2062         /* RMPP sends cannot complete until all segments have been acked. */\r
2063         if( h_send->uses_rmpp && (h_send->ack_seg < h_send->total_seg) )\r
2064         {\r
2065                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2066                         ("more RMPP segments to send\n") );\r
2067                 return FALSE;\r
2068         }\r
2069 \r
2070         /*\r
2071          * All segments of this send have been sent.\r
2072          * The send has completed if we are not waiting for a response.\r
2073          */\r
2074         if( h_send->p_send_mad->resp_expected )\r
2075         {\r
2076                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2077                         ("no-waiting on response\n") );\r
2078                 return FALSE;\r
2079         }\r
2080         else\r
2081         {\r
2082                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send completed\n") );\r
2083                 return TRUE;\r
2084         }\r
2085 }\r
2086 \r
2087 \r
2088 \r
2089 /*\r
2090  * Try to find a send that matches the received response.  This call must\r
2091  * be synchronized with access to the MAD service send_list.\r
2092  */\r
2093 static ib_mad_send_handle_t\r
2094 __mad_svc_match_recv(\r
2095         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
2096         IN                              ib_mad_element_t* const         p_recv_mad )\r
2097 {\r
2098         ib_mad_t                                *p_recv_hdr;\r
2099         cl_list_item_t                  *p_list_item;\r
2100         ib_mad_send_handle_t    h_send;\r
2101 \r
2102         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2103 \r
2104         p_recv_hdr = p_recv_mad->p_mad_buf;\r
2105 \r
2106         /* Search the send list for a matching request. */\r
2107         for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
2108                  p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
2109                  p_list_item = cl_qlist_next( p_list_item ) )\r
2110         {\r
2111                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
2112 \r
2113                 /* Match on the transaction ID, ignoring internally generated sends. */\r
2114                 if( (p_recv_hdr->trans_id == h_send->mad_wr.client_tid) &&\r
2115                          !__is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )\r
2116                 {\r
2117                         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2118                         return h_send;\r
2119                 }\r
2120         }\r
2121 \r
2122         return NULL;\r
2123 }\r
2124 \r
2125 \r
2126 \r
2127 static void\r
2128 __mad_svc_recv_done(\r
2129         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2130         IN                              ib_mad_element_t                        *p_mad_element )\r
2131 {\r
2132         ib_mad_t                                *p_mad_hdr;\r
2133         cl_status_t                             cl_status;\r
2134 \r
2135         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2136 \r
2137         p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2138         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("recv done TID:0x%"PRIx64"\n",\r
2139                 p_mad_hdr->trans_id) );\r
2140 \r
2141         /* Raw MAD services get all receives. */\r
2142         if( h_mad_svc->svc_type == IB_MAD_SVC_RAW )\r
2143         {\r
2144                 /* Report the receive. */\r
2145                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2146                         ("recv TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2147                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2148                         p_mad_element );\r
2149                 return;\r
2150         }\r
2151 \r
2152         /*\r
2153          * If the response indicates that the responder was busy, continue\r
2154          * retrying the request.\r
2155          */\r
2156         if( p_mad_hdr->status & IB_MAD_STATUS_BUSY )\r
2157         {\r
2158                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
2159                         ("responder busy TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
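                /*\r
                 * Dropping the busy response leaves the original request on the\r
                 * send list, so it will be retried when its retry timer fires.\r
                 */\r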
2160                 ib_put_mad( p_mad_element );\r
2161                 return;\r
2162         }\r
2163 \r
2164         /* Fully reassemble received MADs before completing them. */\r
2165         if( __recv_requires_rmpp( h_mad_svc->svc_type, p_mad_element ) )\r
2166         {\r
2167                 /* Reassembling the receive. */\r
2168                 cl_status = __do_rmpp_recv( h_mad_svc, &p_mad_element );\r
2169                 if( cl_status != CL_SUCCESS )\r
2170                 {\r
2171                         /* The reassembly is not done. */\r
2172                         CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2173                                 ("no RMPP receive to report\n") );\r
2174                         return;\r
2175                 }\r
2176 \r
2177                 /*\r
2178                  * Get the header to the MAD element to report to the user.  This\r
2179                  * will be a MAD element received earlier.\r
2180                  */\r
2181                 p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2182         }\r
2183 \r
2184         /*\r
2185          * See if the MAD was sent in response to a previously sent MAD.  Note\r
2186          * that trap repress messages are responses, even though the response\r
2187          * bit isn't set.\r
2188          */\r
2189         if( ib_mad_is_response( p_mad_hdr ) ||\r
2190                 (p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) )\r
2191         {\r
2192                 /* Process the received response. */\r
2193                 __process_recv_resp( h_mad_svc, p_mad_element );\r
2194         }\r
2195         else\r
2196         {\r
2197                 /* Report the receive. */\r
2198                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("unsol recv TID:0x%"PRIx64"\n",\r
2199                         p_mad_hdr->trans_id) );\r
2200                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2201                         p_mad_element );\r
2202         }\r
2203         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2204 }\r
2205 \r
2206 \r
2207 \r
2208 /*\r
2209  * A MAD was received in response to a send.  Find the corresponding send\r
2210  * and process the receive completion.\r
2211  */\r
2212 static void\r
2213 __process_recv_resp(\r
2214         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2215         IN                              ib_mad_element_t                        *p_mad_element )\r
2216 {\r
2217         ib_mad_t                                *p_mad_hdr;\r
2218         ib_mad_send_handle_t    h_send;\r
2219 \r
2220         /*\r
2221          * Try to find the send.  The send may have already timed out or\r
2222          * have been canceled, so we need to search for it.\r
2223          */\r
2224         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2225         p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2226         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2227 \r
2228         h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );\r
2229         if( !h_send )\r
2230         {\r
2231                 /* A matching send was not found. */\r
2232                 CL_TRACE_EXIT( AL_DBG_WARN, g_al_dbg_lvl,\r
2233                         ("unmatched resp TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2234                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2235                 ib_put_mad( p_mad_element );\r
2236                 return;\r
2237         }\r
2238 \r
2239         /* We've found the matching send. */\r
2240         h_send->p_send_mad->status = IB_WCS_SUCCESS;\r
2241 \r
2242         /* Record the send contexts with the receive. */\r
2243         p_mad_element->send_context1 = (void* __ptr64)h_send->p_send_mad->context1;\r
2244         p_mad_element->send_context2 = (void* __ptr64)h_send->p_send_mad->context2;\r
2245 \r
2246         if( h_send->retry_time == MAX_TIME )\r
2247         {\r
2248                 /* The send is currently active.  Do not report it. */\r
2249                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2250                         ("resp send active TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2251                 h_send->p_resp_mad = p_mad_element;\r
2252                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2253         }\r
2254         else\r
2255         {\r
2256                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2257                         ("resp received TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2258 \r
2259                 /* Report the send completion below. */\r
2260                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
2261                         (cl_list_item_t*)&h_send->pool_item );\r
2262                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2263 \r
2264                 /* Report the receive. */\r
2265                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2266                         p_mad_element );\r
2267 \r
2268                 /* Report the send completion. */\r
2269                 h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2270                         h_send->p_send_mad );\r
2271                 __cleanup_mad_send( h_mad_svc, h_send );\r
2272         }\r
2273         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2274 }\r
2275 \r
2276 \r
2277 \r
2278 /*\r
2279  * Return TRUE if a received MAD requires RMPP processing.\r
2280  */\r
2281 static __inline boolean_t\r
2282 __recv_requires_rmpp(\r
2283         IN              const   ib_mad_svc_type_t                       mad_svc_type,\r
2284         IN              const   ib_mad_element_t* const         p_mad_element )\r
2285 {\r
2286         ib_rmpp_mad_t                           *p_rmpp_mad;\r
2287 \r
2288         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2289 \r
2290         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2291 \r
2292         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2293 \r
2294         switch( mad_svc_type )\r
2295         {\r
2296         case IB_MAD_SVC_DEFAULT:\r
2297                 /* Only subnet administration (SA) receives require RMPP. */\r
2298                 return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&\r
2299                         ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );\r
2300 \r
2301         case IB_MAD_SVC_RMPP:\r
2302                 return( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );\r
2303 \r
2304         default:\r
2305                 return FALSE;\r
2306         }\r
2307 }\r
2308 \r
2309 \r
2310 \r
2311 /*\r
2312  * Return TRUE if the MAD was issued by AL itself.\r
2313  */\r
2314 static __inline boolean_t\r
2315 __is_internal_send(\r
2316         IN              const   ib_mad_svc_type_t                       mad_svc_type,\r
2317         IN              const   ib_mad_element_t* const         p_mad_element )\r
2318 {\r
2319         ib_rmpp_mad_t           *p_rmpp_mad;\r
2320 \r
2321         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2322 \r
2323         /* See if the MAD service issues internal MADs. */\r
2324         switch( mad_svc_type )\r
2325         {\r
2326         case IB_MAD_SVC_DEFAULT:\r
2327                 /* Internal sends are SA MADs whose RMPP type is set but is not DATA. */\r
2328                 return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&\r
2329                                 (p_rmpp_mad->rmpp_type &&\r
2330                                 (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) );\r
2331 \r
2332         case IB_MAD_SVC_RMPP:\r
2333                 /* The RMPP header is present.  Check its type. */\r
2334                 return( (p_rmpp_mad->rmpp_type) &&\r
2335                                 (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) );\r
2336 \r
2337         default:\r
2338                 return FALSE;\r
2339         }\r
2340 }\r
2341 \r
2342 \r
2343 \r
2344 /*\r
2345  * Fully reassemble a received MAD.  Returns CL_SUCCESS once all segments have\r
2346  * been received, returning the fully reassembled MAD through pp_mad_element.\r
2347  */\r
2348 static cl_status_t\r
2349 __do_rmpp_recv(\r
2350         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2351         IN      OUT                     ib_mad_element_t                        **pp_mad_element )\r
2352 {\r
2353         ib_rmpp_mad_t           *p_rmpp_mad;\r
2354         cl_status_t                     cl_status;\r
2355 \r
2356         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2357 \r
2358         p_rmpp_mad = ib_get_mad_buf( *pp_mad_element );\r
2359         CL_ASSERT( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );\r
2360 \r
2361         /* Perform the correct operation based on the RMPP MAD type. */\r
2362         switch( p_rmpp_mad->rmpp_type )\r
2363         {\r
2364         case IB_RMPP_TYPE_DATA:\r
2365                 cl_status = __process_rmpp_data( h_mad_svc, pp_mad_element );\r
2366                 /* Return the received element back to its MAD pool if not needed. */\r
2367                 if( (cl_status != CL_SUCCESS) && (cl_status != CL_NOT_DONE) )\r
2368                 {\r
2369                         ib_put_mad( *pp_mad_element );\r
2370                 }\r
2371                 break;\r
2372 \r
2373         case IB_RMPP_TYPE_ACK:\r
2374                 /* Process the ACK. */\r
2375                 __process_rmpp_ack( h_mad_svc, *pp_mad_element );\r
2376                 ib_put_mad( *pp_mad_element );\r
2377                 cl_status = CL_COMPLETED;\r
2378                 break;\r
2379 \r
2380         case IB_RMPP_TYPE_STOP:\r
2381         case IB_RMPP_TYPE_ABORT:\r
2382         default:\r
2383                 /* Process the ABORT or STOP. */\r
2384                 __process_rmpp_nack( h_mad_svc, *pp_mad_element );\r
2385                 ib_put_mad( *pp_mad_element );\r
2386                 cl_status = CL_REJECT;\r
2387                 break;\r
2388         }\r
2389 \r
2390         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2391         return cl_status;\r
2392 }\r
2393 \r
2394 \r
2395 \r
2396 /*\r
2397  * Process an RMPP DATA message.  Reassemble the received data.  If the\r
2398  * received MAD is fully reassembled, this call returns CL_SUCCESS.\r
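 * Other values returned below: CL_NOT_DONE while more segments are expected,\r
 * CL_DUPLICATE for a stale segment (the last ACK is resent), CL_OVERRUN for an\r
 * out-of-order segment, CL_NOT_FOUND when a non-first segment arrives with no\r
 * reassembly in progress, and CL_INSUFFICIENT_MEMORY when no tracking structure\r
 * is available.\r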
2399  */\r
2400 static cl_status_t\r
2401 __process_rmpp_data(\r
2402         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2403         IN      OUT                     ib_mad_element_t                        **pp_mad_element )\r
2404 {\r
2405         ib_mad_element_t        *p_rmpp_resp_mad = NULL;\r
2406         al_mad_rmpp_t           *p_rmpp;\r
2407         ib_rmpp_mad_t           *p_rmpp_hdr;\r
2408         uint32_t                        cur_seg;\r
2409         cl_status_t                     cl_status;\r
2410         ib_api_status_t         status;\r
2411 \r
2412         p_rmpp_hdr = ib_get_mad_buf( *pp_mad_element );\r
2413         CL_ASSERT( p_rmpp_hdr->rmpp_type == IB_RMPP_TYPE_DATA );\r
2414 \r
2415         /* Try to find a receive already being reassembled. */\r
2416         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2417         p_rmpp = __find_rmpp( h_mad_svc, *pp_mad_element );\r
2418         if( !p_rmpp )\r
2419         {\r
2420                 /* No reassembly is in progress for this receive; it must be the first segment. */\r
2421                 if( cl_ntoh32( p_rmpp_hdr->seg_num ) != 1 )\r
2422                 {\r
2423                         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2424                         return CL_NOT_FOUND;\r
2425                 }\r
2426 \r
2427                 /* Start tracking the new reassembly. */\r
2428                 p_rmpp = __get_mad_rmpp( h_mad_svc, *pp_mad_element );\r
2429                 if( !p_rmpp )\r
2430                 {\r
2431                         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2432                         return CL_INSUFFICIENT_MEMORY;\r
2433                 }\r
2434         }\r
2435 \r
2436         /* Verify that we just received the expected segment. */\r
2437         cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );\r
2438         if( cur_seg == p_rmpp->expected_seg )\r
2439         {\r
2440                 /* Copy the new segment's data into our reassembly buffer. */\r
2441                 cl_status = __process_segment( h_mad_svc, p_rmpp,\r
2442                         pp_mad_element, &p_rmpp_resp_mad );\r
2443 \r
2444                 /* See if the RMPP is done. */\r
2445                 if( cl_status == CL_SUCCESS )\r
2446                 {\r
2447                         /* Stop tracking the reassembly. */\r
2448                         __put_mad_rmpp( h_mad_svc, p_rmpp );\r
2449                 }\r
2450                 else if( cl_status == CL_NOT_DONE )\r
2451                 {\r
2452                         /* Start the reassembly timer. */\r
2453                         cl_timer_trim( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );\r
2454                 }\r
2455         }\r
2456         else if( cur_seg < p_rmpp->expected_seg )\r
2457         {\r
2458                 /* We received an old segment.  Resend the last ACK. */\r
2459                 p_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );\r
2460                 cl_status = CL_DUPLICATE;\r
2461         }\r
2462         else\r
2463         {\r
2464                 /* The sender is confused, ignore this MAD.  We could ABORT here. */\r
2465                 cl_status = CL_OVERRUN;\r
2466         }\r
2467 \r
2468         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2469 \r
2470         /*\r
2471          * Send any response MAD (ACK, ABORT, etc.) to the sender.  Note that\r
2472          * we are currently in the callback from the MAD dispatcher.  The\r
2473          * dispatcher holds a reference on the MAD service while in the callback,\r
2474          * preventing the MAD service from being destroyed.  This allows the\r
2475          * call to ib_send_mad() to proceed even if the user tries to destroy\r
2476          * the MAD service.\r
2477          */\r
2478         if( p_rmpp_resp_mad )\r
2479         {\r
2480                 status = ib_send_mad( h_mad_svc, p_rmpp_resp_mad, NULL );\r
2481                 if( status != IB_SUCCESS )\r
2482                 {\r
2483                         /* Return the MAD.  The MAD is considered dropped. */\r
2484                         ib_put_mad( p_rmpp_resp_mad );\r
2485                 }\r
2486         }\r
2487 \r
2488         return cl_status;\r
2489 }\r
2490 \r
2491 \r
2492 \r
2493 /*\r
2494  * Locate an existing RMPP MAD being reassembled.  Return NULL if one is not\r
2495  * found.  This call assumes access to the recv_list is synchronized.\r
2496  */\r
2497 static al_mad_rmpp_t*\r
2498 __find_rmpp(\r
2499         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2500         IN      OUT                     ib_mad_element_t                        *p_mad_element )\r
2501 {\r
2502         al_mad_rmpp_t                   *p_rmpp;\r
2503         cl_list_item_t                  *p_list_item;\r
2504         ib_mad_t                                *p_mad_hdr, *p_mad_hdr2;\r
2505         ib_mad_element_t                *p_mad_element2;\r
2506 \r
2507 \r
2508         p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2509 \r
2510         /* Search all MADs being reassembled. */\r
2511         for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );\r
2512                  p_list_item != cl_qlist_end( &h_mad_svc->recv_list );\r
2513                  p_list_item = cl_qlist_next( p_list_item ) )\r
2514         {\r
2515                 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );\r
2516 \r
2517                 p_mad_element2 = p_rmpp->p_mad_element;\r
2518                 p_mad_hdr2 = ib_get_mad_buf( p_mad_element2 );\r
2519 \r
2520                 /* See if the incoming MAD matches this reassembly in progress. */\r
2521                 if( (p_mad_hdr->trans_id                == p_mad_hdr2->trans_id)                &&\r
2522                         (p_mad_hdr->class_ver           == p_mad_hdr2->class_ver)               &&\r
2523                         (p_mad_hdr->mgmt_class          == p_mad_hdr2->mgmt_class)              &&\r
2524                         (p_mad_hdr->method                      == p_mad_hdr2->method)                  &&\r
2525                         (p_mad_element->remote_lid      == p_mad_element2->remote_lid)  &&\r
2526                         (p_mad_element->remote_qp       == p_mad_element2->remote_qp) )\r
2527                 {\r
2528                         return p_rmpp;\r
2529                 }\r
2530         }\r
2531 \r
2532         return NULL;\r
2533 }\r
2534 \r
2535 \r
2536 \r
2537 /*\r
2538  * Acquire a new RMPP tracking structure.  This call assumes access to\r
2539  * the recv_list is synchronized.\r
2540  */\r
2541 static al_mad_rmpp_t*\r
2542 __get_mad_rmpp(\r
2543         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2544         IN                              ib_mad_element_t                        *p_mad_element )\r
2545 {\r
2546         al_mad_rmpp_t           *p_rmpp;\r
2547         al_mad_element_t        *p_al_element;\r
2548 \r
2549         p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );\r
2550 \r
2551         /* Get an RMPP tracking structure. */\r
2552         p_rmpp = get_mad_rmpp( p_al_element );\r
2553         if( !p_rmpp )\r
2554                 return NULL;\r
2555 \r
2556         /* Initialize the tracking information. */\r
2557         p_rmpp->expected_seg = 1;\r
2558         p_rmpp->seg_limit = 1;\r
2559         p_rmpp->inactive = FALSE;\r
2560         p_rmpp->p_mad_element = p_mad_element;\r
2561 \r
2562         /* Insert the tracking structure into the reassembly list. */\r
2563         cl_qlist_insert_tail( &h_mad_svc->recv_list,\r
2564                 (cl_list_item_t*)&p_rmpp->pool_item );\r
2565 \r
2566         return p_rmpp;\r
2567 }\r
2568 \r
2569 \r
2570 \r
2571 /*\r
2572  * Return the RMPP tracking structure.  This call assumes access to\r
2573  * the recv_list is synchronized.\r
2574  */\r
2575 static void\r
2576 __put_mad_rmpp(\r
2577         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2578         IN                              al_mad_rmpp_t                           *p_rmpp )\r
2579 {\r
2580         /* Remove the tracking structure from the reassembly list. */\r
2581         cl_qlist_remove_item( &h_mad_svc->recv_list,\r
2582                 (cl_list_item_t*)&p_rmpp->pool_item );\r
2583 \r
2584         /* Return the RMPP tracking structure. */\r
2585         put_mad_rmpp( p_rmpp );\r
2586 }\r
2587 \r
2588 \r
2589 \r
2590 /*\r
2591  * Process a received RMPP segment.  Copy the data into our receive buffer,\r
2592  * update the expected segment, and send an ACK if needed.\r
2593  */\r
2594 static cl_status_t\r
2595 __process_segment(\r
2596         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2597         IN                              al_mad_rmpp_t                           *p_rmpp,\r
2598         IN      OUT                     ib_mad_element_t                        **pp_mad_element,\r
2599                 OUT                     ib_mad_element_t                        **pp_rmpp_resp_mad )\r
2600 {\r
2601         ib_rmpp_mad_t                   *p_rmpp_hdr;\r
2602         uint32_t                                cur_seg;\r
2603         ib_api_status_t                 status;\r
2604         cl_status_t                             cl_status;\r
2605         uint8_t                                 *p_dst_seg, *p_src_seg;\r
2606         uint32_t                                paylen;\r
2607 \r
2608         CL_ASSERT( h_mad_svc && p_rmpp && pp_mad_element && *pp_mad_element );\r
2609 \r
2610         p_rmpp_hdr = (ib_rmpp_mad_t*)(*pp_mad_element)->p_mad_buf;\r
2611         cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );\r
2612         CL_ASSERT( cur_seg == p_rmpp->expected_seg );\r
2613         CL_ASSERT( cur_seg <= p_rmpp->seg_limit );\r
2614 \r
2615         /* See if the receive has been fully reassembled. */\r
2616         if( ib_rmpp_is_flag_set( p_rmpp_hdr, IB_RMPP_FLAG_LAST ) )\r
2617                 cl_status = CL_SUCCESS;\r
2618         else\r
2619                 cl_status = CL_NOT_DONE;\r
2620         \r
2621         /* Save the payload length for later use. */\r
2622         paylen = cl_ntoh32(p_rmpp_hdr->paylen_newwin);\r
2623 \r
2624         /* The first segment's element is the reassembly buffer itself; only later segments are copied. */\r
2625         if( *pp_mad_element != p_rmpp->p_mad_element )\r
2626         {\r
2627                 /* SA MADs carry a larger class header, so use SA offsets and segment sizes. */\r
2628                 if( (*pp_mad_element)->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
2629                 {\r
2630                         /* Copy the received data into our reassembly buffer. */\r
2631                         p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +\r
2632                                 IB_SA_MAD_HDR_SIZE;\r
2633                         p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +\r
2634                                 IB_SA_MAD_HDR_SIZE + IB_SA_DATA_SIZE * (cur_seg - 1);\r
2635                         cl_memcpy( p_dst_seg, p_src_seg, IB_SA_DATA_SIZE );\r
2636                 }\r
2637                 else \r
2638                 {\r
2639                         /* Copy the received data into our reassembly buffer. */\r
2640                         p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +\r
2641                                 MAD_RMPP_HDR_SIZE;\r
2642                         p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +\r
2643                                 MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1);\r
2644                         cl_memcpy( p_dst_seg, p_src_seg, MAD_RMPP_DATA_SIZE );\r
2645                 }\r
2646                 /* This MAD is no longer needed. */\r
2647                 ib_put_mad( *pp_mad_element );\r
2648         }\r
2649 \r
2650         /* Update the size of the MAD if this is the last segment. */\r
2651         if ( cl_status == CL_SUCCESS )\r
2652         {\r
2653                 if (p_rmpp->p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
2654                 {\r
2655                 /*\r
2656                  * Note that the paylen already includes one extra SA header size,\r
2657                  * so add only the RMPP MAD header size for the first segment.\r
2658                  */\r
2659                         p_rmpp->p_mad_element->size = \r
2660                                 MAD_RMPP_HDR_SIZE + IB_SA_DATA_SIZE *(cur_seg - 1) + paylen;\r
2661                 }\r
2662                 else\r
2663                 {\r
2664                          p_rmpp->p_mad_element->size = \r
2665                                 MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1) + paylen;\r
2666                 }\r
2667         }\r
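        /*\r
         * For example, if the final segment is segment 3, the size computed\r
         * above is the common MAD+RMPP header plus two full data segments\r
         * plus the paylen carried in that last segment.\r
         */\r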
2668 \r
2669         /*\r
2670          * We are ready to accept the next segment.  We increment expected segment\r
2671          * even if we're done, so that ACKs correctly report the last segment.\r
2672          */\r
2673         p_rmpp->expected_seg++;\r
2674 \r
2675         /* Mark the RMPP inactive if the MAD service is being destroyed; otherwise it remains active. */\r
2676         p_rmpp->inactive = (h_mad_svc->obj.state == CL_DESTROYING);\r
2677 \r
2678         /* See if the receive has been fully reassembled. */\r
2679         if( cl_status == CL_NOT_DONE && cur_seg == p_rmpp->seg_limit )\r
2680         {\r
2681                 /* Allocate more segments for the incoming receive. */\r
2682                 status = al_resize_mad( p_rmpp->p_mad_element,\r
2683                         p_rmpp->p_mad_element->size + AL_RMPP_WINDOW * MAD_RMPP_DATA_SIZE );\r
2684 \r
2685                 /* If we couldn't allocate a new buffer, just drop the MAD. */\r
2686                 if( status == IB_SUCCESS )\r
2687                 {\r
2688                         /* Send an ACK indicating that more space is available. */\r
2689                         p_rmpp->seg_limit += AL_RMPP_WINDOW;\r
2690                         *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );\r
2691                 }\r
2692         }\r
2693         else if( cl_status == CL_SUCCESS )\r
2694         {\r
2695                 /* Return the element referencing the reassembled MAD. */\r
2696                 *pp_mad_element = p_rmpp->p_mad_element;\r
2697                 *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );\r
2698         }\r
2699 \r
2700         return cl_status;\r
2701 }\r
2702 \r
2703 \r
2704 \r
2705 /*\r
2706  * Get an ACK message to return to the sender of an RMPP MAD.\r
2707  */\r
2708 static ib_mad_element_t*\r
2709 __get_rmpp_ack(\r
2710         IN                              al_mad_rmpp_t                           *p_rmpp )\r
2711 {\r
2712         ib_mad_element_t                *p_mad_element;\r
2713         al_mad_element_t                *p_al_element;\r
2714         ib_api_status_t                 status;\r
2715         ib_rmpp_mad_t                   *p_ack_rmpp_hdr, *p_data_rmpp_hdr;\r
2716 \r
2717         /* Get a MAD to carry the ACK. */\r
2718         p_al_element = PARENT_STRUCT( p_rmpp->p_mad_element,\r
2719                 al_mad_element_t, element );\r
2720         status = ib_get_mad( p_al_element->pool_key, MAD_BLOCK_SIZE,\r
2721                 &p_mad_element );\r
2722         if( status != IB_SUCCESS )\r
2723         {\r
2724                 /* Just return.  The ACK will be treated as being dropped. */\r
2725                 return NULL;\r
2726         }\r
2727 \r
2728         /* Format the ACK. */\r
2729         p_ack_rmpp_hdr = ib_get_mad_buf( p_mad_element );\r
2730         p_data_rmpp_hdr = ib_get_mad_buf( p_rmpp->p_mad_element );\r
2731 \r
2732         __init_reply_element( p_mad_element, p_rmpp->p_mad_element );\r
2733 \r
2734         /* Copy the MAD common header. */\r
2735         cl_memcpy( &p_ack_rmpp_hdr->common_hdr, &p_data_rmpp_hdr->common_hdr,\r
2736                 sizeof( ib_mad_t ) );\r
2737 \r
2738         /* Flip the response bit in the method */\r
2739         p_ack_rmpp_hdr->common_hdr.method ^= IB_MAD_METHOD_RESP_MASK;\r
2740 \r
2741         p_ack_rmpp_hdr->rmpp_version = p_data_rmpp_hdr->rmpp_version;\r
2742         p_ack_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_ACK;\r
2743         ib_rmpp_set_resp_time( p_ack_rmpp_hdr, IB_RMPP_NO_RESP_TIME );\r
2744         p_ack_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_ACTIVE;\r
2745         p_ack_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;\r
2746 \r
2747         p_ack_rmpp_hdr->seg_num = cl_hton32( p_rmpp->expected_seg - 1 );\r
2748 \r
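        /*\r
         * In an ACK, the paylen_newwin field carries NewWindowLast: the\r
         * highest segment number the sender may transmit before waiting\r
         * for another ACK.\r
         */\r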
2749         if (p_rmpp->seg_limit == p_rmpp->expected_seg - 1 )\r
2750                 p_ack_rmpp_hdr->paylen_newwin = cl_hton32( 1 + p_rmpp->seg_limit);\r
2751         else\r
2752                 p_ack_rmpp_hdr->paylen_newwin = cl_hton32( p_rmpp->seg_limit );\r
2753 \r
2754         return p_mad_element;\r
2755 }\r
2756 \r
2757 \r
2758 \r
2759 /*\r
2760  * Copy necessary data between MAD elements to allow the destination\r
2761  * element to be sent to the sender of the source element.\r
2762  */\r
2763 static void\r
2764 __init_reply_element(\r
2765         IN                              ib_mad_element_t                        *p_dst_element,\r
2766         IN                              ib_mad_element_t                        *p_src_element )\r
2767 {\r
2768         p_dst_element->remote_qp = p_src_element->remote_qp;\r
2769         p_dst_element->remote_qkey = p_src_element->remote_qkey;\r
2770 \r
2771         if( p_src_element->grh_valid )\r
2772         {\r
2773                 p_dst_element->grh_valid = p_src_element->grh_valid;\r
2774                 cl_memcpy( p_dst_element->p_grh, p_src_element->p_grh,\r
2775                         sizeof( ib_grh_t ) );\r
2776         }\r
2777 \r
2778         p_dst_element->remote_lid = p_src_element->remote_lid;\r
2779         p_dst_element->remote_sl = p_src_element->remote_sl;\r
2780         p_dst_element->pkey_index = p_src_element->pkey_index;\r
2781         p_dst_element->path_bits = p_src_element->path_bits;\r
2782 }\r
2783 \r
2784 \r
2785 \r
2786 /*\r
2787  * Process an RMPP ACK message.  Continue sending additional segments.\r
2788  */\r
2789 static void\r
2790 __process_rmpp_ack(\r
2791         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2792         IN                              ib_mad_element_t                        *p_mad_element )\r
2793 {\r
2794         ib_mad_send_handle_t    h_send;\r
2795         ib_rmpp_mad_t                   *p_rmpp_mad;\r
2796         boolean_t                               send_done = FALSE;\r
2797         ib_wc_status_t                  wc_status = IB_WCS_SUCCESS;\r
2798 \r
2799         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2800         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2801 \r
2802         /*\r
2803          * Search for the send.  The send may have timed out, been canceled,\r
2804          * or received a response.\r
2805          */\r
2806         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2807         h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );\r
2808         if( !h_send )\r
2809         {\r
2810                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2811                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2812                         ("ACK cannot find a matching send\n") );\r
2813                 return;\r
2814         }\r
2815 \r
2816         /* Drop old ACKs. */\r
2817         if( cl_ntoh32( p_rmpp_mad->seg_num ) < h_send->ack_seg )\r
2818         {\r
2819                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2820                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2821                         ("old ACK - being dropped\n") );\r
2822                 return;\r
2823         }\r
2824 \r
2825         /* Update the acknowledged segment and segment limit. */\r
2826         h_send->ack_seg = cl_ntoh32( p_rmpp_mad->seg_num );\r
2827 \r
2828         /* Keep seg_limit <= total_seg to simplify checks. */\r
2829         if( cl_ntoh32( p_rmpp_mad->paylen_newwin ) > h_send->total_seg )\r
2830                 h_send->seg_limit = h_send->total_seg;\r
2831         else\r
2832                 h_send->seg_limit = cl_ntoh32( p_rmpp_mad->paylen_newwin );\r
2833 \r
2834         /* Reset the current segment to resume sending from the first unacknowledged segment. */\r
2835         h_send->cur_seg = h_send->ack_seg + 1;\r
2836 \r
2837         /* If the send is active, we will finish processing it once it completes. */\r
2838         if( h_send->retry_time == MAX_TIME )\r
2839         {\r
2840                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2841                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2842                         ("ACK processed, waiting for send to complete\n") );\r
2843                 return;\r
2844         }\r
2845 \r
2846         /*\r
2847          * Complete the send if all segments have been ack'ed and no\r
2848          * response is expected.  (If the response for a send had already been\r
2849          * received, we would have reported the completion regardless of the\r
2850          * send having been ack'ed.)\r
2851          */\r
2852         CL_ASSERT( !h_send->p_send_mad->resp_expected || !h_send->p_resp_mad );\r
2853         if( (h_send->ack_seg == h_send->total_seg) &&\r
2854                 !h_send->p_send_mad->resp_expected )\r
2855         {\r
2856                 /* The send is done.  All segments have been ack'ed. */\r
2857                 send_done = TRUE;\r
2858         }\r
2859         else if( h_send->ack_seg < h_send->seg_limit )\r
2860         {\r
2861                 /* Send the next segment. */\r
2862                 __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
2863         }\r
2864 \r
2865         if( send_done )\r
2866         {\r
2867                 /* Notify the user of a send completion or error. */\r
2868                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
2869                         (cl_list_item_t*)&h_send->pool_item );\r
2870                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2871                 __notify_send_comp( h_mad_svc, h_send, wc_status );\r
2872         }\r
2873         else\r
2874         {\r
2875                 /* Continue waiting for a response or a larger send window. */\r
2876                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2877         }\r
2878 \r
2879         /*\r
2880          * Resume any sends that can now be sent without holding\r
2881          * the mad service lock.\r
2882          */\r
2883         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
2884 \r
2885         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2886 }\r
2887 \r
2888 \r
2889 \r
2890 /*\r
2891  * Process an RMPP STOP or ABORT message.\r
2892  */\r
2893 static void\r
2894 __process_rmpp_nack(\r
2895         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2896         IN                              ib_mad_element_t                        *p_mad_element )\r
2897 {\r
2898         ib_mad_send_handle_t    h_send;\r
2899         ib_rmpp_mad_t                   *p_rmpp_mad;\r
2900 \r
2901         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2902         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2903 \r
2904         /* Search for the send.  The send may have timed out or been canceled. */\r
2905         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2906         h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );\r
2907         if( !h_send )\r
2908         {\r
2909                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2910                 return;\r
2911         }\r
2912 \r
2913         /* If the send is active, we will finish processing it once it completes. */\r
2914         if( h_send->retry_time == MAX_TIME )\r
2915         {\r
2916                 h_send->canceled = TRUE;\r
2917                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2918                 CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2919                 return;\r
2920         }\r
2921 \r
2922         /* Fail the send operation. */\r
2923         cl_qlist_remove_item( &h_mad_svc->send_list,\r
2924                 (cl_list_item_t*)&h_send->pool_item );\r
2925         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2926         __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );\r
2927 \r
2928         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2929 }\r
2930 \r
2931 \r
2932 \r
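/*\r
 * Set the absolute time at which the send should next be retried or timed\r
 * out, converting the caller's millisecond timeout (plus any accumulated\r
 * delay) into the microsecond units used by cl_get_time_stamp.\r
 */\r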
2933 static __inline void\r
2934 __set_retry_time(\r
2935         IN                              ib_mad_send_handle_t            h_send )\r
2936 {\r
2937         h_send->retry_time =\r
2938                 (uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000ULL +\r
2939                 cl_get_time_stamp();\r
2940         h_send->delay = 0;\r
2941 }\r
2942 \r
2943 \r
2944 \r
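/*\r
 * Send timer callback.  Scans the send queue for sends that have timed out\r
 * or were canceled by the user.\r
 */\r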
2945 static void\r
2946 __send_timer_cb(\r
2947         IN                              void                                            *context )\r
2948 {\r
2949         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2950 \r
2951         __check_send_queue( (ib_mad_svc_handle_t)context );\r
2952 \r
2953         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2954 }\r
2955 \r
2956 \r
2957 \r
2958 /*\r
2959  * Check the send queue for any sends that have timed out or were canceled\r
2960  * by the user.\r
2961  */\r
2962 static void\r
2963 __check_send_queue(\r
2964         IN                              ib_mad_svc_handle_t                     h_mad_svc )\r
2965 {\r
2966         ib_mad_send_handle_t    h_send;\r
2967         cl_list_item_t                  *p_list_item, *p_next_item;\r
2968         uint64_t                                cur_time;\r
2969         cl_qlist_t                              timeout_list;\r
2970 \r
2971         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2972 \r
2973         /*\r
2974          * The timeout list is used to call the user back without\r
2975          * holding the lock on the MAD service.\r
2976          */\r
2977         cl_qlist_init( &timeout_list );\r
2978         cur_time = cl_get_time_stamp();\r
2979 \r
2980         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2981 \r
2982         /* Check all outstanding sends. */\r
2983         for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
2984                  p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
2985                  p_list_item = p_next_item )\r
2986         {\r
2987                 p_next_item = cl_qlist_next( p_list_item );\r
2988                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
2989 \r
2990                 /* See if the request is active. */\r
2991                 if( h_send->retry_time == MAX_TIME )\r
2992                 {\r
2993                         /* The request is still active. */\r
2994                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("active TID:0x%"PRIx64"\n",\r
2995                                 __get_send_tid( h_send )) );\r
2996                         continue;\r
2997                 }\r
2998 \r
2999                 /* The request is not active. */\r
3000                 /* See if the request has been canceled. */\r
3001                 if( h_send->canceled )\r
3002                 {\r
3003                         /* The request has been canceled. */\r
3004                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("canceling TID:0x%"PRIx64"\n",\r
3005                                 __get_send_tid( h_send )) );\r
3006 \r
3007                         h_send->p_send_mad->status = IB_WCS_CANCELED;\r
3008                         cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );\r
3009                         cl_qlist_insert_tail( &timeout_list, p_list_item );\r
3010                         continue;\r
3011                 }\r
3012 \r
3013                 /* Skip requests that have not timed out. */\r
3014                 if( cur_time < h_send->retry_time )\r
3015                 {\r
3016                         /* The request has not timed out. */\r
3017                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("waiting on TID:0x%"PRIx64"\n",\r
3018                                 __get_send_tid( h_send )) );\r
3019 \r
3020                         /* Set the retry timer to the minimum needed time, in ms. */\r
3021                         cl_timer_trim( &h_mad_svc->send_timer,\r
3022                                 ((uint32_t)(h_send->retry_time - cur_time) / 1000) );\r
3023                         continue;\r
3024                 }\r
3025 \r
3026                 /* See if we need to retry the send operation. */\r
3027                 if( h_send->retry_cnt )\r
3028                 {\r
3029                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("retrying TID:0x%"PRIx64"\n",\r
3030                                 __get_send_tid( h_send )) );\r
3031 \r
3032                         /* Retry the send. */\r
3033                         h_send->retry_time = MAX_TIME;\r
3034                         h_send->retry_cnt--;\r
3035 \r
3036                         if( h_send->uses_rmpp )\r
3037                         {\r
3038                                 if( h_send->ack_seg < h_send->seg_limit )\r
3039                                 {\r
3040                                         /* Resend all unacknowledged segments. */\r
3041                                         h_send->cur_seg = h_send->ack_seg + 1;\r
3042                                         __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
3043                                 }\r
3044                                 else\r
3045                                 {\r
3046                                         /* The send was delivered.  Continue waiting. */\r
3047                                         __set_retry_time( h_send );\r
3048                                         cl_timer_trim( &h_mad_svc->send_timer,\r
3049                                                 ((uint32_t)(h_send->retry_time - cur_time) / 1000) );\r
3050                                 }\r
3051                         }\r
3052                         else\r
3053                         {\r
3054                                 /* The work request should already be formatted properly. */\r
3055                                 __mad_disp_queue_send( h_mad_svc->h_mad_reg,\r
3056                                         &h_send->mad_wr );\r
3057                         }\r
3058                         continue;\r
3059                 }\r
3060                 /* The request has timed out and has no retries remaining. */\r
3061                 AL_TRACE( AL_DBG_MAD_SVC | AL_DBG_WARN,\r
3062                         ("timing out TID:0x%"PRIx64"\n", __get_send_tid( h_send )) );\r
3063 \r
3064                 h_send->p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;\r
3065                 cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );\r
3066                 cl_qlist_insert_tail( &timeout_list, p_list_item );\r
3067         }\r
3068 \r
3069         cl_spinlock_release( &h_mad_svc->obj.lock );\r
3070 \r
3071         /*\r
3072          * Resume any sends that can now be sent without holding\r
3073          * the mad service lock.\r
3074          */\r
3075         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
3076 \r
3077         /* Report all timed out sends to the user. */\r
3078         p_list_item = cl_qlist_remove_head( &timeout_list );\r
3079         while( p_list_item != cl_qlist_end( &timeout_list ) )\r
3080         {\r
3081                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
3082 \r
3083                 h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
3084                         h_send->p_send_mad );\r
3085                 __cleanup_mad_send( h_mad_svc, h_send );\r
3086                 p_list_item = cl_qlist_remove_head( &timeout_list );\r
3087         }\r
3088         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3089 }\r
3090 \r
3091 \r
3092 \r
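/*\r
 * Reassembly timer callback.  Any reassembly that saw no activity during\r
 * the previous interval is dropped; all others are marked inactive and\r
 * given one more interval to make progress.\r
 */\r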
3093 static void\r
3094 __recv_timer_cb(\r
3095         IN                              void                                            *context )\r
3096 {\r
3097         ib_mad_svc_handle_t             h_mad_svc;\r
3098         al_mad_rmpp_t                   *p_rmpp;\r
3099         cl_list_item_t                  *p_list_item, *p_next_item;\r
3100         boolean_t                               restart_timer;\r
3101 \r
3102         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3103 \r
3104         h_mad_svc = (ib_mad_svc_handle_t)context;\r
3105 \r
3106         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
3107 \r
3108         /* Check all outstanding receives. */\r
3109         for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );\r
3110                  p_list_item != cl_qlist_end( &h_mad_svc->recv_list );\r
3111                  p_list_item = p_next_item )\r
3112         {\r
3113                 p_next_item = cl_qlist_next( p_list_item );\r
3114                 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );\r
3115 \r
3116                 /* Fail all RMPP MADs that have remained inactive. */\r
3117                 if( p_rmpp->inactive )\r
3118                 {\r
3119                         ib_put_mad( p_rmpp->p_mad_element );\r
3120                         __put_mad_rmpp( h_mad_svc, p_rmpp );\r
3121                 }\r
3122                 else\r
3123                 {\r
3124                         /* Mark the RMPP as inactive. */\r
3125                          p_rmpp->inactive = TRUE;\r
3126                 }\r
3127         }\r
3128 \r
3129         restart_timer = !cl_is_qlist_empty( &h_mad_svc->recv_list );\r
3130         cl_spinlock_release( &h_mad_svc->obj.lock );\r
3131 \r
3132         if( restart_timer )\r
3133                 cl_timer_start( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );\r
3134         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3135 }\r
3136 \r
3137 \r
3138 \r
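/*\r
 * Process a MAD on the given port of the local CA through the verbs\r
 * provider.\r
 */\r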
3139 ib_api_status_t\r
3140 ib_local_mad(\r
3141         IN              const   ib_ca_handle_t                          h_ca,\r
3142         IN              const   uint8_t                                         port_num,\r
3143         IN              const   void* const                                     p_mad_in,\r
3144         IN                              void*                                           p_mad_out )\r
3145 {\r
3146         ib_api_status_t                 status;\r
3147 \r
3148         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3149 \r
3150         if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )\r
3151         {\r
3152                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_CA_HANDLE\n") );\r
3153                 return IB_INVALID_CA_HANDLE;\r
3154         }\r
3155         if( !p_mad_in || !p_mad_out )\r
3156         {\r
3157                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
3158                 return IB_INVALID_PARAMETER;\r
3159         }\r
3160 \r
3161         status = verbs_local_mad( h_ca, port_num, p_mad_in, p_mad_out );\r
3162 \r
3163         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3164         return status;\r
3165 }\r
3166 \r
3167 \r
3168 \r
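/*\r
 * Transaction ID helpers.  AL splits the 64-bit TID into a user-owned\r
 * portion and an AL-owned portion (see al_tid_t); these accessors read\r
 * and write the two portions.\r
 */\r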
3169 ib_net32_t\r
3170 al_get_user_tid(\r
3171         IN              const   ib_net64_t                                              tid64 )\r
3172 {\r
3173         al_tid_t                al_tid;\r
3174 \r
3175         al_tid.tid64 = tid64;\r
3176         return( al_tid.tid32.user_tid );\r
3177 }\r
3178 \r
3179 uint32_t\r
3180 al_get_al_tid(\r
3181         IN              const   ib_net64_t                                              tid64 )\r
3182 {\r
3183         al_tid_t                al_tid;\r
3184 \r
3185         al_tid.tid64 = tid64;\r
3186         return( cl_ntoh32( al_tid.tid32.al_tid ) );\r
3187 }\r
3188 \r
3189 void\r
3190 al_set_al_tid(\r
3191         IN                              ib_net64_t*             const                   p_tid64,\r
3192         IN              const   uint32_t                                                tid32 )\r
3193 {\r
3194         al_tid_t                *p_al_tid;\r
3195 \r
3196         p_al_tid = (al_tid_t*)p_tid64;\r
3197 \r
3198         if( tid32 )\r
3199         {\r
3200                 CL_ASSERT( !p_al_tid->tid32.al_tid );\r
3201         }\r
3202 \r
3203         p_al_tid->tid32.al_tid = cl_hton32( tid32 );\r
3204 }\r