[IBAL] Fix crash when creating a MAD service with duplicate
core/al/al_mad.c
/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <iba/ib_al.h>
#include <complib/cl_byteswap.h>
#include <complib/cl_timer.h>

#include "al.h"
#include "al_debug.h"
#include "al_cq.h"
#include "al_mad.h"
#include "al_qp.h"
#include "al_res_mgr.h"
#include "al_verbs.h"

#include "ib_common.h"


#define MAX_TIME				CL_CONST64(0xFFFFFFFFFFFFFFFF)
#define MAD_VECTOR_SIZE			8
#define MAX_METHOD				127
#define DEFAULT_RMPP_VERSION	1

#define AL_RMPP_WINDOW			16		/* Max size of RMPP window */
#define AL_REASSEMBLY_TIMEOUT	5000	/* 5 seconds */
static void
__cleanup_mad_disp(
	IN				al_obj_t					*p_obj );

static void
__free_mad_disp(
	IN				al_obj_t					*p_obj );

static cl_status_t
__init_mad_reg(
	IN				void* const					p_element,
	IN				void*						context );

static cl_status_t
__init_version_entry(
	IN				void* const					p_element,
	IN				void*						context );

static void
__destroy_version_entry(
	IN				void* const					p_element,
	IN				void*						context );

static cl_status_t
__init_class_entry(
	IN				void* const					p_element,
	IN				void*						context );

static void
__destroy_class_entry(
	IN				void* const					p_element,
	IN				void*						context );

static __inline uint8_t
__mgmt_class_index(
	IN		const	uint8_t						mgmt_class );

static __inline uint8_t
__mgmt_version_index(
	IN		const	uint8_t						mgmt_version );

static boolean_t
__mad_disp_reg_unsol(
	IN		const	al_mad_disp_handle_t		h_mad_disp,
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN		const	ib_mad_svc_t				*p_mad_svc );

static boolean_t
__use_tid_routing(
	IN		const	ib_mad_t* const				p_mad_hdr,
	IN		const	boolean_t					are_we_sender );

/*
 * Issue a send request to the MAD dispatcher.
 */
static void
__mad_disp_queue_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN				al_mad_wr_t* const			p_mad_wr );

static inline void
__mad_disp_resume_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg );

static void
__destroying_mad_svc(
	IN				struct _al_obj				*p_obj );

static void
__cleanup_mad_svc(
	IN				struct _al_obj				*p_obj );

static void
__send_timer_cb(
	IN				void						*context );

static void
__check_send_queue(
	IN				ib_mad_svc_handle_t			h_mad_svc );

static void
__recv_timer_cb(
	IN				void						*context );

static ib_api_status_t
__init_send_mad(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN		const	ib_mad_send_handle_t		h_send,
	IN				ib_mad_element_t* const		p_mad_element );

static boolean_t
__does_send_req_rmpp(
	IN		const	ib_mad_svc_type_t			mad_svc_type,
	IN		const	ib_mad_element_t* const		p_mad_element,
		OUT			uint8_t						*p_rmpp_version );

static void
__queue_mad_wr(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN		const	ib_mad_send_handle_t		h_send );

static void
__queue_rmpp_seg(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN				ib_mad_send_handle_t		h_send );

static ib_api_status_t
__create_send_av(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send );

static void
__cleanup_mad_send(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send );

static __inline void
__set_retry_time(
	IN				ib_mad_send_handle_t		h_send );

static void
__mad_svc_send_done(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				al_mad_wr_t					*p_mad_wr,
	IN				ib_wc_t						*p_wc );

static boolean_t
__is_send_mad_done(
	IN				ib_mad_send_handle_t		h_send,
	IN				ib_wc_t						*p_wc );

static void
__notify_send_comp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send,
	IN				ib_wc_status_t				wc_status );

static void
__mad_svc_recv_done(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static void
__process_recv_resp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static cl_status_t
__do_rmpp_recv(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN	OUT			ib_mad_element_t			**pp_mad_element );

static __inline boolean_t
__recv_requires_rmpp(
	IN		const	ib_mad_svc_type_t			mad_svc_type,
	IN		const	ib_mad_element_t* const		p_mad_element );

static __inline boolean_t
__is_internal_send(
	IN		const	ib_mad_svc_type_t			mad_svc_type,
	IN		const	ib_mad_element_t* const		p_mad_element );

static cl_status_t
__process_rmpp_data(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN	OUT			ib_mad_element_t			**pp_mad_element );

static void
__process_rmpp_ack(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static void
__process_rmpp_nack(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static cl_status_t
__process_segment(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				al_mad_rmpp_t				*p_rmpp,
	IN	OUT			ib_mad_element_t			**pp_mad_element,
		OUT			ib_mad_element_t			**pp_rmpp_resp_mad );

static al_mad_rmpp_t*
__find_rmpp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN	OUT			ib_mad_element_t			*p_mad_element );

static al_mad_rmpp_t*
__get_mad_rmpp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static void
__put_mad_rmpp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				al_mad_rmpp_t				*p_rmpp );

static void
__init_reply_element(
	IN				ib_mad_element_t			*p_dst_element,
	IN				ib_mad_element_t			*p_src_element );

static ib_mad_element_t*
__get_rmpp_ack(
	IN				al_mad_rmpp_t				*p_rmpp );

ib_net64_t
__get_send_tid(
	IN				ib_mad_send_handle_t		h_send )
{
	return ((ib_mad_t*)ib_get_mad_buf( h_send->p_send_mad ))->trans_id;
}


ib_mad_t*
get_mad_hdr_from_wr(
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_send_handle_t	h_send;

	CL_ASSERT( p_mad_wr );

	h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	return h_send->p_send_mad->p_mad_buf;
}



/*
 * Construct a MAD element from a receive work completion.
 */
void
build_mad_recv(
	IN				ib_mad_element_t*			p_mad_element,
	IN				ib_wc_t*					p_wc )
{
	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_mad_element );
	CL_ASSERT( p_wc );

	/* Build the MAD element from the work completion. */
	p_mad_element->status		= p_wc->status;
	p_mad_element->remote_qp	= p_wc->recv.ud.remote_qp;

	/*
	 * We assume all communicating managers using MAD services use
	 * the same QKEY.
	 */
	/*
	 * Mellanox workaround:
	 * The Q_KEY from the QP context must be used if the high bit is
	 * set in the Q_KEY part of the work request.  See section 10.2.5
	 * on Q_KEYs, compliance statement C10-15.
	 * This must be enabled to permit future non-special QPs to have
	 * MAD-level service capability and to use SAR in a generic way.
	 */

	/*
	 * p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
	 */

	p_mad_element->remote_qkey	= IB_QP1_WELL_KNOWN_Q_KEY;
	p_mad_element->remote_lid	= p_wc->recv.ud.remote_lid;
	p_mad_element->remote_sl	= p_wc->recv.ud.remote_sl;
	p_mad_element->pkey_index	= p_wc->recv.ud.pkey_index;
	p_mad_element->path_bits	= p_wc->recv.ud.path_bits;
	p_mad_element->recv_opt		= p_wc->recv.ud.recv_opt;
	p_mad_element->grh_valid	= p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID;

	if( p_wc->recv.ud.recv_opt & IB_RECV_OPT_IMMEDIATE )
		p_mad_element->immediate_data = p_wc->recv.ud.immediate_data;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}



/*
 *
 * MAD Dispatcher.
 *
 */

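/*
 * Dispatcher overview (descriptive summary of the structures set up below):
 * - client_vector holds one al_mad_disp_reg_t per registered MAD service,
 *   indexed by client_id.  For TID-routed sends the client_id is stamped
 *   into the AL-owned portion of the TID (see __mad_disp_queue_send) so
 *   responses can be routed back to the sending service.
 * - version_vector routes unsolicited MADs.  Each entry (indexed by
 *   management class version - 1) is a vector of classes, and each class
 *   entry is a pointer vector of methods mapping to the registered client.
 */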

ib_api_status_t
create_mad_disp(
	IN				al_obj_t* const				p_parent_obj,
	IN		const	ib_qp_handle_t				h_qp,
	IN				al_mad_disp_handle_t* const	ph_mad_disp )
{
	al_mad_disp_handle_t	h_mad_disp;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	h_mad_disp = cl_zalloc( sizeof( al_mad_disp_t ) );
	if( !h_mad_disp )
	{
		CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("insufficient memory\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Initialize the MAD dispatcher. */
	cl_vector_construct( &h_mad_disp->client_vector );
	cl_vector_construct( &h_mad_disp->version_vector );
	construct_al_obj( &h_mad_disp->obj, AL_OBJ_TYPE_MAD_DISP );
	status = init_al_obj( &h_mad_disp->obj, h_mad_disp, TRUE,
		NULL, __cleanup_mad_disp, __free_mad_disp );
	if( status != IB_SUCCESS )
	{
		CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("init obj: %s\n",
			ib_get_err_str(status)) );
		__free_mad_disp( &h_mad_disp->obj );
		return status;
	}
	status = attach_al_obj( p_parent_obj, &h_mad_disp->obj );
	if( status != IB_SUCCESS )
	{
		h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Obtain a reference to the QP to post sends to. */
	h_mad_disp->h_qp = h_qp;
	ref_al_obj( &h_qp->obj );

	/* Create the client vector. */
	cl_status = cl_vector_init( &h_mad_disp->client_vector, 1, MAD_VECTOR_SIZE,
		sizeof( al_mad_disp_reg_t ), __init_mad_reg, NULL, h_mad_disp );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	/* Create the version vector. */
	cl_status = cl_vector_init( &h_mad_disp->version_vector,
		1, 1, sizeof( cl_vector_t ), __init_version_entry,
		__destroy_version_entry, &h_mad_disp->version_vector );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	*ph_mad_disp = h_mad_disp;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &h_mad_disp->obj );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}


static void
__cleanup_mad_disp(
	IN				al_obj_t					*p_obj )
{
	al_mad_disp_handle_t	h_mad_disp;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( p_obj );
	h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );

	/* Detach from the QP that we were using. */
	if( h_mad_disp->h_qp )
		deref_al_obj( &h_mad_disp->h_qp->obj );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}



static void
__free_mad_disp(
	IN				al_obj_t					*p_obj )
{
	al_mad_disp_handle_t	h_mad_disp;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( p_obj );
	h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );

	cl_vector_destroy( &h_mad_disp->client_vector );
	cl_vector_destroy( &h_mad_disp->version_vector );
	destroy_al_obj( p_obj );
	cl_free( h_mad_disp );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}



static al_mad_reg_handle_t
__mad_disp_reg(
	IN		const	al_mad_disp_handle_t		h_mad_disp,
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN		const	ib_mad_svc_t				*p_mad_svc,
	IN		const	pfn_mad_svc_send_done_t		pfn_send_done,
	IN		const	pfn_mad_svc_recv_done_t		pfn_recv_done )
{
	al_mad_reg_handle_t		h_mad_reg;
	size_t					i;
	cl_status_t				cl_status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	cl_spinlock_acquire( &h_mad_disp->obj.lock );

	/* Find an empty slot in the client vector for the registration. */
	for( i = 0; i < cl_vector_get_size( &h_mad_disp->client_vector ); i++ )
	{
		h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );
		if( !h_mad_reg->ref_cnt )
			break;
	}
	/* Trap for ClientID overflow. */
	if( i >= 0xFFFFFFFF )
	{
		cl_spinlock_release( &h_mad_disp->obj.lock );
		return NULL;
	}
	cl_status = cl_vector_set_min_size( &h_mad_disp->client_vector, i+1 );
	if( cl_status != CL_SUCCESS )
	{
		cl_spinlock_release( &h_mad_disp->obj.lock );
		return NULL;
	}
	h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );

	/* Record the registration. */
	h_mad_reg->client_id = (uint32_t)i;
	h_mad_reg->support_unsol = p_mad_svc->support_unsol;
	h_mad_reg->mgmt_class = p_mad_svc->mgmt_class;
	h_mad_reg->mgmt_version = p_mad_svc->mgmt_version;
	h_mad_reg->pfn_recv_done = pfn_recv_done;
	h_mad_reg->pfn_send_done = pfn_send_done;

	/* If the client requires support for unsolicited MADs, add tracking. */
	if( p_mad_svc->support_unsol )
	{
		if( !__mad_disp_reg_unsol( h_mad_disp, h_mad_reg, p_mad_svc ) )
		{
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("reg unsol failed\n") );
			return NULL;
		}
	}

	/* Record that the registration was successful. */
	h_mad_reg->h_mad_svc = h_mad_svc;
	h_mad_reg->ref_cnt = 1;
	cl_spinlock_release( &h_mad_disp->obj.lock );

	/* The MAD service needs to take a reference on the dispatcher. */
	ref_al_obj( &h_mad_disp->obj );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return h_mad_reg;
}


static cl_status_t
__init_mad_reg(
	IN				void* const					p_element,
	IN				void*						context )
{
	al_mad_reg_handle_t			h_mad_reg;

	/* Record the MAD dispatcher for the registration structure. */
	h_mad_reg = p_element;
	h_mad_reg->h_mad_disp = context;
	h_mad_reg->ref_cnt = 0;

	return CL_SUCCESS;
}


/*
 * Initialize an entry in the version vector.  Each entry is a vector of
 * classes.
 */
static cl_status_t
__init_version_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_vector_t		*p_vector;

	p_vector = p_element;
	UNUSED_PARAM( context );

	cl_vector_construct( p_vector );
	return cl_vector_init( p_vector, MAD_VECTOR_SIZE, MAD_VECTOR_SIZE,
		sizeof( cl_ptr_vector_t ), __init_class_entry, __destroy_class_entry,
		p_vector );
}


static void
__destroy_version_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_vector_t		*p_vector;

	p_vector = p_element;
	UNUSED_PARAM( context );

	cl_vector_destroy( p_vector );
}


/*
 * Initialize an entry in the class vector.  Each entry is a pointer vector
 * of methods.
 */
static cl_status_t
__init_class_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_ptr_vector_t		*p_ptr_vector;

	p_ptr_vector = p_element;
	UNUSED_PARAM( context );

	cl_ptr_vector_construct( p_ptr_vector );
	return cl_ptr_vector_init( p_ptr_vector,
		MAD_VECTOR_SIZE, MAD_VECTOR_SIZE );
}


static void
__destroy_class_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_ptr_vector_t		*p_ptr_vector;

	p_ptr_vector = p_element;
	UNUSED_PARAM( context );

	cl_ptr_vector_destroy( p_ptr_vector );
}

/*
 * Add support for unsolicited MADs for the given MAD service.
 */
static boolean_t
__mad_disp_reg_unsol(
	IN		const	al_mad_disp_handle_t		h_mad_disp,
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN		const	ib_mad_svc_t				*p_mad_svc )
{
	cl_status_t			cl_status;
	cl_vector_t			*p_class_vector;
	cl_ptr_vector_t		*p_method_ptr_vector;
	uint8_t				i;

	/* Ensure that we are ready to handle this version number. */
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	cl_status = cl_vector_set_min_size( &h_mad_disp->version_vector,
		__mgmt_version_index( p_mad_svc->mgmt_version ) + 1 );
	if( cl_status != CL_SUCCESS )
		return FALSE;

	/* Get the list of classes in use for this version. */
	p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
		__mgmt_version_index( p_mad_svc->mgmt_version ) );

	/* Ensure that we are ready to handle the specified class. */
	cl_status = cl_vector_set_min_size( p_class_vector,
		__mgmt_class_index( p_mad_svc->mgmt_class ) + 1 );
	if( cl_status != CL_SUCCESS )
		return FALSE;

	/* Get the list of methods in use for this class. */
	p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
		__mgmt_class_index( p_mad_svc->mgmt_class ) );

	/* Ensure that we can handle all requested methods. */
	for( i = MAX_METHOD - 1; i > 0; i-- )
	{
		if( p_mad_svc->method_array[i] )
		{
			cl_status = cl_ptr_vector_set_min_size( p_method_ptr_vector, i+1 );
			if( cl_status != CL_SUCCESS )
				return FALSE;

			/* No one else can be registered for this method. */
			if( cl_ptr_vector_get( p_method_ptr_vector, i ) )
			{
				CL_TRACE(AL_DBG_ERROR, g_al_dbg_lvl,
					("Other client already registered for Un-Solicited Method "
					"%u for version %u of class %u.\n", i, p_mad_svc->mgmt_version,
					p_mad_svc->mgmt_class ) );
				return FALSE;
			}
		}
	}

	/* We can support the request.  Record the methods. */
	for( i = 0; i < MAX_METHOD; i++ )
	{
		if( p_mad_svc->method_array[i] )
		{
			cl_ptr_vector_set( p_method_ptr_vector, i, h_mad_reg );

			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("Register version:%u (%u) class:0x%02X(%u) method:0x%02X Hdl:%p\n",
				p_mad_svc->mgmt_version,
				__mgmt_version_index( p_mad_svc->mgmt_version ),
				p_mad_svc->mgmt_class,
				__mgmt_class_index( p_mad_svc->mgmt_class ),
				i,
				h_mad_reg) );
		}
	}

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return TRUE;
}


static __inline uint8_t
__mgmt_version_index(
	IN		const	uint8_t						mgmt_version )
{
	return (uint8_t)(mgmt_version - 1);
}


static __inline uint8_t
__mgmt_class_index(
	IN		const	uint8_t						mgmt_class )
{
	/*
	 * Map the directed-route SM class (0x81) onto the LID-routed SM class
	 * index so the class vector does not have to span the empty class
	 * values in between.
	 */
	if( mgmt_class == IB_MCLASS_SUBN_DIR )
		return IB_MCLASS_SUBN_LID;
	else
		return mgmt_class;
}

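/*
 * Example index mappings (illustrative):
 *   class version 1            -> version index 0
 *   IB_MCLASS_SUBN_LID (0x01)  -> class index 0x01
 *   IB_MCLASS_SUBN_DIR (0x81)  -> class index 0x01 (shares the LID-routed
 *                                 SMP slot)
 */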


/*
 * Deregister a MAD service from the dispatcher.
 */
static void
__mad_disp_dereg(
	IN		const	al_mad_reg_handle_t			h_mad_reg )
{
	al_mad_disp_handle_t	h_mad_disp;
	cl_vector_t				*p_class_vector;
	cl_ptr_vector_t			*p_method_ptr_vector;
	size_t					i;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	h_mad_disp = h_mad_reg->h_mad_disp;

	cl_spinlock_acquire( &h_mad_disp->obj.lock );

	if( h_mad_reg->support_unsol )
	{
		/* Deregister the service from receiving unsolicited MADs. */
		p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
			__mgmt_version_index( h_mad_reg->mgmt_version ) );

		p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
			__mgmt_class_index( h_mad_reg->mgmt_class ) );

		/* Deregister all methods registered to the client. */
		for( i = 0; i < cl_ptr_vector_get_size( p_method_ptr_vector ); i++ )
		{
			if( cl_ptr_vector_get( p_method_ptr_vector, i ) == h_mad_reg )
			{
				cl_ptr_vector_set( p_method_ptr_vector, i, NULL );
			}
		}
	}

	cl_spinlock_release( &h_mad_disp->obj.lock );

	/* Decrement the reference count in the registration table. */
	cl_atomic_dec( &h_mad_reg->ref_cnt );

	/* The MAD service no longer requires access to the MAD dispatcher. */
	deref_al_obj( &h_mad_disp->obj );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}



static void
__mad_disp_queue_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t				*p_mad_hdr;

	/*
	 * Increment the reference count on the registration to ensure that
	 * the MAD service does not go away until the send completes.
	 */
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	cl_atomic_inc( &h_mad_reg->ref_cnt );
	ref_al_obj( &h_mad_reg->h_mad_svc->obj );

	/* Get the MAD header. */
	p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );
	CL_ASSERT( !p_mad_wr->send_wr.wr_id );
	p_mad_wr->send_wr.wr_id = (uintn_t)p_mad_wr;

	/*
	 * If we are the originator of the transaction, we need to modify the
	 * TID to ensure that duplicate TIDs are not used by multiple clients.
	 */
	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("dispatching TID: 0x%0"PRIx64"\n",
		p_mad_hdr->trans_id) );
	p_mad_wr->client_tid = p_mad_hdr->trans_id;
	if( __use_tid_routing( p_mad_hdr, TRUE ) )
	{
		/* Clear the AL portion of the TID before setting. */
		((al_tid_t*)&p_mad_hdr->trans_id)->tid32.al_tid = 0;

#pragma warning( push, 3 )
		al_set_al_tid( &p_mad_hdr->trans_id, h_mad_reg->client_id );
#pragma warning( pop )

		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("modified TID to: 0x%0"PRIx64"\n", p_mad_hdr->trans_id) );
	}

	/* Post the work request to the QP. */
	p_mad_wr->client_id = h_mad_reg->client_id;
	h_mad_reg->h_mad_disp->h_qp->pfn_queue_mad(
		h_mad_reg->h_mad_disp->h_qp, p_mad_wr );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}

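/*
 * Note on the TID handling above: the client's original TID is preserved in
 * p_mad_wr->client_tid, and, when TID routing applies, the AL-owned portion
 * of the wire TID is overwritten with the dispatcher client_id.
 * mad_disp_send_done() restores the client TID on completion, and
 * mad_disp_recv_done() uses the AL portion of a received response's TID to
 * locate the registration entry.
 */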

static inline void
__mad_disp_resume_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg )
{
	AL_ENTER( AL_DBG_MAD_SVC );

	h_mad_reg->h_mad_disp->h_qp->pfn_resume_mad(
		h_mad_reg->h_mad_disp->h_qp );

	AL_EXIT( AL_DBG_MAD_SVC );
}


/*
 * Complete a sent MAD.  Route the completion to the correct MAD service.
 */
void
mad_disp_send_done(
	IN				al_mad_disp_handle_t		h_mad_disp,
	IN				al_mad_wr_t					*p_mad_wr,
	IN				ib_wc_t						*p_wc )
{
	al_mad_reg_handle_t		h_mad_reg;
	ib_mad_t				*p_mad_hdr;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("p_mad_wr 0x%p\n", p_mad_wr ) );

	/* Get the MAD header. */
	p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );

	/* Get the MAD service that issued the send. */
	cl_spinlock_acquire( &h_mad_disp->obj.lock );
	h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
		p_mad_wr->client_id );
	cl_spinlock_release( &h_mad_disp->obj.lock );
	CL_ASSERT( h_mad_reg && (h_mad_reg->client_id == p_mad_wr->client_id) );

	/* Reset the TID and WR ID. */
	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send done TID: 0x%"PRIx64"\n",
		p_mad_hdr->trans_id) );
	p_mad_hdr->trans_id = p_mad_wr->client_tid;
	p_mad_wr->send_wr.wr_id = 0;

	/* Return the completed request to the MAD service. */
	CL_ASSERT( h_mad_reg->h_mad_svc );
	h_mad_reg->pfn_send_done( h_mad_reg->h_mad_svc, p_mad_wr, p_wc );

	/* The MAD service is no longer referenced once the send completes. */
	deref_al_obj( &h_mad_reg->h_mad_svc->obj );
	cl_atomic_dec( &h_mad_reg->ref_cnt );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}



/*
 * Process a received MAD.  Route the completion to the correct MAD service.
 */
ib_api_status_t
mad_disp_recv_done(
	IN				al_mad_disp_handle_t		h_mad_disp,
	IN				ib_mad_element_t			*p_mad_element )
{
	ib_mad_t				*p_mad_hdr;
	al_mad_reg_handle_t		h_mad_reg;
	ib_al_handle_t			h_al;
	ib_mad_svc_handle_t		h_mad_svc;

	cl_vector_t				*p_class_vector;
	cl_ptr_vector_t			*p_method_ptr_vector;
	uint8_t					method;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	p_mad_hdr = ib_get_mad_buf( p_mad_element );

	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
		("TID = 0x%"PRIx64 "\n"
		 "class = 0x%x.\n"
		 "version = 0x%x.\n"
		 "method = 0x%x.\n",
		p_mad_hdr->trans_id,
		p_mad_hdr->mgmt_class,
		p_mad_hdr->class_ver,
		p_mad_hdr->method) );

	/* Get the client to route the receive to. */
	cl_spinlock_acquire( &h_mad_disp->obj.lock );
	if( __use_tid_routing( p_mad_hdr, FALSE ) )
	{
		/* The MAD was received in response to a send. */
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("routing based on TID\n"));

		/* Verify that we have a registration entry. */
		if( al_get_al_tid( p_mad_hdr->trans_id ) >=
			cl_vector_get_size( &h_mad_disp->client_vector ) )
		{
			/* No clients for this version-class-method. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("invalid client ID\n") );
			return IB_NOT_FOUND;
		}

		h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
			al_get_al_tid( p_mad_hdr->trans_id ) );

/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
		al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )
	}
	else
	{
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("routing based on version, class, method\n"));

		/* The receive is unsolicited.  Find the client. */
		if( __mgmt_version_index( p_mad_hdr->class_ver ) >=
			cl_vector_get_size( &h_mad_disp->version_vector ) )
		{
			/* No clients for this version of MADs. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for this class version\n") );
			return IB_NOT_FOUND;
		}

		/* See if we have a client for this class of MADs. */
		p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
			__mgmt_version_index( p_mad_hdr->class_ver ) );

		if( __mgmt_class_index( p_mad_hdr->mgmt_class ) >=
			cl_vector_get_size( p_class_vector ) )
		{
			/* No clients for this version-class. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for this class\n") );
			return IB_NOT_FOUND;
		}

		/* See if we have a client for this method. */
		p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
			__mgmt_class_index( p_mad_hdr->mgmt_class ) );
		method = (uint8_t)(p_mad_hdr->method & (~IB_MAD_METHOD_RESP_MASK));

		if( method >= cl_ptr_vector_get_size( p_method_ptr_vector ) )
		{
			/* No clients for this version-class-method. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for this method-out of range\n") );
			return IB_NOT_FOUND;
		}

		h_mad_reg = cl_ptr_vector_get( p_method_ptr_vector, method );
		if( !h_mad_reg )
		{
			/* No clients for this version-class-method. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for method %u of class %u(%u) version %u(%u)\n",
				 method,
				 p_mad_hdr->mgmt_class,
				 __mgmt_class_index( p_mad_hdr->mgmt_class ),
				 p_mad_hdr->class_ver,
				 __mgmt_version_index( p_mad_hdr->class_ver )
				 ) );
			return IB_NOT_FOUND;
		}
	}

	/* Verify that the registration is still valid. */
	if( !h_mad_reg->ref_cnt )
	{
		cl_spinlock_release( &h_mad_disp->obj.lock );
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("no client registered\n") );
		return IB_NOT_FOUND;
	}

	/* Take a reference on the MAD service in case it deregisters. */
	h_mad_svc = h_mad_reg->h_mad_svc;
	ref_al_obj( &h_mad_svc->obj );
	cl_spinlock_release( &h_mad_disp->obj.lock );

	/* Handoff the MAD to the correct AL instance. */
	h_al = qp_get_al( (ib_qp_handle_t)(h_mad_svc->obj.p_parent_obj) );
	al_handoff_mad( h_al, p_mad_element );

	h_mad_reg->pfn_recv_done( h_mad_svc, p_mad_element );
	deref_al_obj( &h_mad_svc->obj );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}



/*
 * Return TRUE if we should route the MAD to the recipient based on the TID.
 */
static boolean_t
__use_tid_routing(
	IN		const	ib_mad_t* const				p_mad_hdr,
	IN		const	boolean_t					are_we_sender )
{
	ib_rmpp_mad_t		*p_rmpp_mad;
	boolean_t			is_orig;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	/* CM MADs are never TID routed. */
	if( p_mad_hdr->mgmt_class == IB_MCLASS_COMM_MGMT )
	{
		CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
		return FALSE;
	}

	/*
	 * Determine originator for a sent MAD.  Received MADs are just the
	 * opposite.
	 */

	/* Non-DATA RMPP MADs are handled differently. */
	p_rmpp_mad = (ib_rmpp_mad_t*)p_mad_hdr;
	if( (p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_ADM) &&
		( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) &&
		(p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) )
	{
		/*
		 * We need to distinguish between ACKs sent after receiving
		 * a request, versus ACKs sent after receiving a response.  ACKs
		 * to a request are from the responder.  ACKs to a response are
		 * from the originator.
		 *
		 * Note that we assume STOP and ABORT packets are initiated by
		 * receivers.  If both senders and receivers can
		 * initiate STOP and ABORT MADs, then we can't distinguish which
		 * transaction is associated with the MAD.  The TID for a
		 * send and receive can be the same.
		 */
		is_orig = !ib_mad_is_response( p_mad_hdr );
	}
	else
	{
		/*
		 * See if the MAD is being sent in response to a previous MAD.  If
		 * it is, then we're NOT the originator.  Note that trap repress
		 * MADs are responses, even though the response bit isn't set.
		 */
		is_orig = !( ib_mad_is_response( p_mad_hdr ) ||
			(p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) );
	}

	/* If we're the receiver, toggle the result. */
	if( !are_we_sender )
		is_orig = !is_orig;

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return is_orig;
}

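/*
 * Example of the routing decision above (illustrative):
 *   - A GetResp() arriving at the node that issued the original Get() (e.g.
 *     an SA query response) is a reply to our own send, so it is routed by
 *     the AL portion of the TID back to the requesting MAD service.
 *   - A Get() arriving at the SA itself is unsolicited, so it is routed by
 *     management version, class, and method through the version_vector.
 */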


/*
 *
 * MAD Service.
 *
 */



/*
 * Create and initialize a MAD service for use.
 */
ib_api_status_t
reg_mad_svc(
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_mad_svc_t* const			p_mad_svc,
		OUT			ib_mad_svc_handle_t* const	ph_mad_svc )
{
	ib_api_status_t		status;
	cl_status_t			cl_status;
	ib_mad_svc_handle_t	h_mad_svc;
	al_qp_alias_t		*p_qp_alias;
	ib_qp_attr_t		qp_attr;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( h_qp );

	switch( h_qp->type )
	{
	case IB_QPT_QP0:
	case IB_QPT_QP1:
	case IB_QPT_QP0_ALIAS:
	case IB_QPT_QP1_ALIAS:
	case IB_QPT_MAD:
		break;

	default:
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	if( !p_mad_svc || !ph_mad_svc )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	h_mad_svc = cl_zalloc( sizeof( al_mad_svc_t) );
	if( !h_mad_svc )
	{
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the MAD service. */
	construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC );
	cl_timer_construct( &h_mad_svc->send_timer );
	cl_timer_construct( &h_mad_svc->recv_timer );
	cl_qlist_init( &h_mad_svc->send_list );
	cl_qlist_init( &h_mad_svc->recv_list );

	p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp );
	h_mad_svc->svc_type = p_mad_svc->svc_type;
	h_mad_svc->obj.context = p_mad_svc->mad_svc_context;
	h_mad_svc->pfn_user_recv_cb = p_mad_svc->pfn_mad_recv_cb;
	h_mad_svc->pfn_user_send_cb = p_mad_svc->pfn_mad_send_cb;

	/* Initialize the MAD service. */
	status = init_al_obj( &h_mad_svc->obj, p_mad_svc->mad_svc_context,
		TRUE, __destroying_mad_svc, __cleanup_mad_svc, free_mad_svc );
	if( status != IB_SUCCESS )
	{
		free_mad_svc( &h_mad_svc->obj );
		return status;
	}
	status = attach_al_obj( &h_qp->obj, &h_mad_svc->obj );
	if( status != IB_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	h_mad_svc->h_mad_reg = __mad_disp_reg( p_qp_alias->h_mad_disp,
		h_mad_svc, p_mad_svc, __mad_svc_send_done, __mad_svc_recv_done );
	if( !h_mad_svc->h_mad_reg )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Record which port this MAD service uses, to use when creating AVs. */
	status = ib_query_qp( h_qp, &qp_attr );
	if( status != IB_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return status;
	}
	h_mad_svc->h_pd = qp_attr.h_pd;
	h_mad_svc->port_num = qp_attr.primary_port;

	cl_status = cl_timer_init( &h_mad_svc->send_timer,
		__send_timer_cb, h_mad_svc );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_timer_init( &h_mad_svc->recv_timer,
		__recv_timer_cb, h_mad_svc );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	*ph_mad_svc = h_mad_svc;

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}

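/*
 * Illustrative registration sketch (not compiled here).  It assumes the
 * caller already owns a MAD-capable QP (e.g. one created with type
 * IB_QPT_MAD) and registers through the public ib_reg_mad_svc entry point;
 * only fields referenced by this file are shown - consult ib_al.h for the
 * full ib_mad_svc_t definition.
 *
 *	ib_mad_svc_t		mad_svc;
 *	ib_mad_svc_handle_t	h_mad_svc;
 *	ib_api_status_t		status;
 *
 *	cl_memclr( &mad_svc, sizeof( mad_svc ) );
 *	mad_svc.mad_svc_context = my_context;          // caller-defined
 *	mad_svc.pfn_mad_send_cb = my_send_cb;          // caller-defined
 *	mad_svc.pfn_mad_recv_cb = my_recv_cb;          // caller-defined
 *	mad_svc.support_unsol = TRUE;
 *	mad_svc.mgmt_class = IB_MCLASS_PERF;
 *	mad_svc.mgmt_version = 1;
 *	mad_svc.method_array[IB_MAD_METHOD_GET] = TRUE;
 *	mad_svc.svc_type = IB_MAD_SVC_DEFAULT;
 *
 *	status = ib_reg_mad_svc( h_qp, &mad_svc, &h_mad_svc );
 */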
1199 \r
1200 \r
1201 static void\r
1202 __destroying_mad_svc(\r
1203         IN                              struct _al_obj                          *p_obj )\r
1204 {\r
1205         ib_qp_handle_t                  h_qp;\r
1206         ib_mad_svc_handle_t             h_mad_svc;\r
1207         ib_mad_send_handle_t    h_send;\r
1208         cl_list_item_t                  *p_list_item;\r
1209         int32_t                                 timeout_ms;\r
1210 #ifdef CL_KERNEL\r
1211         KIRQL                                   old_irql;\r
1212 #endif\r
1213 \r
1214         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1215         CL_ASSERT( p_obj );\r
1216         h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );\r
1217 \r
1218         /* Deregister the MAD service. */\r
1219         h_qp = (ib_qp_handle_t)p_obj->p_parent_obj;\r
1220         if( h_qp->pfn_dereg_mad_svc )\r
1221                 h_qp->pfn_dereg_mad_svc( h_mad_svc );\r
1222 \r
1223         /* Wait here until the MAD service is no longer in use. */\r
1224         timeout_ms = (int32_t)h_mad_svc->obj.timeout_ms;\r
1225         while( h_mad_svc->ref_cnt && timeout_ms > 0 )\r
1226         {\r
1227                 /* Use a timeout to avoid waiting forever - just in case. */\r
1228                 cl_thread_suspend( 10 );\r
1229                 timeout_ms -= 10;\r
1230         }\r
1231 \r
1232         /*\r
1233          * Deregister from the MAD dispatcher.  The MAD dispatcher holds\r
1234          * a reference on the MAD service when invoking callbacks.  Since we\r
1235          * issue sends, we know how many callbacks are expected for send\r
1236          * completions.  With receive completions, we need to wait until all\r
1237          * receive callbacks have completed before cleaning up receives.\r
1238          */\r
1239         if( h_mad_svc->h_mad_reg )\r
1240                 __mad_disp_dereg( h_mad_svc->h_mad_reg );\r
1241 \r
1242         /* Cancel all outstanding send requests. */\r
1243         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1244         for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
1245                  p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
1246                  p_list_item = cl_qlist_next( p_list_item ) )\r
1247         {\r
1248                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("canceling MAD\n") );\r
1249                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1250                 h_send->canceled = TRUE;\r
1251         }\r
1252         cl_spinlock_release( &h_mad_svc->obj.lock );\r
1253 \r
1254         /*\r
1255          * Invoke the timer callback to return the canceled MADs to the user.\r
1256          * Since the MAD service is being destroyed, the user cannot be issuing\r
1257          * sends.\r
1258          */\r
1259         if( h_mad_svc->h_mad_reg )\r
1260         {\r
1261 #ifdef CL_KERNEL\r
1262                 old_irql = KeRaiseIrqlToDpcLevel();\r
1263 #endif\r
1264                 __check_send_queue( h_mad_svc );\r
1265 #ifdef CL_KERNEL\r
1266                 KeLowerIrql( old_irql );\r
1267 #endif\r
1268         }\r
1269 \r
1270         cl_timer_destroy( &h_mad_svc->send_timer );\r
1271 \r
1272 #ifdef CL_KERNEL\r
1273         /*\r
1274          * Reclaim any pending receives sent to the proxy for UAL.\r
1275          */\r
1276         if( h_mad_svc->obj.h_al->p_context )\r
1277         {\r
1278                 cl_qlist_t                                      *p_cblist;\r
1279                 al_proxy_cb_info_t                      *p_cb_info;\r
1280 \r
1281                 cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock );\r
1282                 p_cblist = &h_mad_svc->obj.h_al->p_context->misc_cb_list;\r
1283                 p_list_item = cl_qlist_head( p_cblist );\r
1284                 while( p_list_item != cl_qlist_end( p_cblist ) )\r
1285                 {\r
1286                         p_cb_info = (al_proxy_cb_info_t*)p_list_item;\r
1287                         p_list_item = cl_qlist_next( p_list_item );\r
1288 \r
1289                         if( p_cb_info->p_al_obj && p_cb_info->p_al_obj == &h_mad_svc->obj )\r
1290                         {\r
1291                                 cl_qlist_remove_item( p_cblist, &p_cb_info->pool_item.list_item );\r
1292                                 deref_al_obj( p_cb_info->p_al_obj );\r
1293                                 proxy_cb_put( p_cb_info );\r
1294                         }\r
1295                 }\r
1296                 cl_spinlock_release( &h_mad_svc->obj.h_al->p_context->cb_lock );\r
1297         }\r
1298 #endif\r
1299 \r
1300         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1301 }\r
1302 \r
1303 \r
1304 \r
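/*
 * Cleanup callback for the MAD service object.  Runs once no dispatcher
 * callbacks remain active; flushes any receives still being reassembled.
 */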
1305 static void\r
1306 __cleanup_mad_svc(\r
1307         IN                              struct _al_obj                          *p_obj )\r
1308 {\r
1309         ib_mad_svc_handle_t             h_mad_svc;\r
1310         al_mad_rmpp_t                   *p_rmpp;\r
1311         cl_list_item_t                  *p_list_item;\r
1312 \r
1313         CL_ASSERT( p_obj );\r
1314         h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );\r
1315 \r
1316         /*\r
1317          * No callbacks from the MAD dispatcher remain active at this point.\r
1318          * Clean up any receives that may still be outstanding.  Destroy the\r
1319          * receive timer first to avoid having to synchronize with it.\r
1320          */\r
1321         cl_timer_destroy( &h_mad_svc->recv_timer );\r
1322         for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );\r
1323                  p_list_item != cl_qlist_end( &h_mad_svc->recv_list );\r
1324                  p_list_item = cl_qlist_next( p_list_item ) )\r
1325         {\r
1326                 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );\r
1327                 p_rmpp->inactive = TRUE;\r
1328         }\r
1329         __recv_timer_cb( h_mad_svc );\r
1330 \r
1331         CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->send_list ) );\r
1332         CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->recv_list ) );\r
1333 }\r
1334 \r
1335 \r
1336 \r
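/*
 * Final release of the MAD service: destroy the underlying AL object and
 * free the service structure itself.
 */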
1337 void\r
1338 free_mad_svc(\r
1339         IN                              al_obj_t                                        *p_obj )\r
1340 {\r
1341         ib_mad_svc_handle_t     h_mad_svc;\r
1342 \r
1343         CL_ASSERT( p_obj );\r
1344         h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );\r
1345 \r
1346         destroy_al_obj( p_obj );\r
1347         cl_free( h_mad_svc );\r
1348 }\r
1349 \r
1350 \r
1351 \r
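/*
 * A minimal usage sketch (illustrative only): h_mad_svc is an existing MAD
 * service handle and p_mad is a MAD element obtained from the MAD pool and
 * filled in by the caller; error handling is left as a stub.
 *
 *      ib_mad_element_t        *p_failed;
 *      ib_api_status_t         status;
 *
 *      p_mad->p_next = NULL;                   // send a single element
 *      status = ib_send_mad( h_mad_svc, p_mad, &p_failed );
 *      if( status != IB_SUCCESS )
 *      {
 *              // p_failed is the first element that was not posted
 *      }
 */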
1352 ib_api_status_t\r
1353 ib_send_mad(\r
1354         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
1355         IN                              ib_mad_element_t* const         p_mad_element_list,\r
1356                 OUT                     ib_mad_element_t                        **pp_mad_failure OPTIONAL )\r
1357 {\r
1358         ib_api_status_t                         status = IB_SUCCESS;\r
1359 #ifdef CL_KERNEL\r
1360         ib_mad_send_handle_t            h_send;\r
1361         ib_mad_element_t                        *p_cur_mad, *p_next_mad;\r
1362 #endif\r
1363 \r
1364         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1365 \r
1366         if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
1367         {\r
1368                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );\r
1369                 return IB_INVALID_HANDLE;\r
1370         }\r
1371         if( !p_mad_element_list ||\r
1372                 ( p_mad_element_list->p_next && !pp_mad_failure ) )\r
1373         {\r
1374                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
1375                 return IB_INVALID_PARAMETER;\r
1376         }\r
1377 \r
1378 #ifndef CL_KERNEL\r
1379         /* This is a send from user mode using the special QP alias. */\r
1380         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
1381                 ("ib_send_mad: ual_context non-zero, TID = 0x%"PRIx64 ".\n",\r
1382                 ((ib_mad_t*)(ib_get_mad_buf( p_mad_element_list )))->trans_id ));\r
1383         status = spl_qp_mad_send( h_mad_svc, p_mad_element_list,\r
1384                 pp_mad_failure );\r
1385         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1386         return status;\r
1387 #else\r
1388         /* Post each send on the list. */\r
1389         p_cur_mad = p_mad_element_list;\r
1390         while( p_cur_mad )\r
1391         {\r
1392                 p_next_mad = p_cur_mad->p_next;\r
1393 \r
1394                 /* Get an element to track the send. */\r
1395                 h_send = get_mad_send( PARENT_STRUCT( p_cur_mad,\r
1396                         al_mad_element_t, element ) );\r
1397                 if( !h_send )\r
1398                 {\r
1399                         CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("unable to get mad_send\n") );\r
1400                         if( pp_mad_failure )\r
1401                                 *pp_mad_failure = p_cur_mad;\r
1402                         return IB_INSUFFICIENT_RESOURCES;\r
1403                 }\r
1404 \r
1405                 /* Initialize the MAD for sending. */\r
1406                 status = __init_send_mad( h_mad_svc, h_send, p_cur_mad );\r
1407                 if( status != IB_SUCCESS )\r
1408                 {\r
1409                         CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("init_send_mad failed: %s\n",\r
1410                                 ib_get_err_str(status)) );\r
1411                         put_mad_send( h_send );\r
1412                         if( pp_mad_failure )\r
1413                                 *pp_mad_failure = p_cur_mad;\r
1414                         return status;\r
1415                 }\r
1416 \r
1417                 /* Add the MADs to our list. */\r
1418                 cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1419                 cl_qlist_insert_tail( &h_mad_svc->send_list,\r
1420                         (cl_list_item_t*)&h_send->pool_item );\r
1421 \r
1422                 /* Post the MAD to the dispatcher, and check for failures. */\r
1423                 ref_al_obj( &h_mad_svc->obj );\r
1424                 p_cur_mad->p_next = NULL;\r
1425                 if( h_send->uses_rmpp )\r
1426                         __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
1427                 else\r
1428                         __queue_mad_wr( h_mad_svc->h_mad_reg, h_send );\r
1429                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1430 \r
1431                 p_cur_mad = p_next_mad;\r
1432         }\r
1433 \r
1434         /*\r
1435          * Resume any sends that can now be sent without holding\r
1436          * the mad service lock.\r
1437          */\r
1438         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
1439 \r
1440         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1441         return status;\r
1442 #endif\r
1443 }\r
1444 \r
1445 \r
1446 \r
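/*
 * Prepare a send tracking structure for a MAD: record retry state, set up
 * RMPP segmentation if needed, and create an address vector when the caller
 * did not provide one.
 */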
1447 static ib_api_status_t\r
1448 __init_send_mad(\r
1449         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1450         IN              const   ib_mad_send_handle_t            h_send,\r
1451         IN                              ib_mad_element_t* const         p_mad_element )\r
1452 {\r
1453         ib_rmpp_mad_t           *p_rmpp_hdr;\r
1454         uint8_t                         rmpp_version;\r
1455         ib_api_status_t         status;\r
1456 \r
1457         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1458 \r
1459         /* Initialize tracking for the send. */\r
1460         h_send->p_send_mad = p_mad_element;\r
1461         h_send->retry_time = MAX_TIME;\r
1462         h_send->retry_cnt = p_mad_element->retry_cnt;\r
1463 \r
1464         /* See if the send uses RMPP. */\r
1465         h_send->uses_rmpp = __does_send_req_rmpp( h_mad_svc->svc_type,\r
1466                 p_mad_element, &rmpp_version );\r
1467         if( h_send->uses_rmpp )\r
1468         {\r
1469                 /* The RMPP header is present. */\r
1470                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("RMPP is activated\n") );\r
1471                 p_rmpp_hdr = (ib_rmpp_mad_t*)p_mad_element->p_mad_buf;\r
1472 \r
1473                 /* We only support version 1. */\r
1474                 if( rmpp_version != DEFAULT_RMPP_VERSION )\r
1475                 {\r
1476                         CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("unsupported version\n") );\r
1477                         return IB_INVALID_SETTING;\r
1478                 }\r
1479 \r
1480                 p_rmpp_hdr->rmpp_version = rmpp_version;\r
1481                 p_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_DATA;\r
1482                 ib_rmpp_set_resp_time( p_rmpp_hdr, IB_RMPP_NO_RESP_TIME );\r
1483                 p_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;\r
1484                 /*\r
1485                  * The segment number, flags, and payload size are set when\r
1486                  * sending, so that they are set correctly when issuing retries.\r
1487                  */\r
1488 \r
1489                 h_send->ack_seg = 0;\r
1490                 h_send->seg_limit = 1;\r
1491                 h_send->cur_seg = 1;\r
1492                 /* SA RMPP MADs use a different data size and header size. */\r
1493                 if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1494                 {\r
1495                         h_send->total_seg = ( (p_mad_element->size - IB_SA_MAD_HDR_SIZE) +\r
1496                                 (IB_SA_DATA_SIZE - 1) ) / IB_SA_DATA_SIZE;\r
1497                 } \r
1498                 else \r
1499                 {\r
1500                         h_send->total_seg = ( (p_mad_element->size - MAD_RMPP_HDR_SIZE) +\r
1501                                 (MAD_RMPP_DATA_SIZE - 1) ) / MAD_RMPP_DATA_SIZE;\r
1502                 }\r
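                /*
                 * Rough worked example, assuming the usual IBA sizes (a 56-byte
                 * SA header and 200 payload bytes per SA segment): an SA MAD
                 * with p_mad_element->size of 1000 bytes yields
                 * total_seg = ((1000 - 56) + 199) / 200 = 5 segments.
                 */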
1503         }\r
1504 \r
1505         /* See if we need to create the address vector for the user. */\r
1506         if( !p_mad_element->h_av &&\r
1507                 !( p_mad_element->send_opt & IB_SEND_OPT_LOCAL ) )\r
1508         {\r
1509                 status = __create_send_av( h_mad_svc, h_send );\r
1510                 if( status != IB_SUCCESS )\r
1511                 {\r
1512                         return status;\r
1513                 }\r
1514         }\r
1515 \r
1516         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1517         return IB_SUCCESS;\r
1518 }\r
1519 \r
1520 \r
1521 \r
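/*
 * Build an address vector for the send from the MAD element's addressing
 * fields.  Note that the static rate is currently hard-coded.
 */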
1522 static ib_api_status_t\r
1523 __create_send_av(\r
1524         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1525         IN                              ib_mad_send_handle_t            h_send )\r
1526 {\r
1527         ib_av_attr_t            av_attr;\r
1528         ib_mad_element_t        *p_mad_element;\r
1529 \r
1530         p_mad_element = h_send->p_send_mad;\r
1531 \r
1532         av_attr.port_num = h_mad_svc->port_num;\r
1533 \r
1534         av_attr.sl = p_mad_element->remote_sl;\r
1535         av_attr.dlid = p_mad_element->remote_lid;\r
1536 \r
1537         av_attr.grh_valid = p_mad_element->grh_valid;\r
1538         if( av_attr.grh_valid )\r
1539                 av_attr.grh = *p_mad_element->p_grh;\r
1540 \r
1541         av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
1542         av_attr.path_bits = p_mad_element->path_bits;\r
1543 \r
1544         return ib_create_av( h_mad_svc->h_pd, &av_attr, &h_send->h_av );\r
1545 }\r
1546 \r
1547 \r
1548 \r
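/*
 * Report whether a send must use RMPP and, if so, which RMPP version to
 * place in the header.
 */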
1549 static boolean_t\r
1550 __does_send_req_rmpp(\r
1551         IN              const   ib_mad_svc_type_t                       mad_svc_type,\r
1552         IN              const   ib_mad_element_t* const         p_mad_element,\r
1553                 OUT                     uint8_t                                         *p_rmpp_version )\r
1554 {\r
1555         switch( mad_svc_type )\r
1556         {\r
1557         case IB_MAD_SVC_DEFAULT:\r
1558         case IB_MAD_SVC_RMPP:\r
1559                 /* Internally generated MADs do not use RMPP. */\r
1560                 if( __is_internal_send( mad_svc_type, p_mad_element ) )\r
1561                         return FALSE;\r
1562 \r
1563                 /* If the MAD has the version number set, just return it. */\r
1564                 if( p_mad_element->rmpp_version )\r
1565                 {\r
1566                         *p_rmpp_version = p_mad_element->rmpp_version;\r
1567                         return TRUE;\r
1568                 }\r
1569 \r
1570                 /* If the class is well known and uses RMPP, use the default version. */\r
1571                 if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1572                 {\r
1573                         switch( p_mad_element->p_mad_buf->method )\r
1574                         {\r
1575                         case IB_MAD_METHOD_GETTABLE_RESP:\r
1576                         case IB_MAD_METHOD_GETMULTI:\r
1577                         case IB_MAD_METHOD_GETMULTI_RESP:\r
1578                                 *p_rmpp_version = DEFAULT_RMPP_VERSION;\r
1579                                 return TRUE;\r
1580 \r
1581                         default:\r
1582                                 return FALSE;\r
1583                         }\r
1584                 }\r
1585 \r
1586                 /* The RMPP is not active. */\r
1587                 return FALSE;\r
1588 \r
1589         default:\r
1590                 return FALSE;\r
1591         }\r
1592 }\r
1593 \r
1594 \r
1595 \r
1596 /*\r
1597  * Sends the next RMPP segment of an RMPP transfer.\r
1598  */\r
1599 static void\r
1600 __queue_rmpp_seg(\r
1601         IN              const   al_mad_reg_handle_t                     h_mad_reg,\r
1602         IN                              ib_mad_send_handle_t            h_send )\r
1603 {\r
1604         ib_rmpp_mad_t           *p_rmpp_hdr;\r
1605 \r
1606         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1607 \r
1608         CL_ASSERT( h_mad_reg && h_send );\r
1609         CL_ASSERT( h_send->cur_seg <= h_send->seg_limit );\r
1610 \r
1611         /* Reset information to track the send. */\r
1612         h_send->retry_time = MAX_TIME;\r
1613 \r
1614         /* Set the RMPP header information. */\r
1615         p_rmpp_hdr = (ib_rmpp_mad_t*)h_send->p_send_mad->p_mad_buf;\r
1616         p_rmpp_hdr->seg_num = cl_hton32( h_send->cur_seg );\r
1617         p_rmpp_hdr->rmpp_flags = IB_RMPP_FLAG_ACTIVE;\r
1618         p_rmpp_hdr->paylen_newwin = 0;\r
1619 \r
1620         /* See if this is the first segment that needs to be sent. */\r
1621         if( h_send->cur_seg == 1 )\r
1622         {\r
1623                 p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_FIRST;\r
1624 \r
1625                 /*\r
1626                  * The RMPP layer supports SA MADs by duplicating the SA header in\r
1627                  * every segment, so the PayloadLength must cover the original MAD\r
1628                  * size plus NumSegs * SA-extra-header bytes.\r
1629                  */\r
1630                 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1631                 {\r
1632                         /* Add the SA extra header size for every segment after the first. */\r
1633                         p_rmpp_hdr->paylen_newwin = cl_hton32(\r
1634                                 h_send->p_send_mad->size - MAD_RMPP_HDR_SIZE +\r
1635                                 (h_send->total_seg - 1) * \r
1636                                 (IB_SA_MAD_HDR_SIZE - MAD_RMPP_HDR_SIZE) );\r
1637                 }\r
1638                 else \r
1639                 {\r
1640                         /* For other RMPP packets we simply use the given MAD */\r
1641                         p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -\r
1642                                 MAD_RMPP_HDR_SIZE );\r
1643                 }\r
1644         }\r
1645 \r
1646         /* See if this is the last segment that needs to be sent. */\r
1647         if( h_send->cur_seg == h_send->total_seg )\r
1648         {\r
1649                 p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_LAST;\r
1650 \r
1651                 /* SA MADs must also account for the extra SA header size. */\r
1652                 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1653                 {\r
1654                         p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -\r
1655                                 (h_send->cur_seg -1)*IB_SA_DATA_SIZE - MAD_RMPP_HDR_SIZE );\r
1656                 }\r
1657                 else\r
1658                 {\r
1659                         p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -\r
1660                                 (h_send->cur_seg -1)*MAD_RMPP_DATA_SIZE );\r
1661                 }\r
1662         }\r
1663 \r
1664         /* Set the current segment to the next one. */\r
1665         h_send->cur_seg++;\r
1666 \r
1667         /* Send the MAD. */\r
1668         __queue_mad_wr( h_mad_reg, h_send );\r
1669 \r
1670         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1671 }\r
1672 \r
1673 \r
1674 \r
1675 /*\r
1676  * Posts a send work request to the dispatcher for a MAD send.\r
1677  */\r
1678 static void\r
1679 __queue_mad_wr(\r
1680         IN              const   al_mad_reg_handle_t                     h_mad_reg,\r
1681         IN              const   ib_mad_send_handle_t            h_send )\r
1682 {\r
1683         ib_send_wr_t            *p_send_wr;\r
1684         al_mad_element_t        *p_al_element;\r
1685         ib_rmpp_mad_t           *p_rmpp_hdr;\r
1686         uint8_t                         *p_rmpp_src, *p_rmpp_dst;\r
1687         uintn_t                         hdr_len, offset, max_len;\r
1688 \r
1689         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1690         p_send_wr = &h_send->mad_wr.send_wr;\r
1691 \r
1692         cl_memclr( p_send_wr, sizeof( ib_send_wr_t ) );\r
1693 \r
1694         p_send_wr->wr_type = WR_SEND;\r
1695         p_send_wr->send_opt = h_send->p_send_mad->send_opt;\r
1696 \r
1697         p_al_element = PARENT_STRUCT( h_send->p_send_mad,\r
1698                 al_mad_element_t, element );\r
1699 \r
1700         /* See if the MAD requires RMPP support. */\r
1701         if( h_send->uses_rmpp && p_al_element->p_al_mad_buf )\r
1702         {\r
1703 #if defined( CL_KERNEL )\r
1704                 p_rmpp_dst = p_al_element->mad_buf + sizeof(ib_grh_t);\r
1705 #else\r
1706                 p_rmpp_dst = (uint8_t*)(uintn_t)p_al_element->mad_ds.vaddr;\r
1707 #endif\r
1708                 p_rmpp_src = (uint8_t* __ptr64)h_send->p_send_mad->p_mad_buf;\r
1709                 p_rmpp_hdr = (ib_rmpp_mad_t*)p_rmpp_src;\r
1710 \r
1711                 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
1712                         hdr_len = IB_SA_MAD_HDR_SIZE;\r
1713                 else\r
1714                         hdr_len = MAD_RMPP_HDR_SIZE;\r
1715 \r
1716                 max_len = MAD_BLOCK_SIZE - hdr_len;\r
1717 \r
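                /*
                 * offset: byte position of this segment's payload within the
                 * caller's full MAD buffer - the header followed by
                 * (seg_num - 1) complete payload blocks.
                 */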
1718                 offset = hdr_len + (max_len * (cl_ntoh32( p_rmpp_hdr->seg_num ) - 1));\r
1719 \r
1720                 /* Copy the header into the registered send buffer. */\r
1721                 cl_memcpy( p_rmpp_dst, p_rmpp_src, hdr_len );\r
1722 \r
1723                 /* Copy this segment's payload into the registered send buffer. */\r
1724                 CL_ASSERT( h_send->p_send_mad->size != offset );\r
1725                 if( (h_send->p_send_mad->size - offset) < max_len )\r
1726                 {\r
1727                         max_len = h_send->p_send_mad->size - offset;\r
1728                         /* Clear unused payload. */\r
1729                         cl_memclr( p_rmpp_dst + hdr_len + max_len,\r
1730                                 MAD_BLOCK_SIZE - hdr_len - max_len );\r
1731                 }\r
1732 \r
1733                 cl_memcpy(\r
1734                         p_rmpp_dst + hdr_len, p_rmpp_src + offset, max_len );\r
1735         }\r
1736 \r
1737         p_send_wr->num_ds = 1;\r
1738         p_send_wr->ds_array = &p_al_element->mad_ds;\r
1739 \r
1740         p_send_wr->dgrm.ud.remote_qp = h_send->p_send_mad->remote_qp;\r
1741         p_send_wr->dgrm.ud.remote_qkey = h_send->p_send_mad->remote_qkey;\r
1742         p_send_wr->dgrm.ud.pkey_index = h_send->p_send_mad->pkey_index;\r
1743 \r
1744         /* See if we created the address vector on behalf of the user. */\r
1745         if( h_send->p_send_mad->h_av )\r
1746                 p_send_wr->dgrm.ud.h_av = h_send->p_send_mad->h_av;\r
1747         else\r
1748                 p_send_wr->dgrm.ud.h_av = h_send->h_av;\r
1749 \r
1750         __mad_disp_queue_send( h_mad_reg, &h_send->mad_wr );\r
1751 \r
1752         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1753 }\r
1754 \r
1755 \r
1756 \r
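/*
 * cl_qlist match callback: return CL_SUCCESS when the list item tracks the
 * MAD element passed in the context.
 */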
1757 static cl_status_t\r
1758 __mad_svc_find_send(\r
1759         IN              const   cl_list_item_t* const           p_list_item,\r
1760         IN                              void*                                           context )\r
1761 {\r
1762         ib_mad_send_handle_t    h_send;\r
1763 \r
1764         h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1765 \r
1766         if( h_send->p_send_mad == context )\r
1767                 return CL_SUCCESS;\r
1768         else\r
1769                 return CL_NOT_FOUND;\r
1770 }\r
1771 \r
1772 \r
1773 \r
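/*
 * Cancel an outstanding send.  In the kernel the send is marked canceled and
 * completed from the send timer; in user mode the request is forwarded to the
 * special QP service.
 */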
1774 ib_api_status_t\r
1775 ib_cancel_mad(\r
1776         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
1777         IN                              ib_mad_element_t* const         p_mad_element )\r
1778 {\r
1779 #ifdef CL_KERNEL\r
1780         cl_list_item_t                  *p_list_item;\r
1781         ib_mad_send_handle_t    h_send;\r
1782 #else\r
1783         ib_api_status_t                 status;\r
1784 #endif\r
1785 \r
1786         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1787 \r
1788         if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
1789         {\r
1790                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );\r
1791                 return IB_INVALID_HANDLE;\r
1792         }\r
1793         if( !p_mad_element )\r
1794         {\r
1795                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
1796                 return IB_INVALID_PARAMETER;\r
1797         }\r
1798 \r
1799 #ifndef CL_KERNEL\r
1800         /* Cancel a user-mode send that was issued through the special QP alias. */\r
1801         status = spl_qp_cancel_mad( h_mad_svc, p_mad_element );\r
1802         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1803         return status;\r
1804 #else\r
1805         /* Search for the MAD in our MAD list.  It may have already completed. */\r
1806         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1807         p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,\r
1808                 __mad_svc_find_send, p_mad_element );\r
1809 \r
1810         if( !p_list_item )\r
1811         {\r
1812                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1813                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("mad not found\n") );\r
1814                 return IB_NOT_FOUND;\r
1815         }\r
1816 \r
1817         /* Mark the MAD as having been canceled. */\r
1818         h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1819         h_send->canceled = TRUE;\r
1820 \r
1821         /* If the MAD is active, process it in the send callback. */\r
1822         if( h_send->retry_time != MAX_TIME )\r
1823         {\r
1824                 /* Process the canceled MAD using the timer thread. */\r
1825                 cl_timer_trim( &h_mad_svc->send_timer, 0 );\r
1826         }\r
1827 \r
1828         cl_spinlock_release( &h_mad_svc->obj.lock );\r
1829         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1830         return IB_SUCCESS;\r
1831 #endif\r
1832 }\r
1833 \r
1834 \r
1835 ib_api_status_t\r
1836 ib_delay_mad(\r
1837         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
1838         IN                              ib_mad_element_t* const         p_mad_element,\r
1839         IN              const   uint32_t                                        delay_ms )\r
1840 {\r
1841 #ifdef CL_KERNEL\r
1842         cl_list_item_t                  *p_list_item;\r
1843         ib_mad_send_handle_t    h_send;\r
1844 #endif\r
1845 \r
1846         AL_ENTER( AL_DBG_MAD_SVC );\r
1847 \r
1848         if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
1849         {\r
1850                 AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
1851                 return IB_INVALID_HANDLE;\r
1852         }\r
1853         if( !p_mad_element )\r
1854         {\r
1855                 AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
1856                 return IB_INVALID_PARAMETER;\r
1857         }\r
1858 \r
1859 #ifndef CL_KERNEL\r
1860         UNUSED_PARAM( p_mad_element );\r
1861         UNUSED_PARAM( delay_ms );\r
1862         /* TODO: support for user-mode MAD QPs. */\r
1863         AL_EXIT( AL_DBG_MAD_SVC );\r
1864         return IB_UNSUPPORTED;\r
1865 #else\r
1866         /* Search for the MAD in our MAD list.  It may have already completed. */\r
1867         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1868         p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,\r
1869                 __mad_svc_find_send, p_mad_element );\r
1870 \r
1871         if( !p_list_item )\r
1872         {\r
1873                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1874                 AL_TRACE( AL_DBG_MAD_SVC, ("MAD not found\n") );\r
1875                 return IB_NOT_FOUND;\r
1876         }\r
1877 \r
1878         /* Found the matching send; apply the requested delay. */\r
1879         h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
1880 \r
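        /*
         * If the send is still on the wire (retry_time == MAX_TIME), remember the
         * delay so it is applied when the retry is next scheduled; otherwise push
         * the retry time out directly (retry_time appears to be kept in
         * microseconds, hence the ms-to-us conversion below).
         */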
1881         if( h_send->retry_time == MAX_TIME )\r
1882                 h_send->delay = delay_ms;\r
1883         else\r
1884                 h_send->retry_time += ((uint64_t)delay_ms * 1000ULL);\r
1885 \r
1886         cl_spinlock_release( &h_mad_svc->obj.lock );\r
1887         AL_EXIT( AL_DBG_MAD_SVC );\r
1888         return IB_SUCCESS;\r
1889 #endif\r
1890 }\r
1891 \r
1892 \r
1893 /*\r
1894  * Process a send completion.\r
1895  */\r
1896 static void\r
1897 __mad_svc_send_done(\r
1898         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1899         IN                              al_mad_wr_t                                     *p_mad_wr,\r
1900         IN                              ib_wc_t                                         *p_wc )\r
1901 {\r
1902         ib_mad_send_handle_t    h_send;\r
1903 \r
1904         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1905         CL_ASSERT( h_mad_svc && p_mad_wr && !p_wc->p_next );\r
1906 \r
1907         h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
1908         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send callback TID:0x%"PRIx64"\n",\r
1909                 __get_send_tid( h_send )) );\r
1910 \r
1911         /* We need to synchronize access to the list as well as the MAD request. */\r
1912         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
1913 \r
1914         /* Complete internally sent MADs. */\r
1915         if( __is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )\r
1916         {\r
1917                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("internal send\n") );\r
1918                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
1919                         (cl_list_item_t*)&h_send->pool_item );\r
1920                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1921                 ib_put_mad( h_send->p_send_mad );\r
1922                 __cleanup_mad_send( h_mad_svc, h_send );\r
1923                 return;\r
1924         }\r
1925 \r
1926         /* See if the send request has completed. */\r
1927         if( __is_send_mad_done( h_send, p_wc ) )\r
1928         {\r
1929                 /* The send has completed. */\r
1930                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
1931                         (cl_list_item_t*)&h_send->pool_item );\r
1932                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1933 \r
1934                 /* Report the send as canceled only if we don't have the response. */\r
1935                 if( h_send->canceled && !h_send->p_resp_mad )\r
1936                         __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );\r
1937                 else\r
1938                         __notify_send_comp( h_mad_svc, h_send, p_wc->status );\r
1939         }\r
1940         else\r
1941         {\r
1942                 /* See if this is an RMPP MAD, and we should send more segments. */\r
1943                 if( h_send->uses_rmpp && (h_send->cur_seg <= h_send->seg_limit) )\r
1944                 {\r
1945                         /* Send the next segment. */\r
1946                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
1947                                 ("sending next RMPP segment for TID:0x%"PRIx64"\n",\r
1948                                 __get_send_tid( h_send )) );\r
1949 \r
1950                         __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
1951                 }\r
1952                 else\r
1953                 {\r
1954                         /* Continue waiting for a response or ACK. */\r
1955                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
1956                                 ("waiting for response for TID:0x%"PRIx64"\n",\r
1957                                 __get_send_tid( h_send )) );\r
1958 \r
1959                         __set_retry_time( h_send );\r
1960                         cl_timer_trim( &h_mad_svc->send_timer,\r
1961                                 h_send->p_send_mad->timeout_ms );\r
1962                 }\r
1963                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
1964         }\r
1965 \r
1966         /*\r
1967          * Resume any sends that can now be sent without holding\r
1968          * the mad service lock.\r
1969          */\r
1970         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
1971 \r
1972         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1973 }\r
1974 \r
1975 \r
1976 \r
1977 /*\r
1978  * Notify the user of a completed send operation.\r
1979  */\r
1980 static void\r
1981 __notify_send_comp(\r
1982         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
1983         IN                              ib_mad_send_handle_t            h_send,\r
1984         IN                              ib_wc_status_t                          wc_status )\r
1985 {\r
1986         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
1987 \r
1988         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("completing TID:0x%"PRIx64"\n",\r
1989                 __get_send_tid( h_send )) );\r
1990 \r
1991         h_send->p_send_mad->status = wc_status;\r
1992 \r
1993         /* Notify the user of a received response, if one exists. */\r
1994         if( h_send->p_resp_mad )\r
1995         {\r
1996                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
1997                         h_send->p_resp_mad );\r
1998         }\r
1999 \r
2000         /* The transaction has completed, return the send MADs. */\r
2001         h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2002                 h_send->p_send_mad );\r
2003 \r
2004         __cleanup_mad_send( h_mad_svc, h_send );\r
2005 \r
2006         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2007 }\r
2008 \r
2009 \r
2010 \r
2011 /*\r
2012  * Return a send MAD tracking structure to its pool and cleanup any resources\r
2013  * it may have allocated.\r
2014  */\r
2015 static void\r
2016 __cleanup_mad_send(\r
2017         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2018         IN                              ib_mad_send_handle_t            h_send )\r
2019 {\r
2020         /* Release any address vectors that we may have created. */\r
2021         if( h_send->h_av )\r
2022         {\r
2023                 ib_destroy_av( h_send->h_av );\r
2024         }\r
2025 \r
2026         /* Return the send MAD tracking structure to its pool. */\r
2027         put_mad_send( h_send );\r
2028 \r
2029         /* We no longer need to reference the MAD service. */\r
2030         deref_al_obj( &h_mad_svc->obj );\r
2031 }\r
2032 \r
2033 \r
2034 \r
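/*
 * Decide whether a send transaction is finished: it completes on failure,
 * cancellation, receipt of its response, or once all RMPP segments have been
 * acknowledged and no response is expected.
 */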
2035 static boolean_t\r
2036 __is_send_mad_done(\r
2037         IN                              ib_mad_send_handle_t            h_send,\r
2038         IN                              ib_wc_t                                         *p_wc )\r
2039 {\r
2040         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2041 \r
2042         /* Complete the send if the request failed. */\r
2043         if( p_wc->status != IB_WCS_SUCCESS )\r
2044         {\r
2045                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("send failed\n") );\r
2046                 return TRUE;\r
2047         }\r
2048 \r
2049         /* Complete the send if it has been canceled. */\r
2050         if( h_send->canceled )\r
2051         {\r
2052                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2053                         ("send was canceled\n") );\r
2054                 return TRUE;\r
2055         }\r
2056 \r
2057         /* Complete the send if we have its response. */\r
2058         if( h_send->p_resp_mad )\r
2059         {\r
2060                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2061                         ("response received\n") );\r
2062                 return TRUE;\r
2063         }\r
2064 \r
2065         /* RMPP sends cannot complete until all segments have been acked. */\r
2066         if( h_send->uses_rmpp && (h_send->ack_seg < h_send->total_seg) )\r
2067         {\r
2068                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2069                         ("more RMPP segments to send\n") );\r
2070                 return FALSE;\r
2071         }\r
2072 \r
2073         /*\r
2074          * All segments of this send have been sent.\r
2075          * The send has completed if we are not waiting for a response.\r
2076          */\r
2077         if( h_send->p_send_mad->resp_expected )\r
2078         {\r
2079                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2080                         ("still waiting for a response\n") );\r
2081                 return FALSE;\r
2082         }\r
2083         else\r
2084         {\r
2085                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send completed\n") );\r
2086                 return TRUE;\r
2087         }\r
2088 }\r
2089 \r
2090 \r
2091 \r
2092 /*\r
2093  * Try to find a send that matches the received response.  This call must\r
2094  * be synchronized with access to the MAD service send_list.\r
2095  */\r
2096 static ib_mad_send_handle_t\r
2097 __mad_svc_match_recv(\r
2098         IN              const   ib_mad_svc_handle_t                     h_mad_svc,\r
2099         IN                              ib_mad_element_t* const         p_recv_mad )\r
2100 {\r
2101         ib_mad_t                                *p_recv_hdr;\r
2102         cl_list_item_t                  *p_list_item;\r
2103         ib_mad_send_handle_t    h_send;\r
2104 \r
2105         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2106 \r
2107         p_recv_hdr = p_recv_mad->p_mad_buf;\r
2108 \r
2109         /* Search the send list for a matching request. */\r
2110         for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
2111                  p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
2112                  p_list_item = cl_qlist_next( p_list_item ) )\r
2113         {\r
2114                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
2115 \r
2116                 /* Match on the transaction ID, ignoring internally generated sends. */\r
2117                 if( (p_recv_hdr->trans_id == h_send->mad_wr.client_tid) &&\r
2118                          !__is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )\r
2119                 {\r
2120                         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2121                         return h_send;\r
2122                 }\r
2123         }\r
2124 \r
        CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2125         return NULL;\r
2126 }\r
2127 \r
2128 \r
2129 \r
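/*
 * Handle a receive completion from the MAD dispatcher: raw services get the
 * MAD as-is, busy responses are dropped so the request keeps retrying, RMPP
 * MADs are reassembled, and responses are matched back to their sends.
 */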
2130 static void\r
2131 __mad_svc_recv_done(\r
2132         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2133         IN                              ib_mad_element_t                        *p_mad_element )\r
2134 {\r
2135         ib_mad_t                                *p_mad_hdr;\r
2136         cl_status_t                             cl_status;\r
2137 \r
2138         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2139 \r
2140         p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2141         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("recv done TID:0x%"PRIx64"\n",\r
2142                 p_mad_hdr->trans_id) );\r
2143 \r
2144         /* Raw MAD services get all receives. */\r
2145         if( h_mad_svc->svc_type == IB_MAD_SVC_RAW )\r
2146         {\r
2147                 /* Report the receive. */\r
2148                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2149                         ("recv TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2150                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2151                         p_mad_element );\r
2152                 return;\r
2153         }\r
2154 \r
2155         /*\r
2156          * If the response indicates that the responder was busy, continue\r
2157          * retrying the request.\r
2158          */\r
2159         if( p_mad_hdr->status & IB_MAD_STATUS_BUSY )\r
2160         {\r
2161                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
2162                         ("responder busy TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2163                 ib_put_mad( p_mad_element );\r
2164                 return;\r
2165         }\r
2166 \r
2167         /* Fully reassemble received MADs before completing them. */\r
2168         if( __recv_requires_rmpp( h_mad_svc->svc_type, p_mad_element ) )\r
2169         {\r
2170                 /* Reassembling the receive. */\r
2171                 cl_status = __do_rmpp_recv( h_mad_svc, &p_mad_element );\r
2172                 if( cl_status != CL_SUCCESS )\r
2173                 {\r
2174                         /* The reassembly is not done. */\r
2175                         CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2176                                 ("no RMPP receive to report\n") );\r
2177                         return;\r
2178                 }\r
2179 \r
2180                 /*\r
2181                  * Get the header to the MAD element to report to the user.  This\r
2182                  * will be a MAD element received earlier.\r
2183                  */\r
2184                 p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2185         }\r
2186 \r
2187         /*\r
2188          * See if the MAD was sent in response to a previously sent MAD.  Note\r
2189          * that trap repress messages are responses, even though the response\r
2190          * bit isn't set.\r
2191          */\r
2192         if( ib_mad_is_response( p_mad_hdr ) ||\r
2193                 (p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) )\r
2194         {\r
2195                 /* Process the received response. */\r
2196                 __process_recv_resp( h_mad_svc, p_mad_element );\r
2197         }\r
2198         else\r
2199         {\r
2200                 /* Report the receive. */\r
2201                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("unsol recv TID:0x%"PRIx64"\n",\r
2202                         p_mad_hdr->trans_id) );\r
2203                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2204                         p_mad_element );\r
2205         }\r
2206         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2207 }\r
2208 \r
2209 \r
2210 \r
2211 /*\r
2212  * A MAD was received in response to a send.  Find the corresponding send\r
2213  * and process the receive completion.\r
2214  */\r
2215 static void\r
2216 __process_recv_resp(\r
2217         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2218         IN                              ib_mad_element_t                        *p_mad_element )\r
2219 {\r
2220         ib_mad_t                                *p_mad_hdr;\r
2221         ib_mad_send_handle_t    h_send;\r
2222 \r
2223         /*\r
2224          * Try to find the send.  The send may have already timed out or\r
2225          * have been canceled, so we need to search for it.\r
2226          */\r
2227         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2228         p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2229         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2230 \r
2231         h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );\r
2232         if( !h_send )\r
2233         {\r
2234                 /* A matching send was not found. */\r
2235                 CL_TRACE_EXIT( AL_DBG_WARN, g_al_dbg_lvl,\r
2236                         ("unmatched resp TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2237                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2238                 ib_put_mad( p_mad_element );\r
2239                 return;\r
2240         }\r
2241 \r
2242         /* We've found the matching send. */\r
2243         h_send->p_send_mad->status = IB_WCS_SUCCESS;\r
2244 \r
2245         /* Record the send contexts with the receive. */\r
2246         p_mad_element->send_context1 = (void* __ptr64)h_send->p_send_mad->context1;\r
2247         p_mad_element->send_context2 = (void* __ptr64)h_send->p_send_mad->context2;\r
2248 \r
2249         if( h_send->retry_time == MAX_TIME )\r
2250         {\r
2251                 /* The send is currently active.  Do not report it. */\r
2252                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2253                         ("resp send active TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2254                 h_send->p_resp_mad = p_mad_element;\r
2255                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2256         }\r
2257         else\r
2258         {\r
2259                 CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2260                         ("resp received TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );\r
2261 \r
2262                 /* Report the send completion below. */\r
2263                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
2264                         (cl_list_item_t*)&h_send->pool_item );\r
2265                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2266 \r
2267                 /* Report the receive. */\r
2268                 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2269                         p_mad_element );\r
2270 \r
2271                 /* Report the send completion. */\r
2272                 h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
2273                         h_send->p_send_mad );\r
2274                 __cleanup_mad_send( h_mad_svc, h_send );\r
2275         }\r
2276         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2277 }\r
2278 \r
2279 \r
2280 \r
2281 /*\r
2282  * Return TRUE if a received MAD requires RMPP processing.\r
2283  */\r
2284 static __inline boolean_t\r
2285 __recv_requires_rmpp(\r
2286         IN              const   ib_mad_svc_type_t                       mad_svc_type,\r
2287         IN              const   ib_mad_element_t* const         p_mad_element )\r
2288 {\r
2289         ib_rmpp_mad_t                           *p_rmpp_mad;\r
2290 \r
2291         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2292 \r
2293         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2294 \r
2295         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2296 \r
2297         switch( mad_svc_type )\r
2298         {\r
2299         case IB_MAD_SVC_DEFAULT:\r
2300                 /* Only subnet administration (SA) receives require RMPP. */\r
2301                 return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&\r
2302                         ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );\r
2303 \r
2304         case IB_MAD_SVC_RMPP:\r
2305                 return( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );\r
2306 \r
2307         default:\r
2308                 return FALSE;\r
2309         }\r
2310 }\r
2311 \r
2312 \r
2313 \r
2314 /*\r
2315  * Return TRUE if the MAD was issued by AL itself.\r
2316  */\r
2317 static __inline boolean_t\r
2318 __is_internal_send(\r
2319         IN              const   ib_mad_svc_type_t                       mad_svc_type,\r
2320         IN              const   ib_mad_element_t* const         p_mad_element )\r
2321 {\r
2322         ib_rmpp_mad_t           *p_rmpp_mad;\r
2323 \r
2324         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2325 \r
2326         /* See if the MAD service issues internal MADs. */\r
2327         switch( mad_svc_type )\r
2328         {\r
2329         case IB_MAD_SVC_DEFAULT:\r
2330                 /* Internal sends are SA MADs whose RMPP type is set but is not DATA. */\r
2331                 return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&\r
2332                                 (p_rmpp_mad->rmpp_type &&\r
2333                                 (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) );\r
2334 \r
2335         case IB_MAD_SVC_RMPP:\r
2336                 /* The RMPP header is present.  Check its type. */\r
2337                 return( (p_rmpp_mad->rmpp_type) &&\r
2338                                 (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) );\r
2339 \r
2340         default:\r
2341                 return FALSE;\r
2342         }\r
2343 }\r
2344 \r
2345 \r
2346 \r
2347 /*\r
2348  * Fully reassemble a received MAD.  Return TRUE once all segments of the\r
2349  * MAD have been received.  Return the fully reassembled MAD.\r
2350  */\r
2351 static cl_status_t\r
2352 __do_rmpp_recv(\r
2353         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2354         IN      OUT                     ib_mad_element_t                        **pp_mad_element )\r
2355 {\r
2356         ib_rmpp_mad_t           *p_rmpp_mad;\r
2357         cl_status_t                     cl_status;\r
2358 \r
2359         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2360 \r
2361         p_rmpp_mad = ib_get_mad_buf( *pp_mad_element );\r
2362         CL_ASSERT( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );\r
2363 \r
2364         /* Perform the correct operation based on the RMPP MAD type. */\r
2365         switch( p_rmpp_mad->rmpp_type )\r
2366         {\r
2367         case IB_RMPP_TYPE_DATA:\r
2368                 cl_status = __process_rmpp_data( h_mad_svc, pp_mad_element );\r
2369                 /* Return the received element back to its MAD pool if not needed. */\r
2370                 if( (cl_status != CL_SUCCESS) && (cl_status != CL_NOT_DONE) )\r
2371                 {\r
2372                         ib_put_mad( *pp_mad_element );\r
2373                 }\r
2374                 break;\r
2375 \r
2376         case IB_RMPP_TYPE_ACK:\r
2377                 /* Process the ACK. */\r
2378                 __process_rmpp_ack( h_mad_svc, *pp_mad_element );\r
2379                 ib_put_mad( *pp_mad_element );\r
2380                 cl_status = CL_COMPLETED;\r
2381                 break;\r
2382 \r
2383         case IB_RMPP_TYPE_STOP:\r
2384         case IB_RMPP_TYPE_ABORT:\r
2385         default:\r
2386                 /* Process the ABORT or STOP. */\r
2387                 __process_rmpp_nack( h_mad_svc, *pp_mad_element );\r
2388                 ib_put_mad( *pp_mad_element );\r
2389                 cl_status = CL_REJECT;\r
2390                 break;\r
2391         }\r
2392 \r
2393         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2394         return cl_status;\r
2395 }\r
2396 \r
2397 \r
2398 \r
2399 /*\r
2400  * Process an RMPP DATA message.  Reassemble the received data.  If the\r
2401  * received MAD is fully reassembled, this call returns CL_SUCCESS.\r
2402  */\r
2403 static cl_status_t\r
2404 __process_rmpp_data(\r
2405         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2406         IN      OUT                     ib_mad_element_t                        **pp_mad_element )\r
2407 {\r
2408         ib_mad_element_t        *p_rmpp_resp_mad = NULL;\r
2409         al_mad_rmpp_t           *p_rmpp;\r
2410         ib_rmpp_mad_t           *p_rmpp_hdr;\r
2411         uint32_t                        cur_seg;\r
2412         cl_status_t                     cl_status;\r
2413         ib_api_status_t         status;\r
2414 \r
2415         p_rmpp_hdr = ib_get_mad_buf( *pp_mad_element );\r
2416         CL_ASSERT( p_rmpp_hdr->rmpp_type == IB_RMPP_TYPE_DATA );\r
2417 \r
2418         /* Try to find a receive already being reassembled. */\r
2419         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2420         p_rmpp = __find_rmpp( h_mad_svc, *pp_mad_element );\r
2421         if( !p_rmpp )\r
2422         {\r
2423                 /* This receive is not yet being reassembled; it should be the first segment. */\r
2424                 if( cl_ntoh32( p_rmpp_hdr->seg_num ) != 1 )\r
2425                 {\r
2426                         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2427                         return CL_NOT_FOUND;\r
2428                 }\r
2429 \r
2430                 /* Start tracking the new reassembly. */\r
2431                 p_rmpp = __get_mad_rmpp( h_mad_svc, *pp_mad_element );\r
2432                 if( !p_rmpp )\r
2433                 {\r
2434                         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2435                         return CL_INSUFFICIENT_MEMORY;\r
2436                 }\r
2437         }\r
2438 \r
2439         /* Verify that we just received the expected segment. */\r
2440         cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );\r
2441         if( cur_seg == p_rmpp->expected_seg )\r
2442         {\r
2443                 /* Copy the new segment's data into our reassembly buffer. */\r
2444                 cl_status = __process_segment( h_mad_svc, p_rmpp,\r
2445                         pp_mad_element, &p_rmpp_resp_mad );\r
2446 \r
2447                 /* See if the RMPP is done. */\r
2448                 if( cl_status == CL_SUCCESS )\r
2449                 {\r
2450                         /* Stop tracking the reassembly. */\r
2451                         __put_mad_rmpp( h_mad_svc, p_rmpp );\r
2452                 }\r
2453                 else if( cl_status == CL_NOT_DONE )\r
2454                 {\r
2455                         /* Start the reassembly timer. */\r
2456                         cl_timer_trim( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );\r
2457                 }\r
2458         }\r
2459         else if( cur_seg < p_rmpp->expected_seg )\r
2460         {\r
2461                 /* We received an old segment.  Resend the last ACK. */\r
2462                 p_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );\r
2463                 cl_status = CL_DUPLICATE;\r
2464         }\r
2465         else\r
2466         {\r
2467                 /* The sender is confused, ignore this MAD.  We could ABORT here. */\r
2468                 cl_status = CL_OVERRUN;\r
2469         }\r
2470 \r
2471         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2472 \r
2473         /*\r
2474          * Send any response MAD (ACK, ABORT, etc.) to the sender.  Note that\r
2475          * we are currently in the callback from the MAD dispatcher.  The\r
2476          * dispatcher holds a reference on the MAD service while in the callback,\r
2477          * preventing the MAD service from being destroyed.  This allows the\r
2478          * call to ib_send_mad() to proceed even if the user tries to destroy\r
2479          * the MAD service.\r
2480          */\r
2481         if( p_rmpp_resp_mad )\r
2482         {\r
2483                 status = ib_send_mad( h_mad_svc, p_rmpp_resp_mad, NULL );\r
2484                 if( status != IB_SUCCESS )\r
2485                 {\r
2486                         /* Return the MAD.  The MAD is considered dropped. */\r
2487                         ib_put_mad( p_rmpp_resp_mad );\r
2488                 }\r
2489         }\r
2490 \r
2491         return cl_status;\r
2492 }\r
2493 \r
2494 \r
2495 \r
2496 /*\r
2497  * Locate an existing RMPP MAD being reassembled.  Return NULL if one is not\r
2498  * found.  This call assumes access to the recv_list is synchronized.\r
2499  */\r
2500 static al_mad_rmpp_t*\r
2501 __find_rmpp(\r
2502         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2503         IN      OUT                     ib_mad_element_t                        *p_mad_element )\r
2504 {\r
2505         al_mad_rmpp_t                   *p_rmpp;\r
2506         cl_list_item_t                  *p_list_item;\r
2507         ib_mad_t                                *p_mad_hdr, *p_mad_hdr2;\r
2508         ib_mad_element_t                *p_mad_element2;\r
2509 \r
2510 \r
2511         p_mad_hdr = ib_get_mad_buf( p_mad_element );\r
2512 \r
2513         /* Search all MADs being reassembled. */\r
2514         for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );\r
2515                  p_list_item != cl_qlist_end( &h_mad_svc->recv_list );\r
2516                  p_list_item = cl_qlist_next( p_list_item ) )\r
2517         {\r
2518                 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );\r
2519 \r
2520                 p_mad_element2 = p_rmpp->p_mad_element;\r
2521                 p_mad_hdr2 = ib_get_mad_buf( p_mad_element2 );\r
2522 \r
2523                 /* See if the incoming MAD matches this reassembly on all key fields. */\r
2524                 if( (p_mad_hdr->trans_id                == p_mad_hdr2->trans_id)                &&\r
2525                         (p_mad_hdr->class_ver           == p_mad_hdr2->class_ver)               &&\r
2526                         (p_mad_hdr->mgmt_class          == p_mad_hdr2->mgmt_class)              &&\r
2527                         (p_mad_hdr->method                      == p_mad_hdr2->method)                  &&\r
2528                         (p_mad_element->remote_lid      == p_mad_element2->remote_lid)  &&\r
2529                         (p_mad_element->remote_qp       == p_mad_element2->remote_qp) )\r
2530                 {\r
2531                         return p_rmpp;\r
2532                 }\r
2533         }\r
2534 \r
2535         return NULL;\r
2536 }\r
2537 \r
2538 \r
2539 \r
2540 /*\r
2541  * Acquire a new RMPP tracking structure.  This call assumes access to\r
2542  * the recv_list is synchronized.\r
2543  */\r
2544 static al_mad_rmpp_t*\r
2545 __get_mad_rmpp(\r
2546         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2547         IN                              ib_mad_element_t                        *p_mad_element )\r
2548 {\r
2549         al_mad_rmpp_t           *p_rmpp;\r
2550         al_mad_element_t        *p_al_element;\r
2551 \r
2552         p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );\r
2553 \r
2554         /* Get an RMPP tracking structure. */\r
2555         p_rmpp = get_mad_rmpp( p_al_element );\r
2556         if( !p_rmpp )\r
2557                 return NULL;\r
2558 \r
2559         /* Initialize the tracking information. */\r
2560         p_rmpp->expected_seg = 1;\r
2561         p_rmpp->seg_limit = 1;\r
2562         p_rmpp->inactive = FALSE;\r
2563         p_rmpp->p_mad_element = p_mad_element;\r
2564 \r
2565         /* Insert the tracking structure into the reassembly list. */\r
2566         cl_qlist_insert_tail( &h_mad_svc->recv_list,\r
2567                 (cl_list_item_t*)&p_rmpp->pool_item );\r
2568 \r
2569         return p_rmpp;\r
2570 }\r
2571 \r
2572 \r
2573 \r
2574 /*\r
2575  * Return the RMPP tracking structure.  This call assumes access to\r
2576  * the recv_list is synchronized.\r
2577  */\r
2578 static void\r
2579 __put_mad_rmpp(\r
2580         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2581         IN                              al_mad_rmpp_t                           *p_rmpp )\r
2582 {\r
2583         /* Remove the tracking structure from the reassembly list. */\r
2584         cl_qlist_remove_item( &h_mad_svc->recv_list,\r
2585                 (cl_list_item_t*)&p_rmpp->pool_item );\r
2586 \r
2587         /* Return the RMPP tracking structure. */\r
2588         put_mad_rmpp( p_rmpp );\r
2589 }\r
2590 \r
2591 \r
2592 \r
2593 /*\r
2594  * Process a received RMPP segment.  Copy the data into our receive buffer,\r
2595  * update the expected segment, and send an ACK if needed.\r
2596  */\r
2597 static cl_status_t\r
2598 __process_segment(\r
2599         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2600         IN                              al_mad_rmpp_t                           *p_rmpp,\r
2601         IN      OUT                     ib_mad_element_t                        **pp_mad_element,\r
2602                 OUT                     ib_mad_element_t                        **pp_rmpp_resp_mad )\r
2603 {\r
2604         ib_rmpp_mad_t                   *p_rmpp_hdr;\r
2605         uint32_t                                cur_seg;\r
2606         ib_api_status_t                 status;\r
2607         cl_status_t                             cl_status;\r
2608         uint8_t                                 *p_dst_seg, *p_src_seg;\r
2609         uint32_t                                paylen;\r
2610 \r
2611         CL_ASSERT( h_mad_svc && p_rmpp && pp_mad_element && *pp_mad_element );\r
2612 \r
2613         p_rmpp_hdr = (ib_rmpp_mad_t*)(*pp_mad_element)->p_mad_buf;\r
2614         cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );\r
2615         CL_ASSERT( cur_seg == p_rmpp->expected_seg );\r
2616         CL_ASSERT( cur_seg <= p_rmpp->seg_limit );\r
2617 \r
2618         /* See if the receive has been fully reassembled. */\r
2619         if( ib_rmpp_is_flag_set( p_rmpp_hdr, IB_RMPP_FLAG_LAST ) )\r
2620                 cl_status = CL_SUCCESS;\r
2621         else\r
2622                 cl_status = CL_NOT_DONE;\r
2623         \r
2624         /* Save the payload length for later use. */\r
2625         paylen = cl_ntoh32(p_rmpp_hdr->paylen_newwin);\r
2626 \r
2627         /* The first segment's element serves as the reassembly buffer, so copy data only from later segments. */\r
2628         if( *pp_mad_element != p_rmpp->p_mad_element )\r
2629         {\r
2630                 /* SA MADs carry a larger class header, so use the SA offsets and data size. */\r
2631                 if( (*pp_mad_element)->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
2632                 {\r
2633                         /* Copy the received data into our reassembly buffer. */\r
2634                         p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +\r
2635                                 IB_SA_MAD_HDR_SIZE;\r
2636                         p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +\r
2637                                 IB_SA_MAD_HDR_SIZE + IB_SA_DATA_SIZE * (cur_seg - 1);\r
2638                         cl_memcpy( p_dst_seg, p_src_seg, IB_SA_DATA_SIZE );\r
2639                 }\r
2640                 else \r
2641                 {\r
2642                         /* Copy the received data into our reassembly buffer. */\r
2643                         p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +\r
2644                                 MAD_RMPP_HDR_SIZE;\r
2645                         p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +\r
2646                                 MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1);\r
2647                         cl_memcpy( p_dst_seg, p_src_seg, MAD_RMPP_DATA_SIZE );\r
2648                 }\r
2649                 /* This MAD is no longer needed. */\r
2650                 ib_put_mad( *pp_mad_element );\r
2651         }\r
2652 \r
2653         /* If this was the last segment, update the size of the reassembled MAD. */\r
2654         if ( cl_status == CL_SUCCESS )\r
2655         {\r
2656                 if (p_rmpp->p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )\r
2657                 {\r
2658                         /*\r
2659                          * Note that the reported paylen includes one extra SA header size,\r
2660                          * so we only add the RMPP header size of the first segment here.\r
2661                          */\r
2662                         p_rmpp->p_mad_element->size = \r
2663                                 MAD_RMPP_HDR_SIZE + IB_SA_DATA_SIZE *(cur_seg - 1) + paylen;\r
2664                 }\r
2665                 else\r
2666                 {\r
2667                          p_rmpp->p_mad_element->size = \r
2668                                 MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1) + paylen;\r
2669                 }\r
2670         }\r
2671 \r
2672         /*\r
2673          * We are ready to accept the next segment.  We increment expected segment\r
2674          * even if we're done, so that ACKs correctly report the last segment.\r
2675          */\r
2676         p_rmpp->expected_seg++;\r
2677 \r
2678         /* Mark the RMPP as inactive if the MAD service is being destroyed. */\r
2679         p_rmpp->inactive = (h_mad_svc->obj.state == CL_DESTROYING);\r
2680 \r
2681         /* See if the receive has been fully reassembled. */\r
2682         if( cl_status == CL_NOT_DONE && cur_seg == p_rmpp->seg_limit )\r
2683         {\r
2684                 /* Allocate more segments for the incoming receive. */\r
2685                 status = al_resize_mad( p_rmpp->p_mad_element,\r
2686                         p_rmpp->p_mad_element->size + AL_RMPP_WINDOW * MAD_RMPP_DATA_SIZE );\r
2687 \r
2688                 /* If we couldn't resize the buffer, the MAD is simply dropped. */\r
2689                 if( status == IB_SUCCESS )\r
2690                 {\r
2691                         /* Send an ACK indicating that more space is available. */\r
2692                         p_rmpp->seg_limit += AL_RMPP_WINDOW;\r
2693                         *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );\r
2694                 }\r
2695         }\r
2696         else if( cl_status == CL_SUCCESS )\r
2697         {\r
2698                 /* Return the element referencing the reassembled MAD. */\r
2699                 *pp_mad_element = p_rmpp->p_mad_element;\r
2700                 *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );\r
2701         }\r
2702 \r
2703         return cl_status;\r
2704 }\r
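
/*
 * Worked example (illustrative, hypothetical values): a non-SA RMPP
 * transfer arrives in three segments, and the segment carrying the LAST
 * flag reports paylen = 100.  When that segment is processed, cur_seg is 3,
 * so the reassembled element size becomes
 *
 *      MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (3 - 1) + 100
 *
 * i.e. the RMPP header once, two full data segments, plus the partial
 * payload of the final segment.  If instead a segment fills the window
 * (cur_seg == seg_limit) without the LAST flag, the buffer is grown by
 * AL_RMPP_WINDOW * MAD_RMPP_DATA_SIZE bytes, seg_limit is raised by
 * AL_RMPP_WINDOW, and an ACK advertising the larger window is returned.
 */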
2705 \r
2706 \r
2707 \r
2708 /*\r
2709  * Get an ACK message to return to the sender of an RMPP MAD.\r
2710  */\r
2711 static ib_mad_element_t*\r
2712 __get_rmpp_ack(\r
2713         IN                              al_mad_rmpp_t                           *p_rmpp )\r
2714 {\r
2715         ib_mad_element_t                *p_mad_element;\r
2716         al_mad_element_t                *p_al_element;\r
2717         ib_api_status_t                 status;\r
2718         ib_rmpp_mad_t                   *p_ack_rmpp_hdr, *p_data_rmpp_hdr;\r
2719 \r
2720         /* Get a MAD to carry the ACK. */\r
2721         p_al_element = PARENT_STRUCT( p_rmpp->p_mad_element,\r
2722                 al_mad_element_t, element );\r
2723         status = ib_get_mad( p_al_element->pool_key, MAD_BLOCK_SIZE,\r
2724                 &p_mad_element );\r
2725         if( status != IB_SUCCESS )\r
2726         {\r
2727                 /* Just return.  The ACK will be treated as being dropped. */\r
2728                 return NULL;\r
2729         }\r
2730 \r
2731         /* Format the ACK. */\r
2732         p_ack_rmpp_hdr = ib_get_mad_buf( p_mad_element );\r
2733         p_data_rmpp_hdr = ib_get_mad_buf( p_rmpp->p_mad_element );\r
2734 \r
2735         __init_reply_element( p_mad_element, p_rmpp->p_mad_element );\r
2736 \r
2737         /* Copy the MAD common header. */\r
2738         cl_memcpy( &p_ack_rmpp_hdr->common_hdr, &p_data_rmpp_hdr->common_hdr,\r
2739                 sizeof( ib_mad_t ) );\r
2740 \r
2741         /* Flip the response bit in the method */\r
2742         p_ack_rmpp_hdr->common_hdr.method ^= IB_MAD_METHOD_RESP_MASK;\r
2743 \r
2744         p_ack_rmpp_hdr->rmpp_version = p_data_rmpp_hdr->rmpp_version;\r
2745         p_ack_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_ACK;\r
2746         ib_rmpp_set_resp_time( p_ack_rmpp_hdr, IB_RMPP_NO_RESP_TIME );\r
2747         p_ack_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_ACTIVE;\r
2748         p_ack_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;\r
2749 \r
2750         p_ack_rmpp_hdr->seg_num = cl_hton32( p_rmpp->expected_seg - 1 );\r
2751 \r
2752         if (p_rmpp->seg_limit == p_rmpp->expected_seg - 1 )\r
2753                 p_ack_rmpp_hdr->paylen_newwin = cl_hton32( 1 + p_rmpp->seg_limit);\r
2754         else\r
2755                 p_ack_rmpp_hdr->paylen_newwin = cl_hton32( p_rmpp->seg_limit );\r
2756 \r
2757         return p_mad_element;\r
2758 }\r
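
/*
 * Illustration (hypothetical values): if segments 1 through 4 have been
 * received in order, expected_seg is 5, so the ACK carries seg_num = 4,
 * the last segment received.  paylen_newwin advertises seg_limit, the
 * highest segment number the sender may transmit before waiting for
 * another ACK; only when the window is exactly exhausted
 * (seg_limit == expected_seg - 1) is seg_limit + 1 advertised instead,
 * to keep the transfer moving.
 */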
2759 \r
2760 \r
2761 \r
2762 /*\r
2763  * Copy necessary data between MAD elements to allow the destination\r
2764  * element to be sent to the sender of the source element.\r
2765  */\r
2766 static void\r
2767 __init_reply_element(\r
2768         IN                              ib_mad_element_t                        *p_dst_element,\r
2769         IN                              ib_mad_element_t                        *p_src_element )\r
2770 {\r
2771         p_dst_element->remote_qp = p_src_element->remote_qp;\r
2772         p_dst_element->remote_qkey = p_src_element->remote_qkey;\r
2773 \r
2774         if( p_src_element->grh_valid )\r
2775         {\r
2776                 p_dst_element->grh_valid = p_src_element->grh_valid;\r
2777                 cl_memcpy( p_dst_element->p_grh, p_src_element->p_grh,\r
2778                         sizeof( ib_grh_t ) );\r
2779         }\r
2780 \r
2781         p_dst_element->remote_lid = p_src_element->remote_lid;\r
2782         p_dst_element->remote_sl = p_src_element->remote_sl;\r
2783         p_dst_element->pkey_index = p_src_element->pkey_index;\r
2784         p_dst_element->path_bits = p_src_element->path_bits;\r
2785 }\r
2786 \r
2787 \r
2788 \r
2789 /*\r
2790  * Process an RMPP ACK message.  Continue sending additional segments.\r
2791  */\r
2792 static void\r
2793 __process_rmpp_ack(\r
2794         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2795         IN                              ib_mad_element_t                        *p_mad_element )\r
2796 {\r
2797         ib_mad_send_handle_t    h_send;\r
2798         ib_rmpp_mad_t                   *p_rmpp_mad;\r
2799         boolean_t                               send_done = FALSE;\r
2800         ib_wc_status_t                  wc_status = IB_WCS_SUCCESS;\r
2801 \r
2802         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2803         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2804 \r
2805         /*\r
2806          * Search for the send.  The send may have timed out, been canceled,\r
2807          * or received a response.\r
2808          */\r
2809         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2810         h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );\r
2811         if( !h_send )\r
2812         {\r
2813                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2814                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2815                         ("ACK cannot find a matching send\n") );\r
2816                 return;\r
2817         }\r
2818 \r
2819         /* Drop old ACKs. */\r
2820         if( cl_ntoh32( p_rmpp_mad->seg_num ) < h_send->ack_seg )\r
2821         {\r
2822                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2823                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2824                         ("old ACK - being dropped\n") );\r
2825                 return;\r
2826         }\r
2827 \r
2828         /* Update the acknowledged segment and segment limit. */\r
2829         h_send->ack_seg = cl_ntoh32( p_rmpp_mad->seg_num );\r
2830 \r
2831         /* Keep seg_limit <= total_seg to simplify checks. */\r
2832         if( cl_ntoh32( p_rmpp_mad->paylen_newwin ) > h_send->total_seg )\r
2833                 h_send->seg_limit = h_send->total_seg;\r
2834         else\r
2835                 h_send->seg_limit = cl_ntoh32( p_rmpp_mad->paylen_newwin );\r
2836 \r
2837         /* Reset the current segment to start resending from the ACK. */\r
2838         h_send->cur_seg = h_send->ack_seg + 1;\r
2839 \r
2840         /* If the send is active, we will finish processing it once it completes. */\r
2841         if( h_send->retry_time == MAX_TIME )\r
2842         {\r
2843                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2844                 CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,\r
2845                         ("ACK processed, waiting for send to complete\n") );\r
2846                 return;\r
2847         }\r
2848 \r
2849         /*\r
2850          * Complete the send if all segments have been ack'ed and no\r
2851          * response is expected.  (If the response for a send had already been\r
2852          * received, we would have reported the completion regardless of the\r
2853          * send having been ack'ed.)\r
2854          */\r
2855         CL_ASSERT( !h_send->p_send_mad->resp_expected || !h_send->p_resp_mad );\r
2856         if( (h_send->ack_seg == h_send->total_seg) &&\r
2857                 !h_send->p_send_mad->resp_expected )\r
2858         {\r
2859                 /* The send is done.  All segments have been ack'ed. */\r
2860                 send_done = TRUE;\r
2861         }\r
2862         else if( h_send->ack_seg < h_send->seg_limit )\r
2863         {\r
2864                 /* Send the next segment. */\r
2865                 __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
2866         }\r
2867 \r
2868         if( send_done )\r
2869         {\r
2870                 /* Notify the user of a send completion or error. */\r
2871                 cl_qlist_remove_item( &h_mad_svc->send_list,\r
2872                         (cl_list_item_t*)&h_send->pool_item );\r
2873                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2874                 __notify_send_comp( h_mad_svc, h_send, wc_status );\r
2875         }\r
2876         else\r
2877         {\r
2878                 /* Continue waiting for a response or a larger send window. */\r
2879                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2880         }\r
2881 \r
2882         /*\r
2883          * Resume any sends that can now be sent without holding\r
2884          * the mad service lock.\r
2885          */\r
2886         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
2887 \r
2888         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2889 }\r
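
/*
 * Worked example (hypothetical values): a 10-segment send receives an ACK
 * with seg_num = 4 and paylen_newwin = 8.  ack_seg becomes 4, seg_limit is
 * clamped to min(8, total_seg) = 8, and cur_seg is reset to 5, so
 * __queue_rmpp_seg resumes with segments 5 through 8.  If the send is
 * still posted to hardware (retry_time == MAX_TIME), this processing is
 * instead deferred until the send completion arrives.  The user is only
 * notified once ack_seg == total_seg (here, 10) and no response is
 * expected.
 */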
2890 \r
2891 \r
2892 \r
2893 /*\r
2894  * Process an RMPP STOP or ABORT message.\r
2895  */\r
2896 static void\r
2897 __process_rmpp_nack(\r
2898         IN                              ib_mad_svc_handle_t                     h_mad_svc,\r
2899         IN                              ib_mad_element_t                        *p_mad_element )\r
2900 {\r
2901         ib_mad_send_handle_t    h_send;\r
2902         ib_rmpp_mad_t                   *p_rmpp_mad;\r
2903 \r
2904         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2905         p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );\r
2906 \r
2907         /* Search for the send.  The send may have timed out or been canceled. */\r
2908         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2909         h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );\r
2910         if( !h_send )\r
2911         {\r
2912                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2913                 return;\r
2914         }\r
2915 \r
2916         /* If the send is active, we will finish processing it once it completes. */\r
2917         if( h_send->retry_time == MAX_TIME )\r
2918         {\r
2919                 h_send->canceled = TRUE;\r
2920                 cl_spinlock_release( &h_mad_svc->obj.lock );\r
2921                 CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2922                 return;\r
2923         }\r
2924 \r
2925         /* Fail the send operation. */\r
2926         cl_qlist_remove_item( &h_mad_svc->send_list,\r
2927                 (cl_list_item_t*)&h_send->pool_item );\r
2928         cl_spinlock_release( &h_mad_svc->obj.lock );\r
2929         __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );\r
2930 \r
2931         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2932 }\r
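
/*
 * Behavioral note (added commentary): a STOP or ABORT either fails the
 * send immediately with IB_WCS_CANCELED, or, if the request is still
 * posted to hardware (retry_time == MAX_TIME), merely sets the canceled
 * flag so the failure is reported when the send completion arrives.
 */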
2933 \r
2934 \r
2935 \r
2936 static __inline void\r
2937 __set_retry_time(\r
2938         IN                              ib_mad_send_handle_t            h_send )\r
2939 {\r
2940         h_send->retry_time =\r
2941                 (uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000ULL +\r
2942                 cl_get_time_stamp();\r
2943         h_send->delay = 0;\r
2944 }\r
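
/*
 * Illustration (hypothetical values): with timeout_ms = 40 and a pending
 * delay of 10 ms, the retry time becomes the current timestamp plus
 * (40 + 10) * 1000 = 50,000 microseconds.  The one-shot delay is then
 * cleared so it does not apply to later retries.
 */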
2945 \r
2946 \r
2947 \r
2948 static void\r
2949 __send_timer_cb(\r
2950         IN                              void                                            *context )\r
2951 {\r
2952         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2953 \r
2954         __check_send_queue( (ib_mad_svc_handle_t)context );\r
2955 \r
2956         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2957 }\r
2958 \r
2959 \r
2960 \r
2961 /*\r
2962  * Check the send queue for any sends that have timed out or were canceled\r
2963  * by the user.\r
2964  */\r
2965 static void\r
2966 __check_send_queue(\r
2967         IN                              ib_mad_svc_handle_t                     h_mad_svc )\r
2968 {\r
2969         ib_mad_send_handle_t    h_send;\r
2970         cl_list_item_t                  *p_list_item, *p_next_item;\r
2971         uint64_t                                cur_time;\r
2972         cl_qlist_t                              timeout_list;\r
2973 \r
2974         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
2975 \r
2976         /*\r
2977          * The timeout list is used to call the user back without\r
2978          * holding the lock on the MAD service.\r
2979          */\r
2980         cl_qlist_init( &timeout_list );\r
2981         cur_time = cl_get_time_stamp();\r
2982 \r
2983         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
2984 \r
2985         /* Check all outstanding sends. */\r
2986         for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
2987                  p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
2988                  p_list_item = p_next_item )\r
2989         {\r
2990                 p_next_item = cl_qlist_next( p_list_item );\r
2991                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
2992 \r
2993                 /* See if the request is active. */\r
2994                 if( h_send->retry_time == MAX_TIME )\r
2995                 {\r
2996                         /* The request is still active. */\r
2997                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("active TID:0x%"PRIx64"\n",\r
2998                                 __get_send_tid( h_send )) );\r
2999                         continue;\r
3000                 }\r
3001 \r
3002                 /* The request is not active. */\r
3003                 /* See if the request has been canceled. */\r
3004                 if( h_send->canceled )\r
3005                 {\r
3006                         /* The request has been canceled. */\r
3007                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("canceling TID:0x%"PRIx64"\n",\r
3008                                 __get_send_tid( h_send )) );\r
3009 \r
3010                         h_send->p_send_mad->status = IB_WCS_CANCELED;\r
3011                         cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );\r
3012                         cl_qlist_insert_tail( &timeout_list, p_list_item );\r
3013                         continue;\r
3014                 }\r
3015 \r
3016                 /* Skip requests that have not timed out. */\r
3017                 if( cur_time < h_send->retry_time )\r
3018                 {\r
3019                         /* The request has not timed out. */\r
3020                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("waiting on TID:0x%"PRIx64"\n",\r
3021                                 __get_send_tid( h_send )) );\r
3022 \r
3023                         /* Set the retry timer to the minimum needed time, in ms. */\r
3024                         cl_timer_trim( &h_mad_svc->send_timer,\r
3025                                 ((uint32_t)(h_send->retry_time - cur_time) / 1000) );\r
3026                         continue;\r
3027                 }\r
3028 \r
3029                 /* See if we need to retry the send operation. */\r
3030                 if( h_send->retry_cnt )\r
3031                 {\r
3032                         CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("retrying TID:0x%"PRIx64"\n",\r
3033                                 __get_send_tid( h_send )) );\r
3034 \r
3035                         /* Retry the send. */\r
3036                         h_send->retry_time = MAX_TIME;\r
3037                         h_send->retry_cnt--;\r
3038 \r
3039                         if( h_send->uses_rmpp )\r
3040                         {\r
3041                                 if( h_send->ack_seg < h_send->seg_limit )\r
3042                                 {\r
3043                                         /* Resend all unacknowledged segments. */\r
3044                                         h_send->cur_seg = h_send->ack_seg + 1;\r
3045                                         __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );\r
3046                                 }\r
3047                                 else\r
3048                                 {\r
3049                                         /* The send was delivered.  Continue waiting. */\r
3050                                         __set_retry_time( h_send );\r
3051                                         cl_timer_trim( &h_mad_svc->send_timer,\r
3052                                                 ((uint32_t)(h_send->retry_time - cur_time) / 1000) );\r
3053                                 }\r
3054                         }\r
3055                         else\r
3056                         {\r
3057                                 /* The work request should already be formatted properly. */\r
3058                                 __mad_disp_queue_send( h_mad_svc->h_mad_reg,\r
3059                                         &h_send->mad_wr );\r
3060                         }\r
3061                         continue;\r
3062                 }\r
3063                 /* The request has timed out and has no retries remaining. */\r
3064                 AL_TRACE( AL_DBG_MAD_SVC | AL_DBG_WARN,\r
3065                         ("timing out TID:0x%"PRIx64"\n", __get_send_tid( h_send )) );\r
3066 \r
3067                 h_send->p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;\r
3068                 cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );\r
3069                 cl_qlist_insert_tail( &timeout_list, p_list_item );\r
3070         }\r
3071 \r
3072         cl_spinlock_release( &h_mad_svc->obj.lock );\r
3073 \r
3074         /*\r
3075          * Resume any sends that can now be sent without holding\r
3076          * the mad service lock.\r
3077          */\r
3078         __mad_disp_resume_send( h_mad_svc->h_mad_reg );\r
3079 \r
3080         /* Report all timed out sends to the user. */\r
3081         p_list_item = cl_qlist_remove_head( &timeout_list );\r
3082         while( p_list_item != cl_qlist_end( &timeout_list ) )\r
3083         {\r
3084                 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
3085 \r
3086                 h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,\r
3087                         h_send->p_send_mad );\r
3088                 __cleanup_mad_send( h_mad_svc, h_send );\r
3089                 p_list_item = cl_qlist_remove_head( &timeout_list );\r
3090         }\r
3091         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3092 }\r
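
/*
 * Timer illustration (hypothetical values, added commentary): if a send's
 * retry_time lies 250,000 microseconds in the future, cl_timer_trim() is
 * asked for a 250 ms expiration.  Since cl_timer_trim() only shortens an
 * already-armed timer, the callback fires at the earliest deadline among
 * all pending sends.
 */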
3093 \r
3094 \r
3095 \r
3096 static void\r
3097 __recv_timer_cb(\r
3098         IN                              void                                            *context )\r
3099 {\r
3100         ib_mad_svc_handle_t             h_mad_svc;\r
3101         al_mad_rmpp_t                   *p_rmpp;\r
3102         cl_list_item_t                  *p_list_item, *p_next_item;\r
3103         boolean_t                               restart_timer;\r
3104 \r
3105         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3106 \r
3107         h_mad_svc = (ib_mad_svc_handle_t)context;\r
3108 \r
3109         cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
3110 \r
3111         /* Check all outstanding receives. */\r
3112         for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );\r
3113                  p_list_item != cl_qlist_end( &h_mad_svc->recv_list );\r
3114                  p_list_item = p_next_item )\r
3115         {\r
3116                 p_next_item = cl_qlist_next( p_list_item );\r
3117                 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );\r
3118 \r
3119                 /* Fail all RMPP MADs that have remained inactive. */\r
3120                 if( p_rmpp->inactive )\r
3121                 {\r
3122                         ib_put_mad( p_rmpp->p_mad_element );\r
3123                         __put_mad_rmpp( h_mad_svc, p_rmpp );\r
3124                 }\r
3125                 else\r
3126                 {\r
3127                         /* Mark the RMPP as inactive. */\r
3128                          p_rmpp->inactive = TRUE;\r
3129                 }\r
3130         }\r
3131 \r
3132         restart_timer = !cl_is_qlist_empty( &h_mad_svc->recv_list );\r
3133         cl_spinlock_release( &h_mad_svc->obj.lock );\r
3134 \r
3135         if( restart_timer )\r
3136                 cl_timer_start( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );\r
3137         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3138 }\r
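
/*
 * Timing note (added commentary): the inactive flag implements a two-pass
 * sweep.  A reassembly that sees no new segments is first marked inactive
 * and is only discarded on the following sweep, so a stalled transfer
 * survives between one and two AL_REASSEMBLY_TIMEOUT periods before its
 * MAD is returned to the pool.
 */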
3139 \r
3140 \r
3141 \r
3142 ib_api_status_t\r
3143 ib_local_mad(\r
3144         IN              const   ib_ca_handle_t                          h_ca,\r
3145         IN              const   uint8_t                                         port_num,\r
3146         IN              const   void* const                                     p_mad_in,\r
3147         IN                              void*                                           p_mad_out )\r
3148 {\r
3149         ib_api_status_t                 status;\r
3150 \r
3151         CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3152 \r
3153         if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )\r
3154         {\r
3155                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_CA_HANDLE\n") );\r
3156                 return IB_INVALID_CA_HANDLE;\r
3157         }\r
3158         if( !p_mad_in || !p_mad_out )\r
3159         {\r
3160                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
3161                 return IB_INVALID_PARAMETER;\r
3162         }\r
3163 \r
3164         status = verbs_local_mad( h_ca, port_num, p_mad_in, p_mad_out );\r
3165 \r
3166         CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
3167         return status;\r
3168 }\r
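
/*
 * Usage sketch (illustrative only; buffer names and the port number are
 * hypothetical, and the caller is assumed to already hold an open CA
 * handle and to format its own MAD):
 *
 *      uint8_t         mad_in[MAD_BLOCK_SIZE];   // request built by the caller
 *      uint8_t         mad_out[MAD_BLOCK_SIZE];  // reply filled in by the verbs provider
 *      ib_api_status_t status;
 *
 *      status = ib_local_mad( h_ca, 1, mad_in, mad_out );
 *      if( status != IB_SUCCESS )
 *      {
 *              // IB_INVALID_CA_HANDLE, IB_INVALID_PARAMETER, or a verbs error
 *      }
 *
 * The call is synchronous: it validates its arguments and forwards the MAD
 * to verbs_local_mad() for the given port.
 */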
3169 \r
3170 \r
3171 \r
3172 ib_net32_t\r
3173 al_get_user_tid(\r
3174         IN              const   ib_net64_t                                              tid64 )\r
3175 {\r
3176         al_tid_t                al_tid;\r
3177 \r
3178         al_tid.tid64 = tid64;\r
3179         return( al_tid.tid32.user_tid );\r
3180 }\r
3181 \r
3182 uint32_t\r
3183 al_get_al_tid(\r
3184         IN              const   ib_net64_t                                              tid64 )\r
3185 {\r
3186         al_tid_t                al_tid;\r
3187 \r
3188         al_tid.tid64 = tid64;\r
3189         return( cl_ntoh32( al_tid.tid32.al_tid ) );\r
3190 }\r
3191 \r
3192 void\r
3193 al_set_al_tid(\r
3194         IN                              ib_net64_t*             const                   p_tid64,\r
3195         IN              const   uint32_t                                                tid32 )\r
3196 {\r
3197         al_tid_t                *p_al_tid;\r
3198 \r
3199         p_al_tid = (al_tid_t*)p_tid64;\r
3200 \r
3201         if( tid32 )\r
3202         {\r
3203                 CL_ASSERT( !p_al_tid->tid32.al_tid );\r
3204         }\r
3205 \r
3206         p_al_tid->tid32.al_tid = cl_hton32( tid32 );\r
3207 }\r
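
/*
 * TID layout note (added commentary): al_tid_t overlays the 64-bit wire
 * transaction ID with two 32-bit halves, the caller's user_tid and an
 * AL-owned al_tid.  The AL half is stored in network byte order via
 * cl_hton32(), so a host-order value passed to al_set_al_tid() round-trips
 * unchanged through al_get_al_tid(), while al_get_user_tid() returns the
 * user half exactly as it appears in the MAD, still in network byte order.
 */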