/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <iba/ib_al.h>
#include <complib/cl_byteswap.h>
#include <complib/cl_timer.h>

#include "al_debug.h"
#include "al_res_mgr.h"
#include "al_verbs.h"

#include "ib_common.h"
#define MAX_TIME				CL_CONST64(0xFFFFFFFFFFFFFFFF)
#define MAD_VECTOR_SIZE			8
#define MAX_METHOD				127
#define DEFAULT_RMPP_VERSION	1

#define AL_RMPP_WINDOW			16		/* Max size of RMPP window */
#define AL_REASSEMBLY_TIMEOUT	5000	/* 5 seconds */
static void
__cleanup_mad_disp(
	IN				al_obj_t					*p_obj );

static void
__free_mad_disp(
	IN				al_obj_t					*p_obj );

static cl_status_t
__init_mad_reg(
	IN				void* const					p_element,
	IN				void*						context );

static cl_status_t
__init_version_entry(
	IN				void* const					p_element,
	IN				void*						context );

static void
__destroy_version_entry(
	IN				void* const					p_element,
	IN				void*						context );

static cl_status_t
__init_class_entry(
	IN				void* const					p_element,
	IN				void*						context );

static void
__destroy_class_entry(
	IN				void* const					p_element,
	IN				void*						context );

static __inline uint8_t
__mgmt_class_index(
	IN		const	uint8_t						mgmt_class );

static __inline uint8_t
__mgmt_version_index(
	IN		const	uint8_t						mgmt_version );

static boolean_t
__mad_disp_reg_unsol(
	IN		const	al_mad_disp_handle_t		h_mad_disp,
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN		const	ib_mad_svc_t				*p_mad_svc );

static boolean_t
__use_tid_routing(
	IN		const	ib_mad_t* const				p_mad_hdr,
	IN		const	boolean_t					are_we_sender );

/*
 * Issue a send request to the MAD dispatcher.
 */
static void
__mad_disp_queue_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN				al_mad_wr_t* const			p_mad_wr );

static void
__mad_disp_resume_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg );

static void
__destroying_mad_svc(
	IN				struct _al_obj				*p_obj );

static void
__cleanup_mad_svc(
	IN				struct _al_obj				*p_obj );

static void
__send_timer_cb(
	IN				void						*context );

static void
__check_send_queue(
	IN				ib_mad_svc_handle_t			h_mad_svc );

static void
__recv_timer_cb(
	IN				void						*context );

static ib_api_status_t
__init_send_mad(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN		const	ib_mad_send_handle_t		h_send,
	IN				ib_mad_element_t* const		p_mad_element );

static boolean_t
__does_send_req_rmpp(
	IN		const	ib_mad_svc_type_t			mad_svc_type,
	IN		const	ib_mad_element_t* const		p_mad_element,
		OUT			uint8_t						*p_rmpp_version );

static void
__queue_mad_wr(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN		const	ib_mad_send_handle_t		h_send );

static void
__queue_rmpp_seg(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN				ib_mad_send_handle_t		h_send );

static ib_api_status_t
__create_send_av(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send );

static void
__cleanup_mad_send(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send );

static __inline void
__set_retry_time(
	IN				ib_mad_send_handle_t		h_send );

static void
__mad_svc_send_done(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				al_mad_wr_t					*p_mad_wr,
	IN				ib_wc_t						*p_wc );

static boolean_t
__is_send_mad_done(
	IN				ib_mad_send_handle_t		h_send,
	IN				ib_wc_t						*p_wc );

static void
__notify_send_comp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send,
	IN				ib_wc_status_t				wc_status );

static void
__mad_svc_recv_done(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static void
__process_recv_resp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static cl_status_t
__do_rmpp_recv(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN	OUT			ib_mad_element_t			**pp_mad_element );

static __inline boolean_t
__recv_requires_rmpp(
	IN		const	ib_mad_svc_type_t			mad_svc_type,
	IN		const	ib_mad_element_t* const		p_mad_element );

static __inline boolean_t
__is_internal_send(
	IN		const	ib_mad_svc_type_t			mad_svc_type,
	IN		const	ib_mad_element_t* const		p_mad_element );

static cl_status_t
__process_rmpp_data(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN	OUT			ib_mad_element_t			**pp_mad_element );

static void
__process_rmpp_ack(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static void
__process_rmpp_nack(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static void
__process_segment(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				al_mad_rmpp_t				*p_rmpp,
	IN	OUT			ib_mad_element_t			**pp_mad_element,
		OUT			ib_mad_element_t			**pp_rmpp_resp_mad );

static al_mad_rmpp_t*
__find_rmpp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN	OUT			ib_mad_element_t			*p_mad_element );

static al_mad_rmpp_t*
__get_mad_rmpp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t			*p_mad_element );

static void
__put_mad_rmpp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				al_mad_rmpp_t				*p_rmpp );

static void
__init_reply_element(
	IN				ib_mad_element_t			*p_dst_element,
	IN				ib_mad_element_t			*p_src_element );

static ib_mad_element_t*
__get_rmpp_resp_mad(
	IN				al_mad_rmpp_t				*p_rmpp );
static uint64_t
__get_send_tid(
	IN				ib_mad_send_handle_t		h_send )
{
	return ((ib_mad_t*)ib_get_mad_buf( h_send->p_send_mad ))->trans_id;
}


ib_mad_t*
get_mad_hdr_from_wr(
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_send_handle_t	h_send;

	CL_ASSERT( p_mad_wr );

	h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	return h_send->p_send_mad->p_mad_buf;
}
/*
 * Construct a MAD element from a receive work completion.
 */
void
build_mad_recv(
	IN				ib_mad_element_t*			p_mad_element,
	IN				ib_wc_t*					p_wc )
{
	CL_ENTER( AL_DBG_SMI, g_al_dbg_lvl );

	CL_ASSERT( p_mad_element );
	CL_ASSERT( p_wc );

	/* Build the MAD element from the work completion. */
	p_mad_element->status = p_wc->status;
	p_mad_element->remote_qp = p_wc->recv.ud.remote_qp;

	/*
	 * We assume all communicating managers using MAD services use
	 * the same well known Q_KEY.
	 */
	/*
	 * Mellanox workaround:
	 * The Q_KEY from the QP context must be used if the high bit is
	 * set in the Q_KEY part of the work request.  See section 10.2.5
	 * on Q_KEYS Compliance Statement C10-15.
	 * This must be enabled to permit future non special QP's to have
	 * MAD level service capability.  To use SAR in a generic way.
	 */
	/*
	 * p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
	 */
	p_mad_element->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
	p_mad_element->remote_lid = p_wc->recv.ud.remote_lid;
	p_mad_element->remote_sl = p_wc->recv.ud.remote_sl;
	p_mad_element->pkey_index = p_wc->recv.ud.pkey_index;
	p_mad_element->path_bits = p_wc->recv.ud.path_bits;
	p_mad_element->recv_opt = p_wc->recv.ud.recv_opt;
	p_mad_element->grh_valid = p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID;

	if( p_wc->recv.ud.recv_opt & IB_RECV_OPT_IMMEDIATE )
		p_mad_element->immediate_data = p_wc->recv.ud.immediate_data;

	CL_EXIT( AL_DBG_SMI, g_al_dbg_lvl );
}
ib_api_status_t
create_mad_disp(
	IN				al_obj_t* const				p_parent_obj,
	IN		const	ib_qp_handle_t				h_qp,
	IN				al_mad_disp_handle_t* const	ph_mad_disp )
{
	al_mad_disp_handle_t	h_mad_disp;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	h_mad_disp = cl_zalloc( sizeof( al_mad_disp_t ) );
	if( !h_mad_disp )
	{
		CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("insufficient memory\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Initialize the MAD dispatcher. */
	cl_vector_construct( &h_mad_disp->client_vector );
	cl_vector_construct( &h_mad_disp->version_vector );
	construct_al_obj( &h_mad_disp->obj, AL_OBJ_TYPE_MAD_DISP );
	status = init_al_obj( &h_mad_disp->obj, h_mad_disp, TRUE,
		NULL, __cleanup_mad_disp, __free_mad_disp );
	if( status != IB_SUCCESS )
	{
		CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("init obj: %s\n",
			ib_get_err_str(status)) );
		__free_mad_disp( &h_mad_disp->obj );
		return status;
	}
	status = attach_al_obj( p_parent_obj, &h_mad_disp->obj );
	if( status != IB_SUCCESS )
	{
		h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Obtain a reference to the QP to post sends to. */
	h_mad_disp->h_qp = h_qp;
	ref_al_obj( &h_qp->obj );

	/* Create the client vector. */
	cl_status = cl_vector_init( &h_mad_disp->client_vector, 1, MAD_VECTOR_SIZE,
		sizeof( al_mad_disp_reg_t ), __init_mad_reg, NULL, h_mad_disp );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	/* Create the version vector. */
	cl_status = cl_vector_init( &h_mad_disp->version_vector,
		1, 1, sizeof( cl_vector_t ), __init_version_entry,
		__destroy_version_entry, &h_mad_disp->version_vector );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	*ph_mad_disp = h_mad_disp;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &h_mad_disp->obj );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}
static void
__cleanup_mad_disp(
	IN				al_obj_t					*p_obj )
{
	al_mad_disp_handle_t	h_mad_disp;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( p_obj );
	h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );

	/* Detach from the QP that we were using. */
	if( h_mad_disp->h_qp )
		deref_al_obj( &h_mad_disp->h_qp->obj );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}


static void
__free_mad_disp(
	IN				al_obj_t					*p_obj )
{
	al_mad_disp_handle_t	h_mad_disp;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( p_obj );
	h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );

	cl_vector_destroy( &h_mad_disp->client_vector );
	cl_vector_destroy( &h_mad_disp->version_vector );
	destroy_al_obj( p_obj );
	cl_free( h_mad_disp );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
static al_mad_reg_handle_t
__mad_disp_reg(
	IN		const	al_mad_disp_handle_t		h_mad_disp,
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN		const	ib_mad_svc_t				*p_mad_svc,
	IN		const	pfn_mad_svc_send_done_t		pfn_send_done,
	IN		const	pfn_mad_svc_recv_done_t		pfn_recv_done )
{
	al_mad_reg_handle_t		h_mad_reg;
	size_t					i;
	cl_status_t				cl_status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	cl_spinlock_acquire( &h_mad_disp->obj.lock );

	/* Find an empty slot in the client vector for the registration. */
	for( i = 0; i < cl_vector_get_size( &h_mad_disp->client_vector ); i++ )
	{
		h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );
		if( !h_mad_reg->ref_cnt )
			break;
	}
	/* Trap for ClientID overflow. */
	if( i >= 0xFFFFFFFF )
	{
		cl_spinlock_release( &h_mad_disp->obj.lock );
		return NULL;
	}
	cl_status = cl_vector_set_min_size( &h_mad_disp->client_vector, i+1 );
	if( cl_status != CL_SUCCESS )
	{
		cl_spinlock_release( &h_mad_disp->obj.lock );
		return NULL;
	}
	h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );

	/* Record the registration. */
	h_mad_reg->client_id = (uint32_t)i;
	h_mad_reg->support_unsol = p_mad_svc->support_unsol;
	h_mad_reg->mgmt_class = p_mad_svc->mgmt_class;
	h_mad_reg->mgmt_version = p_mad_svc->mgmt_version;
	h_mad_reg->pfn_recv_done = pfn_recv_done;
	h_mad_reg->pfn_send_done = pfn_send_done;

	/* If the client requires support for unsolicited MADs, add tracking. */
	if( p_mad_svc->support_unsol )
	{
		if( !__mad_disp_reg_unsol( h_mad_disp, h_mad_reg, p_mad_svc ) )
		{
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("reg unsol failed\n") );
			return NULL;
		}
	}

	/* Record that the registration was successful. */
	h_mad_reg->h_mad_svc = h_mad_svc;
	h_mad_reg->ref_cnt = 1;
	cl_spinlock_release( &h_mad_disp->obj.lock );

	/* The MAD service needs to take a reference on the dispatcher. */
	ref_al_obj( &h_mad_disp->obj );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return h_mad_reg;
}
static cl_status_t
__init_mad_reg(
	IN				void* const					p_element,
	IN				void*						context )
{
	al_mad_reg_handle_t		h_mad_reg;

	/* Record the MAD dispatcher for the registration structure. */
	h_mad_reg = p_element;
	h_mad_reg->h_mad_disp = context;
	h_mad_reg->ref_cnt = 0;

	return CL_SUCCESS;
}


/*
 * Initialize an entry in the version vector.  Each entry is a vector of
 * classes.
 */
static cl_status_t
__init_version_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_vector_t		*p_vector;

	p_vector = p_element;
	UNUSED_PARAM( context );

	cl_vector_construct( p_vector );
	return cl_vector_init( p_vector, MAD_VECTOR_SIZE, MAD_VECTOR_SIZE,
		sizeof( cl_ptr_vector_t ), __init_class_entry, __destroy_class_entry,
		p_vector );
}


static void
__destroy_version_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_vector_t		*p_vector;

	p_vector = p_element;
	UNUSED_PARAM( context );

	cl_vector_destroy( p_vector );
}


/*
 * Initialize an entry in the class vector.  Each entry is a pointer vector
 * of methods.
 */
static cl_status_t
__init_class_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_ptr_vector_t		*p_ptr_vector;

	p_ptr_vector = p_element;
	UNUSED_PARAM( context );

	cl_ptr_vector_construct( p_ptr_vector );
	return cl_ptr_vector_init( p_ptr_vector,
		MAD_VECTOR_SIZE, MAD_VECTOR_SIZE );
}


static void
__destroy_class_entry(
	IN				void* const					p_element,
	IN				void*						context )
{
	cl_ptr_vector_t		*p_ptr_vector;

	p_ptr_vector = p_element;
	UNUSED_PARAM( context );

	cl_ptr_vector_destroy( p_ptr_vector );
}
/*
 * Add support for unsolicited MADs for the given MAD service.
 */
static boolean_t
__mad_disp_reg_unsol(
	IN		const	al_mad_disp_handle_t		h_mad_disp,
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN		const	ib_mad_svc_t				*p_mad_svc )
{
	cl_status_t		cl_status;
	cl_vector_t		*p_class_vector;
	cl_ptr_vector_t	*p_method_ptr_vector;
	size_t			i;

	/* Ensure that we are ready to handle this version number. */
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	cl_status = cl_vector_set_min_size( &h_mad_disp->version_vector,
		__mgmt_version_index( p_mad_svc->mgmt_version ) + 1 );
	if( cl_status != CL_SUCCESS )
		return FALSE;

	/* Get the list of classes in use for this version. */
	p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
		__mgmt_version_index( p_mad_svc->mgmt_version ) );

	/* Ensure that we are ready to handle the specified class. */
	cl_status = cl_vector_set_min_size( p_class_vector,
		__mgmt_class_index( p_mad_svc->mgmt_class ) + 1 );
	if( cl_status != CL_SUCCESS )
		return FALSE;

	/* Get the list of methods in use for this class. */
	p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
		__mgmt_class_index( p_mad_svc->mgmt_class ) );

	/* Ensure that we can handle all requested methods. */
	for( i = MAX_METHOD - 1; i > 0; i-- )
	{
		if( p_mad_svc->method_array[i] )
		{
			cl_status = cl_ptr_vector_set_min_size( p_method_ptr_vector, i+1 );
			if( cl_status != CL_SUCCESS )
				return FALSE;

			/* No one else can be registered for this method. */
			if( cl_ptr_vector_get( p_method_ptr_vector, i ) )
			{
				CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,
					("Other client already registered for Un-Solicited Method "
					"%u for version %u of class %u.\n", i, p_mad_svc->mgmt_version,
					p_mad_svc->mgmt_class ) );
				return FALSE;
			}
		}
	}

	/* We can support the request.  Record the methods. */
	for( i = 0; i < MAX_METHOD; i++ )
	{
		if( p_mad_svc->method_array[i] )
		{
			cl_ptr_vector_set( p_method_ptr_vector, i, h_mad_reg );

			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("Register version:%u (%u) class:0x%02X(%u) method:0x%02X Hdl:%p\n",
				p_mad_svc->mgmt_version,
				__mgmt_version_index( p_mad_svc->mgmt_version ),
				p_mad_svc->mgmt_class,
				__mgmt_class_index( p_mad_svc->mgmt_class ),
				i, h_mad_reg) );
		}
	}

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return TRUE;
}
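/*
 * Unsolicited registrations form a three-level lookup:
 * version_vector[__mgmt_version_index(ver)] holds a class vector, whose
 * __mgmt_class_index(class) entry is a pointer vector indexed by method.
 * As an illustration, an SA service (class 0x03, class version 2)
 * registering for GetTable (method 0x12) lands its registration handle at
 * version_vector[1] -> classes[0x03] -> methods[0x12].
 */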
static __inline uint8_t
__mgmt_version_index(
	IN		const	uint8_t						mgmt_version )
{
	return (uint8_t)(mgmt_version - 1);
}


static __inline uint8_t
__mgmt_class_index(
	IN		const	uint8_t						mgmt_class )
{
	/* Map class 0x81 to 0x01 to remove empty class values. */
	if( mgmt_class == IB_MCLASS_SUBN_DIR )
		return IB_MCLASS_SUBN_LID;
	else
		return mgmt_class;
}
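/*
 * With this mapping, directed-route SMPs (class 0x81) share the lookup
 * slot of LID-routed SMPs (class 0x01), so a single registration covers
 * both forms and the class vector never needs to grow to cover the 0x81
 * value; all other classes map to themselves.
 */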
/*
 * Deregister a MAD service from the dispatcher.
 */
static void
__mad_disp_dereg(
	IN		const	al_mad_reg_handle_t			h_mad_reg )
{
	al_mad_disp_handle_t	h_mad_disp;
	cl_vector_t				*p_class_vector;
	cl_ptr_vector_t			*p_method_ptr_vector;
	size_t					i;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	h_mad_disp = h_mad_reg->h_mad_disp;

	cl_spinlock_acquire( &h_mad_disp->obj.lock );

	if( h_mad_reg->support_unsol )
	{
		/* Deregister the service from receiving unsolicited MADs. */
		p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
			__mgmt_version_index( h_mad_reg->mgmt_version ) );

		p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
			__mgmt_class_index( h_mad_reg->mgmt_class ) );

		/* Deregister all methods registered to the client. */
		for( i = 0; i < cl_ptr_vector_get_size( p_method_ptr_vector ); i++ )
		{
			if( cl_ptr_vector_get( p_method_ptr_vector, i ) == h_mad_reg )
				cl_ptr_vector_set( p_method_ptr_vector, i, NULL );
		}
	}

	cl_spinlock_release( &h_mad_disp->obj.lock );

	/* Decrement the reference count in the registration table. */
	cl_atomic_dec( &h_mad_reg->ref_cnt );

	/* The MAD service no longer requires access to the MAD dispatcher. */
	deref_al_obj( &h_mad_disp->obj );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
static void
__mad_disp_queue_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN				al_mad_wr_t* const			p_mad_wr )
{
	ib_mad_t		*p_mad_hdr;

	/*
	 * Increment the reference count on the registration to ensure that
	 * the MAD service does not go away until the send completes.
	 */
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	cl_atomic_inc( &h_mad_reg->ref_cnt );
	ref_al_obj( &h_mad_reg->h_mad_svc->obj );

	/* Get the MAD header. */
	p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );
	CL_ASSERT( !p_mad_wr->send_wr.wr_id );
	p_mad_wr->send_wr.wr_id = (uintn_t)p_mad_wr;

	/*
	 * If we are the originator of the transaction, we need to modify the
	 * TID to ensure that duplicate TIDs are not used by multiple clients.
	 */
	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("dispatching TID: 0x%0"PRIx64"\n",
		p_mad_hdr->trans_id) );
	p_mad_wr->client_tid = p_mad_hdr->trans_id;
	if( __use_tid_routing( p_mad_hdr, TRUE ) )
	{
		/* Clear the AL portion of the TID before setting. */
		((al_tid_t*)&p_mad_hdr->trans_id)->tid32.al_tid = 0;

/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
		al_set_al_tid( &p_mad_hdr->trans_id, h_mad_reg->client_id );
#pragma warning( pop )

		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("modified TID to: 0x%0"PRIx64"\n", p_mad_hdr->trans_id) );
	}

	/* Post the work request to the QP. */
	p_mad_wr->client_id = h_mad_reg->client_id;
	h_mad_reg->h_mad_disp->h_qp->pfn_queue_mad(
		h_mad_reg->h_mad_disp->h_qp, p_mad_wr );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
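/*
 * A note on the TID scheme (a sketch; the exact field widths are whatever
 * the al_tid_t/tid32 overlay above defines): the 64-bit transaction ID is
 * split into a client-owned half and an AL-owned half.  Before posting,
 * the dispatcher saves the client's original TID and stamps its client_id
 * into the AL half:
 *
 *     p_mad_wr->client_tid = p_mad_hdr->trans_id;
 *     al_set_al_tid( &p_mad_hdr->trans_id, h_mad_reg->client_id );
 *
 * A response carries the same TID back, so mad_disp_recv_done() can
 * recover the owning registration with al_get_al_tid(), and
 * mad_disp_send_done() restores the caller's TID before completing.
 */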
static void
__mad_disp_resume_send(
	IN		const	al_mad_reg_handle_t			h_mad_reg )
{
	AL_ENTER( AL_DBG_MAD_SVC );

	h_mad_reg->h_mad_disp->h_qp->pfn_resume_mad(
		h_mad_reg->h_mad_disp->h_qp );

	AL_EXIT( AL_DBG_MAD_SVC );
}
/*
 * Complete a sent MAD.  Route the completion to the correct MAD service.
 */
void
mad_disp_send_done(
	IN				al_mad_disp_handle_t		h_mad_disp,
	IN				al_mad_wr_t					*p_mad_wr,
	IN				ib_wc_t						*p_wc )
{
	al_mad_reg_handle_t		h_mad_reg;
	ib_mad_t				*p_mad_hdr;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("p_mad_wr 0x%p\n", p_mad_wr ) );

	/* Get the MAD header. */
	p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );

	/* Get the MAD service that issued the send. */
	cl_spinlock_acquire( &h_mad_disp->obj.lock );
	h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
		p_mad_wr->client_id );
	cl_spinlock_release( &h_mad_disp->obj.lock );
	CL_ASSERT( h_mad_reg && (h_mad_reg->client_id == p_mad_wr->client_id) );

	/* Reset the TID and WR ID. */
	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send done TID: 0x%"PRIx64"\n",
		p_mad_hdr->trans_id) );
	p_mad_hdr->trans_id = p_mad_wr->client_tid;
	p_mad_wr->send_wr.wr_id = 0;

	/* Return the completed request to the MAD service. */
	CL_ASSERT( h_mad_reg->h_mad_svc );
	h_mad_reg->pfn_send_done( h_mad_reg->h_mad_svc, p_mad_wr, p_wc );

	/* The MAD service is no longer referenced once the send completes. */
	deref_al_obj( &h_mad_reg->h_mad_svc->obj );
	cl_atomic_dec( &h_mad_reg->ref_cnt );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
/*
 * Process a received MAD.  Route the completion to the correct MAD service.
 */
ib_api_status_t
mad_disp_recv_done(
	IN				al_mad_disp_handle_t		h_mad_disp,
	IN				ib_mad_element_t			*p_mad_element )
{
	ib_mad_t				*p_mad_hdr;
	al_mad_reg_handle_t		h_mad_reg;
	ib_al_handle_t			h_al;
	ib_mad_svc_handle_t		h_mad_svc;

	cl_vector_t				*p_class_vector;
	cl_ptr_vector_t			*p_method_ptr_vector;
	uint8_t					method;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	p_mad_hdr = ib_get_mad_buf( p_mad_element );

	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
		("TID = 0x%"PRIx64 "\n"
		"class = 0x%x.\n"
		"version = 0x%x.\n"
		"method = 0x%x.\n",
		p_mad_hdr->trans_id,
		p_mad_hdr->mgmt_class,
		p_mad_hdr->class_ver,
		p_mad_hdr->method) );

	/* Get the client to route the receive to. */
	cl_spinlock_acquire( &h_mad_disp->obj.lock );
	if( __use_tid_routing( p_mad_hdr, FALSE ) )
	{
		/* The MAD was received in response to a send. */
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("routing based on TID\n"));

		/* Verify that we have a registration entry. */
		if( al_get_al_tid( p_mad_hdr->trans_id ) >=
			cl_vector_get_size( &h_mad_disp->client_vector ) )
		{
			/* No clients for this version-class-method. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("invalid client ID\n") );
			return IB_NOT_FOUND;
		}

		h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
			al_get_al_tid( p_mad_hdr->trans_id ) );

/*
 * Disable warning about passing unaligned 64-bit value.
 * The value is always aligned given how buffers are allocated
 * and given the layout of a MAD.
 */
#pragma warning( push, 3 )
		al_set_al_tid( &p_mad_hdr->trans_id, 0 );
#pragma warning( pop )
	}
	else
	{
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("routing based on version, class, method\n"));

		/* The receive is unsolicited.  Find the client. */
		if( __mgmt_version_index( p_mad_hdr->class_ver ) >=
			cl_vector_get_size( &h_mad_disp->version_vector ) )
		{
			/* No clients for this version of MADs. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for this class version\n") );
			return IB_NOT_FOUND;
		}

		/* See if we have a client for this class of MADs. */
		p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
			__mgmt_version_index( p_mad_hdr->class_ver ) );

		if( __mgmt_class_index( p_mad_hdr->mgmt_class ) >=
			cl_vector_get_size( p_class_vector ) )
		{
			/* No clients for this version-class. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for this class\n") );
			return IB_NOT_FOUND;
		}

		/* See if we have a client for this method. */
		p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
			__mgmt_class_index( p_mad_hdr->mgmt_class ) );
		method = (uint8_t)(p_mad_hdr->method & (~IB_MAD_METHOD_RESP_MASK));

		if( method >= cl_ptr_vector_get_size( p_method_ptr_vector ) )
		{
			/* No clients for this version-class-method. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for this method-out of range\n") );
			return IB_NOT_FOUND;
		}

		h_mad_reg = cl_ptr_vector_get( p_method_ptr_vector, method );
		if( !h_mad_reg )
		{
			/* No clients for this version-class-method. */
			cl_spinlock_release( &h_mad_disp->obj.lock );
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no clients registered for method %u of class %u(%u) version %u(%u)\n",
				method,
				p_mad_hdr->mgmt_class,
				__mgmt_class_index( p_mad_hdr->mgmt_class ),
				p_mad_hdr->class_ver,
				__mgmt_version_index( p_mad_hdr->class_ver )
				) );
			return IB_NOT_FOUND;
		}
	}

	/* Verify that the registration is still valid. */
	if( !h_mad_reg->ref_cnt )
	{
		cl_spinlock_release( &h_mad_disp->obj.lock );
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("no client registered\n") );
		return IB_NOT_FOUND;
	}

	/* Take a reference on the MAD service in case it deregisters. */
	h_mad_svc = h_mad_reg->h_mad_svc;
	ref_al_obj( &h_mad_svc->obj );
	cl_spinlock_release( &h_mad_disp->obj.lock );

	/* Handoff the MAD to the correct AL instance. */
	h_al = qp_get_al( (ib_qp_handle_t)(h_mad_svc->obj.p_parent_obj) );
	al_handoff_mad( h_al, p_mad_element );

	h_mad_reg->pfn_recv_done( h_mad_svc, p_mad_element );
	deref_al_obj( &h_mad_svc->obj );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}
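/*
 * Routing summary: a received MAD takes one of two paths.  If it is the
 * tail end of a transaction we originated (__use_tid_routing() returns
 * TRUE for the receive side), the AL half of the TID indexes the client
 * vector directly.  Otherwise the MAD is unsolicited and is matched
 * through version -> class -> method, which is also why only one service
 * may claim each unsolicited method per class and version.
 */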
/*
 * Return TRUE if we should route the MAD to the recipient based on the TID.
 */
static boolean_t
__use_tid_routing(
	IN		const	ib_mad_t* const				p_mad_hdr,
	IN		const	boolean_t					are_we_sender )
{
	ib_rmpp_mad_t	*p_rmpp_mad;
	boolean_t		is_orig;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	/* CM MADs are never TID routed. */
	if( p_mad_hdr->mgmt_class == IB_MCLASS_COMM_MGMT )
	{
		CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
		return FALSE;
	}

	/*
	 * Determine originator for a sent MAD.  Received MADs are just the
	 * opposite of sends.
	 */
	/* Non-DATA RMPP MADs are handled differently. */
	p_rmpp_mad = (ib_rmpp_mad_t*)p_mad_hdr;
	if( (p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_ADM) &&
		( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) &&
		(p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) )
	{
		/*
		 * We need to distinguish between ACKs sent after receiving
		 * a request, versus ACKs sent after receiving a response.  ACKs
		 * to a request are from the responder.  ACKs to a response are
		 * from the originator.
		 *
		 * Note that we assume STOP and ABORT packets are initiated by
		 * receivers.  If both senders and receivers can
		 * initiate STOP and ABORT MADs, then we can't distinguish which
		 * transaction is associated with the MAD.  The TID for a
		 * send and receive can be the same.
		 */
		is_orig = !ib_mad_is_response( p_mad_hdr );
	}
	else
	{
		/*
		 * See if the MAD is being sent in response to a previous MAD.  If
		 * it is, then we're NOT the originator.  Note that trap repress
		 * MADs are responses, even though the response bit isn't set.
		 */
		is_orig = !( ib_mad_is_response( p_mad_hdr ) ||
			(p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) );
	}

	/* If we're the receiver, toggle the result. */
	if( !are_we_sender )
		is_orig = !is_orig;

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return is_orig;
}
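/*
 * In effect: requests are TID-routed when we send them (we stamp the TID)
 * and again when the matching response arrives; responses and trap
 * represses we send are not re-stamped, and requests we receive are
 * routed by version/class/method instead.  RMPP ACK/STOP/ABORT packets
 * follow whichever transaction their direction implies, under the
 * receiver-initiated assumption spelled out above.
 */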
/*
 * Create and initialize a MAD service for use.
 */
ib_api_status_t
ib_reg_mad_svc(
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_mad_svc_t* const			p_mad_svc,
		OUT			ib_mad_svc_handle_t* const	ph_mad_svc )
{
	ib_api_status_t			status;
	cl_status_t				cl_status;
	ib_mad_svc_handle_t		h_mad_svc;
	al_qp_alias_t			*p_qp_alias;
	ib_qp_attr_t			qp_attr;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( h_qp );

	switch( h_qp->type )
	{
	case IB_QPT_QP0:
	case IB_QPT_QP1:
	case IB_QPT_QP0_ALIAS:
	case IB_QPT_QP1_ALIAS:
	case IB_QPT_MAD:
		break;

	default:
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	if( !p_mad_svc || !ph_mad_svc )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	h_mad_svc = cl_zalloc( sizeof( al_mad_svc_t) );
	if( !h_mad_svc )
	{
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the MAD service. */
	construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC );
	cl_timer_construct( &h_mad_svc->send_timer );
	cl_timer_construct( &h_mad_svc->recv_timer );
	cl_qlist_init( &h_mad_svc->send_list );
	cl_qlist_init( &h_mad_svc->recv_list );

	p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp );
	h_mad_svc->svc_type = p_mad_svc->svc_type;
	h_mad_svc->obj.context = p_mad_svc->mad_svc_context;
	h_mad_svc->pfn_user_recv_cb = p_mad_svc->pfn_mad_recv_cb;
	h_mad_svc->pfn_user_send_cb = p_mad_svc->pfn_mad_send_cb;

	/* Initialize the MAD service. */
	status = init_al_obj( &h_mad_svc->obj, p_mad_svc->mad_svc_context,
		TRUE, __destroying_mad_svc, __cleanup_mad_svc, free_mad_svc );
	if( status != IB_SUCCESS )
	{
		free_mad_svc( &h_mad_svc->obj );
		return status;
	}
	status = attach_al_obj( &h_qp->obj, &h_mad_svc->obj );
	if( status != IB_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	h_mad_svc->h_mad_reg = __mad_disp_reg( p_qp_alias->h_mad_disp,
		h_mad_svc, p_mad_svc, __mad_svc_send_done, __mad_svc_recv_done );
	if( !h_mad_svc->h_mad_reg )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Record which port this MAD service uses, to use when creating AVs. */
	status = ib_query_qp( h_qp, &qp_attr );
	if( status != IB_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return status;
	}
	h_mad_svc->h_pd = qp_attr.h_pd;
	h_mad_svc->port_num = qp_attr.primary_port;

	cl_status = cl_timer_init( &h_mad_svc->send_timer,
		__send_timer_cb, h_mad_svc );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_timer_init( &h_mad_svc->recv_timer,
		__recv_timer_cb, h_mad_svc );
	if( cl_status != CL_SUCCESS )
	{
		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
		return ib_convert_cl_status( cl_status );
	}

	*ph_mad_svc = h_mad_svc;

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}
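/*
 * Typical registration, as a sketch (my_context, my_send_cb, my_recv_cb
 * and the alias QP handle are illustrative only; the fields shown are the
 * ones this routine and the dispatcher actually consume):
 *
 *     ib_mad_svc_t        svc;
 *     ib_mad_svc_handle_t h_svc;
 *
 *     cl_memclr( &svc, sizeof( svc ) );
 *     svc.mad_svc_context = my_context;
 *     svc.pfn_mad_send_cb = my_send_cb;
 *     svc.pfn_mad_recv_cb = my_recv_cb;
 *     svc.svc_type        = IB_MAD_SVC_DEFAULT;
 *     svc.mgmt_class      = IB_MCLASS_SUBN_ADM;
 *     svc.mgmt_version    = 2;
 *     svc.support_unsol   = TRUE;
 *     svc.method_array[IB_MAD_METHOD_REPORT] = TRUE;
 *     status = ib_reg_mad_svc( h_qp_alias, &svc, &h_svc );
 */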
static void
__destroying_mad_svc(
	IN				struct _al_obj				*p_obj )
{
	ib_qp_handle_t			h_qp;
	ib_mad_svc_handle_t		h_mad_svc;
	ib_mad_send_handle_t	h_send;
	cl_list_item_t			*p_list_item;
	int32_t					timeout_ms;
#ifdef CL_KERNEL
	KIRQL					old_irql;
#endif

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( p_obj );
	h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );

	/* Deregister the MAD service. */
	h_qp = (ib_qp_handle_t)p_obj->p_parent_obj;
	if( h_qp->pfn_dereg_mad_svc )
		h_qp->pfn_dereg_mad_svc( h_mad_svc );

	/* Wait here until the MAD service is no longer in use. */
	timeout_ms = (int32_t)h_mad_svc->obj.timeout_ms;
	while( h_mad_svc->ref_cnt && timeout_ms > 0 )
	{
		/* Use a timeout to avoid waiting forever - just in case. */
		cl_thread_suspend( 10 );
		timeout_ms -= 10;
	}

	/*
	 * Deregister from the MAD dispatcher.  The MAD dispatcher holds
	 * a reference on the MAD service when invoking callbacks.  Since we
	 * issue sends, we know how many callbacks are expected for send
	 * completions.  With receive completions, we need to wait until all
	 * receive callbacks have completed before cleaning up receives.
	 */
	if( h_mad_svc->h_mad_reg )
		__mad_disp_dereg( h_mad_svc->h_mad_reg );

	/* Cancel all outstanding send requests. */
	cl_spinlock_acquire( &h_mad_svc->obj.lock );
	for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );
		p_list_item != cl_qlist_end( &h_mad_svc->send_list );
		p_list_item = cl_qlist_next( p_list_item ) )
	{
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("canceling MAD\n") );
		h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
		h_send->canceled = TRUE;
	}
	cl_spinlock_release( &h_mad_svc->obj.lock );

	/*
	 * Invoke the timer callback to return the canceled MADs to the user.
	 * Since the MAD service is being destroyed, the user cannot be issuing
	 * sends.
	 */
#ifdef CL_KERNEL
	old_irql = KeRaiseIrqlToDpcLevel();
#endif
	__check_send_queue( h_mad_svc );
#ifdef CL_KERNEL
	KeLowerIrql( old_irql );
#endif

	cl_timer_destroy( &h_mad_svc->send_timer );

#ifdef CL_KERNEL
	/*
	 * Reclaim any pending receives sent to the proxy for UAL.
	 */
	if( h_mad_svc->obj.h_al->p_context )
	{
		cl_qlist_t			*p_cblist;
		al_proxy_cb_info_t	*p_cb_info;

		cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock );
		p_cblist = &h_mad_svc->obj.h_al->p_context->misc_cb_list;
		p_list_item = cl_qlist_head( p_cblist );
		while( p_list_item != cl_qlist_end( p_cblist ) )
		{
			p_cb_info = (al_proxy_cb_info_t*)p_list_item;
			p_list_item = cl_qlist_next( p_list_item );

			if( p_cb_info->p_al_obj && p_cb_info->p_al_obj == &h_mad_svc->obj )
			{
				cl_qlist_remove_item( p_cblist, &p_cb_info->pool_item.list_item );
				deref_al_obj( p_cb_info->p_al_obj );
				proxy_cb_put( p_cb_info );
			}
		}
		cl_spinlock_release( &h_mad_svc->obj.h_al->p_context->cb_lock );
	}
#endif

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
static void
__cleanup_mad_svc(
	IN				struct _al_obj				*p_obj )
{
	ib_mad_svc_handle_t		h_mad_svc;
	al_mad_rmpp_t			*p_rmpp;
	cl_list_item_t			*p_list_item;

	CL_ASSERT( p_obj );
	h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );

	/*
	 * There are no more callbacks from the MAD dispatcher that are active.
	 * Cleanup any receives that may still be lying around.  Stop the receive
	 * timer to avoid synchronizing with it.
	 */
	cl_timer_destroy( &h_mad_svc->recv_timer );
	for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );
		p_list_item != cl_qlist_end( &h_mad_svc->recv_list );
		p_list_item = cl_qlist_next( p_list_item ) )
	{
		p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );
		p_rmpp->inactive = TRUE;
	}
	__recv_timer_cb( h_mad_svc );

	CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->send_list ) );
	CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->recv_list ) );
}


void
free_mad_svc(
	IN				al_obj_t					*p_obj )
{
	ib_mad_svc_handle_t		h_mad_svc;

	CL_ASSERT( p_obj );
	h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );

	destroy_al_obj( p_obj );
	cl_free( h_mad_svc );
}
ib_api_status_t
ib_send_mad(
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t* const		p_mad_element_list,
		OUT			ib_mad_element_t			**pp_mad_failure OPTIONAL )
{
	ib_api_status_t			status = IB_SUCCESS;
	ib_mad_send_handle_t	h_send;
	ib_mad_element_t		*p_cur_mad, *p_next_mad;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );
		return IB_INVALID_HANDLE;
	}
	if( !p_mad_element_list ||
		( p_mad_element_list->p_next && !pp_mad_failure ) )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	if( h_mad_svc->obj.h_al->p_context )
	{
		/* This is a send from user mode using special QP alias */
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("ib_send_mad: ual_context non-zero, TID = 0x%"PRIx64 ".\n",
			((ib_mad_t*)(ib_get_mad_buf( p_mad_element_list )))->trans_id ));
		status = spl_qp_mad_send( h_mad_svc, p_mad_element_list,
			pp_mad_failure );
		CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
		return status;
	}

	/* Post each send on the list. */
	p_cur_mad = p_mad_element_list;
	while( p_cur_mad )
	{
		p_next_mad = p_cur_mad->p_next;

		/* Get an element to track the send. */
		h_send = get_mad_send( PARENT_STRUCT( p_cur_mad,
			al_mad_element_t, element ) );
		if( !h_send )
		{
			CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("unable to get mad_send\n") );
			if( pp_mad_failure )
				*pp_mad_failure = p_cur_mad;
			return IB_INSUFFICIENT_RESOURCES;
		}
		/* Initialize the MAD for sending. */
		status = __init_send_mad( h_mad_svc, h_send, p_cur_mad );
		if( status != IB_SUCCESS )
		{
			CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("init_send_mad failed: %s\n",
				ib_get_err_str(status)) );
			put_mad_send( h_send );
			if( pp_mad_failure )
				*pp_mad_failure = p_cur_mad;
			return status;
		}

		/* Add the MADs to our list. */
		cl_spinlock_acquire( &h_mad_svc->obj.lock );
		cl_qlist_insert_tail( &h_mad_svc->send_list,
			(cl_list_item_t*)&h_send->pool_item );

		/* Post the MAD to the dispatcher, and check for failures. */
		ref_al_obj( &h_mad_svc->obj );
		p_cur_mad->p_next = NULL;
		if( h_send->uses_rmpp )
			__queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );
		else
			__queue_mad_wr( h_mad_svc->h_mad_reg, h_send );
		cl_spinlock_release( &h_mad_svc->obj.lock );

		p_cur_mad = p_next_mad;
	}

	/*
	 * Resume any sends that can now be sent without holding
	 * the mad service lock.
	 */
	__mad_disp_resume_send( h_mad_svc->h_mad_reg );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return status;
}
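/*
 * A sketch of chained sends (element names are illustrative): elements are
 * linked through p_next before the call, and pp_mad_failure is mandatory
 * for a list so the caller can tell where posting stopped on error:
 *
 *     p_mad_a->p_next = p_mad_b;
 *     p_mad_b->p_next = NULL;
 *     status = ib_send_mad( h_svc, p_mad_a, &p_failed );
 *
 * On IB_SUCCESS every element was queued; otherwise p_failed points at the
 * first element that was not posted.
 */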
static ib_api_status_t
__init_send_mad(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN		const	ib_mad_send_handle_t		h_send,
	IN				ib_mad_element_t* const		p_mad_element )
{
	ib_rmpp_mad_t	*p_rmpp_hdr;
	uint8_t			rmpp_version;
	ib_api_status_t	status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	/* Initialize tracking the send. */
	h_send->p_send_mad = p_mad_element;
	h_send->retry_time = MAX_TIME;
	h_send->retry_cnt = p_mad_element->retry_cnt;

	/* See if the send uses RMPP. */
	h_send->uses_rmpp = __does_send_req_rmpp( h_mad_svc->svc_type,
		p_mad_element, &rmpp_version );
	if( h_send->uses_rmpp )
	{
		/* The RMPP header is present. */
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("RMPP is activated\n") );
		p_rmpp_hdr = (ib_rmpp_mad_t*)p_mad_element->p_mad_buf;

		/* We only support version 1. */
		if( rmpp_version != DEFAULT_RMPP_VERSION )
		{
			CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("unsupported version\n") );
			return IB_INVALID_SETTING;
		}

		p_rmpp_hdr->rmpp_version = rmpp_version;
		p_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_DATA;
		ib_rmpp_set_resp_time( p_rmpp_hdr, IB_RMPP_NO_RESP_TIME );
		p_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;
		/*
		 * The segment number, flags, and payload size are set when
		 * sending, so that they are set correctly when issuing retries.
		 */
		h_send->ack_seg = 0;
		h_send->seg_limit = 1;
		h_send->cur_seg = 1;
		/* For SA RMPP MADS we need different data size and header size */
		if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
		{
			h_send->total_seg = ( (p_mad_element->size - IB_SA_MAD_HDR_SIZE) +
				(IB_SA_DATA_SIZE - 1) ) / IB_SA_DATA_SIZE;
		}
		else
		{
			h_send->total_seg = ( (p_mad_element->size - MAD_RMPP_HDR_SIZE) +
				(MAD_RMPP_DATA_SIZE - 1) ) / MAD_RMPP_DATA_SIZE;
		}
	}

	/* See if we need to create the address vector for the user. */
	if( !p_mad_element->h_av &&
		!( p_mad_element->send_opt & IB_SEND_OPT_LOCAL ) )
	{
		status = __create_send_av( h_mad_svc, h_send );
		if( status != IB_SUCCESS )
			return status;
	}

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}
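/*
 * Worked example of the segment count (assuming the usual sizes: a
 * 256-byte MAD block, a 56-byte SA MAD header giving IB_SA_DATA_SIZE of
 * 200, and a 36-byte RMPP header giving MAD_RMPP_DATA_SIZE of 220): an SA
 * MAD whose element size is 456 bytes yields
 * (456 - 56 + 199) / 200 = 2 segments -- i.e. a ceiling division of the
 * payload by the per-segment data size.
 */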
static ib_api_status_t
__create_send_av(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send )
{
	ib_av_attr_t		av_attr;
	ib_mad_element_t	*p_mad_element;

	p_mad_element = h_send->p_send_mad;

	av_attr.port_num = h_mad_svc->port_num;

	av_attr.sl = p_mad_element->remote_sl;
	av_attr.dlid = p_mad_element->remote_lid;

	av_attr.grh_valid = p_mad_element->grh_valid;
	if( av_attr.grh_valid )
		av_attr.grh = *p_mad_element->p_grh;

	av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
	av_attr.path_bits = p_mad_element->path_bits;

	return ib_create_av( h_mad_svc->h_pd, &av_attr, &h_send->h_av );
}
static boolean_t
__does_send_req_rmpp(
	IN		const	ib_mad_svc_type_t			mad_svc_type,
	IN		const	ib_mad_element_t* const		p_mad_element,
		OUT			uint8_t						*p_rmpp_version )
{
	switch( mad_svc_type )
	{
	case IB_MAD_SVC_DEFAULT:
	case IB_MAD_SVC_RMPP:
		/* Internally generated MADs do not use RMPP. */
		if( __is_internal_send( mad_svc_type, p_mad_element ) )
			return FALSE;

		/* If the MAD has the version number set, just return it. */
		if( p_mad_element->rmpp_version )
		{
			*p_rmpp_version = p_mad_element->rmpp_version;
			return TRUE;
		}

		/* If the class is well known and uses RMPP, use the default version. */
		if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
		{
			switch( p_mad_element->p_mad_buf->method )
			{
			case IB_MAD_METHOD_GETTABLE_RESP:
			case IB_MAD_METHOD_GETMULTI:
			case IB_MAD_METHOD_GETMULTI_RESP:
				*p_rmpp_version = DEFAULT_RMPP_VERSION;
				return TRUE;

			default:
				break;
			}
		}

		/* The RMPP is not active. */
		return FALSE;

	default:
		return FALSE;
	}
}
/*
 * Sends the next RMPP segment of an RMPP transfer.
 */
static void
__queue_rmpp_seg(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN				ib_mad_send_handle_t		h_send )
{
	ib_rmpp_mad_t	*p_rmpp_hdr;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	CL_ASSERT( h_mad_reg && h_send );
	CL_ASSERT( h_send->cur_seg <= h_send->seg_limit );

	/* Reset information to track the send. */
	h_send->retry_time = MAX_TIME;

	/* Set the RMPP header information. */
	p_rmpp_hdr = (ib_rmpp_mad_t*)h_send->p_send_mad->p_mad_buf;
	p_rmpp_hdr->seg_num = cl_hton32( h_send->cur_seg );
	p_rmpp_hdr->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
	p_rmpp_hdr->paylen_newwin = 0;

	/* See if this is the first segment that needs to be sent. */
	if( h_send->cur_seg == 1 )
	{
		p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_FIRST;

		/*
		 * Since the RMPP layer supports SA MADs by duplicating the SA
		 * header in every segment, the actual payload length should
		 * include the original MAD size plus NumSegs * SA-extra-header.
		 */
		if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
		{
			/* Add sa_ext_hdr to each segment over the first one. */
			p_rmpp_hdr->paylen_newwin = cl_hton32(
				h_send->p_send_mad->size - MAD_RMPP_HDR_SIZE +
				(h_send->total_seg - 1) *
				(IB_SA_MAD_HDR_SIZE - MAD_RMPP_HDR_SIZE) );
		}
		else
		{
			/* For other RMPP packets we simply use the given MAD */
			p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
				MAD_RMPP_HDR_SIZE );
		}
	}

	/* See if this is the last segment that needs to be sent. */
	if( h_send->cur_seg == h_send->total_seg )
	{
		p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_LAST;

		/* But for SA MADs we need extra header size */
		if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
		{
			p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
				(h_send->cur_seg -1)*IB_SA_DATA_SIZE - MAD_RMPP_HDR_SIZE );
		}
		else
		{
			p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
				(h_send->cur_seg -1)*MAD_RMPP_DATA_SIZE );
		}
	}

	/* Set the current segment to the next one. */
	h_send->cur_seg++;

	/* Send the MAD. */
	__queue_mad_wr( h_mad_reg, h_send );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
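/*
 * Per the RMPP convention this follows, PayLen is only meaningful on the
 * first and last segments: the first carries the total transfer length
 * (here inflated for SA MADs, whose extra header -- IB_SA_MAD_HDR_SIZE
 * minus MAD_RMPP_HDR_SIZE, 20 bytes with the usual sizes -- rides in
 * every segment), while the last carries just the bytes remaining in that
 * segment.  Middle segments leave paylen_newwin at zero.
 */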
/*
 * Posts a send work request to the dispatcher for a MAD send.
 */
static void
__queue_mad_wr(
	IN		const	al_mad_reg_handle_t			h_mad_reg,
	IN		const	ib_mad_send_handle_t		h_send )
{
	ib_send_wr_t		*p_send_wr;
	al_mad_element_t	*p_al_element;
	ib_rmpp_mad_t		*p_rmpp_hdr;
	uint8_t				*p_rmpp_src, *p_rmpp_dst;
	uintn_t				hdr_len, offset, max_len;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	p_send_wr = &h_send->mad_wr.send_wr;

	cl_memclr( p_send_wr, sizeof( ib_send_wr_t ) );

	p_send_wr->wr_type = WR_SEND;
	p_send_wr->send_opt = h_send->p_send_mad->send_opt;

	p_al_element = PARENT_STRUCT( h_send->p_send_mad,
		al_mad_element_t, element );

	/* See if the MAD requires RMPP support. */
	if( h_send->uses_rmpp && p_al_element->p_al_mad_buf )
	{
#if defined( CL_KERNEL )
		p_rmpp_dst = p_al_element->mad_buf + sizeof(ib_grh_t);
#else
		p_rmpp_dst = (uint8_t*)(uintn_t)p_al_element->mad_ds.vaddr;
#endif
		p_rmpp_src = (uint8_t* __ptr64)h_send->p_send_mad->p_mad_buf;
		p_rmpp_hdr = (ib_rmpp_mad_t*)p_rmpp_src;

		if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
			hdr_len = IB_SA_MAD_HDR_SIZE;
		else
			hdr_len = MAD_RMPP_HDR_SIZE;

		max_len = MAD_BLOCK_SIZE - hdr_len;

		offset = hdr_len + (max_len * (cl_ntoh32( p_rmpp_hdr->seg_num ) - 1));

		/* Copy the header into the registered send buffer. */
		cl_memcpy( p_rmpp_dst, p_rmpp_src, hdr_len );

		/* Copy this segment's payload into the registered send buffer. */
		CL_ASSERT( h_send->p_send_mad->size != offset );
		if( (h_send->p_send_mad->size - offset) < max_len )
		{
			max_len = h_send->p_send_mad->size - offset;
			/* Clear unused payload. */
			cl_memclr( p_rmpp_dst + hdr_len + max_len,
				MAD_BLOCK_SIZE - hdr_len - max_len );
		}

		cl_memcpy(
			p_rmpp_dst + hdr_len, p_rmpp_src + offset, max_len );
	}

	p_send_wr->num_ds = 1;
	p_send_wr->ds_array = &p_al_element->mad_ds;

	p_send_wr->dgrm.ud.remote_qp = h_send->p_send_mad->remote_qp;
	p_send_wr->dgrm.ud.remote_qkey = h_send->p_send_mad->remote_qkey;
	p_send_wr->dgrm.ud.pkey_index = h_send->p_send_mad->pkey_index;

	/* See if we created the address vector on behalf of the user. */
	if( h_send->p_send_mad->h_av )
		p_send_wr->dgrm.ud.h_av = h_send->p_send_mad->h_av;
	else
		p_send_wr->dgrm.ud.h_av = h_send->h_av;

	__mad_disp_queue_send( h_mad_reg, &h_send->mad_wr );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
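/*
 * Offset arithmetic, worked through (usual sizes assumed: 256-byte MAD
 * block, 56-byte SA header): for SA segment 3, hdr_len = 56 and
 * max_len = 200, so offset = 56 + 200 * (3 - 1) = 456.  The segment is
 * assembled in the registered buffer as the 56-byte header followed by
 * source bytes [456, 456 + 200), with any shortfall on the final segment
 * zero-filled.
 */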
static cl_status_t
__mad_svc_find_send(
	IN		const	cl_list_item_t* const		p_list_item,
	IN				void*						context )
{
	ib_mad_send_handle_t	h_send;

	h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );

	if( h_send->p_send_mad == context )
		return CL_SUCCESS;

	return CL_NOT_FOUND;
}
ib_api_status_t
ib_cancel_mad(
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t* const		p_mad_element )
{
	cl_list_item_t			*p_list_item;
	ib_mad_send_handle_t	h_send;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );
		return IB_INVALID_HANDLE;
	}
	if( !p_mad_element )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	if( h_mad_svc->obj.h_al->p_context )
	{
		/* This is a send from user mode using special QP alias */
		status = spl_qp_cancel_mad( h_mad_svc, p_mad_element );
		CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
		return status;
	}

	/* Search for the MAD in our MAD list.  It may have already completed. */
	cl_spinlock_acquire( &h_mad_svc->obj.lock );
	p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,
		__mad_svc_find_send, p_mad_element );
	if( !p_list_item )
	{
		cl_spinlock_release( &h_mad_svc->obj.lock );
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("mad not found\n") );
		return IB_NOT_FOUND;
	}

	/* Mark the MAD as having been canceled. */
	h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
	h_send->canceled = TRUE;

	/* If the MAD is active, process it in the send callback. */
	if( h_send->retry_time != MAX_TIME )
	{
		/* Process the canceled MAD using the timer thread. */
		cl_timer_trim( &h_mad_svc->send_timer, 0 );
	}

	cl_spinlock_release( &h_mad_svc->obj.lock );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return IB_SUCCESS;
}
ib_api_status_t
ib_delay_mad(
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_element_t* const		p_mad_element,
	IN		const	uint32_t					delay_ms )
{
#ifdef CL_KERNEL
	cl_list_item_t			*p_list_item;
	ib_mad_send_handle_t	h_send;
#endif

	AL_ENTER( AL_DBG_MAD_SVC );

	if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
		return IB_INVALID_HANDLE;
	}
	if( !p_mad_element )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

#ifndef CL_KERNEL
	UNUSED_PARAM( p_mad_element );
	UNUSED_PARAM( delay_ms );
	/* TODO: support for user-mode MAD QP's. */
	AL_EXIT( AL_DBG_MAD_SVC );
	return IB_UNSUPPORTED;
#else
	/* Search for the MAD in our MAD list.  It may have already completed. */
	cl_spinlock_acquire( &h_mad_svc->obj.lock );
	p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,
		__mad_svc_find_send, p_mad_element );
	if( !p_list_item )
	{
		cl_spinlock_release( &h_mad_svc->obj.lock );
		AL_TRACE( AL_DBG_MAD_SVC, ("MAD not found\n") );
		return IB_NOT_FOUND;
	}

	/* Delay the MAD's next retry or timeout. */
	h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );

	if( h_send->retry_time == MAX_TIME )
		h_send->delay = delay_ms;
	else
		h_send->retry_time += ((uint64_t)delay_ms * 1000ULL);

	cl_spinlock_release( &h_mad_svc->obj.lock );
	AL_EXIT( AL_DBG_MAD_SVC );
	return IB_SUCCESS;
#endif
}
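/*
 * retry_time doubles as a state flag here: MAX_TIME means the send is
 * queued but not yet on the wire, so the extra delay is parked in
 * h_send->delay for later; any other value means the retry clock is
 * already running and can be pushed out directly.  The delay_ms * 1000
 * scaling suggests the retry clock runs in microseconds.
 */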
/*
 * Process a send completion.
 */
static void
__mad_svc_send_done(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				al_mad_wr_t					*p_mad_wr,
	IN				ib_wc_t						*p_wc )
{
	ib_mad_send_handle_t	h_send;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	CL_ASSERT( h_mad_svc && p_mad_wr && !p_wc->p_next );

	h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send callback TID:0x%"PRIx64"\n",
		__get_send_tid( h_send )) );

	/* We need to synchronize access to the list as well as the MAD request. */
	cl_spinlock_acquire( &h_mad_svc->obj.lock );

	/* Complete internally sent MADs. */
	if( __is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )
	{
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("internal send\n") );
		cl_qlist_remove_item( &h_mad_svc->send_list,
			(cl_list_item_t*)&h_send->pool_item );
		cl_spinlock_release( &h_mad_svc->obj.lock );
		ib_put_mad( h_send->p_send_mad );
		__cleanup_mad_send( h_mad_svc, h_send );
		return;
	}

	/* See if the send request has completed. */
	if( __is_send_mad_done( h_send, p_wc ) )
	{
		/* The send has completed. */
		cl_qlist_remove_item( &h_mad_svc->send_list,
			(cl_list_item_t*)&h_send->pool_item );
		cl_spinlock_release( &h_mad_svc->obj.lock );

		/* Report the send as canceled only if we don't have the response. */
		if( h_send->canceled && !h_send->p_resp_mad )
			__notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );
		else
			__notify_send_comp( h_mad_svc, h_send, p_wc->status );
	}
	else
	{
		/* See if this is an RMPP MAD, and we should send more segments. */
		if( h_send->uses_rmpp && (h_send->cur_seg <= h_send->seg_limit) )
		{
			/* Send the next segment. */
			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("sending next RMPP segment for TID:0x%"PRIx64"\n",
				__get_send_tid( h_send )) );

			__queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );
		}
		else
		{
			/* Continue waiting for a response or ACK. */
			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("waiting for response for TID:0x%"PRIx64"\n",
				__get_send_tid( h_send )) );

			__set_retry_time( h_send );
			cl_timer_trim( &h_mad_svc->send_timer,
				h_send->p_send_mad->timeout_ms );
		}
		cl_spinlock_release( &h_mad_svc->obj.lock );
	}

	/*
	 * Resume any sends that can now be sent without holding
	 * the mad service lock.
	 */
	__mad_disp_resume_send( h_mad_svc->h_mad_reg );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
/*
 * Notify the user of a completed send operation.
 */
static void
__notify_send_comp(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send,
	IN				ib_wc_status_t				wc_status )
{
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("completing TID:0x%"PRIx64"\n",
		__get_send_tid( h_send )) );

	h_send->p_send_mad->status = wc_status;

	/* Notify the user of a received response, if one exists. */
	if( h_send->p_resp_mad )
	{
		h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
			h_send->p_resp_mad );
	}

	/* The transaction has completed, return the send MADs. */
	h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
		h_send->p_send_mad );

	__cleanup_mad_send( h_mad_svc, h_send );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}


/*
 * Return a send MAD tracking structure to its pool and cleanup any resources
 * it may have allocated.
 */
static void
__cleanup_mad_send(
	IN				ib_mad_svc_handle_t			h_mad_svc,
	IN				ib_mad_send_handle_t		h_send )
{
	/* Release any address vectors that we may have created. */
	if( h_send->h_av )
		ib_destroy_av( h_send->h_av );

	/* Return the send MAD tracking structure to its pool. */
	put_mad_send( h_send );

	/* We no longer need to reference the MAD service. */
	deref_al_obj( &h_mad_svc->obj );
}
static boolean_t
__is_send_mad_done(
	IN				ib_mad_send_handle_t		h_send,
	IN				ib_wc_t						*p_wc )
{
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	/* Complete the send if the request failed. */
	if( p_wc->status != IB_WCS_SUCCESS )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("y-send failed\n" ) );
		return TRUE;
	}

	/* Complete the send if it has been canceled. */
	if( h_send->canceled )
	{
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("y-send was canceled\n") );
		return TRUE;
	}

	/* Complete the send if we have its response. */
	if( h_send->p_resp_mad )
	{
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("y-response received\n") );
		return TRUE;
	}

	/* RMPP sends cannot complete until all segments have been acked. */
	if( h_send->uses_rmpp && (h_send->ack_seg < h_send->total_seg) )
	{
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("more RMPP segments to send\n") );
		return FALSE;
	}

	/*
	 * All segments of this send have been sent.
	 * The send has completed if we are not waiting for a response.
	 */
	if( h_send->p_send_mad->resp_expected )
	{
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("no-waiting on response\n") );
		return FALSE;
	}

	CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("send completed\n") );
	return TRUE;
}
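/*
 * Decision order, summarized: failure, cancellation, or an in-hand
 * response each finish the send immediately; otherwise an RMPP send with
 * unacked segments stays open, a request still expecting a response stays
 * open, and everything else is done.
 */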
/*
 * Try to find a send that matches the received response. This call must
 * be synchronized with access to the MAD service send_list.
 */
static ib_mad_send_handle_t
__mad_svc_match_recv(
	IN const ib_mad_svc_handle_t h_mad_svc,
	IN ib_mad_element_t* const p_recv_mad )
{
	ib_mad_t *p_recv_hdr;
	cl_list_item_t *p_list_item;
	ib_mad_send_handle_t h_send;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	p_recv_hdr = p_recv_mad->p_mad_buf;

	/* Search the send list for a matching request. */
	for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );
		 p_list_item != cl_qlist_end( &h_mad_svc->send_list );
		 p_list_item = cl_qlist_next( p_list_item ) )
	{
		h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );

		/* Match on the transaction ID, ignoring internally generated sends. */
		if( (p_recv_hdr->trans_id == h_send->mad_wr.client_tid) &&
			!__is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )
		{
			CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
			return h_send;
		}
	}

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return NULL;
}
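/*
 * A note on matching, since it is easy to miss: the match above is purely
 * on the 64-bit transaction ID.  A responder echoes the request's TID back
 * unchanged, so a receive is paired with the send whose mad_wr.client_tid
 * equals the received trans_id.  The TID helpers at the end of this file
 * show how AL reserves half of that TID for its own use, which helps keep
 * internally generated transactions distinct from user ones.
 */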
\r
static void
__mad_svc_recv_done(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN ib_mad_element_t *p_mad_element )
{
	ib_mad_t *p_mad_hdr;
	cl_status_t cl_status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	p_mad_hdr = ib_get_mad_buf( p_mad_element );
	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("recv done TID:0x%"PRIx64"\n",
		p_mad_hdr->trans_id) );

	/* Raw MAD services get all receives. */
	if( h_mad_svc->svc_type == IB_MAD_SVC_RAW )
	{
		/* Report the receive. */
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("recv TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );
		h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
			p_mad_element );
		return;
	}

	/*
	 * If the response indicates that the responder was busy, continue
	 * retrying the request.
	 */
	if( p_mad_hdr->status & IB_MAD_STATUS_BUSY )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("responder busy TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );
		ib_put_mad( p_mad_element );
		return;
	}

	/* Fully reassemble received MADs before completing them. */
	if( __recv_requires_rmpp( h_mad_svc->svc_type, p_mad_element ) )
	{
		/* Reassemble the receive. */
		cl_status = __do_rmpp_recv( h_mad_svc, &p_mad_element );
		if( cl_status != CL_SUCCESS )
		{
			/* The reassembly is not done. */
			CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("no RMPP receive to report\n") );
			return;
		}

		/*
		 * Get the header of the MAD element to report to the user. This
		 * will be a MAD element received earlier.
		 */
		p_mad_hdr = ib_get_mad_buf( p_mad_element );
	}

	/*
	 * See if the MAD was sent in response to a previously sent MAD. Note
	 * that trap repress messages are responses, even though the response
	 * bit is not set in their method.
	 */
	if( ib_mad_is_response( p_mad_hdr ) ||
		(p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) )
	{
		/* Process the received response. */
		__process_recv_resp( h_mad_svc, p_mad_element );
	}
	else
	{
		/* Report the receive. */
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("unsol recv TID:0x%"PRIx64"\n",
			p_mad_hdr->trans_id) );
		h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
			p_mad_element );
	}

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
\r
/*
 * A MAD was received in response to a send. Find the corresponding send
 * and process the receive completion.
 */
static void
__process_recv_resp(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN ib_mad_element_t *p_mad_element )
{
	ib_mad_t *p_mad_hdr;
	ib_mad_send_handle_t h_send;

	/*
	 * Try to find the send. The send may have already timed out or
	 * have been canceled, so we need to search for it.
	 */
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	p_mad_hdr = ib_get_mad_buf( p_mad_element );
	cl_spinlock_acquire( &h_mad_svc->obj.lock );

	h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );
	if( !h_send )
	{
		/* A matching send was not found. */
		CL_TRACE_EXIT( AL_DBG_WARN, g_al_dbg_lvl,
			("unmatched resp TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );
		cl_spinlock_release( &h_mad_svc->obj.lock );
		ib_put_mad( p_mad_element );
		return;
	}

	/* We've found the matching send. */
	h_send->p_send_mad->status = IB_WCS_SUCCESS;

	/* Record the send contexts with the receive. */
	p_mad_element->send_context1 = (void* __ptr64)h_send->p_send_mad->context1;
	p_mad_element->send_context2 = (void* __ptr64)h_send->p_send_mad->context2;

	if( h_send->retry_time == MAX_TIME )
	{
		/* The send is currently active. Do not report it. */
		CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("resp send active TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );
		h_send->p_resp_mad = p_mad_element;
		cl_spinlock_release( &h_mad_svc->obj.lock );
		return;
	}

	CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
		("resp received TID:0x%"PRIx64"\n", p_mad_hdr->trans_id) );

	/* Report the send completion below. */
	cl_qlist_remove_item( &h_mad_svc->send_list,
		(cl_list_item_t*)&h_send->pool_item );
	cl_spinlock_release( &h_mad_svc->obj.lock );

	/* Report the receive. */
	h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
		p_mad_element );

	/* Report the send completion. */
	h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
		h_send->p_send_mad );
	__cleanup_mad_send( h_mad_svc, h_send );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
\r
/*
 * Return TRUE if a received MAD requires RMPP processing.
 */
static __inline boolean_t
__recv_requires_rmpp(
	IN const ib_mad_svc_type_t mad_svc_type,
	IN const ib_mad_element_t* const p_mad_element )
{
	ib_rmpp_mad_t *p_rmpp_mad;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	switch( mad_svc_type )
	{
	case IB_MAD_SVC_DEFAULT:
		/* Only subnet administration receives require RMPP. */
		return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&
			ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );

	case IB_MAD_SVC_RMPP:
		return( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );

	default:
		return FALSE;
	}
}
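/*
 * Example: an SA GetTableResp large enough to span several MADs arrives
 * with mgmt_class == IB_MCLASS_SUBN_ADM and the RMPP ACTIVE flag set, so
 * both service types above return TRUE and the segments are reassembled
 * before delivery.  A single-packet response without the ACTIVE flag is
 * handed to the user as-is.
 */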
\r
/*
 * Return TRUE if the MAD was issued by AL itself.
 */
static __inline boolean_t
__is_internal_send(
	IN const ib_mad_svc_type_t mad_svc_type,
	IN const ib_mad_element_t* const p_mad_element )
{
	ib_rmpp_mad_t *p_rmpp_mad;

	p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );

	/* See if the MAD service issues internal MADs. */
	switch( mad_svc_type )
	{
	case IB_MAD_SVC_DEFAULT:
		/* Internal sends are RMPP control MADs (any type other than DATA). */
		return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&
			(p_rmpp_mad->rmpp_type &&
			(p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA)) );

	case IB_MAD_SVC_RMPP:
		/* The RMPP header is present. Check its type. */
		return( (p_rmpp_mad->rmpp_type) &&
			(p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) );

	default:
		return FALSE;
	}
}
\r
/*
 * Fully reassemble a received MAD. Return CL_SUCCESS once all segments of
 * the MAD have been received; the fully reassembled MAD is returned
 * through pp_mad_element.
 */
static cl_status_t
__do_rmpp_recv(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN OUT ib_mad_element_t **pp_mad_element )
{
	ib_rmpp_mad_t *p_rmpp_mad;
	cl_status_t cl_status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	p_rmpp_mad = ib_get_mad_buf( *pp_mad_element );
	CL_ASSERT( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );

	/* Perform the correct operation based on the RMPP MAD type. */
	switch( p_rmpp_mad->rmpp_type )
	{
	case IB_RMPP_TYPE_DATA:
		cl_status = __process_rmpp_data( h_mad_svc, pp_mad_element );
		/* Return the received element to its MAD pool if not needed. */
		if( (cl_status != CL_SUCCESS) && (cl_status != CL_NOT_DONE) )
		{
			ib_put_mad( *pp_mad_element );
		}
		break;

	case IB_RMPP_TYPE_ACK:
		/* Process the ACK. */
		__process_rmpp_ack( h_mad_svc, *pp_mad_element );
		ib_put_mad( *pp_mad_element );
		cl_status = CL_COMPLETED;
		break;

	case IB_RMPP_TYPE_STOP:
	case IB_RMPP_TYPE_ABORT:
	default:
		/* Process the ABORT or STOP. */
		__process_rmpp_nack( h_mad_svc, *pp_mad_element );
		ib_put_mad( *pp_mad_element );
		cl_status = CL_REJECT;
		break;
	}

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return cl_status;
}
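/*
 * Summary of the dispatch above:
 *   DATA       -> __process_rmpp_data(); CL_SUCCESS means fully
 *                 reassembled, CL_NOT_DONE means more segments expected.
 *   ACK        -> __process_rmpp_ack(); element returned, CL_COMPLETED.
 *   STOP/ABORT -> __process_rmpp_nack(); element returned, CL_REJECT.
 * Only CL_SUCCESS causes the caller, __mad_svc_recv_done(), to report a
 * receive to the user.
 */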
\r
/*
 * Process an RMPP DATA message. Reassemble the received data. If the
 * received MAD is fully reassembled, this call returns CL_SUCCESS.
 */
static cl_status_t
__process_rmpp_data(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN OUT ib_mad_element_t **pp_mad_element )
{
	ib_mad_element_t *p_rmpp_resp_mad = NULL;
	al_mad_rmpp_t *p_rmpp;
	ib_rmpp_mad_t *p_rmpp_hdr;
	uint32_t cur_seg;
	cl_status_t cl_status;
	ib_api_status_t status;

	p_rmpp_hdr = ib_get_mad_buf( *pp_mad_element );
	CL_ASSERT( p_rmpp_hdr->rmpp_type == IB_RMPP_TYPE_DATA );

	/* Try to find a receive already being reassembled. */
	cl_spinlock_acquire( &h_mad_svc->obj.lock );
	p_rmpp = __find_rmpp( h_mad_svc, *pp_mad_element );
	if( !p_rmpp )
	{
		/* This receive is not being reassembled. It should be the first
		 * segment. */
		if( cl_ntoh32( p_rmpp_hdr->seg_num ) != 1 )
		{
			cl_spinlock_release( &h_mad_svc->obj.lock );
			return CL_NOT_FOUND;
		}

		/* Start tracking the new reassembly. */
		p_rmpp = __get_mad_rmpp( h_mad_svc, *pp_mad_element );
		if( !p_rmpp )
		{
			cl_spinlock_release( &h_mad_svc->obj.lock );
			return CL_INSUFFICIENT_MEMORY;
		}
	}

	/* Verify that we just received the expected segment. */
	cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );
	if( cur_seg == p_rmpp->expected_seg )
	{
		/* Copy the new segment's data into our reassembly buffer. */
		cl_status = __process_segment( h_mad_svc, p_rmpp,
			pp_mad_element, &p_rmpp_resp_mad );

		/* See if the reassembly is done. */
		if( cl_status == CL_SUCCESS )
		{
			/* Stop tracking the reassembly. */
			__put_mad_rmpp( h_mad_svc, p_rmpp );
		}
		else if( cl_status == CL_NOT_DONE )
		{
			/* Start the reassembly timer. */
			cl_timer_trim( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );
		}
	}
	else if( cur_seg < p_rmpp->expected_seg )
	{
		/* We received an old segment. Resend the last ACK. */
		p_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );
		cl_status = CL_DUPLICATE;
	}
	else
	{
		/* The sender is confused; ignore this MAD. We could ABORT here. */
		cl_status = CL_OVERRUN;
	}
	cl_spinlock_release( &h_mad_svc->obj.lock );

	/*
	 * Send any response MAD (ACK, ABORT, etc.) to the sender. Note that
	 * we are currently in the callback from the MAD dispatcher. The
	 * dispatcher holds a reference on the MAD service while in the callback,
	 * preventing the MAD service from being destroyed. This allows the
	 * call to ib_send_mad() to proceed even if the user tries to destroy
	 * the MAD service.
	 */
	if( p_rmpp_resp_mad )
	{
		status = ib_send_mad( h_mad_svc, p_rmpp_resp_mad, NULL );
		if( status != IB_SUCCESS )
		{
			/* Return the MAD. The response is treated as dropped. */
			ib_put_mad( p_rmpp_resp_mad );
		}
	}

	return cl_status;
}
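/*
 * Worked example of the reassembly flow above, assuming the initial
 * window of one segment set up by __get_mad_rmpp():
 *   1. Segment 1 arrives; no tracking entry matches, and seg_num == 1, so
 *      a new al_mad_rmpp_t is created with expected_seg = seg_limit = 1.
 *   2. __process_segment() copies the data, advances expected_seg to 2,
 *      grows the window by AL_RMPP_WINDOW, and hands back an ACK to send.
 *   3. Each in-order segment repeats step 2.  A duplicate (seg_num <
 *      expected_seg) only triggers a resend of the last ACK with status
 *      CL_DUPLICATE; a segment beyond expected_seg is dropped with
 *      CL_OVERRUN.
 *   4. The segment flagged LAST yields CL_SUCCESS; the tracking entry is
 *      released and the reassembled element is reported to the user.
 */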
\r
/*
 * Locate an existing RMPP MAD being reassembled. Return NULL if one is not
 * found. This call assumes access to the recv_list is synchronized.
 */
static al_mad_rmpp_t*
__find_rmpp(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN OUT ib_mad_element_t *p_mad_element )
{
	al_mad_rmpp_t *p_rmpp;
	cl_list_item_t *p_list_item;
	ib_mad_t *p_mad_hdr, *p_mad_hdr2;
	ib_mad_element_t *p_mad_element2;

	p_mad_hdr = ib_get_mad_buf( p_mad_element );

	/* Search all MADs being reassembled. */
	for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );
		 p_list_item != cl_qlist_end( &h_mad_svc->recv_list );
		 p_list_item = cl_qlist_next( p_list_item ) )
	{
		p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );

		p_mad_element2 = p_rmpp->p_mad_element;
		p_mad_hdr2 = ib_get_mad_buf( p_mad_element2 );

		/* See if the incoming MAD matches the reassembly in progress. */
		if( (p_mad_hdr->trans_id == p_mad_hdr2->trans_id) &&
			(p_mad_hdr->class_ver == p_mad_hdr2->class_ver) &&
			(p_mad_hdr->mgmt_class == p_mad_hdr2->mgmt_class) &&
			(p_mad_hdr->method == p_mad_hdr2->method) &&
			(p_mad_element->remote_lid == p_mad_element2->remote_lid) &&
			(p_mad_element->remote_qp == p_mad_element2->remote_qp) )
		{
			return p_rmpp;
		}
	}

	return NULL;
}
\r
/*
 * Acquire a new RMPP tracking structure. This call assumes access to
 * the recv_list is synchronized.
 */
static al_mad_rmpp_t*
__get_mad_rmpp(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN ib_mad_element_t *p_mad_element )
{
	al_mad_rmpp_t *p_rmpp;
	al_mad_element_t *p_al_element;

	p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );

	/* Get an RMPP tracking structure. */
	p_rmpp = get_mad_rmpp( p_al_element );
	if( !p_rmpp )
		return NULL;

	/* Initialize the tracking information. */
	p_rmpp->expected_seg = 1;
	p_rmpp->seg_limit = 1;
	p_rmpp->inactive = FALSE;
	p_rmpp->p_mad_element = p_mad_element;

	/* Insert the tracking structure into the reassembly list. */
	cl_qlist_insert_tail( &h_mad_svc->recv_list,
		(cl_list_item_t*)&p_rmpp->pool_item );

	return p_rmpp;
}
\r
/*
 * Return the RMPP tracking structure to its pool. This call assumes
 * access to the recv_list is synchronized.
 */
static void
__put_mad_rmpp(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN al_mad_rmpp_t *p_rmpp )
{
	/* Remove the tracking structure from the reassembly list. */
	cl_qlist_remove_item( &h_mad_svc->recv_list,
		(cl_list_item_t*)&p_rmpp->pool_item );

	/* Return the RMPP tracking structure to its pool. */
	put_mad_rmpp( p_rmpp );
}
\r
/*
 * Process a received RMPP segment. Copy the data into our receive buffer,
 * update the expected segment, and send an ACK if needed.
 */
static cl_status_t
__process_segment(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN al_mad_rmpp_t *p_rmpp,
	IN OUT ib_mad_element_t **pp_mad_element,
	OUT ib_mad_element_t **pp_rmpp_resp_mad )
{
	ib_rmpp_mad_t *p_rmpp_hdr;
	uint32_t cur_seg, paylen;
	ib_api_status_t status;
	cl_status_t cl_status;
	uint8_t *p_dst_seg, *p_src_seg;

	CL_ASSERT( h_mad_svc && p_rmpp && pp_mad_element && *pp_mad_element );

	p_rmpp_hdr = (ib_rmpp_mad_t*)(*pp_mad_element)->p_mad_buf;
	cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );
	CL_ASSERT( cur_seg == p_rmpp->expected_seg );
	CL_ASSERT( cur_seg <= p_rmpp->seg_limit );

	/* See if the receive has been fully reassembled. */
	if( ib_rmpp_is_flag_set( p_rmpp_hdr, IB_RMPP_FLAG_LAST ) )
		cl_status = CL_SUCCESS;
	else
		cl_status = CL_NOT_DONE;

	/* Save the payload length for later use. */
	paylen = cl_ntoh32( p_rmpp_hdr->paylen_newwin );

	/*
	 * The first segment's element anchors the reassembly; later segments
	 * are copied into it and released.
	 */
	if( *pp_mad_element != p_rmpp->p_mad_element )
	{
		/* SA MADs carry a larger header, so the data offset differs. */
		if( (*pp_mad_element)->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
		{
			/* Copy the received data into our reassembly buffer. */
			p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +
				IB_SA_MAD_HDR_SIZE;
			p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +
				IB_SA_MAD_HDR_SIZE + IB_SA_DATA_SIZE * (cur_seg - 1);
			cl_memcpy( p_dst_seg, p_src_seg, IB_SA_DATA_SIZE );
		}
		else
		{
			/* Copy the received data into our reassembly buffer. */
			p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +
				MAD_RMPP_HDR_SIZE;
			p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +
				MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1);
			cl_memcpy( p_dst_seg, p_src_seg, MAD_RMPP_DATA_SIZE );
		}

		/* This MAD is no longer needed. */
		ib_put_mad( *pp_mad_element );
	}

	/* Update the size of the MAD if this is the last segment. */
	if( cl_status == CL_SUCCESS )
	{
		if( p_rmpp->p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
		{
			/*
			 * Note that we receive one extra SA header size in the paylen,
			 * so we only add the RMPP header size of the first segment.
			 */
			p_rmpp->p_mad_element->size =
				MAD_RMPP_HDR_SIZE + IB_SA_DATA_SIZE * (cur_seg - 1) + paylen;
		}
		else
		{
			p_rmpp->p_mad_element->size =
				MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1) + paylen;
		}
	}

	/*
	 * We are ready to accept the next segment. We increment the expected
	 * segment even if we're done, so that ACKs correctly report the last
	 * segment.
	 */
	p_rmpp->expected_seg++;

	/* Mark the RMPP as inactive if we're destroying the MAD service. */
	p_rmpp->inactive = (h_mad_svc->obj.state == CL_DESTROYING);

	/* See if the window is exhausted or the receive is fully reassembled. */
	if( cl_status == CL_NOT_DONE && cur_seg == p_rmpp->seg_limit )
	{
		/* Allocate more segments for the incoming receive. */
		status = al_resize_mad( p_rmpp->p_mad_element,
			p_rmpp->p_mad_element->size + AL_RMPP_WINDOW * MAD_RMPP_DATA_SIZE );

		/* If we couldn't allocate a new buffer, just drop the MAD. */
		if( status == IB_SUCCESS )
		{
			/* Send an ACK indicating that more space is available. */
			p_rmpp->seg_limit += AL_RMPP_WINDOW;
			*pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );
		}
	}
	else if( cl_status == CL_SUCCESS )
	{
		/* Return the element referencing the reassembled MAD. */
		*pp_mad_element = p_rmpp->p_mad_element;
		*pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );
	}

	return cl_status;
}
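/*
 * Example of the size arithmetic above for the non-SA branch, assuming
 * the conventional sizes from ib_types.h (MAD_BLOCK_SIZE = 256, so
 * MAD_RMPP_HDR_SIZE = 36 and MAD_RMPP_DATA_SIZE = 220): a transfer of
 * three segments whose last DATA MAD reports a paylen of 100 bytes
 * reassembles to
 *     size = 36 + 220 * (3 - 1) + 100 = 576 bytes,
 * matching the buffer layout of one 36-byte header followed by the
 * concatenated segment payloads.
 */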
\r
/*
 * Get an ACK message to return to the sender of an RMPP MAD.
 */
static ib_mad_element_t*
__get_rmpp_ack(
	IN al_mad_rmpp_t *p_rmpp )
{
	ib_mad_element_t *p_mad_element;
	al_mad_element_t *p_al_element;
	ib_api_status_t status;
	ib_rmpp_mad_t *p_ack_rmpp_hdr, *p_data_rmpp_hdr;

	/* Get a MAD to carry the ACK. */
	p_al_element = PARENT_STRUCT( p_rmpp->p_mad_element,
		al_mad_element_t, element );
	status = ib_get_mad( p_al_element->pool_key, MAD_BLOCK_SIZE,
		&p_mad_element );
	if( status != IB_SUCCESS )
	{
		/* Just return. The ACK will be treated as being dropped. */
		return NULL;
	}

	/* Format the ACK. */
	p_ack_rmpp_hdr = ib_get_mad_buf( p_mad_element );
	p_data_rmpp_hdr = ib_get_mad_buf( p_rmpp->p_mad_element );

	__init_reply_element( p_mad_element, p_rmpp->p_mad_element );

	/* Copy the MAD common header. */
	cl_memcpy( &p_ack_rmpp_hdr->common_hdr, &p_data_rmpp_hdr->common_hdr,
		sizeof( ib_mad_t ) );

	/* Flip the response bit in the method. */
	p_ack_rmpp_hdr->common_hdr.method ^= IB_MAD_METHOD_RESP_MASK;

	p_ack_rmpp_hdr->rmpp_version = p_data_rmpp_hdr->rmpp_version;
	p_ack_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_ACK;
	ib_rmpp_set_resp_time( p_ack_rmpp_hdr, IB_RMPP_NO_RESP_TIME );
	p_ack_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_ACTIVE;
	p_ack_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;

	/* ACK the last segment received and advertise the current window. */
	p_ack_rmpp_hdr->seg_num = cl_hton32( p_rmpp->expected_seg - 1 );

	if( p_rmpp->seg_limit == p_rmpp->expected_seg - 1 )
		p_ack_rmpp_hdr->paylen_newwin = cl_hton32( 1 + p_rmpp->seg_limit );
	else
		p_ack_rmpp_hdr->paylen_newwin = cl_hton32( p_rmpp->seg_limit );

	return p_mad_element;
}
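/*
 * Example: if segments 1..3 have been received (expected_seg == 4) and
 * the window allows 16 segments (seg_limit == 16), the ACK built above
 * carries seg_num = 3 and paylen_newwin = 16: everything through segment
 * 3 arrived, and the sender may transmit through segment 16.  The special
 * case where seg_limit == expected_seg - 1 advertises one extra segment
 * so that the sender is not presented with a closed window.
 */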
\r
/*
 * Copy necessary data between MAD elements to allow the destination
 * element to be sent to the sender of the source element.
 */
static void
__init_reply_element(
	IN ib_mad_element_t *p_dst_element,
	IN ib_mad_element_t *p_src_element )
{
	p_dst_element->remote_qp = p_src_element->remote_qp;
	p_dst_element->remote_qkey = p_src_element->remote_qkey;

	if( p_src_element->grh_valid )
	{
		p_dst_element->grh_valid = p_src_element->grh_valid;
		cl_memcpy( p_dst_element->p_grh, p_src_element->p_grh,
			sizeof( ib_grh_t ) );
	}

	p_dst_element->remote_lid = p_src_element->remote_lid;
	p_dst_element->remote_sl = p_src_element->remote_sl;
	p_dst_element->pkey_index = p_src_element->pkey_index;
	p_dst_element->path_bits = p_src_element->path_bits;
}
\r
/*
 * Process an RMPP ACK message. Continue sending additional segments.
 */
static void
__process_rmpp_ack(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN ib_mad_element_t *p_mad_element )
{
	ib_mad_send_handle_t h_send;
	ib_rmpp_mad_t *p_rmpp_mad;
	boolean_t send_done = FALSE;
	ib_wc_status_t wc_status = IB_WCS_SUCCESS;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );

	/*
	 * Search for the send. The send may have timed out, been canceled,
	 * or received a response.
	 */
	cl_spinlock_acquire( &h_mad_svc->obj.lock );
	h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );
	if( !h_send )
	{
		cl_spinlock_release( &h_mad_svc->obj.lock );
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("ACK cannot find a matching send\n") );
		return;
	}

	/* Drop old ACKs. */
	if( cl_ntoh32( p_rmpp_mad->seg_num ) < h_send->ack_seg )
	{
		cl_spinlock_release( &h_mad_svc->obj.lock );
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("old ACK - being dropped\n") );
		return;
	}

	/* Update the acknowledged segment and segment limit. */
	h_send->ack_seg = cl_ntoh32( p_rmpp_mad->seg_num );

	/* Keep seg_limit <= total_seg to simplify checks. */
	if( cl_ntoh32( p_rmpp_mad->paylen_newwin ) > h_send->total_seg )
		h_send->seg_limit = h_send->total_seg;
	else
		h_send->seg_limit = cl_ntoh32( p_rmpp_mad->paylen_newwin );

	/* Reset the current segment to start resending from the ACK. */
	h_send->cur_seg = h_send->ack_seg + 1;

	/* If the send is active, we will finish processing it once it completes. */
	if( h_send->retry_time == MAX_TIME )
	{
		cl_spinlock_release( &h_mad_svc->obj.lock );
		CL_TRACE_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl,
			("ACK processed, waiting for send to complete\n") );
		return;
	}

	/*
	 * Complete the send if all segments have been ack'ed and no
	 * response is expected. (If the response for a send had already been
	 * received, we would have reported the completion regardless of the
	 * send having been ack'ed.)
	 */
	CL_ASSERT( !h_send->p_send_mad->resp_expected || !h_send->p_resp_mad );
	if( (h_send->ack_seg == h_send->total_seg) &&
		!h_send->p_send_mad->resp_expected )
	{
		/* The send is done. All segments have been ack'ed. */
		send_done = TRUE;
	}
	else if( h_send->ack_seg < h_send->seg_limit )
	{
		/* Send the next segment. */
		__queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );
	}

	if( send_done )
	{
		/* Notify the user of a send completion or error. */
		cl_qlist_remove_item( &h_mad_svc->send_list,
			(cl_list_item_t*)&h_send->pool_item );
		cl_spinlock_release( &h_mad_svc->obj.lock );
		__notify_send_comp( h_mad_svc, h_send, wc_status );
	}
	else
	{
		/* Continue waiting for a response or a larger send window. */
		cl_spinlock_release( &h_mad_svc->obj.lock );
	}

	/*
	 * Resume any sends that can now be sent without holding
	 * the MAD service lock.
	 */
	__mad_disp_resume_send( h_mad_svc->h_mad_reg );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
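/*
 * Worked example of the window update above: a 10-segment send with
 * ack_seg == 2 receives an ACK carrying seg_num = 5 and paylen_newwin = 8.
 * ack_seg becomes 5, seg_limit becomes 8 (a paylen_newwin of 12 would be
 * clamped to total_seg == 10), and cur_seg resets to 6, so
 * __queue_rmpp_seg() transmits segments 6 through 8 and the send then
 * waits for the next ACK.
 */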
\r
/*
 * Process an RMPP STOP or ABORT message.
 */
static void
__process_rmpp_nack(
	IN ib_mad_svc_handle_t h_mad_svc,
	IN ib_mad_element_t *p_mad_element )
{
	ib_mad_send_handle_t h_send;
	ib_rmpp_mad_t *p_rmpp_mad;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );

	/* Search for the send. The send may have timed out or been canceled. */
	cl_spinlock_acquire( &h_mad_svc->obj.lock );
	h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );
	if( !h_send )
	{
		cl_spinlock_release( &h_mad_svc->obj.lock );
		CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
		return;
	}

	/* If the send is active, we will finish processing it once it completes. */
	if( h_send->retry_time == MAX_TIME )
	{
		h_send->canceled = TRUE;
		cl_spinlock_release( &h_mad_svc->obj.lock );
		CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
		return;
	}

	/* Fail the send operation. */
	cl_qlist_remove_item( &h_mad_svc->send_list,
		(cl_list_item_t*)&h_send->pool_item );
	cl_spinlock_release( &h_mad_svc->obj.lock );
	__notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
\r
static __inline void
__set_retry_time(
	IN ib_mad_send_handle_t h_send )
{
	h_send->retry_time =
		(uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000ULL +
		cl_get_time_stamp();
	h_send->delay = 0;
}
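/*
 * Example: complib timestamps are in microseconds, so a MAD sent with
 * timeout_ms = 500 and an accumulated delay of 20 ms becomes eligible for
 * retry at
 *     retry_time = (500 + 20) * 1000 = 520,000 us
 * past the current timestamp.  __check_send_queue() compares against this
 * value and converts any remainder back to milliseconds for
 * cl_timer_trim().
 */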
\r
static void
__send_timer_cb(
	IN void *context )
{
	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	__check_send_queue( (ib_mad_svc_handle_t)context );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
\r
/*
 * Check the send queue for any sends that have timed out or have been
 * canceled, and report them to the user.
 */
static void
__check_send_queue(
	IN ib_mad_svc_handle_t h_mad_svc )
{
	ib_mad_send_handle_t h_send;
	cl_list_item_t *p_list_item, *p_next_item;
	uint64_t cur_time;
	cl_qlist_t timeout_list;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	/*
	 * The timeout list is used to call the user back without
	 * holding the lock on the MAD service.
	 */
	cl_qlist_init( &timeout_list );
	cur_time = cl_get_time_stamp();

	cl_spinlock_acquire( &h_mad_svc->obj.lock );

	/* Check all outstanding sends. */
	for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );
		 p_list_item != cl_qlist_end( &h_mad_svc->send_list );
		 p_list_item = p_next_item )
	{
		p_next_item = cl_qlist_next( p_list_item );
		h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );

		/* See if the request is active. */
		if( h_send->retry_time == MAX_TIME )
		{
			/* The request is still active. */
			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl, ("active TID:0x%"PRIx64"\n",
				__get_send_tid( h_send )) );
			continue;
		}

		/* The request is not active. See if it has been canceled. */
		if( h_send->canceled )
		{
			/* The request has been canceled. */
			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("canceling TID:0x%"PRIx64"\n", __get_send_tid( h_send )) );

			h_send->p_send_mad->status = IB_WCS_CANCELED;
			cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );
			cl_qlist_insert_tail( &timeout_list, p_list_item );
			continue;
		}

		/* Skip requests that have not timed out. */
		if( cur_time < h_send->retry_time )
		{
			/* The request has not timed out. */
			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("waiting on TID:0x%"PRIx64"\n", __get_send_tid( h_send )) );

			/* Set the retry timer to the minimum needed time, in ms. */
			cl_timer_trim( &h_mad_svc->send_timer,
				((uint32_t)(h_send->retry_time - cur_time) / 1000) );
			continue;
		}

		/* See if we need to retry the send operation. */
		if( h_send->retry_cnt )
		{
			CL_TRACE( AL_DBG_MAD_SVC, g_al_dbg_lvl,
				("retrying TID:0x%"PRIx64"\n", __get_send_tid( h_send )) );

			/* Retry the send. */
			h_send->retry_time = MAX_TIME;
			h_send->retry_cnt--;

			if( h_send->uses_rmpp )
			{
				if( h_send->ack_seg < h_send->seg_limit )
				{
					/* Resend all unacknowledged segments. */
					h_send->cur_seg = h_send->ack_seg + 1;
					__queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );
				}
				else
				{
					/* The send was delivered. Continue waiting. */
					__set_retry_time( h_send );
					cl_timer_trim( &h_mad_svc->send_timer,
						((uint32_t)(h_send->retry_time - cur_time) / 1000) );
				}
			}
			else
			{
				/* The work request should already be formatted properly. */
				__mad_disp_queue_send( h_mad_svc->h_mad_reg,
					&h_send->mad_wr );
			}
			continue;
		}

		/* The request has timed out and has no retries remaining. */
		AL_TRACE( AL_DBG_MAD_SVC | AL_DBG_WARN,
			("timing out TID:0x%"PRIx64"\n", __get_send_tid( h_send )) );

		h_send->p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;
		cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );
		cl_qlist_insert_tail( &timeout_list, p_list_item );
	}
	cl_spinlock_release( &h_mad_svc->obj.lock );

	/*
	 * Resume any sends that can now be sent without holding
	 * the MAD service lock.
	 */
	__mad_disp_resume_send( h_mad_svc->h_mad_reg );

	/* Report all timed out sends to the user. */
	p_list_item = cl_qlist_remove_head( &timeout_list );
	while( p_list_item != cl_qlist_end( &timeout_list ) )
	{
		h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );

		h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
			h_send->p_send_mad );
		__cleanup_mad_send( h_mad_svc, h_send );
		p_list_item = cl_qlist_remove_head( &timeout_list );
	}
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
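/*
 * Note the two-phase pattern above: expired and canceled sends are moved
 * to a private timeout_list while the service lock is held, and the user
 * callbacks run only after the lock is released.  Calling back with the
 * lock held could deadlock if the callback re-entered the MAD service,
 * for example by canceling another outstanding MAD.
 */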
\r
static void
__recv_timer_cb(
	IN void *context )
{
	ib_mad_svc_handle_t h_mad_svc;
	al_mad_rmpp_t *p_rmpp;
	cl_list_item_t *p_list_item, *p_next_item;
	boolean_t restart_timer;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	h_mad_svc = (ib_mad_svc_handle_t)context;

	cl_spinlock_acquire( &h_mad_svc->obj.lock );

	/* Check all outstanding receives. */
	for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );
		 p_list_item != cl_qlist_end( &h_mad_svc->recv_list );
		 p_list_item = p_next_item )
	{
		p_next_item = cl_qlist_next( p_list_item );
		p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );

		/* Fail all RMPP MADs that have remained inactive. */
		if( p_rmpp->inactive )
		{
			ib_put_mad( p_rmpp->p_mad_element );
			__put_mad_rmpp( h_mad_svc, p_rmpp );
		}
		else
		{
			/* Mark the RMPP as inactive. */
			p_rmpp->inactive = TRUE;
		}
	}

	restart_timer = !cl_is_qlist_empty( &h_mad_svc->recv_list );
	cl_spinlock_release( &h_mad_svc->obj.lock );

	if( restart_timer )
		cl_timer_start( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );
	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
}
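/*
 * The inactive flag gives reassemblies two-pass aging: an entry is marked
 * inactive on one timer pass and freed on the next, so an idle reassembly
 * survives between one and two timer periods (5 to 10 seconds with
 * AL_REASSEMBLY_TIMEOUT = 5000 ms).  Any segment accepted in between
 * clears the flag in __process_segment(), restarting the clock.
 */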
\r
ib_api_status_t
ib_local_mad(
	IN const ib_ca_handle_t h_ca,
	IN const uint8_t port_num,
	IN const void* const p_mad_in,
	IN void* p_mad_out )
{
	ib_api_status_t status;

	CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );

	if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_CA_HANDLE\n") );
		return IB_INVALID_CA_HANDLE;
	}
	if( !p_mad_in || !p_mad_out )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	status = verbs_local_mad( h_ca, port_num, p_mad_in, p_mad_out );

	CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );
	return status;
}
\r
uint32_t
al_get_user_tid(
	IN const ib_net64_t tid64 )
{
	al_tid_t al_tid;

	al_tid.tid64 = tid64;
	return( al_tid.tid32.user_tid );
}

uint32_t
al_get_al_tid(
	IN const ib_net64_t tid64 )
{
	al_tid_t al_tid;

	al_tid.tid64 = tid64;
	return( cl_ntoh32( al_tid.tid32.al_tid ) );
}

void
al_set_al_tid(
	IN ib_net64_t* const p_tid64,
	IN const uint32_t tid32 )
{
	al_tid_t *p_al_tid;

	p_al_tid = (al_tid_t*)p_tid64;

	if( tid32 )
	{
		/* The AL portion of the TID must not already be in use. */
		CL_ASSERT( !p_al_tid->tid32.al_tid );
	}

	p_al_tid->tid32.al_tid = cl_hton32( tid32 );
}
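/*
 * Example of the TID layout assumed by the helpers above: al_tid_t
 * overlays the 64-bit wire TID with two 32-bit halves, one owned by the
 * client (user_tid) and one reserved by AL (al_tid) to disambiguate its
 * own transactions.  al_set_al_tid() asserts that it never overwrites a
 * nonzero AL half.  Exactly which half of the wire TID each field maps to
 * is defined by al_tid_t in the AL headers.
 */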
\r