2 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
\r
3 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
\r
5 * This software is available to you under the OpenIB.org BSD license
\r
8 * Redistribution and use in source and binary forms, with or
\r
9 * without modification, are permitted provided that the following
\r
10 * conditions are met:
\r
12 * - Redistributions of source code must retain the above
\r
13 * copyright notice, this list of conditions and the following
\r
16 * - Redistributions in binary form must reproduce the above
\r
17 * copyright notice, this list of conditions and the following
\r
18 * disclaimer in the documentation and/or other materials
\r
19 * provided with the distribution.
\r
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
\r
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
\r
25 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
\r
26 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
\r
27 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
\r
33 #include <iba/ib_al.h>
\r
34 #include <complib/cl_byteswap.h>
\r
35 #include <complib/cl_timer.h>
\r
38 #include "al_debug.h"
\r
40 #if defined(EVENT_TRACING)
\r
44 #include "al_mad.tmh"
\r
50 #include "al_res_mgr.h"
\r
51 #include "al_verbs.h"
\r
53 #include "ib_common.h"
\r
56 #define MAX_TIME CL_CONST64(0xFFFFFFFFFFFFFFFF)
\r
57 #define MAD_VECTOR_SIZE 8
\r
58 #define MAX_METHOD 127
\r
59 #define DEFAULT_RMPP_VERSION 1
\r
61 #define AL_RMPP_WINDOW 16 /* Max size of RMPP window */
\r
62 #define AL_REASSEMBLY_TIMEOUT 5000 /* 5 seconds */
\r
66 IN al_obj_t *p_obj );
\r
70 IN al_obj_t *p_obj );
\r
74 IN void* const p_element,
\r
78 __init_version_entry(
\r
79 IN void* const p_element,
\r
83 __destroy_version_entry(
\r
84 IN void* const p_element,
\r
89 IN void* const p_element,
\r
93 __destroy_class_entry(
\r
94 IN void* const p_element,
\r
97 static __inline uint8_t
\r
99 IN const uint8_t mgmt_class );
\r
101 static __inline uint8_t
\r
102 __mgmt_version_index(
\r
103 IN const uint8_t mgmt_version );
\r
106 __mad_disp_reg_unsol(
\r
107 IN const al_mad_disp_handle_t h_mad_disp,
\r
108 IN const al_mad_reg_handle_t h_mad_reg,
\r
109 IN const ib_mad_svc_t *p_mad_svc );
\r
113 IN const ib_mad_t* const p_mad_hdr,
\r
114 IN const boolean_t are_we_sender );
\r
117 * Issue a send request to the MAD dispatcher.
\r
120 __mad_disp_queue_send(
\r
121 IN const al_mad_reg_handle_t h_mad_reg,
\r
122 IN al_mad_wr_t* const p_mad_wr );
\r
125 __mad_disp_resume_send(
\r
126 IN const al_mad_reg_handle_t h_mad_reg );
\r
129 __destroying_mad_svc(
\r
130 IN struct _al_obj *p_obj );
\r
134 IN struct _al_obj *p_obj );
\r
138 IN void *context );
\r
141 __check_send_queue(
\r
142 IN ib_mad_svc_handle_t h_mad_svc );
\r
146 IN void *context );
\r
148 static ib_api_status_t
\r
150 IN ib_mad_svc_handle_t h_mad_svc,
\r
151 IN const ib_mad_send_handle_t h_send,
\r
152 IN ib_mad_element_t* const p_mad_element );
\r
155 __does_send_req_rmpp(
\r
156 IN const ib_mad_svc_type_t mad_svc_type,
\r
157 IN const ib_mad_element_t* const p_mad_element,
\r
158 OUT uint8_t *p_rmpp_version );
\r
162 IN const al_mad_reg_handle_t h_mad_reg,
\r
163 IN const ib_mad_send_handle_t h_send );
\r
167 IN const al_mad_reg_handle_t h_mad_reg,
\r
168 IN ib_mad_send_handle_t h_send );
\r
170 static ib_api_status_t
\r
172 IN ib_mad_svc_handle_t h_mad_svc,
\r
173 IN ib_mad_send_handle_t h_send );
\r
176 __cleanup_mad_send(
\r
177 IN ib_mad_svc_handle_t h_mad_svc,
\r
178 IN ib_mad_send_handle_t h_send );
\r
180 static __inline void
\r
182 IN ib_mad_send_handle_t h_send );
\r
185 __mad_svc_send_done(
\r
186 IN ib_mad_svc_handle_t h_mad_svc,
\r
187 IN al_mad_wr_t *p_mad_wr,
\r
188 IN ib_wc_t *p_wc );
\r
191 __is_send_mad_done(
\r
192 IN ib_mad_send_handle_t h_send,
\r
193 IN ib_wc_t *p_wc );
\r
196 __notify_send_comp(
\r
197 IN ib_mad_svc_handle_t h_mad_svc,
\r
198 IN ib_mad_send_handle_t h_send,
\r
199 IN ib_wc_status_t wc_status );
\r
202 __mad_svc_recv_done(
\r
203 IN ib_mad_svc_handle_t h_mad_svc,
\r
204 IN ib_mad_element_t *p_mad_element );
\r
207 __process_recv_resp(
\r
208 IN ib_mad_svc_handle_t h_mad_svc,
\r
209 IN ib_mad_element_t *p_mad_element );
\r
213 IN ib_mad_svc_handle_t h_mad_svc,
\r
214 IN OUT ib_mad_element_t **pp_mad_element );
\r
216 static __inline boolean_t
\r
217 __recv_requires_rmpp(
\r
218 IN const ib_mad_svc_type_t mad_svc_type,
\r
219 IN const ib_mad_element_t* const p_mad_element );
\r
221 static __inline boolean_t
\r
222 __is_internal_send(
\r
223 IN const ib_mad_svc_type_t mad_svc_type,
\r
224 IN const ib_mad_element_t* const p_mad_element );
\r
227 __process_rmpp_data(
\r
228 IN ib_mad_svc_handle_t h_mad_svc,
\r
229 IN OUT ib_mad_element_t **pp_mad_element );
\r
232 __process_rmpp_ack(
\r
233 IN ib_mad_svc_handle_t h_mad_svc,
\r
234 IN ib_mad_element_t *p_mad_element );
\r
237 __process_rmpp_nack(
\r
238 IN ib_mad_svc_handle_t h_mad_svc,
\r
239 IN ib_mad_element_t *p_mad_element );
\r
243 IN ib_mad_svc_handle_t h_mad_svc,
\r
244 IN al_mad_rmpp_t *p_rmpp,
\r
245 IN OUT ib_mad_element_t **pp_mad_element,
\r
246 OUT ib_mad_element_t **pp_rmpp_resp_mad );
\r
248 static al_mad_rmpp_t*
\r
250 IN ib_mad_svc_handle_t h_mad_svc,
\r
251 IN OUT ib_mad_element_t *p_mad_element );
\r
253 static al_mad_rmpp_t*
\r
255 IN ib_mad_svc_handle_t h_mad_svc,
\r
256 IN ib_mad_element_t *p_mad_element );
\r
260 IN ib_mad_svc_handle_t h_mad_svc,
\r
261 IN al_mad_rmpp_t *p_rmpp );
\r
264 __init_reply_element(
\r
265 IN ib_mad_element_t *p_dst_element,
\r
266 IN ib_mad_element_t *p_src_element );
\r
268 static ib_mad_element_t*
\r
270 IN al_mad_rmpp_t *p_rmpp );
\r
274 IN ib_mad_send_handle_t h_send )
\r
276 return ((ib_mad_t*)ib_get_mad_buf( h_send->p_send_mad ))->trans_id;
\r
281 get_mad_hdr_from_wr(
\r
282 IN al_mad_wr_t* const p_mad_wr )
\r
284 ib_mad_send_handle_t h_send;
\r
286 CL_ASSERT( p_mad_wr );
\r
288 h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
\r
289 return h_send->p_send_mad->p_mad_buf;
\r
295 * Construct a MAD element from a receive work completion.
\r
299 IN ib_mad_element_t* p_mad_element,
\r
302 AL_ENTER( AL_DBG_SMI );
\r
304 CL_ASSERT( p_mad_element );
\r
307 /* Build the MAD element from the work completion. */
\r
308 p_mad_element->status = p_wc->status;
\r
309 p_mad_element->remote_qp = p_wc->recv.ud.remote_qp;
\r
312 * We assume all communicating managers using MAD services use
\r
317 * Mellanox workaround:
\r
318 * The Q_KEY from the QP context must be used if the high bit is
\r
319 * set in the Q_KEY part of the work request. See section 10.2.5
\r
320 * on Q_KEYS Compliance Statement C10-15.
\r
321 * This must be enabled to permit future non special QP's to have
\r
322 * MAD level service capability. To use SAR in a generic way.
\r
326 * p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;
\r
329 p_mad_element->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
\r
330 p_mad_element->remote_lid = p_wc->recv.ud.remote_lid;
\r
331 p_mad_element->remote_sl = p_wc->recv.ud.remote_sl;
\r
332 p_mad_element->pkey_index = p_wc->recv.ud.pkey_index;
\r
333 p_mad_element->path_bits = p_wc->recv.ud.path_bits;
\r
334 p_mad_element->recv_opt = p_wc->recv.ud.recv_opt;
\r
336 p_mad_element->grh_valid = p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID;
\r
338 if( p_wc->recv.ud.recv_opt & IB_RECV_OPT_IMMEDIATE )
\r
339 p_mad_element->immediate_data = p_wc->recv.ud.immediate_data;
\r
341 AL_EXIT( AL_DBG_SMI );
\r
355 IN al_obj_t* const p_parent_obj,
\r
356 IN const ib_qp_handle_t h_qp,
\r
357 IN al_mad_disp_handle_t* const ph_mad_disp )
\r
359 al_mad_disp_handle_t h_mad_disp;
\r
360 ib_api_status_t status;
\r
361 cl_status_t cl_status;
\r
363 AL_ENTER( AL_DBG_MAD_SVC );
\r
364 h_mad_disp = cl_zalloc( sizeof( al_mad_disp_t ) );
\r
367 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("insufficient memory\n") );
\r
368 return IB_INSUFFICIENT_MEMORY;
\r
371 /* Initialize the MAD dispatcher. */
\r
372 cl_vector_construct( &h_mad_disp->client_vector );
\r
373 cl_vector_construct( &h_mad_disp->version_vector );
\r
374 construct_al_obj( &h_mad_disp->obj, AL_OBJ_TYPE_MAD_DISP );
\r
375 status = init_al_obj( &h_mad_disp->obj, h_mad_disp, TRUE,
\r
376 NULL, __cleanup_mad_disp, __free_mad_disp );
\r
377 if( status != IB_SUCCESS )
\r
379 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("init obj: %s\n",
\r
380 ib_get_err_str(status)) );
\r
381 __free_mad_disp( &h_mad_disp->obj );
\r
384 status = attach_al_obj( p_parent_obj, &h_mad_disp->obj );
\r
385 if( status != IB_SUCCESS )
\r
387 h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
\r
388 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
389 ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
\r
393 /* Obtain a reference to the QP to post sends to. */
\r
394 h_mad_disp->h_qp = h_qp;
\r
395 ref_al_obj( &h_qp->obj );
\r
397 /* Create the client vector. */
\r
398 cl_status = cl_vector_init( &h_mad_disp->client_vector, 1, MAD_VECTOR_SIZE,
\r
399 sizeof( al_mad_disp_reg_t ), __init_mad_reg, NULL, h_mad_disp );
\r
400 if( cl_status != CL_SUCCESS )
\r
402 h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
\r
403 return ib_convert_cl_status( cl_status );
\r
406 /* Create the version vector. */
\r
407 cl_status = cl_vector_init( &h_mad_disp->version_vector,
\r
408 1, 1, sizeof( cl_vector_t ), __init_version_entry,
\r
409 __destroy_version_entry, &h_mad_disp->version_vector );
\r
410 if( cl_status != CL_SUCCESS )
\r
412 h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL );
\r
413 return ib_convert_cl_status( cl_status );
\r
416 *ph_mad_disp = h_mad_disp;
\r
418 /* Release the reference taken in init_al_obj. */
\r
419 deref_al_obj( &h_mad_disp->obj );
\r
421 AL_EXIT( AL_DBG_MAD_SVC );
\r
428 __cleanup_mad_disp(
\r
429 IN al_obj_t *p_obj )
\r
431 al_mad_disp_handle_t h_mad_disp;
\r
433 AL_ENTER( AL_DBG_MAD_SVC );
\r
434 CL_ASSERT( p_obj );
\r
435 h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );
\r
437 /* Detach from the QP that we were using. */
\r
438 if( h_mad_disp->h_qp )
\r
439 deref_al_obj( &h_mad_disp->h_qp->obj );
\r
441 AL_EXIT( AL_DBG_MAD_SVC );
\r
448 IN al_obj_t *p_obj )
\r
450 al_mad_disp_handle_t h_mad_disp;
\r
452 AL_ENTER( AL_DBG_MAD_SVC );
\r
453 CL_ASSERT( p_obj );
\r
454 h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj );
\r
456 cl_vector_destroy( &h_mad_disp->client_vector );
\r
457 cl_vector_destroy( &h_mad_disp->version_vector );
\r
458 destroy_al_obj( p_obj );
\r
459 cl_free( h_mad_disp );
\r
460 AL_EXIT( AL_DBG_MAD_SVC );
\r
465 static al_mad_reg_handle_t
\r
467 IN const al_mad_disp_handle_t h_mad_disp,
\r
468 IN const ib_mad_svc_handle_t h_mad_svc,
\r
469 IN const ib_mad_svc_t *p_mad_svc,
\r
470 IN const pfn_mad_svc_send_done_t pfn_send_done,
\r
471 IN const pfn_mad_svc_recv_done_t pfn_recv_done )
\r
473 al_mad_reg_handle_t h_mad_reg;
\r
475 cl_status_t cl_status;
\r
477 AL_ENTER( AL_DBG_MAD_SVC );
\r
478 cl_spinlock_acquire( &h_mad_disp->obj.lock );
\r
480 /* Find an empty slot in the client vector for the registration. */
\r
481 for( i = 0; i < cl_vector_get_size( &h_mad_disp->client_vector ); i++ )
\r
483 h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );
\r
484 if( !h_mad_reg->ref_cnt )
\r
487 /* Trap for ClientID overflow. */
\r
488 if( i >= 0xFFFFFFFF )
\r
490 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
493 cl_status = cl_vector_set_min_size( &h_mad_disp->client_vector, i+1 );
\r
494 if( cl_status != CL_SUCCESS )
\r
496 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
499 h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i );
\r
501 /* Record the registration. */
\r
502 h_mad_reg->client_id = (uint32_t)i;
\r
503 h_mad_reg->support_unsol = p_mad_svc->support_unsol;
\r
504 h_mad_reg->mgmt_class = p_mad_svc->mgmt_class;
\r
505 h_mad_reg->mgmt_version = p_mad_svc->mgmt_version;
\r
506 h_mad_reg->pfn_recv_done = pfn_recv_done;
\r
507 h_mad_reg->pfn_send_done = pfn_send_done;
\r
509 /* If the client requires support for unsolicited MADs, add tracking. */
\r
510 if( p_mad_svc->support_unsol )
\r
512 if( !__mad_disp_reg_unsol( h_mad_disp, h_mad_reg, p_mad_svc ) )
\r
514 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
515 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("reg unsol failed\n") );
\r
520 /* Record that the registration was successful. */
\r
521 h_mad_reg->h_mad_svc = h_mad_svc;
\r
522 h_mad_reg->ref_cnt = 1;
\r
523 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
525 /* The MAD service needs to take a reference on the dispatcher. */
\r
526 ref_al_obj( &h_mad_disp->obj );
\r
528 AL_EXIT( AL_DBG_MAD_SVC );
\r
535 IN void* const p_element,
\r
538 al_mad_reg_handle_t h_mad_reg;
\r
540 /* Record the MAD dispatcher for the registration structure. */
\r
541 h_mad_reg = p_element;
\r
542 h_mad_reg->h_mad_disp = context;
\r
543 h_mad_reg->ref_cnt = 0;
\r
550 * Initialize an entry in the version vector. Each entry is a vector of
\r
554 __init_version_entry(
\r
555 IN void* const p_element,
\r
558 cl_vector_t *p_vector;
\r
560 p_vector = p_element;
\r
561 UNUSED_PARAM( context );
\r
563 cl_vector_construct( p_vector );
\r
564 return cl_vector_init( p_vector, MAD_VECTOR_SIZE, MAD_VECTOR_SIZE,
\r
565 sizeof( cl_ptr_vector_t ), __init_class_entry, __destroy_class_entry,
\r
571 __destroy_version_entry(
\r
572 IN void* const p_element,
\r
575 cl_vector_t *p_vector;
\r
577 p_vector = p_element;
\r
578 UNUSED_PARAM( context );
\r
580 cl_vector_destroy( p_vector );
\r
585 * Initialize an entry in the class vector. Each entry is a pointer vector
\r
589 __init_class_entry(
\r
590 IN void* const p_element,
\r
593 cl_ptr_vector_t *p_ptr_vector;
\r
595 p_ptr_vector = p_element;
\r
596 UNUSED_PARAM( context );
\r
598 cl_ptr_vector_construct( p_ptr_vector );
\r
599 return cl_ptr_vector_init( p_ptr_vector,
\r
600 MAD_VECTOR_SIZE, MAD_VECTOR_SIZE );
\r
605 __destroy_class_entry(
\r
606 IN void* const p_element,
\r
609 cl_ptr_vector_t *p_ptr_vector;
\r
611 p_ptr_vector = p_element;
\r
612 UNUSED_PARAM( context );
\r
614 cl_ptr_vector_destroy( p_ptr_vector );
\r
619 * Add support for unsolicited MADs for the given MAD service.
\r
622 __mad_disp_reg_unsol(
\r
623 IN const al_mad_disp_handle_t h_mad_disp,
\r
624 IN const al_mad_reg_handle_t h_mad_reg,
\r
625 IN const ib_mad_svc_t *p_mad_svc )
\r
627 cl_status_t cl_status;
\r
628 cl_vector_t *p_class_vector;
\r
629 cl_ptr_vector_t *p_method_ptr_vector;
\r
632 /* Ensure that we are ready to handle this version number. */
\r
633 AL_ENTER( AL_DBG_MAD_SVC );
\r
634 cl_status = cl_vector_set_min_size( &h_mad_disp->version_vector,
\r
635 __mgmt_version_index( p_mad_svc->mgmt_version ) + 1 );
\r
636 if( cl_status != CL_SUCCESS )
\r
639 /* Get the list of classes in use for this version. */
\r
640 p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
\r
641 __mgmt_version_index( p_mad_svc->mgmt_version ) );
\r
643 /* Ensure that we are ready to handle the specified class. */
\r
644 cl_status = cl_vector_set_min_size( p_class_vector,
\r
645 __mgmt_class_index( p_mad_svc->mgmt_class ) + 1 );
\r
646 if( cl_status != CL_SUCCESS )
\r
649 /* Get the list of methods in use for this class. */
\r
650 p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
\r
651 __mgmt_class_index( p_mad_svc->mgmt_class ) );
\r
653 /* Ensure that we can handle all requested methods. */
\r
654 for( i = MAX_METHOD - 1; i > 0; i-- )
\r
656 if( p_mad_svc->method_array[i] )
\r
658 cl_status = cl_ptr_vector_set_min_size( p_method_ptr_vector, i+1 );
\r
659 if( cl_status != CL_SUCCESS )
\r
662 /* No one else can be registered for this method. */
\r
663 if( cl_ptr_vector_get( p_method_ptr_vector, i ) )
\r
665 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
666 ("Other client already registered for Un-Solicited Method "
\r
667 "%u for version %u of class %u.\n", i, p_mad_svc->mgmt_version,
\r
668 p_mad_svc->mgmt_class ) );
\r
674 /* We can support the request. Record the methods. */
\r
675 for( i = 0; i < MAX_METHOD; i++ )
\r
677 if( p_mad_svc->method_array[i] )
\r
679 cl_ptr_vector_set( p_method_ptr_vector, i, h_mad_reg );
\r
681 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
682 ("Register version:%u (%u) class:0x%02X(%u) method:0x%02X Hdl:%016I64x\n",
\r
683 p_mad_svc->mgmt_version,
\r
684 __mgmt_version_index( p_mad_svc->mgmt_version ),
\r
685 p_mad_svc->mgmt_class,
\r
686 __mgmt_class_index( p_mad_svc->mgmt_class ),
\r
688 (LONG_PTR)h_mad_reg) );
\r
692 AL_EXIT( AL_DBG_MAD_SVC );
\r
697 static __inline uint8_t
\r
698 __mgmt_version_index(
\r
699 IN const uint8_t mgmt_version )
\r
701 return (uint8_t)(mgmt_version - 1);
\r
705 static __inline uint8_t
\r
706 __mgmt_class_index(
\r
707 IN const uint8_t mgmt_class )
\r
709 /* Map class 0x81 to 0 to remove empty class values. */
\r
710 if( mgmt_class == IB_MCLASS_SUBN_DIR )
\r
711 return IB_MCLASS_SUBN_LID;
\r
719 * Deregister a MAD service from the dispatcher.
\r
723 IN const al_mad_reg_handle_t h_mad_reg )
\r
725 al_mad_disp_handle_t h_mad_disp;
\r
726 cl_vector_t *p_class_vector;
\r
727 cl_ptr_vector_t *p_method_ptr_vector;
\r
730 AL_ENTER( AL_DBG_MAD_SVC );
\r
731 h_mad_disp = h_mad_reg->h_mad_disp;
\r
733 cl_spinlock_acquire( &h_mad_disp->obj.lock );
\r
735 if( h_mad_reg->support_unsol )
\r
737 /* Deregister the service from receiving unsolicited MADs. */
\r
738 p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
\r
739 __mgmt_version_index( h_mad_reg->mgmt_version ) );
\r
741 p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
\r
742 __mgmt_class_index( h_mad_reg->mgmt_class ) );
\r
744 /* Deregister all methods registered to the client. */
\r
745 for( i = 0; i < cl_ptr_vector_get_size( p_method_ptr_vector ); i++ )
\r
747 if( cl_ptr_vector_get( p_method_ptr_vector, i ) == h_mad_reg )
\r
749 cl_ptr_vector_set( p_method_ptr_vector, i, NULL );
\r
754 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
756 /* Decrement the reference count in the registration table. */
\r
757 cl_atomic_dec( &h_mad_reg->ref_cnt );
\r
759 /* The MAD service no longer requires access to the MAD dispatcher. */
\r
760 deref_al_obj( &h_mad_disp->obj );
\r
761 AL_EXIT( AL_DBG_MAD_SVC );
\r
767 __mad_disp_queue_send(
\r
768 IN const al_mad_reg_handle_t h_mad_reg,
\r
769 IN al_mad_wr_t* const p_mad_wr )
\r
771 ib_mad_t *p_mad_hdr;
\r
774 * Increment the reference count on the registration to ensure that
\r
775 * the MAD service does not go away until the send completes.
\r
777 AL_ENTER( AL_DBG_MAD_SVC );
\r
778 cl_atomic_inc( &h_mad_reg->ref_cnt );
\r
779 ref_al_obj( &h_mad_reg->h_mad_svc->obj );
\r
781 /* Get the MAD header. */
\r
782 p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );
\r
783 CL_ASSERT( !p_mad_wr->send_wr.wr_id );
\r
784 p_mad_wr->send_wr.wr_id = (uintn_t)p_mad_wr;
\r
787 * If we are the originator of the transaction, we need to modify the
\r
788 * TID to ensure that duplicate TIDs are not used by multiple clients.
\r
790 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("dispatching TID: 0x%I64x\n",
\r
791 p_mad_hdr->trans_id) );
\r
792 p_mad_wr->client_tid = p_mad_hdr->trans_id;
\r
793 if( __use_tid_routing( p_mad_hdr, TRUE ) )
\r
795 /* Clear the AL portion of the TID before setting. */
\r
796 ((al_tid_t*)&p_mad_hdr->trans_id)->tid32.al_tid = 0;
\r
798 #pragma warning( push, 3 )
\r
799 al_set_al_tid( &p_mad_hdr->trans_id, h_mad_reg->client_id );
\r
800 #pragma warning( pop )
\r
802 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
803 ("modified TID to: 0x%0I64x\n", p_mad_hdr->trans_id) );
\r
806 /* Post the work request to the QP. */
\r
807 p_mad_wr->client_id = h_mad_reg->client_id;
\r
808 h_mad_reg->h_mad_disp->h_qp->pfn_queue_mad(
\r
809 h_mad_reg->h_mad_disp->h_qp, p_mad_wr );
\r
811 AL_EXIT( AL_DBG_MAD_SVC );
\r
816 __mad_disp_resume_send(
\r
817 IN const al_mad_reg_handle_t h_mad_reg )
\r
819 AL_ENTER( AL_DBG_MAD_SVC );
\r
821 h_mad_reg->h_mad_disp->h_qp->pfn_resume_mad(
\r
822 h_mad_reg->h_mad_disp->h_qp );
\r
824 AL_EXIT( AL_DBG_MAD_SVC );
\r
829 * Complete a sent MAD. Route the completion to the correct MAD service.
\r
832 mad_disp_send_done(
\r
833 IN al_mad_disp_handle_t h_mad_disp,
\r
834 IN al_mad_wr_t *p_mad_wr,
\r
837 al_mad_reg_handle_t h_mad_reg;
\r
838 ib_mad_t *p_mad_hdr;
\r
840 AL_ENTER( AL_DBG_MAD_SVC );
\r
842 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
843 ("p_mad_wr 0x%016I64x\n", (LONG_PTR)p_mad_wr ) );
\r
845 /* Get the MAD header. */
\r
846 p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr );
\r
848 /* Get the MAD service that issued the send. */
\r
849 cl_spinlock_acquire( &h_mad_disp->obj.lock );
\r
850 h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
\r
851 p_mad_wr->client_id );
\r
852 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
853 CL_ASSERT( h_mad_reg && (h_mad_reg->client_id == p_mad_wr->client_id) );
\r
855 /* Reset the TID and WR ID. */
\r
856 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("send done TID: 0x%I64x\n",
\r
857 p_mad_hdr->trans_id) );
\r
858 p_mad_hdr->trans_id = p_mad_wr->client_tid;
\r
859 p_mad_wr->send_wr.wr_id = 0;
\r
861 /* Return the completed request to the MAD service. */
\r
862 CL_ASSERT( h_mad_reg->h_mad_svc );
\r
863 h_mad_reg->pfn_send_done( h_mad_reg->h_mad_svc, p_mad_wr, p_wc );
\r
865 /* The MAD service is no longer referenced once the send completes. */
\r
866 deref_al_obj( &h_mad_reg->h_mad_svc->obj );
\r
867 cl_atomic_dec( &h_mad_reg->ref_cnt );
\r
869 AL_EXIT( AL_DBG_MAD_SVC );
\r
875 * Process a received MAD. Route the completion to the correct MAD service.
\r
878 mad_disp_recv_done(
\r
879 IN al_mad_disp_handle_t h_mad_disp,
\r
880 IN ib_mad_element_t *p_mad_element )
\r
882 ib_mad_t *p_mad_hdr;
\r
883 al_mad_reg_handle_t h_mad_reg;
\r
884 ib_al_handle_t h_al;
\r
885 ib_mad_svc_handle_t h_mad_svc;
\r
887 cl_vector_t *p_class_vector;
\r
888 cl_ptr_vector_t *p_method_ptr_vector;
\r
891 AL_ENTER( AL_DBG_MAD_SVC );
\r
892 p_mad_hdr = ib_get_mad_buf( p_mad_element );
\r
894 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
897 "version = 0x%x.\n"
\r
898 "method = 0x%x.\n",
\r
899 p_mad_hdr->trans_id,
\r
900 p_mad_hdr->mgmt_class,
\r
901 p_mad_hdr->class_ver,
\r
902 p_mad_hdr->method) );
\r
904 /* Get the client to route the receive to. */
\r
905 cl_spinlock_acquire( &h_mad_disp->obj.lock );
\r
906 if( __use_tid_routing( p_mad_hdr, FALSE ) )
\r
908 /* The MAD was received in response to a send. */
\r
909 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("routing based on TID\n"));
\r
911 /* Verify that we have a registration entry. */
\r
912 if( al_get_al_tid( p_mad_hdr->trans_id ) >=
\r
913 cl_vector_get_size( &h_mad_disp->client_vector ) )
\r
915 /* No clients for this version-class-method. */
\r
916 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
917 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
\r
918 ("invalid client ID\n") );
\r
919 return IB_NOT_FOUND;
\r
922 h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
\r
923 al_get_al_tid( p_mad_hdr->trans_id ) );
\r
926 * Disable warning about passing unaligned 64-bit value.
\r
927 * The value is always aligned given how buffers are allocated
\r
928 * and given the layout of a MAD.
\r
930 #pragma warning( push, 3 )
\r
931 al_set_al_tid( &p_mad_hdr->trans_id, 0 );
\r
932 #pragma warning( pop )
\r
936 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
937 ("routing based on version, class, method\n"));
\r
939 /* The receive is unsolicited. Find the client. */
\r
940 if( __mgmt_version_index( p_mad_hdr->class_ver ) >=
\r
941 cl_vector_get_size( &h_mad_disp->version_vector ) )
\r
943 /* No clients for this version of MADs. */
\r
944 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
945 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
\r
946 ("no clients registered for this class version\n") );
\r
947 return IB_NOT_FOUND;
\r
950 /* See if we have a client for this class of MADs. */
\r
951 p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
\r
952 __mgmt_version_index( p_mad_hdr->class_ver ) );
\r
954 if( __mgmt_class_index( p_mad_hdr->mgmt_class ) >=
\r
955 cl_vector_get_size( p_class_vector ) )
\r
957 /* No clients for this version-class. */
\r
958 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
959 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
\r
960 ("no clients registered for this class\n") );
\r
961 return IB_NOT_FOUND;
\r
964 /* See if we have a client for this method. */
\r
965 p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
\r
966 __mgmt_class_index( p_mad_hdr->mgmt_class ) );
\r
967 method = (uint8_t)(p_mad_hdr->method & (~IB_MAD_METHOD_RESP_MASK));
\r
969 if( method >= cl_ptr_vector_get_size( p_method_ptr_vector ) )
\r
971 /* No clients for this version-class-method. */
\r
972 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
973 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
\r
974 ("no clients registered for this method-out of range\n") );
\r
975 return IB_NOT_FOUND;
\r
978 h_mad_reg = cl_ptr_vector_get( p_method_ptr_vector, method );
\r
981 /* No clients for this version-class-method. */
\r
982 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
983 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
\r
984 ("no clients registered for method %u of class %u(%u) version %u(%u)\n",
\r
986 p_mad_hdr->mgmt_class,
\r
987 __mgmt_class_index( p_mad_hdr->mgmt_class ),
\r
988 p_mad_hdr->class_ver,
\r
989 __mgmt_version_index( p_mad_hdr->class_ver )
\r
991 return IB_NOT_FOUND;
\r
995 /* Verify that the registration is still valid. */
\r
996 if( !h_mad_reg->ref_cnt )
\r
998 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
999 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
\r
1000 ("no client registered\n") );
\r
1001 return IB_NOT_FOUND;
\r
1004 /* Take a reference on the MAD service in case it deregisters. */
\r
1005 h_mad_svc = h_mad_reg->h_mad_svc;
\r
1006 ref_al_obj( &h_mad_svc->obj );
\r
1007 cl_spinlock_release( &h_mad_disp->obj.lock );
\r
1009 /* Handoff the MAD to the correct AL instance. */
\r
1010 h_al = qp_get_al( (ib_qp_handle_t)(h_mad_svc->obj.p_parent_obj) );
\r
1011 al_handoff_mad( h_al, p_mad_element );
\r
1013 h_mad_reg->pfn_recv_done( h_mad_svc, p_mad_element );
\r
1014 deref_al_obj( &h_mad_svc->obj );
\r
1015 AL_EXIT( AL_DBG_MAD_SVC );
\r
1016 return IB_SUCCESS;
\r
1022 * Return TRUE if we should route the MAD to the recipient based on the TID.
\r
1025 __use_tid_routing(
\r
1026 IN const ib_mad_t* const p_mad_hdr,
\r
1027 IN const boolean_t are_we_sender )
\r
1029 ib_rmpp_mad_t *p_rmpp_mad;
\r
1030 boolean_t is_orig;
\r
1032 AL_ENTER( AL_DBG_MAD_SVC );
\r
1034 /* CM MADs are never TID routed. */
\r
1035 if( p_mad_hdr->mgmt_class == IB_MCLASS_COMM_MGMT )
\r
1037 AL_EXIT( AL_DBG_MAD_SVC );
\r
1042 * Determine originator for a sent MAD. Received MADs are just the
\r
1046 /* Non-DATA RMPP MADs are handled differently. */
\r
1047 p_rmpp_mad = (ib_rmpp_mad_t*)p_mad_hdr;
\r
1048 if( (p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_ADM) &&
\r
1049 ( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) &&
\r
1050 (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) )
\r
1053 * We need to distinguish between ACKs sent after receiving
\r
1054 * a request, versus ACKs sent after receiving a response. ACKs
\r
1055 * to a request are from the responder. ACKs to a response are
\r
1056 * from the originator.
\r
1058 * Note that we assume STOP and ABORT packets are initiated by
\r
1059 * receivers. If both senders and receivers can
\r
1060 * initiate STOP and ABORT MADs, then we can't distinguish which
\r
1061 * transaction is associated with the MAD. The TID for a
\r
1062 * send and receive can be the same.
\r
1064 is_orig = !ib_mad_is_response( p_mad_hdr );
\r
1069 * See if the MAD is being sent in response to a previous MAD. If
\r
1070 * it is, then we're NOT the originator. Note that trap repress
\r
1071 * MADs are responses, even though the response bit isn't set.
\r
1073 is_orig = !( ib_mad_is_response( p_mad_hdr ) ||
\r
1074 (p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) );
\r
1077 /* If we're the receiver, toggle the result. */
\r
1078 if( !are_we_sender )
\r
1079 is_orig = !is_orig;
\r
1081 AL_EXIT( AL_DBG_MAD_SVC );
\r
1096 * Create and initialize a MAD service for use.
\r
1100 IN const ib_qp_handle_t h_qp,
\r
1101 IN const ib_mad_svc_t* const p_mad_svc,
\r
1102 OUT ib_mad_svc_handle_t* const ph_mad_svc )
\r
1104 ib_api_status_t status;
\r
1105 cl_status_t cl_status;
\r
1106 ib_mad_svc_handle_t h_mad_svc;
\r
1107 al_qp_alias_t *p_qp_alias;
\r
1108 ib_qp_attr_t qp_attr;
\r
1110 AL_ENTER( AL_DBG_MAD_SVC );
\r
1111 CL_ASSERT( h_qp );
\r
1113 switch( h_qp->type )
\r
1117 case IB_QPT_QP0_ALIAS:
\r
1118 case IB_QPT_QP1_ALIAS:
\r
1123 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1124 return IB_INVALID_PARAMETER;
\r
1127 if( !p_mad_svc || !ph_mad_svc )
\r
1129 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
1130 return IB_INVALID_PARAMETER;
\r
1133 h_mad_svc = cl_zalloc( sizeof( al_mad_svc_t) );
\r
1136 return IB_INSUFFICIENT_MEMORY;
\r
1139 /* Construct the MAD service. */
\r
1140 construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC );
\r
1141 cl_timer_construct( &h_mad_svc->send_timer );
\r
1142 cl_timer_construct( &h_mad_svc->recv_timer );
\r
1143 cl_qlist_init( &h_mad_svc->send_list );
\r
1144 cl_qlist_init( &h_mad_svc->recv_list );
\r
1146 p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp );
\r
1147 h_mad_svc->svc_type = p_mad_svc->svc_type;
\r
1148 h_mad_svc->obj.context = p_mad_svc->mad_svc_context;
\r
1149 h_mad_svc->pfn_user_recv_cb = p_mad_svc->pfn_mad_recv_cb;
\r
1150 h_mad_svc->pfn_user_send_cb = p_mad_svc->pfn_mad_send_cb;
\r
1152 /* Initialize the MAD service. */
\r
1153 status = init_al_obj( &h_mad_svc->obj, p_mad_svc->mad_svc_context,
\r
1154 TRUE, __destroying_mad_svc, __cleanup_mad_svc, free_mad_svc );
\r
1155 if( status != IB_SUCCESS )
\r
1157 free_mad_svc( &h_mad_svc->obj );
\r
1160 status = attach_al_obj( &h_qp->obj, &h_mad_svc->obj );
\r
1161 if( status != IB_SUCCESS )
\r
1163 h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
\r
1164 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
1165 ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
\r
1169 h_mad_svc->h_mad_reg = __mad_disp_reg( p_qp_alias->h_mad_disp,
\r
1170 h_mad_svc, p_mad_svc, __mad_svc_send_done, __mad_svc_recv_done );
\r
1171 if( !h_mad_svc->h_mad_reg )
\r
1173 h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
\r
1174 return IB_INSUFFICIENT_MEMORY;
\r
1177 /* Record which port this MAD service uses, to use when creating AVs. */
\r
1178 status = ib_query_qp( h_qp, &qp_attr );
\r
1179 if( status != IB_SUCCESS )
\r
1181 h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
\r
1184 h_mad_svc->h_pd = qp_attr.h_pd;
\r
1185 h_mad_svc->port_num = qp_attr.primary_port;
\r
1187 cl_status = cl_timer_init( &h_mad_svc->send_timer,
\r
1188 __send_timer_cb, h_mad_svc );
\r
1189 if( cl_status != CL_SUCCESS )
\r
1191 h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
\r
1192 return ib_convert_cl_status( cl_status );
\r
1195 cl_status = cl_timer_init( &h_mad_svc->recv_timer,
\r
1196 __recv_timer_cb, h_mad_svc );
\r
1197 if( cl_status != CL_SUCCESS )
\r
1199 h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
\r
1200 return ib_convert_cl_status( cl_status );
\r
1203 *ph_mad_svc = h_mad_svc;
\r
1205 AL_EXIT( AL_DBG_MAD_SVC );
\r
1206 return IB_SUCCESS;
\r
/*
 * __destroying_mad_svc
 *
 * Destroy-notification handler for a MAD service: detaches the service from
 * its parent QP, waits for outstanding references to drain, deregisters from
 * the MAD dispatcher, marks all queued sends canceled, and reclaims pending
 * proxy callbacks for user-mode AL (UAL).
 *
 * NOTE(review): this capture is damaged — original file line numbers are
 * fused into the text and brace/blank lines were dropped.  Comments below
 * annotate only the visible logic; restore structure from upstream al_mad.c
 * before compiling.
 */
1212 __destroying_mad_svc(
1213 IN struct _al_obj *p_obj )
1215 ib_qp_handle_t h_qp;
1216 ib_mad_svc_handle_t h_mad_svc;
1217 ib_mad_send_handle_t h_send;
1218 cl_list_item_t *p_list_item;
1219 int32_t timeout_ms;
1224 AL_ENTER( AL_DBG_MAD_SVC );
1225 CL_ASSERT( p_obj );
1226 h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );
1228 /* Deregister the MAD service. */
1229 h_qp = (ib_qp_handle_t)p_obj->p_parent_obj;
1230 if( h_qp->pfn_dereg_mad_svc )
1231 h_qp->pfn_dereg_mad_svc( h_mad_svc );
1233 /* Wait here until the MAD service is no longer in use. */
1234 timeout_ms = (int32_t)h_mad_svc->obj.timeout_ms;
1235 while( h_mad_svc->ref_cnt && timeout_ms > 0 )
1237 /* Use a timeout to avoid waiting forever - just in case. */
1238 cl_thread_suspend( 10 );
/* NOTE(review): the loop body presumably also decrements timeout_ms by the
 * suspend interval; that line is missing from this capture — confirm. */
1243 * Deregister from the MAD dispatcher. The MAD dispatcher holds
1244 * a reference on the MAD service when invoking callbacks. Since we
1245 * issue sends, we know how many callbacks are expected for send
1246 * completions. With receive completions, we need to wait until all
1247 * receive callbacks have completed before cleaning up receives.
1249 if( h_mad_svc->h_mad_reg )
1250 __mad_disp_dereg( h_mad_svc->h_mad_reg );
1252 /* Cancel all outstanding send requests. */
1253 cl_spinlock_acquire( &h_mad_svc->obj.lock );
1254 for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );
1255 p_list_item != cl_qlist_end( &h_mad_svc->send_list );
1256 p_list_item = cl_qlist_next( p_list_item ) )
1258 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("canceling MAD\n") );
1259 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
1260 h_send->canceled = TRUE;
1262 cl_spinlock_release( &h_mad_svc->obj.lock );
1265 * Invoke the timer callback to return the canceled MADs to the user.
1266 * Since the MAD service is being destroyed, the user cannot be issuing
1269 if( h_mad_svc->h_mad_reg )
/* NOTE(review): old_irql is used below without a visible declaration (a
 * kernel KIRQL, presumably under an elided CL_KERNEL conditional) — verify.
 * The send queue is drained at DISPATCH_LEVEL to serialize with the timer. */
1272 old_irql = KeRaiseIrqlToDpcLevel();
1274 __check_send_queue( h_mad_svc );
1276 KeLowerIrql( old_irql );
1280 cl_timer_destroy( &h_mad_svc->send_timer );
1284 * Reclaim any pending receives sent to the proxy for UAL.
1286 if( h_mad_svc->obj.h_al->p_context )
1288 cl_qlist_t *p_cblist;
1289 al_proxy_cb_info_t *p_cb_info;
1291 cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock );
1292 p_cblist = &h_mad_svc->obj.h_al->p_context->misc_cb_list;
1293 p_list_item = cl_qlist_head( p_cblist );
1294 while( p_list_item != cl_qlist_end( p_cblist ) )
1296 p_cb_info = (al_proxy_cb_info_t*)p_list_item;
/* Advance the cursor before possibly unlinking the current item. */
1297 p_list_item = cl_qlist_next( p_list_item );
1299 if( p_cb_info->p_al_obj && p_cb_info->p_al_obj == &h_mad_svc->obj )
1301 cl_qlist_remove_item( p_cblist, &p_cb_info->pool_item.list_item );
1302 deref_al_obj( p_cb_info->p_al_obj );
1303 proxy_cb_put( p_cb_info );
1306 cl_spinlock_release( &h_mad_svc->obj.h_al->p_context->cb_lock );
1310 AL_EXIT( AL_DBG_MAD_SVC );
\r
/*
 * __cleanup_mad_svc
 *
 * Cleanup-stage handler: runs after all dispatcher callbacks have ceased.
 * Destroys the receive timer, marks every in-progress RMPP reassembly
 * inactive, and invokes the receive timer callback once to flush them.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces elided);
 * annotations describe visible logic only.
 */
1316 __cleanup_mad_svc(
1317 IN struct _al_obj *p_obj )
1319 ib_mad_svc_handle_t h_mad_svc;
1320 al_mad_rmpp_t *p_rmpp;
1321 cl_list_item_t *p_list_item;
1323 CL_ASSERT( p_obj );
1324 h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );
1327 * There are no more callbacks from the MAD dispatcher that are active.
1328 * Cleanup any receives that may still be lying around. Stop the receive
1329 * timer to avoid synchronizing with it.
1331 cl_timer_destroy( &h_mad_svc->recv_timer );
1332 for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );
1333 p_list_item != cl_qlist_end( &h_mad_svc->recv_list );
1334 p_list_item = cl_qlist_next( p_list_item ) )
1336 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );
/* Marking inactive makes the timer callback below discard the reassembly. */
1337 p_rmpp->inactive = TRUE;
1339 __recv_timer_cb( h_mad_svc );
/* Both lists must be empty by now — sends were flushed in the destroying
 * stage, receives by the callback invocation above. */
1341 CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->send_list ) );
1342 CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->recv_list ) );
\r
1349 IN al_obj_t *p_obj )
\r
1351 ib_mad_svc_handle_t h_mad_svc;
\r
1353 CL_ASSERT( p_obj );
\r
1354 h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj );
\r
1356 destroy_al_obj( p_obj );
\r
1357 cl_free( h_mad_svc );
\r
/*
 * ib_send_mad (name line elided by the capture — TODO confirm)
 *
 * Posts a list of MAD elements for sending on a MAD service.  For user-mode
 * (UAL) contexts the list is forwarded to the special-QP proxy; otherwise
 * each element gets a send tracking structure, is initialized (RMPP state,
 * AV), queued on the service's send list, and handed to the dispatcher.
 * On failure mid-list, *pp_mad_failure points at the first unsent element.
 *
 * NOTE(review): capture is mangled — the function name line, braces and some
 * lines (e.g. the trailing arguments of spl_qp_mad_send, failure `break`s)
 * are missing; restore from upstream before compiling.
 */
1364 IN const ib_mad_svc_handle_t h_mad_svc,
1365 IN ib_mad_element_t* const p_mad_element_list,
1366 OUT ib_mad_element_t **pp_mad_failure OPTIONAL )
1368 ib_api_status_t status = IB_SUCCESS;
1370 ib_mad_send_handle_t h_send;
1371 ib_mad_element_t *p_cur_mad, *p_next_mad;
1374 AL_ENTER( AL_DBG_MAD_SVC );
1376 if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )
1378 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
1379 return IB_INVALID_HANDLE;
/* A multi-element list requires pp_mad_failure so partial failure can be
 * reported back to the caller. */
1381 if( !p_mad_element_list ||
1382 ( p_mad_element_list->p_next && !pp_mad_failure ) )
1384 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
1385 return IB_INVALID_PARAMETER;
1389 /* This is a send from user mode using special QP alias */
1390 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
1391 ("ib_send_mad: ual_context non-zero, TID = 0x%I64x.\n",
1392 ((ib_mad_t*)(ib_get_mad_buf( p_mad_element_list )))->trans_id ));
1393 status = spl_qp_mad_send( h_mad_svc, p_mad_element_list,
1395 AL_EXIT( AL_DBG_MAD_SVC );
1398 /* Post each send on the list. */
1399 p_cur_mad = p_mad_element_list;
1400 while( p_cur_mad )
1402 p_next_mad = p_cur_mad->p_next;
1404 /* Get an element to track the send. */
1405 h_send = get_mad_send( PARENT_STRUCT( p_cur_mad,
1406 al_mad_element_t, element ) );
1409 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("unable to get mad_send\n") );
1410 if( pp_mad_failure )
1411 *pp_mad_failure = p_cur_mad;
1412 return IB_INSUFFICIENT_RESOURCES;
1415 /* Initialize the MAD for sending. */
1416 status = __init_send_mad( h_mad_svc, h_send, p_cur_mad );
1417 if( status != IB_SUCCESS )
1419 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("init_send_mad failed: %s\n",
1420 ib_get_err_str(status)) );
1421 put_mad_send( h_send );
1422 if( pp_mad_failure )
1423 *pp_mad_failure = p_cur_mad;
/* NOTE(review): a `break` or `return status` presumably followed here —
 * the line is missing from this capture. */
1427 /* Add the MADs to our list. */
1428 cl_spinlock_acquire( &h_mad_svc->obj.lock );
1429 cl_qlist_insert_tail( &h_mad_svc->send_list,
1430 (cl_list_item_t*)&h_send->pool_item );
1432 /* Post the MAD to the dispatcher, and check for failures. */
1433 ref_al_obj( &h_mad_svc->obj );
1434 p_cur_mad->p_next = NULL;
1435 if( h_send->uses_rmpp )
1436 __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );
1438 __queue_mad_wr( h_mad_svc->h_mad_reg, h_send );
1439 cl_spinlock_release( &h_mad_svc->obj.lock );
1441 p_cur_mad = p_next_mad;
1445 * Resume any sends that can now be sent without holding
1446 * the mad service lock.
1448 __mad_disp_resume_send( h_mad_svc->h_mad_reg );
1450 AL_EXIT( AL_DBG_MAD_SVC );
\r
/*
 * __init_send_mad (name line elided by the capture — TODO confirm)
 *
 * Prepares a send tracking structure for one outbound MAD: records the
 * element and retry policy, determines whether RMPP applies, initializes the
 * RMPP header and segment counters, and creates an AV if the caller did not
 * supply one.  Returns IB_SUCCESS, IB_INVALID_SETTING (unsupported RMPP
 * version), or the AV-creation status.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces elided).
 */
1457 static ib_api_status_t
1459 IN ib_mad_svc_handle_t h_mad_svc,
1460 IN const ib_mad_send_handle_t h_send,
1461 IN ib_mad_element_t* const p_mad_element )
1463 ib_rmpp_mad_t *p_rmpp_hdr;
1464 uint8_t rmpp_version;
1465 ib_api_status_t status;
1467 AL_ENTER( AL_DBG_MAD_SVC );
1469 /* Initialize tracking the send. */
1470 h_send->p_send_mad = p_mad_element;
/* MAX_TIME marks the send as "active / not yet awaiting retry". */
1471 h_send->retry_time = MAX_TIME;
1472 h_send->retry_cnt = p_mad_element->retry_cnt;
1474 /* See if the send uses RMPP. */
1475 h_send->uses_rmpp = __does_send_req_rmpp( h_mad_svc->svc_type,
1476 p_mad_element, &rmpp_version );
1477 if( h_send->uses_rmpp )
1479 /* The RMPP header is present. */
1480 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("RMPP is activated\n") );
1481 p_rmpp_hdr = (ib_rmpp_mad_t*)p_mad_element->p_mad_buf;
1483 /* We only support version 1. */
1484 if( rmpp_version != DEFAULT_RMPP_VERSION )
1486 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("unsupported version\n") );
1487 return IB_INVALID_SETTING;
1490 p_rmpp_hdr->rmpp_version = rmpp_version;
1491 p_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_DATA;
1492 ib_rmpp_set_resp_time( p_rmpp_hdr, IB_RMPP_NO_RESP_TIME );
1493 p_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;
1495 * The segment number, flags, and payload size are set when
1496 * sending, so that they are set correctly when issuing retries.
1499 h_send->ack_seg = 0;
1500 h_send->seg_limit = 1;
1501 h_send->cur_seg = 1;
1502 /* For SA RMPP MADS we need different data size and header size */
1503 if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
/* Ceiling division: segments needed to carry the payload. */
1505 h_send->total_seg = ( (p_mad_element->size - IB_SA_MAD_HDR_SIZE) +
1506 (IB_SA_DATA_SIZE - 1) ) / IB_SA_DATA_SIZE;
1510 h_send->total_seg = ( (p_mad_element->size - MAD_RMPP_HDR_SIZE) +
1511 (MAD_RMPP_DATA_SIZE - 1) ) / MAD_RMPP_DATA_SIZE;
1515 /* See if we need to create the address vector for the user.
1516 We also create AV for local send to pass the slid and grh in case of trap generation*/
1517 if( !p_mad_element->h_av){
1519 status = __create_send_av( h_mad_svc, h_send );
1520 if( status != IB_SUCCESS )
/* NOTE(review): the error path here (presumably `return status;`) is
 * missing from this capture — confirm against upstream. */
1526 AL_EXIT( AL_DBG_MAD_SVC );
1527 return IB_SUCCESS;
\r
1532 static ib_api_status_t
\r
1534 IN ib_mad_svc_handle_t h_mad_svc,
\r
1535 IN ib_mad_send_handle_t h_send )
\r
1537 ib_av_attr_t av_attr;
\r
1538 ib_mad_element_t *p_mad_element;
\r
1540 p_mad_element = h_send->p_send_mad;
\r
1542 av_attr.port_num = h_mad_svc->port_num;
\r
1544 av_attr.sl = p_mad_element->remote_sl;
\r
1545 av_attr.dlid = p_mad_element->remote_lid;
\r
1547 av_attr.grh_valid = p_mad_element->grh_valid;
\r
1548 if( av_attr.grh_valid )
\r
1549 av_attr.grh = *p_mad_element->p_grh;
\r
1551 av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
\r
1552 av_attr.path_bits = p_mad_element->path_bits;
\r
1554 return ib_create_av( h_mad_svc->h_pd, &av_attr, &h_send->h_av );
\r
/*
 * __does_send_req_rmpp (return type line elided by the capture)
 *
 * Decides whether an outbound MAD should be sent using RMPP and, if so,
 * reports the RMPP version to use via *p_rmpp_version.
 *
 * NOTE(review): the capture dropped this function's return statements and
 * braces (the TRUE/FALSE results of each branch are missing); restore from
 * upstream al_mad.c before compiling.  Annotations cover visible logic only.
 */
1560 __does_send_req_rmpp(
1561 IN const ib_mad_svc_type_t mad_svc_type,
1562 IN const ib_mad_element_t* const p_mad_element,
1563 OUT uint8_t *p_rmpp_version )
1565 switch( mad_svc_type )
1567 case IB_MAD_SVC_DEFAULT:
1568 case IB_MAD_SVC_RMPP:
1569 /* Internally generated MADs do not use RMPP. */
1570 if( __is_internal_send( mad_svc_type, p_mad_element ) )
1573 /* If the MAD has the version number set, just return it. */
1574 if( p_mad_element->rmpp_version )
1576 *p_rmpp_version = p_mad_element->rmpp_version;
1580 /* If the class is well known and uses RMPP, use the default version. */
1581 if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
/* Only SA methods that can carry multi-packet payloads activate RMPP. */
1583 switch( p_mad_element->p_mad_buf->method )
1585 case IB_MAD_METHOD_GETTABLE_RESP:
1586 case IB_MAD_METHOD_GETMULTI:
1587 case IB_MAD_METHOD_GETMULTI_RESP:
1588 *p_rmpp_version = DEFAULT_RMPP_VERSION;
1596 /* The RMPP is not active. */
\r
1607 * Sends the next RMPP segment of an RMPP transfer.
\r
/*
 * __queue_rmpp_seg (name line elided by the capture — referenced as
 * __queue_rmpp_seg by callers in this file)
 *
 * Prepares and queues the next RMPP DATA segment of a send: stamps the
 * segment number, FIRST/LAST flags and PayloadLength (with the SA-class
 * extra-header adjustment), advances cur_seg, and posts the work request.
 * Caller holds the MAD service lock.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces elided).
 */
1611 IN const al_mad_reg_handle_t h_mad_reg,
1612 IN ib_mad_send_handle_t h_send )
1614 ib_rmpp_mad_t *p_rmpp_hdr;
1616 AL_ENTER( AL_DBG_MAD_SVC );
1618 CL_ASSERT( h_mad_reg && h_send );
1619 CL_ASSERT( h_send->cur_seg <= h_send->seg_limit );
1621 /* Reset information to track the send. */
1622 h_send->retry_time = MAX_TIME;
1624 /* Set the RMPP header information. */
1625 p_rmpp_hdr = (ib_rmpp_mad_t*)h_send->p_send_mad->p_mad_buf;
1626 p_rmpp_hdr->seg_num = cl_hton32( h_send->cur_seg );
1627 p_rmpp_hdr->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
1628 p_rmpp_hdr->paylen_newwin = 0;
1630 /* See if this is the first segment that needs to be sent. */
1631 if( h_send->cur_seg == 1 )
1633 p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_FIRST;
1636 * Since the RMPP layer is the one to support SA MADs by duplicating
1637 * the SA header. The actual Payload Length should include the
1638 * original mad size + NumSegs * SA-extra-header.
1640 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
1642 /* Add sa_ext_hdr to each segment over the first one. */
1643 p_rmpp_hdr->paylen_newwin = cl_hton32(
1644 h_send->p_send_mad->size - MAD_RMPP_HDR_SIZE +
1645 (h_send->total_seg - 1) *
1646 (IB_SA_MAD_HDR_SIZE - MAD_RMPP_HDR_SIZE) );
1650 /* For other RMPP packets we simply use the given MAD */
1651 p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
1652 MAD_RMPP_HDR_SIZE );
1656 /* See if this is the last segment that needs to be sent. */
1657 if( h_send->cur_seg == h_send->total_seg )
1659 p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_LAST;
1661 /* But for SA MADs we need extra header size */
1662 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
/* Last segment carries only the remaining payload after the earlier
 * full segments. */
1664 p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
1665 (h_send->cur_seg -1)*IB_SA_DATA_SIZE - MAD_RMPP_HDR_SIZE );
1669 p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
1670 (h_send->cur_seg -1)*MAD_RMPP_DATA_SIZE );
1674 /* Set the current segment to the next one. */
1675 h_send->cur_seg++;
1677 /* Send the MAD. */
1678 __queue_mad_wr( h_mad_reg, h_send );
1680 AL_EXIT( AL_DBG_MAD_SVC );
\r
1686 * Posts a send work request to the dispatcher for a MAD send.
\r
/*
 * __queue_mad_wr (name line elided by the capture — referenced as
 * __queue_mad_wr by callers in this file)
 *
 * Builds the UD send work request for a MAD and queues it on the dispatcher.
 * For RMPP sends using an AL-owned buffer, copies the class header plus the
 * current segment's payload into the registered send buffer, zero-filling
 * the unused tail of a short final segment.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces elided;
 * the cl_memcpy call at 1744 lost its opening line).
 */
1690 IN const al_mad_reg_handle_t h_mad_reg,
1691 IN const ib_mad_send_handle_t h_send )
1693 ib_send_wr_t *p_send_wr;
1694 al_mad_element_t *p_al_element;
1695 ib_rmpp_mad_t *p_rmpp_hdr;
1696 uint8_t *p_rmpp_src, *p_rmpp_dst;
1697 uintn_t hdr_len, offset, max_len;
1699 AL_ENTER( AL_DBG_MAD_SVC );
1700 p_send_wr = &h_send->mad_wr.send_wr;
1702 cl_memclr( p_send_wr, sizeof( ib_send_wr_t ) );
1704 p_send_wr->wr_type = WR_SEND;
1705 p_send_wr->send_opt = h_send->p_send_mad->send_opt;
1707 p_al_element = PARENT_STRUCT( h_send->p_send_mad,
1708 al_mad_element_t, element );
1710 /* See if the MAD requires RMPP support. */
1711 if( h_send->uses_rmpp && p_al_element->p_al_mad_buf )
1713 #if defined( CL_KERNEL )
/* Kernel: registered buffer starts with space reserved for the GRH. */
1714 p_rmpp_dst = p_al_element->mad_buf + sizeof(ib_grh_t);
1716 p_rmpp_dst = (uint8_t*)(uintn_t)p_al_element->mad_ds.vaddr;
1718 p_rmpp_src = (uint8_t* __ptr64)h_send->p_send_mad->p_mad_buf;
1719 p_rmpp_hdr = (ib_rmpp_mad_t*)p_rmpp_src;
1721 if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
1722 hdr_len = IB_SA_MAD_HDR_SIZE;
1724 hdr_len = MAD_RMPP_HDR_SIZE;
1726 max_len = MAD_BLOCK_SIZE - hdr_len;
/* Byte offset of this segment's payload in the full source MAD. */
1728 offset = hdr_len + (max_len * (cl_ntoh32( p_rmpp_hdr->seg_num ) - 1));
1730 /* Copy the header into the registered send buffer. */
1731 cl_memcpy( p_rmpp_dst, p_rmpp_src, hdr_len );
1733 /* Copy this segment's payload into the registered send buffer. */
1734 CL_ASSERT( h_send->p_send_mad->size != offset );
1735 if( (h_send->p_send_mad->size - offset) < max_len )
1737 max_len = h_send->p_send_mad->size - offset;
1738 /* Clear unused payload. */
1739 cl_memclr( p_rmpp_dst + hdr_len + max_len,
1740 MAD_BLOCK_SIZE - hdr_len - max_len );
/* NOTE(review): the cl_memcpy( ... ) opening line for the payload copy is
 * missing from this capture; 1744 is its argument list. */
1744 p_rmpp_dst + hdr_len, p_rmpp_src + offset, max_len );
1747 p_send_wr->num_ds = 1;
1748 p_send_wr->ds_array = &p_al_element->mad_ds;
1750 p_send_wr->dgrm.ud.remote_qp = h_send->p_send_mad->remote_qp;
1751 p_send_wr->dgrm.ud.remote_qkey = h_send->p_send_mad->remote_qkey;
1752 p_send_wr->dgrm.ud.pkey_index = h_send->p_send_mad->pkey_index;
1754 /* See if we created the address vector on behalf of the user. */
1755 if( h_send->p_send_mad->h_av )
1756 p_send_wr->dgrm.ud.h_av = h_send->p_send_mad->h_av;
1758 p_send_wr->dgrm.ud.h_av = h_send->h_av;
1760 __mad_disp_queue_send( h_mad_reg, &h_send->mad_wr );
1762 AL_EXIT( AL_DBG_MAD_SVC );
\r
1767 static cl_status_t
\r
1768 __mad_svc_find_send(
\r
1769 IN const cl_list_item_t* const p_list_item,
\r
1770 IN void* context )
\r
1772 ib_mad_send_handle_t h_send;
\r
1774 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
\r
1776 if( h_send->p_send_mad == context )
\r
1777 return CL_SUCCESS;
\r
1779 return CL_NOT_FOUND;
\r
/*
 * ib_cancel_mad (name line elided by the capture — TODO confirm)
 *
 * Cancels an outstanding send.  User-mode (UAL) requests are forwarded to the
 * special-QP proxy; otherwise the send is located on the service's send list,
 * marked canceled, and — if it is between retries — handed to the timer
 * thread for completion.  Returns IB_SUCCESS, IB_NOT_FOUND (already
 * completed), IB_INVALID_HANDLE or IB_INVALID_PARAMETER.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces elided).
 */
1786 IN const ib_mad_svc_handle_t h_mad_svc,
1787 IN ib_mad_element_t* const p_mad_element )
1790 cl_list_item_t *p_list_item;
1791 ib_mad_send_handle_t h_send;
1793 ib_api_status_t status;
1796 AL_ENTER( AL_DBG_MAD_SVC );
1798 if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )
1800 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
1801 return IB_INVALID_HANDLE;
1803 if( !p_mad_element )
1805 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
1806 return IB_INVALID_PARAMETER;
1810 /* This is a send from user mode using special QP alias */
1811 status = spl_qp_cancel_mad( h_mad_svc, p_mad_element );
1812 AL_EXIT( AL_DBG_MAD_SVC );
/* NOTE(review): a `return status;` presumably followed here (line missing
 * from this capture) before the kernel-path code below. */
1815 /* Search for the MAD in our MAD list. It may have already completed. */
1816 cl_spinlock_acquire( &h_mad_svc->obj.lock );
1817 p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,
1818 __mad_svc_find_send, p_mad_element );
1820 if( p_list_item == cl_qlist_end( &h_mad_svc->send_list ) )
1822 cl_spinlock_release( &h_mad_svc->obj.lock );
1823 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("mad not found\n") );
1824 return IB_NOT_FOUND;
1827 /* Mark the MAD as having been canceled. */
1828 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
1829 h_send->canceled = TRUE;
1831 /* If the MAD is active, process it in the send callback. */
/* retry_time != MAX_TIME means the send is waiting on its retry timer. */
1832 if( h_send->retry_time != MAX_TIME )
1834 /* Process the canceled MAD using the timer thread. */
1835 cl_timer_trim( &h_mad_svc->send_timer, 0 );
1838 cl_spinlock_release( &h_mad_svc->obj.lock );
1839 AL_EXIT( AL_DBG_MAD_SVC );
1840 return IB_SUCCESS;
\r
/*
 * ib_delay_mad (name line elided by the capture — TODO confirm)
 *
 * Postpones the (re)transmission of an outstanding send by delay_ms.  Not
 * supported for user-mode MAD QPs (returns IB_UNSUPPORTED).  If the send is
 * currently active its delay is recorded for the next completion; if it is
 * waiting on its retry timer the retry deadline is pushed out directly.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces and the
 * CL_KERNEL conditional lines elided).
 */
1847 IN const ib_mad_svc_handle_t h_mad_svc,
1848 IN ib_mad_element_t* const p_mad_element,
1849 IN const uint32_t delay_ms )
1852 cl_list_item_t *p_list_item;
1853 ib_mad_send_handle_t h_send;
1856 AL_ENTER( AL_DBG_MAD_SVC );
1858 if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )
1860 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
1861 return IB_INVALID_HANDLE;
1863 if( !p_mad_element )
1865 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
1866 return IB_INVALID_PARAMETER;
/* User-mode path: parameters unused, feature not implemented. */
1870 UNUSED_PARAM( p_mad_element );
1871 UNUSED_PARAM( delay_ms );
1872 /* TODO: support for user-mode MAD QP's. */
1873 AL_EXIT( AL_DBG_MAD_SVC );
1874 return IB_UNSUPPORTED;
1876 /* Search for the MAD in our MAD list. It may have already completed. */
1877 cl_spinlock_acquire( &h_mad_svc->obj.lock );
1878 p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,
1879 __mad_svc_find_send, p_mad_element );
1881 if( p_list_item == cl_qlist_end( &h_mad_svc->send_list ) )
1883 cl_spinlock_release( &h_mad_svc->obj.lock );
1884 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("MAD not found\n") );
1885 return IB_NOT_FOUND;
1888 /* Mark the MAD as having been canceled. */
1889 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
/* Active send: stash the delay; waiting send: push the retry deadline
 * (retry_time is in microseconds, hence * 1000). */
1891 if( h_send->retry_time == MAX_TIME )
1892 h_send->delay = delay_ms;
1894 h_send->retry_time += ((uint64_t)delay_ms * 1000Ui64);
1896 cl_spinlock_release( &h_mad_svc->obj.lock );
1897 AL_EXIT( AL_DBG_MAD_SVC );
1898 return IB_SUCCESS;
\r
1904 * Process a send completion.
\r
/*
 * __mad_svc_send_done
 *
 * Dispatcher callback for a completed send work request.  Completes internal
 * sends immediately; for user sends, either reports completion (done,
 * canceled, or failed), queues the next RMPP segment, or arms the retry
 * timer to await a response/ACK.  Finally resumes any throttled sends.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces and some
 * `return`s elided).
 */
1907 __mad_svc_send_done(
1908 IN ib_mad_svc_handle_t h_mad_svc,
1909 IN al_mad_wr_t *p_mad_wr,
1910 IN ib_wc_t *p_wc )
1912 ib_mad_send_handle_t h_send;
1914 AL_ENTER( AL_DBG_MAD_SVC );
1915 CL_ASSERT( h_mad_svc && p_mad_wr && !p_wc->p_next );
1917 h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
1918 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("send callback TID:0x%I64x\n",
1919 __get_send_tid( h_send )) );
1921 /* We need to synchronize access to the list as well as the MAD request. */
1922 cl_spinlock_acquire( &h_mad_svc->obj.lock );
1924 /* Complete internally sent MADs. */
1925 if( __is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )
1927 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, ("internal send\n") );
1928 cl_qlist_remove_item( &h_mad_svc->send_list,
1929 (cl_list_item_t*)&h_send->pool_item );
1930 cl_spinlock_release( &h_mad_svc->obj.lock );
1931 ib_put_mad( h_send->p_send_mad );
1932 __cleanup_mad_send( h_mad_svc, h_send );
/* NOTE(review): a `return;` presumably followed here — line missing. */
1936 /* See if the send request has completed. */
1937 if( __is_send_mad_done( h_send, p_wc ) )
1939 /* The send has completed. */
1940 cl_qlist_remove_item( &h_mad_svc->send_list,
1941 (cl_list_item_t*)&h_send->pool_item );
1942 cl_spinlock_release( &h_mad_svc->obj.lock );
1944 /* Report the send as canceled only if we don't have the response. */
1945 if( h_send->canceled && !h_send->p_resp_mad )
1946 __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );
1948 __notify_send_comp( h_mad_svc, h_send, p_wc->status );
1952 /* See if this is an RMPP MAD, and we should send more segments. */
1953 if( h_send->uses_rmpp && (h_send->cur_seg <= h_send->seg_limit) )
1955 /* Send the next segment. */
1956 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
1957 ("sending next RMPP segment for TID:0x%I64x\n",
1958 __get_send_tid( h_send )) );
1960 __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );
1964 /* Continue waiting for a response or ACK. */
1965 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
1966 ("waiting for response for TID:0x%I64x\n",
1967 __get_send_tid( h_send )) );
/* Arm the retry timer; the timer callback handles timeout/retry. */
1969 __set_retry_time( h_send );
1970 cl_timer_trim( &h_mad_svc->send_timer,
1971 h_send->p_send_mad->timeout_ms );
1973 cl_spinlock_release( &h_mad_svc->obj.lock );
1977 * Resume any sends that can now be sent without holding
1978 * the mad service lock.
1980 __mad_disp_resume_send( h_mad_svc->h_mad_reg );
1982 AL_EXIT( AL_DBG_MAD_SVC );
\r
1988 * Notify the user of a completed send operation.
\r
1991 __notify_send_comp(
\r
1992 IN ib_mad_svc_handle_t h_mad_svc,
\r
1993 IN ib_mad_send_handle_t h_send,
\r
1994 IN ib_wc_status_t wc_status )
\r
1996 AL_ENTER( AL_DBG_MAD_SVC );
\r
1998 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("completing TID:0x%I64x\n",
\r
1999 __get_send_tid( h_send )) );
\r
2001 h_send->p_send_mad->status = wc_status;
\r
2003 /* Notify the user of a received response, if one exists. */
\r
2004 if( h_send->p_resp_mad )
\r
2006 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
\r
2007 h_send->p_resp_mad );
\r
2010 /* The transaction has completed, return the send MADs. */
\r
2011 h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
\r
2012 h_send->p_send_mad );
\r
2014 __cleanup_mad_send( h_mad_svc, h_send );
\r
2016 AL_EXIT( AL_DBG_MAD_SVC );
\r
2022 * Return a send MAD tracking structure to its pool and cleanup any resources
\r
2023 * it may have allocated.
\r
2026 __cleanup_mad_send(
\r
2027 IN ib_mad_svc_handle_t h_mad_svc,
\r
2028 IN ib_mad_send_handle_t h_send )
\r
2030 /* Release any address vectors that we may have created. */
\r
2031 if( h_send->h_av )
\r
2033 ib_destroy_av( h_send->h_av );
\r
2036 /* Return the send MAD tracking structure to its pool. */
\r
2037 put_mad_send( h_send );
\r
2039 /* We no longer need to reference the MAD service. */
\r
2040 deref_al_obj( &h_mad_svc->obj );
\r
2046 __is_send_mad_done(
\r
2047 IN ib_mad_send_handle_t h_send,
\r
2048 IN ib_wc_t *p_wc )
\r
2050 AL_ENTER( AL_DBG_MAD_SVC );
\r
2052 /* Complete the send if the request failed. */
\r
2053 if( p_wc->status != IB_WCS_SUCCESS )
\r
2055 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("y-send failed\n" ) );
\r
2059 /* Complete the send if it has been canceled. */
\r
2060 if( h_send->canceled )
\r
2062 AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
2063 ("y-send was canceled\n") );
\r
2067 /* Complete the send if we have its response. */
\r
2068 if( h_send->p_resp_mad )
\r
2070 AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
2071 ("y-response received\n") );
\r
2075 /* RMPP sends cannot complete until all segments have been acked. */
\r
2076 if( h_send->uses_rmpp && (h_send->ack_seg < h_send->total_seg) )
\r
2078 AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
2079 ("more RMPP segments to send\n") );
\r
2084 * All segments of this send have been sent.
\r
2085 * The send has completed if we are not waiting for a response.
\r
2087 if( h_send->p_send_mad->resp_expected )
\r
2089 AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
2090 ("no-waiting on response\n") );
\r
2095 AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
\r
2096 ("send completed\n") );
\r
2104 * Try to find a send that matches the received response. This call must
\r
2105 * be synchronized with access to the MAD service send_list.
\r
2107 static ib_mad_send_handle_t
\r
2108 __mad_svc_match_recv(
\r
2109 IN const ib_mad_svc_handle_t h_mad_svc,
\r
2110 IN ib_mad_element_t* const p_recv_mad )
\r
2112 ib_mad_t *p_recv_hdr;
\r
2113 cl_list_item_t *p_list_item;
\r
2114 ib_mad_send_handle_t h_send;
\r
2116 AL_ENTER( AL_DBG_MAD_SVC );
\r
2118 p_recv_hdr = p_recv_mad->p_mad_buf;
\r
2120 /* Search the send list for a matching request. */
\r
2121 for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );
\r
2122 p_list_item != cl_qlist_end( &h_mad_svc->send_list );
\r
2123 p_list_item = cl_qlist_next( p_list_item ) )
\r
2125 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
\r
2127 /* Match on the transaction ID, ignoring internally generated sends. */
\r
2128 AL_EXIT( AL_DBG_MAD_SVC );
\r
2129 if( (p_recv_hdr->trans_id == h_send->mad_wr.client_tid) &&
\r
2130 !__is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )
\r
/*
 * __mad_svc_recv_done
 *
 * Dispatcher callback for a received MAD.  Raw services get every receive
 * directly; otherwise RMPP MADs are first reassembled, busy responses are
 * dropped (the request keeps retrying), responses are matched to their
 * sends, and remaining unsolicited MADs are reported to the user.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces, some
 * `return`s and callback argument lines elided).
 */
2142 __mad_svc_recv_done(
2143 IN ib_mad_svc_handle_t h_mad_svc,
2144 IN ib_mad_element_t *p_mad_element )
2146 ib_mad_t *p_mad_hdr;
2147 ib_api_status_t cl_status;
2149 AL_ENTER( AL_DBG_MAD_SVC );
2151 p_mad_hdr = ib_get_mad_buf( p_mad_element );
2152 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("recv done TID:0x%I64x\n",
2153 p_mad_hdr->trans_id) );
2155 /* Raw MAD services get all receives. */
2156 if( h_mad_svc->svc_type == IB_MAD_SVC_RAW )
2158 /* Report the receive. */
2159 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
2160 ("recv TID:0x%I64x\n", p_mad_hdr->trans_id) );
2161 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
/* NOTE(review): the p_mad_element argument line and the `return;` for this
 * branch are missing from this capture. */
2166 /* Fully reassemble received MADs before completing them. */
2167 if( __recv_requires_rmpp( h_mad_svc->svc_type, p_mad_element ) )
2169 /* Reassembling the receive. */
2170 cl_status = __do_rmpp_recv( h_mad_svc, &p_mad_element );
2171 if( cl_status != CL_SUCCESS )
2173 /* The reassembly is not done. */
2174 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
2175 ("no RMPP receive to report\n") );
2180 * Get the header to the MAD element to report to the user. This
2181 * will be a MAD element received earlier.
/* Reassembly may substitute the first-received element for this one. */
2183 p_mad_hdr = ib_get_mad_buf( p_mad_element );
2187 * If the response indicates that the responder was busy, continue
2188 * retrying the request.
2190 if( p_mad_hdr->status & IB_MAD_STATUS_BUSY )
2192 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
2193 ("responder busy TID:0x%I64x\n", p_mad_hdr->trans_id) );
2194 ib_put_mad( p_mad_element );
2199 * See if the MAD was sent in response to a previously sent MAD. Note
2200 * that trap repress messages are responses, even though the response
2203 if( ib_mad_is_response( p_mad_hdr ) ||
2204 (p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) )
2206 /* Process the received response. */
2207 __process_recv_resp( h_mad_svc, p_mad_element );
2211 /* Report the receive. */
2212 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("unsol recv TID:0x%I64x\n",
2213 p_mad_hdr->trans_id) );
2214 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
2217 AL_EXIT( AL_DBG_MAD_SVC );
\r
2223 * A MAD was received in response to a send. Find the corresponding send
\r
2224 * and process the receive completion.
\r
/*
 * __process_recv_resp
 *
 * Matches a received response to its outstanding send.  Unmatched responses
 * are returned to the pool.  If the matched send is still active, the
 * response is buffered on it for the send-completion path; otherwise the
 * send is removed from the list and both receive and send callbacks fire.
 *
 * NOTE(review): capture is mangled (line numbers fused in, braces, the
 * NULL-check around the match result, and callback argument lines elided).
 */
2227 __process_recv_resp(
2228 IN ib_mad_svc_handle_t h_mad_svc,
2229 IN ib_mad_element_t *p_mad_element )
2231 ib_mad_t *p_mad_hdr;
2232 ib_mad_send_handle_t h_send;
2235 * Try to find the send. The send may have already timed out or
2236 * have been canceled, so we need to search for it.
2238 AL_ENTER( AL_DBG_MAD_SVC );
2239 p_mad_hdr = ib_get_mad_buf( p_mad_element );
2240 cl_spinlock_acquire( &h_mad_svc->obj.lock );
2242 h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );
/* NOTE(review): an `if( !h_send )` guard presumably opened the branch
 * below — the line is missing from this capture. */
2245 /* A matching send was not found. */
2246 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
2247 ("unmatched resp TID:0x%I64x\n", p_mad_hdr->trans_id) );
2248 cl_spinlock_release( &h_mad_svc->obj.lock );
2249 ib_put_mad( p_mad_element );
2253 /* We've found the matching send. */
2254 h_send->p_send_mad->status = IB_WCS_SUCCESS;
2256 /* Record the send contexts with the receive. */
2257 p_mad_element->send_context1 = (void* __ptr64)h_send->p_send_mad->context1;
2258 p_mad_element->send_context2 = (void* __ptr64)h_send->p_send_mad->context2;
/* retry_time == MAX_TIME means the send WR is still outstanding; defer the
 * report to the send-completion handler. */
2260 if( h_send->retry_time == MAX_TIME )
2262 /* The send is currently active. Do not report it. */
2263 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
2264 ("resp send active TID:0x%I64x\n", p_mad_hdr->trans_id) );
2265 /* Handle a duplicate receive happening before the send completion is processed. */
2266 if( h_send->p_resp_mad )
2267 ib_put_mad( h_send->p_resp_mad );
2268 h_send->p_resp_mad = p_mad_element;
2269 cl_spinlock_release( &h_mad_svc->obj.lock );
2273 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
2274 ("resp received TID:0x%I64x\n", p_mad_hdr->trans_id) );
2276 /* Report the send completion below. */
2277 cl_qlist_remove_item( &h_mad_svc->send_list,
2278 (cl_list_item_t*)&h_send->pool_item );
2279 cl_spinlock_release( &h_mad_svc->obj.lock );
2281 /* Report the receive. */
2282 h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
2285 /* Report the send completion. */
2286 h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
2287 h_send->p_send_mad );
2288 __cleanup_mad_send( h_mad_svc, h_send );
2290 AL_EXIT( AL_DBG_MAD_SVC );
\r
2296 * Return TRUE if a received MAD requires RMPP processing.
\r
2298 static __inline boolean_t
\r
2299 __recv_requires_rmpp(
\r
2300 IN const ib_mad_svc_type_t mad_svc_type,
\r
2301 IN const ib_mad_element_t* const p_mad_element )
\r
2303 ib_rmpp_mad_t *p_rmpp_mad;
\r
2305 AL_ENTER( AL_DBG_MAD_SVC );
\r
2307 p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );
\r
2309 AL_EXIT( AL_DBG_MAD_SVC );
\r
2311 switch( mad_svc_type )
\r
2313 case IB_MAD_SVC_DEFAULT:
\r
2314 /* Only subnet management receives require RMPP. */
\r
2315 return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&
\r
2316 ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );
\r
2318 case IB_MAD_SVC_RMPP:
\r
2319 return( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );
\r
2329 * Return TRUE if the MAD was issued by AL itself.
\r
2331 static __inline boolean_t
\r
2332 __is_internal_send(
\r
2333 IN const ib_mad_svc_type_t mad_svc_type,
\r
2334 IN const ib_mad_element_t* const p_mad_element )
\r
2336 ib_rmpp_mad_t *p_rmpp_mad;
\r
2338 p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );
\r
2340 /* See if the MAD service issues internal MADs. */
\r
2341 switch( mad_svc_type )
\r
2343 case IB_MAD_SVC_DEFAULT:
\r
2344 /* Internal sends are non-RMPP data MADs. */
\r
2345 return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&
\r
2346 (p_rmpp_mad->rmpp_type &&
\r
2347 (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) );
\r
2349 case IB_MAD_SVC_RMPP:
\r
2350 /* The RMPP header is present. Check its type. */
\r
2351 return( (p_rmpp_mad->rmpp_type) &&
\r
2352 (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) );
\r
2362 * Fully reassemble a received MAD. Return TRUE once all segments of the
\r
2363 * MAD have been received. Return the fully reassembled MAD.
\r
/*
 * __do_rmpp_recv (name line elided by the capture — referenced as
 * __do_rmpp_recv by __mad_svc_recv_done)
 *
 * Dispatches one received RMPP MAD by its rmpp_type: DATA feeds reassembly
 * (CL_SUCCESS once complete, CL_NOT_DONE while in progress), ACK advances
 * the send window, STOP/ABORT terminate the transfer.  Consumed elements
 * are returned to the MAD pool here.
 *
 * NOTE(review): capture is mangled — braces, the `default:` case, and the
 * final `return cl_status;` are missing; restore from upstream.
 */
2365 static cl_status_t
2367 IN ib_mad_svc_handle_t h_mad_svc,
2368 IN OUT ib_mad_element_t **pp_mad_element )
2370 ib_rmpp_mad_t *p_rmpp_mad;
2371 cl_status_t cl_status;
2373 AL_ENTER( AL_DBG_MAD_SVC );
2375 p_rmpp_mad = ib_get_mad_buf( *pp_mad_element );
2376 CL_ASSERT( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );
2378 /* Perform the correct operation base on the RMPP MAD type. */
2379 switch( p_rmpp_mad->rmpp_type )
2381 case IB_RMPP_TYPE_DATA:
/* On success *pp_mad_element is replaced by the reassembled element. */
2382 cl_status = __process_rmpp_data( h_mad_svc, pp_mad_element );
2383 /* Return the received element back to its MAD pool if not needed. */
2384 if( (cl_status != CL_SUCCESS) && (cl_status != CL_NOT_DONE) )
2386 ib_put_mad( *pp_mad_element );
2390 case IB_RMPP_TYPE_ACK:
2391 /* Process the ACK. */
2392 __process_rmpp_ack( h_mad_svc, *pp_mad_element );
2393 ib_put_mad( *pp_mad_element );
2394 cl_status = CL_COMPLETED;
2397 case IB_RMPP_TYPE_STOP:
2398 case IB_RMPP_TYPE_ABORT:
2400 /* Process the ABORT or STOP. */
2401 __process_rmpp_nack( h_mad_svc, *pp_mad_element );
2402 ib_put_mad( *pp_mad_element );
2403 cl_status = CL_REJECT;
2407 AL_EXIT( AL_DBG_MAD_SVC );
\r
2414 * Process an RMPP DATA message. Reassemble the received data. If the

2415 * received MAD is fully reassembled, this call returns CL_SUCCESS.

2417 static cl_status_t

2418 __process_rmpp_data(

2419 IN ib_mad_svc_handle_t h_mad_svc,

2420 IN OUT ib_mad_element_t **pp_mad_element )

2422 ib_mad_element_t *p_rmpp_resp_mad = NULL;

2423 al_mad_rmpp_t *p_rmpp;

2424 ib_rmpp_mad_t *p_rmpp_hdr;

2426 cl_status_t cl_status;

2427 ib_api_status_t status;

2429 p_rmpp_hdr = ib_get_mad_buf( *pp_mad_element );

2430 CL_ASSERT( p_rmpp_hdr->rmpp_type == IB_RMPP_TYPE_DATA );

2432 /* Try to find a receive already being reassembled. */

2433 cl_spinlock_acquire( &h_mad_svc->obj.lock );

2434 p_rmpp = __find_rmpp( h_mad_svc, *pp_mad_element );

2437 /* This receive is not being reassembled. It should be the first seg. */

2438 if( cl_ntoh32( p_rmpp_hdr->seg_num ) != 1 )

2440 cl_spinlock_release( &h_mad_svc->obj.lock );

2441 return CL_NOT_FOUND;

2444 /* Start tracking the new reassembly. */

2445 p_rmpp = __get_mad_rmpp( h_mad_svc, *pp_mad_element );

2448 cl_spinlock_release( &h_mad_svc->obj.lock );

2449 return CL_INSUFFICIENT_MEMORY;

2453 /* Verify that we just received the expected segment. */
/* NOTE(review): the declaration of cur_seg is not visible in this fragment
 * (it presumably sits among the dropped declaration lines above) - verify. */
2454 cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );

2455 if( cur_seg == p_rmpp->expected_seg )

2457 /* Copy the new segment's data into our reassembly buffer. */

2458 cl_status = __process_segment( h_mad_svc, p_rmpp,

2459 pp_mad_element, &p_rmpp_resp_mad );

2461 /* See if the RMPP is done. */

2462 if( cl_status == CL_SUCCESS )

2464 /* Stop tracking the reassembly. */

2465 __put_mad_rmpp( h_mad_svc, p_rmpp );

2467 else if( cl_status == CL_NOT_DONE )

2469 /* Start the reassembly timer. */

2470 cl_timer_trim( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );

2473 else if( cur_seg < p_rmpp->expected_seg )

2475 /* We received an old segment. Resend the last ACK. */

2476 p_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );

2477 cl_status = CL_DUPLICATE;

2481 /* The sender is confused, ignore this MAD. We could ABORT here. */

2482 cl_status = CL_OVERRUN;

2485 cl_spinlock_release( &h_mad_svc->obj.lock );

2488 * Send any response MAD (ACK, ABORT, etc.) to the sender. Note that

2489 * we are currently in the callback from the MAD dispatcher. The

2490 * dispatcher holds a reference on the MAD service while in the callback,

2491 * preventing the MAD service from being destroyed. This allows the

2492 * call to ib_send_mad() to proceed even if the user tries to destroy

2493 * the MAD service.

2495 if( p_rmpp_resp_mad )

2497 status = ib_send_mad( h_mad_svc, p_rmpp_resp_mad, NULL );

2498 if( status != IB_SUCCESS )

2500 /* Return the MAD. The MAD is considered dropped. */

2501 ib_put_mad( p_rmpp_resp_mad );
\r
2511 * Locate an existing RMPP MAD being reassembled. Return NULL if one is not

2512 * found. This call assumes access to the recv_list is synchronized.

2514 static al_mad_rmpp_t*

2516 IN ib_mad_svc_handle_t h_mad_svc,

2517 IN OUT ib_mad_element_t *p_mad_element )

2519 al_mad_rmpp_t *p_rmpp;

2520 cl_list_item_t *p_list_item;

2521 ib_mad_t *p_mad_hdr, *p_mad_hdr2;

2522 ib_mad_element_t *p_mad_element2;

2525 p_mad_hdr = ib_get_mad_buf( p_mad_element );

2527 /* Search all MADs being reassembled. */

2528 for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );

2529 p_list_item != cl_qlist_end( &h_mad_svc->recv_list );

2530 p_list_item = cl_qlist_next( p_list_item ) )

2532 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );

2534 p_mad_element2 = p_rmpp->p_mad_element;

2535 p_mad_hdr2 = ib_get_mad_buf( p_mad_element2 );

2537 /* See if the incoming MAD matches - what a check. */
/* Match key: TID + class version + mgmt class + method + sender LID + QP. */
2538 if( (p_mad_hdr->trans_id == p_mad_hdr2->trans_id) &&

2539 (p_mad_hdr->class_ver == p_mad_hdr2->class_ver) &&

2540 (p_mad_hdr->mgmt_class == p_mad_hdr2->mgmt_class) &&

2541 (p_mad_hdr->method == p_mad_hdr2->method) &&

2542 (p_mad_element->remote_lid == p_mad_element2->remote_lid) &&

2543 (p_mad_element->remote_qp == p_mad_element2->remote_qp) )
\r
2555 * Acquire a new RMPP tracking structure. This call assumes access to

2556 * the recv_list is synchronized.

2558 static al_mad_rmpp_t*

2560 IN ib_mad_svc_handle_t h_mad_svc,

2561 IN ib_mad_element_t *p_mad_element )

2563 al_mad_rmpp_t *p_rmpp;

2564 al_mad_element_t *p_al_element;

2566 p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );

2568 /* Get an RMPP tracking structure. */

2569 p_rmpp = get_mad_rmpp( p_al_element );

2573 /* Initialize the tracking information. */
/* First segment is expected next; window starts at one segment. */
2574 p_rmpp->expected_seg = 1;

2575 p_rmpp->seg_limit = 1;

2576 p_rmpp->inactive = FALSE;

2577 p_rmpp->p_mad_element = p_mad_element;

2579 /* Insert the tracking structure into the reassembly list. */

2580 cl_qlist_insert_tail( &h_mad_svc->recv_list,

2581 (cl_list_item_t*)&p_rmpp->pool_item );
\r
2589 * Return the RMPP tracking structure. This call assumes access to

2590 * the recv_list is synchronized.

2594 IN ib_mad_svc_handle_t h_mad_svc,

2595 IN al_mad_rmpp_t *p_rmpp )

2597 /* Remove the tracking structure from the reassembly list. */

2598 cl_qlist_remove_item( &h_mad_svc->recv_list,

2599 (cl_list_item_t*)&p_rmpp->pool_item );

2601 /* Return the RMPP tracking structure. */

2602 put_mad_rmpp( p_rmpp );
\r
2608 * Process a received RMPP segment. Copy the data into our receive buffer,

2609 * update the expected segment, and send an ACK if needed.

2611 static cl_status_t

2612 __process_segment(

2613 IN ib_mad_svc_handle_t h_mad_svc,

2614 IN al_mad_rmpp_t *p_rmpp,

2615 IN OUT ib_mad_element_t **pp_mad_element,

2616 OUT ib_mad_element_t **pp_rmpp_resp_mad )

2618 ib_rmpp_mad_t *p_rmpp_hdr;

2620 ib_api_status_t status;

2621 cl_status_t cl_status;

2622 uint8_t *p_dst_seg, *p_src_seg;
/* NOTE(review): declarations of cur_seg and paylen are not visible in this
 * fragment (presumably among dropped lines) - verify against the full file. */

2625 CL_ASSERT( h_mad_svc && p_rmpp && pp_mad_element && *pp_mad_element );

2627 p_rmpp_hdr = (ib_rmpp_mad_t*)(*pp_mad_element)->p_mad_buf;

2628 cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );

2629 CL_ASSERT( cur_seg == p_rmpp->expected_seg );

2630 CL_ASSERT( cur_seg <= p_rmpp->seg_limit );

2632 /* See if the receive has been fully reassembled. */

2633 if( ib_rmpp_is_flag_set( p_rmpp_hdr, IB_RMPP_FLAG_LAST ) )

2634 cl_status = CL_SUCCESS;

2636 cl_status = CL_NOT_DONE;

2638 /* Save the payload length for later use. */

2639 paylen = cl_ntoh32(p_rmpp_hdr->paylen_newwin);

2641 /* The element of the first segment starts the reassembly. */

2642 if( *pp_mad_element != p_rmpp->p_mad_element )

2644 /* SA MADs require extra header size ... */

2645 if( (*pp_mad_element)->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )

2647 /* Copy the received data into our reassembly buffer. */

2648 p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +

2649 IB_SA_MAD_HDR_SIZE;

2650 p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +

2651 IB_SA_MAD_HDR_SIZE + IB_SA_DATA_SIZE * (cur_seg - 1);

2652 cl_memcpy( p_dst_seg, p_src_seg, IB_SA_DATA_SIZE );

2656 /* Copy the received data into our reassembly buffer. */

2657 p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +

2658 MAD_RMPP_HDR_SIZE;

2659 p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +

2660 MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1);

2661 cl_memcpy( p_dst_seg, p_src_seg, MAD_RMPP_DATA_SIZE );

2663 /* This MAD is no longer needed. */

2664 ib_put_mad( *pp_mad_element );

2667 /* Update the size of the mad if the last segment */

2668 if ( cl_status == CL_SUCCESS )

2670 if (p_rmpp->p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )

2673 * Note we will get one extra SA Hdr size in the paylen,

2674 * so we only take the rmpp header size of the first segment.

2676 p_rmpp->p_mad_element->size =

2677 MAD_RMPP_HDR_SIZE + IB_SA_DATA_SIZE *(cur_seg - 1) + paylen;

2681 p_rmpp->p_mad_element->size =

2682 MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1) + paylen;

2687 * We are ready to accept the next segment. We increment expected segment

2688 * even if we're done, so that ACKs correctly report the last segment.

2690 p_rmpp->expected_seg++;

2692 /* Mark the RMPP as active if we're not destroying the MAD service. */
/* NOTE(review): 'inactive' is set TRUE here only while the service is being
 * destroyed; the receive timer elsewhere also toggles this flag - confirm. */
2693 p_rmpp->inactive = (h_mad_svc->obj.state == CL_DESTROYING);

2695 /* See if the receive has been fully reassembled. */

2696 if( cl_status == CL_NOT_DONE && cur_seg == p_rmpp->seg_limit )

2698 /* Allocate more segments for the incoming receive. */

2699 status = al_resize_mad( p_rmpp->p_mad_element,

2700 p_rmpp->p_mad_element->size + AL_RMPP_WINDOW * MAD_RMPP_DATA_SIZE );

2702 /* If we couldn't allocate a new buffer, just drop the MAD. */

2703 if( status == IB_SUCCESS )

2705 /* Send an ACK indicating that more space is available. */

2706 p_rmpp->seg_limit += AL_RMPP_WINDOW;

2707 *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );

2710 else if( cl_status == CL_SUCCESS )

2712 /* Return the element referencing the reassembled MAD. */

2713 *pp_mad_element = p_rmpp->p_mad_element;

2714 *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );
\r
2723 * Get an ACK message to return to the sender of an RMPP MAD.

2725 static ib_mad_element_t*

2727 IN al_mad_rmpp_t *p_rmpp )

2729 ib_mad_element_t *p_mad_element;

2730 al_mad_element_t *p_al_element;

2731 ib_api_status_t status;

2732 ib_rmpp_mad_t *p_ack_rmpp_hdr, *p_data_rmpp_hdr;

2734 /* Get a MAD to carry the ACK. */

2735 p_al_element = PARENT_STRUCT( p_rmpp->p_mad_element,

2736 al_mad_element_t, element );

2737 status = ib_get_mad( p_al_element->pool_key, MAD_BLOCK_SIZE,

2739 if( status != IB_SUCCESS )

2741 /* Just return. The ACK will be treated as being dropped. */

2745 /* Format the ACK. */

2746 p_ack_rmpp_hdr = ib_get_mad_buf( p_mad_element );

2747 p_data_rmpp_hdr = ib_get_mad_buf( p_rmpp->p_mad_element );

2749 __init_reply_element( p_mad_element, p_rmpp->p_mad_element );

2751 /* Copy the MAD common header. */

2752 cl_memcpy( &p_ack_rmpp_hdr->common_hdr, &p_data_rmpp_hdr->common_hdr,

2753 sizeof( ib_mad_t ) );

2755 /* Reset the status (in case the BUSY bit is set). */

2756 p_ack_rmpp_hdr->common_hdr.status = 0;

2758 /* Flip the response bit in the method */

2759 p_ack_rmpp_hdr->common_hdr.method ^= IB_MAD_METHOD_RESP_MASK;

2761 p_ack_rmpp_hdr->rmpp_version = p_data_rmpp_hdr->rmpp_version;

2762 p_ack_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_ACK;

2763 ib_rmpp_set_resp_time( p_ack_rmpp_hdr, IB_RMPP_NO_RESP_TIME );

2764 p_ack_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_ACTIVE;

2765 p_ack_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;

/* ACK the last segment reassembled (expected_seg was pre-incremented). */
2767 p_ack_rmpp_hdr->seg_num = cl_hton32( p_rmpp->expected_seg - 1 );

/* If the window is exhausted and more segments are coming, advertise one
 * extra segment; otherwise advertise the current window limit. */
2769 if (p_rmpp->seg_limit == p_rmpp->expected_seg - 1 &&

2770 !ib_rmpp_is_flag_set( p_data_rmpp_hdr, IB_RMPP_FLAG_LAST ) )

2772 p_ack_rmpp_hdr->paylen_newwin = cl_hton32( 1 + p_rmpp->seg_limit);

2776 p_ack_rmpp_hdr->paylen_newwin = cl_hton32( p_rmpp->seg_limit );

2779 return p_mad_element;
\r
2785 * Copy necessary data between MAD elements to allow the destination

2786 * element to be sent to the sender of the source element.

2789 __init_reply_element(

2790 IN ib_mad_element_t *p_dst_element,

2791 IN ib_mad_element_t *p_src_element )

2793 p_dst_element->remote_qp = p_src_element->remote_qp;

2794 p_dst_element->remote_qkey = p_src_element->remote_qkey;

/* Carry the GRH over only when the source receive actually had one. */
2796 if( p_src_element->grh_valid )

2798 p_dst_element->grh_valid = p_src_element->grh_valid;

2799 cl_memcpy( p_dst_element->p_grh, p_src_element->p_grh,

2800 sizeof( ib_grh_t ) );

2803 p_dst_element->remote_lid = p_src_element->remote_lid;

2804 p_dst_element->remote_sl = p_src_element->remote_sl;

2805 p_dst_element->pkey_index = p_src_element->pkey_index;

2806 p_dst_element->path_bits = p_src_element->path_bits;
\r
2812 * Process an RMPP ACK message. Continue sending additional segments.

2815 __process_rmpp_ack(

2816 IN ib_mad_svc_handle_t h_mad_svc,

2817 IN ib_mad_element_t *p_mad_element )

2819 ib_mad_send_handle_t h_send;

2820 ib_rmpp_mad_t *p_rmpp_mad;

2821 boolean_t send_done = FALSE;

2822 ib_wc_status_t wc_status = IB_WCS_SUCCESS;

2824 AL_ENTER( AL_DBG_MAD_SVC );

2825 p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );

2828 * Search for the send. The send may have timed out, been canceled,

2829 * or received a response.

2831 cl_spinlock_acquire( &h_mad_svc->obj.lock );

2832 h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );

2835 cl_spinlock_release( &h_mad_svc->obj.lock );

2836 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,

2837 ("ACK cannot find a matching send\n") );

2841 /* Drop old ACKs. */

2842 if( cl_ntoh32( p_rmpp_mad->seg_num ) < h_send->ack_seg )

2844 cl_spinlock_release( &h_mad_svc->obj.lock );

2845 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,

2846 ("old ACK - being dropped\n") );

2850 /* Update the acknowledged segment and segment limit. */

2851 h_send->ack_seg = cl_ntoh32( p_rmpp_mad->seg_num );

2853 /* Keep seg_limit <= total_seg to simplify checks. */
/* NewWindowLast from the ACK is clamped to the number of segments we have. */
2854 if( cl_ntoh32( p_rmpp_mad->paylen_newwin ) > h_send->total_seg )

2855 h_send->seg_limit = h_send->total_seg;

2857 h_send->seg_limit = cl_ntoh32( p_rmpp_mad->paylen_newwin );

2859 /* Reset the current segment to start resending from the ACK. */

2860 h_send->cur_seg = h_send->ack_seg + 1;

2862 /* If the send is active, we will finish processing it once it completes. */

2863 if( h_send->retry_time == MAX_TIME )

2865 cl_spinlock_release( &h_mad_svc->obj.lock );

2866 AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,

2867 ("ACK processed, waiting for send to complete\n") );

2872 * Complete the send if all segments have been ack'ed and no

2873 * response is expected. (If the response for a send had already been

2874 * received, we would have reported the completion regardless of the

2875 * send having been ack'ed.)

2877 CL_ASSERT( !h_send->p_send_mad->resp_expected || !h_send->p_resp_mad );

2878 if( (h_send->ack_seg == h_send->total_seg) &&

2879 !h_send->p_send_mad->resp_expected )

2881 /* The send is done. All segments have been ack'ed. */

2884 else if( h_send->ack_seg < h_send->seg_limit )

2886 /* Send the next segment. */

2887 __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );

2892 /* Notify the user of a send completion or error. */

2893 cl_qlist_remove_item( &h_mad_svc->send_list,

2894 (cl_list_item_t*)&h_send->pool_item );

2895 cl_spinlock_release( &h_mad_svc->obj.lock );

2896 __notify_send_comp( h_mad_svc, h_send, wc_status );

2900 /* Continue waiting for a response or a larger send window. */

2901 cl_spinlock_release( &h_mad_svc->obj.lock );

2905 * Resume any sends that can now be sent without holding

2906 * the mad service lock.

2908 __mad_disp_resume_send( h_mad_svc->h_mad_reg );

2910 AL_EXIT( AL_DBG_MAD_SVC );
\r
2916 * Process an RMPP STOP or ABORT message.

2919 __process_rmpp_nack(

2920 IN ib_mad_svc_handle_t h_mad_svc,

2921 IN ib_mad_element_t *p_mad_element )

2923 ib_mad_send_handle_t h_send;

2924 ib_rmpp_mad_t *p_rmpp_mad;

2926 AL_ENTER( AL_DBG_MAD_SVC );

2927 p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );

2929 /* Search for the send. The send may have timed out or been canceled. */

2930 cl_spinlock_acquire( &h_mad_svc->obj.lock );

2931 h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );

2934 cl_spinlock_release( &h_mad_svc->obj.lock );

2938 /* If the send is active, we will finish processing it once it completes. */
/* Defer the cancellation: the send-completion path will observe 'canceled'. */
2939 if( h_send->retry_time == MAX_TIME )

2941 h_send->canceled = TRUE;

2942 cl_spinlock_release( &h_mad_svc->obj.lock );

2943 AL_EXIT( AL_DBG_MAD_SVC );

2947 /* Fail the send operation. */

2948 cl_qlist_remove_item( &h_mad_svc->send_list,

2949 (cl_list_item_t*)&h_send->pool_item );

2950 cl_spinlock_release( &h_mad_svc->obj.lock );

2951 __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );

2953 AL_EXIT( AL_DBG_MAD_SVC );
\r
2958 static __inline void

2960 IN ib_mad_send_handle_t h_send )

/* Absolute retry deadline in microseconds: (timeout_ms + one-shot delay)
 * converted to us, offset from the current timestamp. 1000Ui64 is the
 * MSVC unsigned 64-bit literal suffix. */
2962 h_send->retry_time =

2963 (uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000Ui64 +

2964 cl_get_time_stamp();

/* The extra delay applies to this arming only. */
2965 h_send->delay = 0;
\r
/* Send-timer callback: context is the MAD service handle (name line not
 * visible in this fragment - presumably the send timer callback). */
2972 IN void *context )

2974 AL_ENTER( AL_DBG_MAD_SVC );

2976 __check_send_queue( (ib_mad_svc_handle_t)context );

2978 AL_EXIT( AL_DBG_MAD_SVC );
\r
2984 * Check the send queue for any sends that have timed out or were canceled

2988 __check_send_queue(

2989 IN ib_mad_svc_handle_t h_mad_svc )

2991 ib_mad_send_handle_t h_send;

2992 cl_list_item_t *p_list_item, *p_next_item;

2993 uint64_t cur_time;

2994 cl_qlist_t timeout_list;

2996 AL_ENTER( AL_DBG_MAD_SVC );

2999 * The timeout out list is used to call the user back without

3000 * holding the lock on the MAD service.

3002 cl_qlist_init( &timeout_list );

3003 cur_time = cl_get_time_stamp();

3005 cl_spinlock_acquire( &h_mad_svc->obj.lock );

3007 /* Check all outstanding sends. */

3008 for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );

3009 p_list_item != cl_qlist_end( &h_mad_svc->send_list );

3010 p_list_item = p_next_item )

3012 p_next_item = cl_qlist_next( p_list_item );

3013 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );

3015 /* See if the request is active. */
/* MAX_TIME marks a send whose work request is still posted to hardware. */
3016 if( h_send->retry_time == MAX_TIME )

3018 /* The request is still active. */

3019 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("active TID:0x%I64x\n",

3020 __get_send_tid( h_send )) );

3024 /* The request is not active. */

3025 /* See if the request has been canceled. */

3026 if( h_send->canceled )

3028 /* The request has been canceled. */

3029 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("canceling TID:0x%I64x\n",

3030 __get_send_tid( h_send )) );

3032 h_send->p_send_mad->status = IB_WCS_CANCELED;

3033 cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );

3034 cl_qlist_insert_tail( &timeout_list, p_list_item );

3038 /* Skip requests that have not timed out. */

3039 if( cur_time < h_send->retry_time )

3041 /* The request has not timed out. */

3042 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("waiting on TID:0x%I64x\n",

3043 __get_send_tid( h_send )) );

3045 /* Set the retry timer to the minimum needed time, in ms. */

3046 cl_timer_trim( &h_mad_svc->send_timer,

3047 ((uint32_t)(h_send->retry_time - cur_time) / 1000) );

3051 /* See if we need to retry the send operation. */

3052 if( h_send->retry_cnt )

3054 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("retrying TID:0x%I64x\n",

3055 __get_send_tid( h_send )) );

3057 /* Retry the send. */

3058 h_send->retry_time = MAX_TIME;

3059 h_send->retry_cnt--;

3061 if( h_send->uses_rmpp )

3063 if( h_send->ack_seg < h_send->seg_limit )

3065 /* Resend all unacknowledged segments. */

3066 h_send->cur_seg = h_send->ack_seg + 1;

3067 __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );

3071 /* The send was delivered. Continue waiting. */

3072 __set_retry_time( h_send );

3073 cl_timer_trim( &h_mad_svc->send_timer,

3074 ((uint32_t)(h_send->retry_time - cur_time) / 1000) );

3079 /* The work request should already be formatted properly. */

3080 __mad_disp_queue_send( h_mad_svc->h_mad_reg,

3081 &h_send->mad_wr );

3085 /* The request has timed out or failed to be retried. */

3086 AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,

3087 ("timing out TID:0x%I64x\n", __get_send_tid( h_send )) );

3089 h_send->p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;

3090 cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );

3091 cl_qlist_insert_tail( &timeout_list, p_list_item );

3094 cl_spinlock_release( &h_mad_svc->obj.lock );

3097 * Resume any sends that can now be sent without holding

3098 * the mad service lock.

3100 __mad_disp_resume_send( h_mad_svc->h_mad_reg );

3102 /* Report all timed out sends to the user. */

3103 p_list_item = cl_qlist_remove_head( &timeout_list );

3104 while( p_list_item != cl_qlist_end( &timeout_list ) )

3106 h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );

3108 h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,

3109 h_send->p_send_mad );

3110 __cleanup_mad_send( h_mad_svc, h_send );

3111 p_list_item = cl_qlist_remove_head( &timeout_list );

3113 AL_EXIT( AL_DBG_MAD_SVC );
\r
/* Reassembly-timer callback (name line not visible in this fragment):
 * sweeps the receive list, dropping reassemblies that saw no progress for
 * a full AL_REASSEMBLY_TIMEOUT period (two-pass inactive-flag scheme). */
3120 IN void *context )

3122 ib_mad_svc_handle_t h_mad_svc;

3123 al_mad_rmpp_t *p_rmpp;

3124 cl_list_item_t *p_list_item, *p_next_item;

3125 boolean_t restart_timer;

3127 AL_ENTER( AL_DBG_MAD_SVC );

3129 h_mad_svc = (ib_mad_svc_handle_t)context;

3131 cl_spinlock_acquire( &h_mad_svc->obj.lock );

3133 /* Check all outstanding receives. */

3134 for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list );

3135 p_list_item != cl_qlist_end( &h_mad_svc->recv_list );

3136 p_list_item = p_next_item )

3138 p_next_item = cl_qlist_next( p_list_item );

3139 p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item );

3141 /* Fail all RMPP MADs that have remained inactive. */

3142 if( p_rmpp->inactive )

3144 ib_put_mad( p_rmpp->p_mad_element );

3145 __put_mad_rmpp( h_mad_svc, p_rmpp );

3149 /* Mark the RMPP as inactive. */
/* If no new segment clears this flag before the next sweep, it is dropped. */
3150 p_rmpp->inactive = TRUE;

3154 restart_timer = !cl_is_qlist_empty( &h_mad_svc->recv_list );

3155 cl_spinlock_release( &h_mad_svc->obj.lock );

3157 if( restart_timer )

3158 cl_timer_start( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );

3159 AL_EXIT( AL_DBG_MAD_SVC );
\r
/* Public entry (name line not visible in this fragment; calls al_local_mad):
 * validates the CA handle and buffers, then performs a local MAD exchange
 * on the given port with no source AV. */
3166 IN const ib_ca_handle_t h_ca,

3167 IN const uint8_t port_num,

3168 IN const void* const p_mad_in,

3169 IN void* p_mad_out )

3171 ib_api_status_t status;

3173 AL_ENTER( AL_DBG_MAD_SVC );

3175 if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )

3177 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );

3178 return IB_INVALID_CA_HANDLE;

3180 if( !p_mad_in || !p_mad_out )

3182 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );

3183 return IB_INVALID_PARAMETER;

3186 status = al_local_mad(h_ca, port_num, NULL,p_mad_in, p_mad_out);

3188 AL_EXIT( AL_DBG_MAD_SVC );
\r
/* al_local_mad (name line not visible in this fragment; matched to the
 * 5-argument call above): performs a local MAD exchange via verbs, using a
 * scratch output buffer when the caller's output pointer requires it. */
3194 IN const ib_ca_handle_t h_ca,

3195 IN const uint8_t port_num,

3196 IN const ib_av_attr_t* p_src_av_attr,

3197 IN const void* const p_mad_in,

3198 IN void* p_mad_out )

3200 ib_api_status_t status;

3201 void* p_mad_out_local = NULL;

3202 AL_ENTER( AL_DBG_MAD_SVC );

3204 if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )

3206 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );

3207 return IB_INVALID_CA_HANDLE;

3211 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );

3212 return IB_INVALID_PARAMETER;

/* NOTE(review): 256 here appears to be the MAD block size (see
 * MAD_BLOCK_SIZE usage elsewhere in this file) - confirm and prefer the
 * named constant when this fragment is restored. */
3216 p_mad_out_local = cl_zalloc(256);

3217 if(!p_mad_out_local)

3219 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INSUFFICIENT_MEMORY\n") );

3220 return IB_INSUFFICIENT_MEMORY;

3224 p_mad_out_local = p_mad_out;

3227 status = verbs_local_mad( h_ca, port_num, p_src_av_attr, p_mad_in, p_mad_out_local );

3231 cl_free(p_mad_out_local);

3234 AL_EXIT( AL_DBG_MAD_SVC );
\r
3241 IN const ib_net64_t tid64 )
\r
3245 al_tid.tid64 = tid64;
\r
3246 return( al_tid.tid32.user_tid );
\r
3251 IN const ib_net64_t tid64 )
\r
3255 al_tid.tid64 = tid64;
\r
3256 return( cl_ntoh32( al_tid.tid32.al_tid ) );
\r
3261 IN ib_net64_t* const p_tid64,
\r
3262 IN const uint32_t &nb