/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "al_ci_ca.h"
#include "al_debug.h"

#if defined(EVENT_TRACING)
#include "al_mad_pool.tmh"
#endif

#include "al_mad_pool.h"
#include "al_verbs.h"
#include "ib_common.h"

typedef struct _mad_reg
{
	al_obj_t			obj;			/* Child of al_pool_key_t */
	ib_mr_handle_t		h_mr;
	uint32_t			lkey;
	uint32_t			rkey;
	mad_array_t*		p_mad_array;

}	mad_reg_t;


typedef struct _mad_send
{
	al_mad_send_t		mad_send;
	ib_pool_handle_t	h_pool;

}	mad_send_t;


typedef struct _mad_rmpp
{
	al_mad_rmpp_t		mad_rmpp;
	ib_pool_handle_t	h_pool;

}	mad_rmpp_t;

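/*
 * Pool layout (summarized from the code below): a MAD pool (al_pool_t) owns
 * a stack of free mad_item_t entries (mad_stack), a list of pool keys
 * (key_list), and two cl_qpool sub-pools for send and RMPP tracking
 * structures.  MAD buffers are allocated in chunks described by mad_array_t;
 * a mad_reg_t child of each pool key records the memory registration of one
 * such chunk on one protection domain.
 */
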
/*
 * Function prototypes.
 */
static void
__destroying_pool(
	IN al_obj_t* p_obj );

static void
__free_pool(
	IN al_obj_t* p_obj );

static void
__cleanup_pool_key(
	IN al_obj_t* p_obj );

static void
__free_pool_key(
	IN al_obj_t* p_obj );

static ib_api_status_t
__reg_mad_array(
	IN al_pool_key_t* const p_pool_key,
	IN mad_array_t* const p_mad_array );

static void
__free_mad_reg(
	IN al_obj_t* p_obj );

static ib_api_status_t
__init_mad_element(
	IN const al_pool_key_t* p_pool_key,
	IN OUT mad_item_t* p_mad_item );

static cl_status_t
__locate_reg_cb(
	IN const cl_list_item_t* const p_list_item,
	IN void* context );

static ib_api_status_t
__grow_mad_pool(
	IN const ib_pool_handle_t h_pool,
	OUT mad_item_t** pp_mad_item OPTIONAL );

static void
__free_mad_array(
	IN al_obj_t* p_obj );

static cl_status_t
__mad_send_init(
	IN void* const p_object,
	IN void* context,
	OUT cl_pool_item_t** const pp_pool_item );

static cl_status_t
__mad_rmpp_init(
	IN void* const p_object,
	IN void* context,
	OUT cl_pool_item_t** const pp_pool_item );

/*
 * Create a MAD pool.
 */
ib_api_status_t
ib_create_mad_pool(
	IN const ib_al_handle_t h_al,
	IN const size_t min,
	IN const size_t max,
	IN const size_t grow_size,
	OUT ib_pool_handle_t* const ph_pool )
{
	ib_pool_handle_t	h_pool;
	ib_api_status_t		status;
	cl_status_t			cl_status;

	AL_ENTER(AL_DBG_MAD_POOL);

	if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_AL_HANDLE\n") );
		return IB_INVALID_AL_HANDLE;
	}
	if( !ph_pool )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Validate the min and max parameters. */
	if( (min > 0) && (max > 0) && (min > max) )
		return IB_INVALID_SETTING;

	h_pool = cl_zalloc( sizeof( al_pool_t ) );
	if( !h_pool )
		return IB_INSUFFICIENT_MEMORY;

	/* Initialize the pool lists. */
	cl_qlist_init( &h_pool->mad_stack );
	cl_qlist_init( &h_pool->key_list );
	cl_qpool_construct( &h_pool->mad_send_pool );
	cl_qpool_construct( &h_pool->mad_rmpp_pool );

	/* Initialize the pool object. */
	construct_al_obj( &h_pool->obj, AL_OBJ_TYPE_H_MAD_POOL );
	status = init_al_obj( &h_pool->obj, h_pool, TRUE,
		__destroying_pool, NULL, __free_pool );
	if( status != IB_SUCCESS )
	{
		__free_pool( &h_pool->obj );
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Attach the pool to the AL object. */
	attach_al_obj( &h_al->obj, &h_pool->obj );

	/* Save the pool parameters.  Set grow_size to min for initialization. */
	h_pool->max = max;
	h_pool->grow_size = min;

	/* Grow the pool to the minimum size. */
	status = __grow_mad_pool( h_pool, NULL );
	if( status != IB_SUCCESS )
	{
		h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("grow_mad_pool failed with status %s.\n",
			ib_get_err_str(status)) );
		return status;
	}

	/* Save the grow_size for subsequent allocations. */
	h_pool->grow_size = grow_size;

	/* Initialize the pool of MAD send tracking structures. */
	cl_status = cl_qpool_init( &h_pool->mad_send_pool,
		min, max, grow_size, sizeof( mad_send_t ),
		__mad_send_init, NULL, h_pool );
	if( cl_status != CL_SUCCESS )
	{
		status = ib_convert_cl_status( cl_status );
		h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("cl_qpool_init failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the pool of MAD RMPP tracking structures. */
	cl_status = cl_qpool_init( &h_pool->mad_rmpp_pool,
		min, max, grow_size, sizeof( mad_rmpp_t ),
		__mad_rmpp_init, NULL, h_pool );
	if( cl_status != CL_SUCCESS )
	{
		status = ib_convert_cl_status( cl_status );
		h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("cl_qpool_init failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Return the pool handle. */
	*ph_pool = h_pool;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &h_pool->obj );

	AL_EXIT(AL_DBG_MAD_POOL);
	return IB_SUCCESS;
}

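/*
 * Illustrative sketch of the public MAD pool lifetime exposed by this file.
 * The handles (h_al, h_pd), the pool sizes, and the omitted error handling
 * are placeholders, not taken from the original source:
 *
 *	ib_pool_handle_t	h_pool;
 *	ib_pool_key_t		pool_key;
 *	ib_mad_element_t*	p_mad;
 *	ib_api_status_t		status;
 *
 *	status = ib_create_mad_pool( h_al, 32, 0, 16, &h_pool );
 *	status = ib_reg_mad_pool( h_pool, h_pd, &pool_key );
 *
 *	status = ib_get_mad( pool_key, MAD_BLOCK_SIZE, &p_mad );
 *	// ... build and send the MAD ...
 *	status = ib_put_mad( p_mad );
 *
 *	status = ib_dereg_mad_pool( pool_key );
 *	status = ib_destroy_mad_pool( h_pool );
 */
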
/*
 * Pre-destroy the pool.
 */
static void
__destroying_pool(
	IN al_obj_t* p_obj )
{
	ib_pool_handle_t	h_pool;
	ib_al_handle_t		h_al;

	AL_ENTER(AL_DBG_MAD_POOL);

	CL_ASSERT( p_obj );
	h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );

	/* Get the AL instance of this MAD pool. */
	p_obj = h_pool->obj.p_parent_obj;
	h_al = PARENT_STRUCT( p_obj, ib_al_t, obj );

	/* Deregister this MAD pool from all protection domains. */
	al_dereg_pool( h_al, h_pool );

	AL_EXIT(AL_DBG_MAD_POOL);
}

/*
 * Free the pool.
 */
static void
__free_pool(
	IN al_obj_t* p_obj )
{
	ib_pool_handle_t	h_pool;

	CL_ASSERT( p_obj );
	h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );

	cl_qpool_destroy( &h_pool->mad_send_pool );
	cl_qpool_destroy( &h_pool->mad_rmpp_pool );
	destroy_al_obj( &h_pool->obj );
	cl_free( h_pool );
}

/*
 * Destroy a MAD pool.
 */
ib_api_status_t
ib_destroy_mad_pool(
	IN const ib_pool_handle_t h_pool )
{
	cl_list_item_t*		p_array_item;
	al_obj_t*			p_obj;
	boolean_t			busy;

	AL_ENTER(AL_DBG_MAD_POOL);

	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_HANDLE\n") );
		return IB_INVALID_HANDLE;
	}

	/* Verify that all send handles and MAD elements are in the pool. */
	cl_spinlock_acquire( &h_pool->obj.lock );
	busy = ( h_pool->obj.ref_cnt > 1 );
	for( p_array_item = cl_qlist_head( &h_pool->obj.obj_list );
		 p_array_item != cl_qlist_end( &h_pool->obj.obj_list ) && !busy;
		 p_array_item = cl_qlist_next( p_array_item ) )
	{
		p_obj = PARENT_STRUCT( p_array_item, al_obj_t, pool_item );
		busy = ( p_obj->ref_cnt > 1 );
	}
	cl_spinlock_release( &h_pool->obj.lock );

	/* Return an error if the pool is busy. */
	if( busy )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("h_pool (0x%p) is busy!\n", h_pool) );
		return IB_RESOURCE_BUSY;
	}

	ref_al_obj( &h_pool->obj );
	h_pool->obj.pfn_destroy( &h_pool->obj, NULL );

	AL_EXIT(AL_DBG_MAD_POOL);
	return IB_SUCCESS;
}

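/*
 * Note: the busy check above relies on AL object reference counts.  Each
 * outstanding MAD element holds a reference on its mad_array_t, and each
 * outstanding send/RMPP tracking structure holds a reference on the pool
 * object itself, so a reference count above one means the pool is in use.
 */
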
/*
 * Register a MAD pool with a protection domain.
 */
ib_api_status_t
ib_reg_mad_pool(
	IN const ib_pool_handle_t h_pool,
	IN const ib_pd_handle_t h_pd,
	OUT ib_pool_key_t* const pp_pool_key )
{
	al_pool_key_t*		p_pool_key;
	cl_list_item_t*		p_array_item;
	al_obj_t*			p_obj;
	ib_al_handle_t		h_al;
	mad_array_t*		p_mad_array;
	ib_api_status_t		status;
	al_key_type_t		key_type;

	AL_ENTER(AL_DBG_MAD_POOL);

	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_HANDLE\n") );
		return IB_INVALID_HANDLE;
	}
	/* Alias keys require an alias PD. */
	if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PD_HANDLE\n") );
		return IB_INVALID_PD_HANDLE;
	}
	if( !pp_pool_key )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Set the type of key to create. */
	if( h_pd->type != IB_PDT_ALIAS )
		key_type = AL_KEY_NORMAL;
	else
		key_type = AL_KEY_ALIAS;

	/* Allocate a pool key structure. */
	p_pool_key = cl_zalloc( sizeof( al_pool_key_t ) );
	if( !p_pool_key )
		return IB_INSUFFICIENT_MEMORY;

	/* Initialize the pool key. */
	construct_al_obj( &p_pool_key->obj, AL_OBJ_TYPE_H_POOL_KEY );
	p_pool_key->type = key_type;
	p_pool_key->h_pool = h_pool;
	p_pool_key->h_pd = h_pd;

	/* Initialize the pool key object. */
	status = init_al_obj( &p_pool_key->obj, p_pool_key, TRUE,
		NULL, __cleanup_pool_key, __free_pool_key );
	if( status != IB_SUCCESS )
	{
		__free_pool_key( &p_pool_key->obj );
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}

	status = attach_al_obj( &h_pd->obj, &p_pool_key->obj );
	if( status != IB_SUCCESS )
	{
		p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL );
		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* From the PD, get the AL handle of the pool_key. */
	p_obj = h_pd->obj.p_parent_obj->p_parent_obj;
	h_al = PARENT_STRUCT( p_obj, ib_al_t, obj );

	/* Add this pool_key to the AL instance. */
	al_insert_key( h_al, p_pool_key );

	ref_al_obj( &h_pd->obj );
	ref_al_obj( &h_pool->obj );

	/*
	 * Take a reference on the global pool_key for this CA, if it exists.
	 * Note that the pool_key does not exist for the global MAD pool in
	 * user-mode, as that MAD pool never registers memory on a PD.
	 */
	if( key_type == AL_KEY_ALIAS && h_pd->obj.p_ci_ca->pool_key )
	{
		ref_al_obj( &h_pd->obj.p_ci_ca->pool_key->obj );
		p_pool_key->pool_key = h_pd->obj.p_ci_ca->pool_key;
	}

	/* Register the pool on the protection domain. */
	if( key_type == AL_KEY_NORMAL )
	{
		/* Chain the pool key onto the pool. */
		cl_spinlock_acquire( &h_pool->obj.lock );
		cl_qlist_insert_tail( &h_pool->key_list, &p_pool_key->pool_item );

		/* Synchronize with growing the MAD pool. */
		for( p_array_item = cl_qlist_head( &h_pool->obj.obj_list );
			 p_array_item != cl_qlist_end( &h_pool->obj.obj_list );
			 p_array_item = cl_qlist_next( p_array_item ) )
		{
			p_obj = PARENT_STRUCT( p_array_item, al_obj_t, pool_item );
			p_mad_array = PARENT_STRUCT( p_obj, mad_array_t, obj );

			status = __reg_mad_array( p_pool_key, p_mad_array );
			if( status != IB_SUCCESS )
			{
				cl_spinlock_release( &h_pool->obj.lock );
				p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL );
				AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
					("reg_mad_array failed with status %s.\n",
					ib_get_err_str(status)) );
				return status;
			}
		}
		cl_spinlock_release( &h_pool->obj.lock );
	}

	/*
	 * If the PD is of alias type, then we need to create/register an
	 * equivalent pool key in the kernel.
	 */
	if( h_pd->type == IB_PDT_ALIAS )
	{
		status = create_reg_mad_pool( h_pool, h_pd, p_pool_key );
		if( status != IB_SUCCESS )
		{
			p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL );
			return status;
		}
	}

	/* Return the pool key. */
	*pp_pool_key = (ib_pool_key_t)p_pool_key;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &p_pool_key->obj );

	AL_EXIT(AL_DBG_MAD_POOL);
	return IB_SUCCESS;
}

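/*
 * Note on key types: a pool key created against a regular PD (AL_KEY_NORMAL)
 * is chained onto the pool's key_list and registers every existing MAD array
 * with the PD.  A key created against an alias PD (AL_KEY_ALIAS) instead
 * references the CA's global pool_key, if present, and has an equivalent key
 * created and registered in the kernel through create_reg_mad_pool().
 */
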
/*
 * Release all references on objects that were needed by the pool key.
 */
static void
__cleanup_pool_key(
	IN al_obj_t* p_obj )
{
	cl_list_item_t		*p_list_item, *p_next_item;
	ib_mad_element_t	*p_mad_element_list, *p_last_mad_element;
	al_mad_element_t	*p_mad;
	ib_api_status_t		status;
	al_pool_key_t*		p_pool_key;

	CL_ASSERT( p_obj );
	p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );

	/* Search for any outstanding MADs associated with the given pool key. */
	if( p_pool_key->mad_cnt )
	{
		p_mad_element_list = p_last_mad_element = NULL;

		cl_spinlock_acquire( &p_pool_key->h_al->mad_lock );
		for( p_list_item = cl_qlist_head( &p_pool_key->h_al->mad_list );
			 p_list_item != cl_qlist_end( &p_pool_key->h_al->mad_list );
			 p_list_item = p_next_item )
		{
			p_next_item = cl_qlist_next( p_list_item );
			p_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, al_item );

			if( p_mad->pool_key != p_pool_key ) continue;

			/* Build the list of MADs to be returned to the pool. */
			if( p_last_mad_element )
				p_last_mad_element->p_next = &p_mad->element;
			else
				p_mad_element_list = &p_mad->element;

			p_last_mad_element = &p_mad->element;
			p_last_mad_element->p_next = NULL;
		}
		cl_spinlock_release( &p_pool_key->h_al->mad_lock );

		/* Return any outstanding MADs to the pool. */
		if( p_mad_element_list )
		{
			status = ib_put_mad( p_mad_element_list );
			if( status != IB_SUCCESS )
			{
				AL_PRINT(TRACE_LEVEL_ERROR , AL_DBG_ERROR ,
					("ib_put_mad failed with status %s, continuing.\n",
					ib_get_err_str(status)) );
			}
		}
	}

	/*
	 * Remove the pool key from the pool to prevent further registrations
	 * against this pool.
	 *
	 * Warning: There is a small window where a pool key can be destroyed
	 * while its associated pool is growing.  In this case, the pool key
	 * will receive a new registration after it has been destroyed.  This
	 * is a result of having to register memory with the HCA without holding
	 * a lock, making correct synchronization impossible.  One solution to
	 * this problem is to register all of physical memory, which avoids
	 * having to register more memory as a MAD pool grows.
	 */
	if( p_pool_key->type == AL_KEY_NORMAL )
	{
		cl_spinlock_acquire( &p_pool_key->h_pool->obj.lock );
		cl_qlist_remove_item( &p_pool_key->h_pool->key_list,
			&p_pool_key->pool_item );
		cl_spinlock_release( &p_pool_key->h_pool->obj.lock );
	}

	/* Remove this pool_key from the AL instance. */
	al_remove_key( p_pool_key );

	/* User-mode only: clean up kernel resources. */
	dereg_destroy_mad_pool( p_pool_key );

	deref_al_obj( &p_pool_key->h_pool->obj );
	p_pool_key->h_pool = NULL;
	deref_al_obj( &p_pool_key->h_pd->obj );
	p_pool_key->h_pd = NULL;
	if( p_pool_key->pool_key )
		deref_al_obj( &p_pool_key->pool_key->obj );
}

/*
 * Free a pool key.
 */
static void
__free_pool_key(
	IN al_obj_t* p_obj )
{
	al_pool_key_t*		p_pool_key;

	CL_ASSERT( p_obj );
	p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );

	destroy_al_obj( &p_pool_key->obj );
	cl_free( p_pool_key );
}

/*
 * Register a MAD array with a protection domain.
 */
static ib_api_status_t
__reg_mad_array(
	IN al_pool_key_t* const p_pool_key,
	IN mad_array_t* const p_mad_array )
{
	mad_reg_t*			p_reg;
	ib_mr_create_t		mr_create;
	ib_api_status_t		status;

	CL_ASSERT( p_pool_key );
	CL_ASSERT( p_mad_array );

	/* Determine if there is memory to register. */
	if( p_mad_array->sizeof_array == 0 )
		return IB_SUCCESS;

	p_reg = cl_zalloc( sizeof( mad_reg_t ) );
	if( p_reg == NULL )
		return IB_INSUFFICIENT_MEMORY;

	/*
	 * Initialize the registration object.  We use synchronous
	 * destruction to deregister memory immediately.  Otherwise, the
	 * memory will be automatically deregistered when destroying the
	 * PD, which can lead to trying to deregister the memory twice.
	 */
	construct_al_obj( &p_reg->obj, AL_OBJ_TYPE_MAD_POOL );
	status = init_al_obj( &p_reg->obj, p_reg, FALSE,
		NULL, NULL, __free_mad_reg );
	if( status != IB_SUCCESS )
	{
		__free_mad_reg( &p_reg->obj );
		return status;
	}

	/* Attach the registration to the pool key. */
	attach_al_obj( &p_pool_key->obj, &p_reg->obj );

	if( p_pool_key->h_pd->type != IB_PDT_ALIAS )
	{
		/* Register the MAD array on the protection domain. */
		cl_memclr( &mr_create, sizeof( ib_mr_create_t ) );
		mr_create.vaddr = p_mad_array->p_data;
		mr_create.length = p_mad_array->sizeof_array;
		mr_create.access_ctrl = IB_AC_LOCAL_WRITE;

		status = ib_reg_mem( p_pool_key->h_pd, &mr_create, &p_reg->lkey,
			&p_reg->rkey, &p_reg->h_mr );
	}

	if( status != IB_SUCCESS )
	{
		p_reg->obj.pfn_destroy( &p_reg->obj, NULL );
		return status;
	}

	/* Save p_mad_array to match the registration with the array. */
	p_reg->p_mad_array = p_mad_array;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &p_reg->obj );

	return IB_SUCCESS;
}

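/*
 * Registration and growth are symmetric: ib_reg_mad_pool() calls
 * __reg_mad_array() for every MAD array already in the pool, and
 * __grow_mad_pool() calls it for every pool key already chained onto the
 * pool, so each (array, key) pair ends up with exactly one mad_reg_t.
 */
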
/*
 * Free a MAD registration.
 */
static void
__free_mad_reg(
	IN al_obj_t* p_obj )
{
	mad_reg_t*			p_reg;
	ib_api_status_t		status;

	CL_ASSERT( p_obj );
	p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj );

	/* Deregister the MAD array if it was registered. */
	if( p_reg->h_mr )
	{
		status = ib_dereg_mr( p_reg->h_mr );
		CL_ASSERT( status == IB_SUCCESS );
	}

	destroy_al_obj( &p_reg->obj );
	cl_free( p_reg );
}

/*
 * Deregister a MAD pool from a protection domain.  Only normal pool_keys
 * can be destroyed using this routine.
 */
ib_api_status_t
ib_dereg_mad_pool(
	IN const ib_pool_key_t pool_key )
{
	ib_api_status_t		status;

	AL_ENTER(AL_DBG_MAD_POOL);

	if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	ref_al_obj( &pool_key->obj );
	status = dereg_mad_pool( pool_key, AL_KEY_NORMAL );

	if( status != IB_SUCCESS )
		deref_al_obj( &pool_key->obj );

	AL_EXIT(AL_DBG_MAD_POOL);
	return status;
}

/*
 * Deregister a MAD pool from a protection domain.
 */
ib_api_status_t
dereg_mad_pool(
	IN const ib_pool_key_t pool_key,
	IN const al_key_type_t expected_type )
{
	AL_ENTER(AL_DBG_MAD_POOL);

	if( pool_key->type != expected_type )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	///* Check mad_cnt to see if MADs are still outstanding. */
	//if( pool_key->mad_cnt )
	//{
	//	AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_MAD_POOL, ("IB_RESOURCE_BUSY\n") );
	//	return IB_RESOURCE_BUSY;
	//}

	pool_key->obj.pfn_destroy( &pool_key->obj, NULL );

	AL_EXIT(AL_DBG_MAD_POOL);
	return IB_SUCCESS;
}

/*
 * Obtain a MAD element from the pool.
 */
static ib_api_status_t
__get_mad_element(
	IN const ib_pool_key_t pool_key,
	OUT al_mad_element_t** pp_mad_element )
{
	al_pool_key_t*		p_pool_key;
	ib_pool_handle_t	h_pool;
	cl_list_item_t*		p_item;
	mad_item_t*			p_mad_item;
	ib_api_status_t		status;

	AL_ENTER(AL_DBG_MAD_POOL);

	CL_ASSERT( pool_key );
	CL_ASSERT( pp_mad_element );

	p_pool_key = (al_pool_key_t*)pool_key;
	h_pool = p_pool_key->h_pool;

	/* Obtain a MAD item from the stack. */
	cl_spinlock_acquire( &h_pool->obj.lock );
	p_item = cl_qlist_remove_head( &h_pool->mad_stack );
	p_mad_item = PARENT_STRUCT( p_item, mad_item_t, al_mad_element.list_item );
	if( p_item == cl_qlist_end( &h_pool->mad_stack ) )
	{
		/* The stack was empty.  Grow the pool and obtain a new item. */
		cl_spinlock_release( &h_pool->obj.lock );
		status = __grow_mad_pool( h_pool, &p_mad_item );
		if( status != IB_SUCCESS )
		{
			AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
				("grow_mad_pool failed with status %s.\n",
				ib_get_err_str(status)) );
			return status;
		}
	}
	else
	{
		cl_spinlock_release( &h_pool->obj.lock );
	}

	/* Get the local data segment information for this pool key. */
	status = __init_mad_element( p_pool_key, p_mad_item );
	if( status != IB_SUCCESS )
	{
		cl_spinlock_acquire( &h_pool->obj.lock );
		cl_qlist_insert_head( &h_pool->mad_stack,
			&p_mad_item->al_mad_element.list_item );
		cl_spinlock_release( &h_pool->obj.lock );

		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("init_mad_element failed with status %s.\n",
			ib_get_err_str(status)) );
		return status;
	}

	/* Hold a reference on the array while a MAD element is removed. */
	ref_al_obj( &p_mad_item->p_mad_array->obj );

	p_mad_item->al_mad_element.pool_key = (ib_pool_key_t)pool_key;
	/* Return the MAD element. */
	*pp_mad_element = &p_mad_item->al_mad_element;

	AL_EXIT(AL_DBG_MAD_POOL);
	return IB_SUCCESS;
}

static void
__setup_mad_element(
	IN OUT al_mad_element_t* const p_al_mad_element,
	IN const uint32_t lkey )
{
	/* Clear the MAD element. */
	cl_memclr( &p_al_mad_element->element, sizeof( ib_mad_element_t ) );

	/* Initialize the receive data segment information. */
	p_al_mad_element->grh_ds.lkey = lkey;

	/* Initialize the send data segment information. */
	p_al_mad_element->mad_ds.lkey = lkey;

	/* Initialize the GRH pointer. */
	p_al_mad_element->element.p_grh =
		(ib_grh_t*)(uintn_t)p_al_mad_element->grh_ds.vaddr;
}

/*
 * Initialize the MAD element's local data segments for this pool key.
 */
static ib_api_status_t
__init_mad_element(
	IN const al_pool_key_t* p_pool_key,
	IN OUT mad_item_t* p_mad_item )
{
	cl_list_item_t		*p_item;
	cl_qlist_t			*p_list;
	al_obj_t			*p_obj;
	mad_reg_t			*p_reg;
	ib_pool_handle_t	h_pool;

	CL_ASSERT( p_pool_key );
	CL_ASSERT( p_mad_item != NULL );

	/* Find the MAD array registration entry. */
	if( p_pool_key->type == AL_KEY_NORMAL )
	{
		p_list = (cl_qlist_t*)&p_pool_key->obj.obj_list;
	}
	else
	{
#if defined( CL_KERNEL )
		/* Search the registrations on the actual pool key, not the alias. */
		p_list = (cl_qlist_t*)&p_pool_key->pool_key->obj.obj_list;
#else
		/*
		 * Note that MAD elements used by user-mode clients on special QPs
		 * are not registered on a user-mode PD.  The user-mode MAD elements
		 * must be copied into a kernel-mode MAD element before being sent.
		 */
		__setup_mad_element( &p_mad_item->al_mad_element, 0 );
		return IB_SUCCESS;
#endif
	}

	/* Prevent MAD array registrations. */
	h_pool = p_pool_key->h_pool;
	cl_spinlock_acquire( &h_pool->obj.lock );

	/* Search for the registration entry. */
	p_item = cl_qlist_find_from_head( p_list, __locate_reg_cb,
		p_mad_item->p_mad_array );
	if( p_item == cl_qlist_end( p_list ) )
	{
		cl_spinlock_release( &h_pool->obj.lock );
		return IB_NOT_FOUND;
	}

	/* Allow MAD array registrations. */
	cl_spinlock_release( &h_pool->obj.lock );

	/* Get a pointer to the registration. */
	p_obj = PARENT_STRUCT( p_item, al_obj_t, pool_item );
	p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj );
	__setup_mad_element( &p_mad_item->al_mad_element, p_reg->lkey );
	return IB_SUCCESS;
}

/*
 * Determine if a registration is for a given array.
 */
static cl_status_t
__locate_reg_cb(
	IN const cl_list_item_t* const p_list_item,
	IN void* context )
{
	al_obj_t*			p_obj;
	mad_reg_t*			p_reg;
	mad_array_t*		p_mad_array;

	CL_ASSERT( p_list_item );
	CL_ASSERT( context );

	p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
	p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj );
	p_mad_array = context;

	return ( p_reg->p_mad_array == p_mad_array ) ? CL_SUCCESS : CL_NOT_FOUND;
}

/*
 * Return a MAD element to the pool.
 */
static void
__put_mad_element(
	IN al_mad_element_t* p_mad_element )
{
	mad_item_t*			p_mad_item;
	ib_pool_handle_t	h_pool;

	CL_ASSERT( p_mad_element );
	p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );

	/* Get a handle to the pool. */
	h_pool = p_mad_item->p_mad_array->h_pool;

	/* Clear the MAD buffer. */
	cl_memclr(
		(uint8_t*)(uintn_t)p_mad_element->grh_ds.vaddr, MAD_BLOCK_GRH_SIZE );
	p_mad_element->element.p_next = NULL;

	/* Return the MAD element to the pool. */
	cl_spinlock_acquire( &h_pool->obj.lock );
	cl_qlist_insert_head( &h_pool->mad_stack,
		&p_mad_item->al_mad_element.list_item );
	cl_spinlock_release( &h_pool->obj.lock );

	/* Dereference the array when a MAD element is returned. */
	deref_al_obj( &p_mad_item->p_mad_array->obj );
}

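/*
 * Grow the MAD pool by h_pool->grow_size elements.  Each growth allocates a
 * single chunk laid out as follows (layout inferred from the offset
 * arithmetic below):
 *
 *	[ grow_size * (GRH + MAD) wire buffers ]   <- p_data, sizeof_array bytes
 *	[ grow_size * mad_item_t               ]   <- p_mad_items
 *	[ mad_array_t                          ]   <- p_mad_array
 *
 * Only the wire buffers are registered with the HCA; the tracking
 * structures at the tail are host-only bookkeeping.
 */
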
static ib_api_status_t
__grow_mad_pool(
	IN const ib_pool_handle_t h_pool,
	OUT mad_item_t** pp_mad_item OPTIONAL )
{
	size_t				i;
	size_t				alloc_size;
	uint8_t*			p_data;
	mad_array_t*		p_mad_array;
	mad_item_t*			p_mad_item;
	mad_item_t*			p_mad_items;
	cl_list_item_t*		p_key_item;
	al_pool_key_t*		p_pool_key;
	al_obj_t*			p_obj;
	ib_api_status_t		status;

	AL_ENTER(AL_DBG_MAD_POOL);

	CL_ASSERT( h_pool );

	/* Determine if the pool is allowed to grow. */
	if( h_pool->grow_size == 0 )
		return IB_INSUFFICIENT_RESOURCES;

	/* Lock the pool. */
	cl_spinlock_acquire( &h_pool->obj.lock );

	/* Determine if the pool has a maximum. */
	if( h_pool->max != 0 )
	{
		/* Determine if the pool maximum has been reached. */
		if( h_pool->actual >= h_pool->max )
		{
			cl_spinlock_release( &h_pool->obj.lock );

			AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
				("h_pool's (0x%p) maximum has been reached.\n", h_pool) );
			return IB_INSUFFICIENT_RESOURCES;
		}

		/* Determine if growing the pool would exceed the maximum. */
		if( (h_pool->actual + h_pool->grow_size) > h_pool->max )
		{
			cl_spinlock_release( &h_pool->obj.lock );

			AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
				("growing h_pool (0x%p) would exceed its maximum.\n", h_pool) );
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	/* Calculate the allocation size. */
	alloc_size = sizeof( mad_item_t );
	alloc_size += MAD_BLOCK_GRH_SIZE;
	alloc_size *= h_pool->grow_size;
	alloc_size += sizeof( mad_array_t );

	/* Allocate a MAD data array and item structures. */
	p_data = cl_zalloc( alloc_size );
	if( p_data == NULL )
	{
		cl_spinlock_release( &h_pool->obj.lock );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Offset to the MAD array structure. */
	alloc_size -= sizeof( mad_array_t );
	p_mad_array = (mad_array_t*)(p_data + alloc_size);

	/* Offset to the array of MAD item structures. */
	alloc_size -= sizeof( mad_item_t ) * h_pool->grow_size;
	p_mad_items = (mad_item_t*)(p_data + alloc_size);

	/* Initialize the MAD array structure. */
	p_mad_array->h_pool = h_pool;
	p_mad_array->p_data = p_data;
	p_mad_array->sizeof_array = alloc_size;

	/* Initialize the MAD array object. */
	construct_al_obj( &p_mad_array->obj, AL_OBJ_TYPE_MAD_POOL );
	status = init_al_obj( &p_mad_array->obj, p_mad_array, TRUE,
		NULL, NULL, __free_mad_array );
	if( status != IB_SUCCESS )
	{
		cl_spinlock_release( &h_pool->obj.lock );
		__free_mad_array( &p_mad_array->obj );

		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Register the MAD array on the existing pool protection domains. */
	for( p_key_item = cl_qlist_head( &h_pool->key_list );
		 p_key_item != cl_qlist_end( &h_pool->key_list );
		 p_key_item = cl_qlist_next( p_key_item ) )
	{
		p_pool_key = PARENT_STRUCT( p_key_item, al_pool_key_t, pool_item );
		ref_al_obj( &p_pool_key->obj );
		status = __reg_mad_array( p_pool_key, p_mad_array );
		deref_al_obj( &p_pool_key->obj );
		if( status != IB_SUCCESS )
			break;
	}

	if( status != IB_SUCCESS )
	{
		cl_spinlock_release( &h_pool->obj.lock );
		p_mad_array->obj.pfn_destroy( &p_mad_array->obj, NULL );
		return status;
	}

	/* The pool has been successfully grown.  Update the actual size. */
	h_pool->actual += h_pool->grow_size;

	/* Initialize the MAD stack item structures. */
	p_mad_item = p_mad_items;
	for( i = 0; i < h_pool->grow_size; i++ )
	{
		p_mad_item->p_mad_array = p_mad_array;

		p_mad_item->al_mad_element.grh_ds.vaddr = (uintn_t)p_data;
		p_mad_item->al_mad_element.grh_ds.length = MAD_BLOCK_GRH_SIZE;

		p_mad_item->al_mad_element.mad_ds.vaddr =
			(uintn_t)(p_data + sizeof( ib_grh_t ));
		p_mad_item->al_mad_element.mad_ds.length = MAD_BLOCK_SIZE;
		p_data += MAD_BLOCK_GRH_SIZE;
		p_mad_item++;
	}

	/* Return a MAD item to the caller if one was requested. */
	if( pp_mad_item != NULL )
	{
		*pp_mad_item = p_mad_items;
		p_mad_items++;
		i--;
	}

	/* Append the remaining MAD items to the existing stack. */
	cl_qlist_insert_array_tail( &h_pool->mad_stack,
		&p_mad_items->al_mad_element.list_item, i, sizeof( mad_item_t ) );

	/* Unlock the pool. */
	cl_spinlock_release( &h_pool->obj.lock );

	/* Attach the array object to the pool. */
	attach_al_obj( &h_pool->obj, &p_mad_array->obj );

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &p_mad_array->obj );

	AL_EXIT(AL_DBG_MAD_POOL);
	return IB_SUCCESS;
}

/*
 * Free the MAD array structure.
 */
static void
__free_mad_array(
	IN al_obj_t* p_obj )
{
	mad_array_t*		p_mad_array;
	ib_pool_handle_t	h_pool;
	cl_list_item_t*		p_key_item;
	al_pool_key_t*		p_pool_key;
	cl_list_item_t*		p_reg_item;
	cl_list_item_t*		p_next_item;
	mad_reg_t*			p_reg;

	AL_ENTER(AL_DBG_MAD_POOL);

	CL_ASSERT( p_obj );
	p_mad_array = PARENT_STRUCT( p_obj, mad_array_t, obj );

	/* Destroy any registrations for this MAD array. */
	h_pool = p_mad_array->h_pool;
	cl_spinlock_acquire( &h_pool->obj.lock );

	/* Walk the pool key list. */
	p_key_item = cl_qlist_head( &h_pool->key_list );
	while( p_key_item != cl_qlist_end( &h_pool->key_list ) )
	{
		p_pool_key = PARENT_STRUCT( p_key_item, al_pool_key_t, pool_item );

		/* Walk the pool key registrations. */
		for( p_reg_item = cl_qlist_head( &p_pool_key->obj.obj_list );
			 p_reg_item != cl_qlist_end( &p_pool_key->obj.obj_list );
			 p_reg_item = p_next_item )
		{
			p_next_item = cl_qlist_next( p_reg_item );

			p_obj = PARENT_STRUCT( p_reg_item, al_obj_t, pool_item );
			p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj );

			/* Destroy registrations for this MAD array. */
			if( p_reg->p_mad_array == p_mad_array )
			{
				ref_al_obj( &p_reg->obj );
				p_reg->obj.pfn_destroy( &p_reg->obj, NULL );
			}
		}
		p_key_item = cl_qlist_next( p_key_item );
	}
	cl_spinlock_release( &h_pool->obj.lock );

	destroy_al_obj( &p_mad_array->obj );
	cl_free( p_mad_array->p_data );

	AL_EXIT(AL_DBG_MAD_POOL);
}

/*
 * Initialize a MAD send tracking structure to reference the pool from
 * which it was allocated.
 */
static cl_status_t
__mad_send_init(
	IN void* const p_object,
	IN void* context,
	OUT cl_pool_item_t** const pp_pool_item )
{
	mad_send_t			*p_mad_send;

	p_mad_send = (mad_send_t*)p_object;
	p_mad_send->h_pool = context;
	*pp_pool_item = &p_mad_send->mad_send.pool_item;
	return CL_SUCCESS;
}

/*
 * Get a MAD send tracking structure.
 */
ib_mad_send_handle_t
get_mad_send(
	IN const al_mad_element_t *p_mad_element )
{
	mad_item_t*				p_mad_item;
	ib_pool_handle_t		h_pool;
	cl_pool_item_t			*p_pool_item;
	ib_mad_send_handle_t	h_send;

	CL_ASSERT( p_mad_element );

	/* Get a handle to the pool. */
	p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );
	h_pool = p_mad_item->p_mad_array->h_pool;

	cl_spinlock_acquire( &h_pool->obj.lock );
	p_pool_item = cl_qpool_get( &h_pool->mad_send_pool );
	cl_spinlock_release( &h_pool->obj.lock );

	if( !p_pool_item )
		return NULL;

	ref_al_obj( &h_pool->obj );
	h_send = PARENT_STRUCT( p_pool_item, al_mad_send_t, pool_item );
	h_send->canceled = FALSE;
	h_send->p_send_mad = NULL;
	h_send->p_resp_mad = NULL;
	h_send->h_av = NULL;
	h_send->retry_cnt = 0;
	h_send->retry_time = 0;

	return h_send;
}

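/*
 * The send and RMPP tracking structures below come from cl_qpool sub-pools
 * owned by the MAD pool.  Getting one takes a reference on the pool object
 * and returning one releases it, which is what keeps ib_destroy_mad_pool()
 * from tearing the pool down while sends are in flight.
 */
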
/*
 * Return a MAD send tracking structure to the pool.
 */
void
put_mad_send(
	IN ib_mad_send_handle_t h_mad_send )
{
	mad_send_t			*p_mad_send;

	p_mad_send = PARENT_STRUCT( h_mad_send, mad_send_t, mad_send );

	cl_spinlock_acquire( &p_mad_send->h_pool->obj.lock );
	cl_qpool_put( &p_mad_send->h_pool->mad_send_pool, &h_mad_send->pool_item );
	cl_spinlock_release( &p_mad_send->h_pool->obj.lock );
	deref_al_obj( &p_mad_send->h_pool->obj );
}

/*
 * Initialize a MAD RMPP tracking structure to reference the pool from
 * which it was allocated.
 */
static cl_status_t
__mad_rmpp_init(
	IN void* const p_object,
	IN void* context,
	OUT cl_pool_item_t** const pp_pool_item )
{
	mad_rmpp_t			*p_mad_rmpp;

	p_mad_rmpp = (mad_rmpp_t*)p_object;
	p_mad_rmpp->h_pool = context;
	*pp_pool_item = &p_mad_rmpp->mad_rmpp.pool_item;
	return CL_SUCCESS;
}

/*
 * Get a MAD RMPP tracking structure.
 */
al_mad_rmpp_t*
get_mad_rmpp(
	IN const al_mad_element_t *p_mad_element )
{
	mad_item_t*			p_mad_item;
	ib_pool_handle_t	h_pool;
	cl_pool_item_t		*p_pool_item;

	CL_ASSERT( p_mad_element );

	/* Get a handle to the pool. */
	p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );
	h_pool = p_mad_item->p_mad_array->h_pool;

	cl_spinlock_acquire( &h_pool->obj.lock );
	p_pool_item = cl_qpool_get( &h_pool->mad_rmpp_pool );
	cl_spinlock_release( &h_pool->obj.lock );

	if( !p_pool_item )
		return NULL;

	ref_al_obj( &h_pool->obj );
	return PARENT_STRUCT( p_pool_item, al_mad_rmpp_t, pool_item );
}

/*
 * Return a MAD RMPP tracking structure to the pool.
 */
void
put_mad_rmpp(
	IN al_mad_rmpp_t* h_mad_rmpp )
{
	mad_rmpp_t			*p_mad_rmpp;

	p_mad_rmpp = PARENT_STRUCT( h_mad_rmpp, mad_rmpp_t, mad_rmpp );

	cl_spinlock_acquire( &p_mad_rmpp->h_pool->obj.lock );
	cl_qpool_put( &p_mad_rmpp->h_pool->mad_rmpp_pool, &h_mad_rmpp->pool_item );
	cl_spinlock_release( &p_mad_rmpp->h_pool->obj.lock );
	deref_al_obj( &p_mad_rmpp->h_pool->obj );
}

/*
 * Get a MAD element from the pool.
 */
ib_api_status_t
ib_get_mad(
	IN const ib_pool_key_t pool_key,
	IN const size_t buf_size,
	OUT ib_mad_element_t **pp_mad_element )
{
	al_pool_key_t*		p_pool_key;
	al_mad_element_t*	p_mad;
	ib_api_status_t		status;

	AL_ENTER(AL_DBG_MAD_POOL);

	if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}
	if( !buf_size || !pp_mad_element )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	p_pool_key = (al_pool_key_t*)pool_key;

	status = __get_mad_element( pool_key, &p_mad );
	if( status != IB_SUCCESS )
	{
		AL_EXIT(AL_DBG_MAD_POOL);
		return status;
	}

	/* Set the user accessible buffer. */
	if( buf_size <= MAD_BLOCK_SIZE )
	{
		/* Use the send buffer for 256 byte MADs. */
		p_mad->element.p_mad_buf = (ib_mad_t*)(uintn_t)p_mad->mad_ds.vaddr;
	}
	else if( buf_size >= 0xFFFFFFFF )
	{
		__put_mad_element( p_mad );
		return IB_INVALID_SETTING;
	}
	else
	{
		/* Allocate a new buffer for the MAD. */
		p_mad->p_al_mad_buf = cl_zalloc( buf_size );
		if( !p_mad->p_al_mad_buf )
		{
			__put_mad_element( p_mad );
			AL_EXIT(AL_DBG_MAD_POOL);
			return IB_INSUFFICIENT_MEMORY;
		}
		p_mad->element.p_mad_buf = p_mad->p_al_mad_buf;
	}
	p_mad->element.size = (uint32_t)buf_size;

	/* Track the MAD element with the requesting AL instance. */
	al_insert_mad( p_pool_key->h_al, p_mad );

	ref_al_obj( &p_pool_key->obj );
	cl_atomic_inc( &p_pool_key->mad_cnt );

	/* Return the MAD element to the client. */
	*pp_mad_element = &p_mad->element;

	AL_EXIT(AL_DBG_MAD_POOL);
	return IB_SUCCESS;
}

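/*
 * Buffer sizing in ib_get_mad(): requests of MAD_BLOCK_SIZE or less use the
 * pre-registered send buffer carved out by __grow_mad_pool(), so no extra
 * allocation occurs.  Larger requests (for example RMPP transfers) get a
 * separate heap buffer in p_al_mad_buf, which ib_put_mad() frees; that
 * buffer is not registered with the HCA by this code.
 */
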
/*
 * Return a list of MAD elements to the pool.
 */
ib_api_status_t
ib_put_mad(
	IN const ib_mad_element_t* p_mad_element_list )
{
	al_mad_element_t*	p_mad;

	if( !p_mad_element_list )
	{
		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	while( p_mad_element_list )
	{
		p_mad = PARENT_STRUCT( p_mad_element_list, al_mad_element_t, element );
		p_mad_element_list = p_mad_element_list->p_next;

		/* Deallocate any buffers allocated for the user. */
		if( p_mad->p_al_mad_buf )
		{
			cl_free( p_mad->p_al_mad_buf );
			p_mad->p_al_mad_buf = NULL;
		}

		/* See if the MAD has already been returned to the MAD pool. */
		if( p_mad->h_al )
		{
			/* Remove the MAD element from the owning AL instance. */
			al_remove_mad( p_mad );

			/* Return the MAD element to the pool. */
			cl_atomic_dec( &p_mad->pool_key->mad_cnt );
			deref_al_obj( &p_mad->pool_key->obj );
			__put_mad_element( p_mad );
		}
		else
		{
			AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
				("MAD has already been returned to the MAD pool.\n") );
		}
	}

	return IB_SUCCESS;
}

/*
 * Resize the data buffer associated with a MAD element.
 */
ib_api_status_t
al_resize_mad(
	OUT ib_mad_element_t *p_mad_element,
	IN const size_t buf_size )
{
	al_mad_element_t	*p_al_element;
	ib_mad_t			*p_new_buf;

	CL_ASSERT( p_mad_element );

	/* We only support growing the buffer for now. */
	CL_ASSERT( buf_size > p_mad_element->size );

	/* Cap the size. */
	if( buf_size >= 0xFFFFFFFF )
		return IB_INVALID_SETTING;

	p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );

	/* Allocate a new buffer. */
	p_new_buf = cl_malloc( buf_size );
	if( !p_new_buf )
		return IB_INSUFFICIENT_MEMORY;

	/* Copy the existing buffer's data into the new buffer. */
	cl_memcpy( p_new_buf, p_mad_element->p_mad_buf, p_mad_element->size );
	cl_memclr( (uint8_t*)p_new_buf + p_mad_element->size,
		buf_size - p_mad_element->size );

	/* Update the MAD element to use the new buffer. */
	p_mad_element->p_mad_buf = p_new_buf;
	p_mad_element->size = (uint32_t)buf_size;

	/* Free any old buffer. */
	if( p_al_element->p_al_mad_buf )
		cl_free( p_al_element->p_al_mad_buf );
	p_al_element->p_al_mad_buf = p_new_buf;

	return IB_SUCCESS;
}