2 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
\r
4 * This software is available to you under the OpenIB.org BSD license
\r
7 * Redistribution and use in source and binary forms, with or
\r
8 * without modification, are permitted provided that the following
\r
9 * conditions are met:
\r
11 * - Redistributions of source code must retain the above
\r
12 * copyright notice, this list of conditions and the following
\r
15 * - Redistributions in binary form must reproduce the above
\r
16 * copyright notice, this list of conditions and the following
\r
17 * disclaimer in the documentation and/or other materials
\r
18 * provided with the distribution.
\r
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
\r
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
\r
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
\r
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
\r
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
\r
33 #include "al_ci_ca.h"
\r
34 #include "al_debug.h"
\r
36 #if defined(EVENT_TRACING)
\r
40 #include "al_mad_pool.tmh"
\r
43 #include "al_mad_pool.h"
\r
45 #include "al_verbs.h"
\r
46 #include "ib_common.h"
\r
49 typedef struct _mad_send
\r
51 al_mad_send_t mad_send;
\r
52 ib_pool_handle_t h_pool;
\r
59 typedef struct _mad_rmpp
\r
61 al_mad_rmpp_t mad_rmpp;
\r
62 ib_pool_handle_t h_pool;
\r
69 * Function prototypes.
\r
73 IN al_obj_t* p_obj );
\r
77 IN al_obj_t* p_obj );
\r
80 __destroying_pool_key(
\r
81 IN al_obj_t* p_obj );
\r
85 IN al_obj_t* p_obj );
\r
89 IN al_obj_t* p_obj );
\r
/*
 * NOTE(review): two pool-item-constructor-style prototypes
 * (void* object in, cl_pool_item_t** out) whose return-type and name lines
 * (plus the middle "IN void* context," parameter lines) were lost in the
 * text extraction.  They match the cl_qpool constructor callback shape used
 * by the user-mode MAD pool; recover the identifiers from the original
 * al_mad_pool.c before rebuilding — do not guess them.
 */
93 IN void* const p_object,

95 OUT cl_pool_item_t** const pp_pool_item );

99 IN void* const p_object,

101 OUT cl_pool_item_t** const pp_pool_item );
\r
106 * Create a MAD pool.
\r
109 ib_create_mad_pool(
\r
110 IN const ib_al_handle_t h_al,
\r
111 IN const size_t min,
\r
112 IN const size_t max,
\r
113 IN const size_t grow_size,
\r
114 OUT ib_pool_handle_t* const ph_pool )
\r
116 ib_pool_handle_t h_pool;
\r
117 ib_api_status_t status;
\r
119 AL_ENTER( AL_DBG_MAD_POOL );
\r
121 if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )
\r
123 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );
\r
124 return IB_INVALID_AL_HANDLE;
\r
128 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
129 return IB_INVALID_PARAMETER;
\r
132 /* Validate the min and max parameters. */
\r
133 if( (min > 0) && (max > 0) && (min > max) )
\r
134 return IB_INVALID_SETTING;
\r
136 h_pool = cl_zalloc( sizeof( al_pool_t ) );
\r
138 return IB_INSUFFICIENT_MEMORY;
\r
140 /* Initialize the pool lists. */
\r
141 cl_qlist_init( &h_pool->key_list );
\r
142 ExInitializeNPagedLookasideList( &h_pool->mad_stack, NULL, NULL,
\r
143 0, sizeof(mad_item_t), 'dmla', 0 );
\r
144 ExInitializeNPagedLookasideList( &h_pool->mad_send_pool, NULL, NULL,
\r
145 0, sizeof(mad_send_t), 'dmla', 0 );
\r
146 ExInitializeNPagedLookasideList( &h_pool->mad_rmpp_pool, NULL, NULL,
\r
147 0, sizeof(mad_rmpp_t), 'dmla', 0 );
\r
149 /* Initialize the pool object. */
\r
150 construct_al_obj( &h_pool->obj, AL_OBJ_TYPE_H_MAD_POOL );
\r
151 status = init_al_obj( &h_pool->obj, h_pool, TRUE,
\r
152 __destroying_pool, NULL, __free_pool );
\r
153 if( status != IB_SUCCESS )
\r
155 __free_pool( &h_pool->obj );
\r
156 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
157 ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
\r
161 /* Attach the pool to the AL object. */
\r
162 status = attach_al_obj( &h_al->obj, &h_pool->obj );
\r
163 if( status != IB_SUCCESS )
\r
165 h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
\r
166 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
167 ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
\r
171 /* Save the pool parameters. Set grow_size to min for initialization. */
\r
173 h_pool->grow_size = min;
\r
175 /* Save the grow_size for subsequent allocations. */
\r
176 h_pool->grow_size = grow_size;
\r
178 /* Return the pool handle. */
\r
181 /* Release the reference taken in init_al_obj. */
\r
182 deref_al_obj( &h_pool->obj );
\r
184 AL_EXIT( AL_DBG_MAD_POOL );
\r
191 * Pre-destory the pool.
\r
195 IN al_obj_t* p_obj )
\r
197 ib_pool_handle_t h_pool;
\r
198 ib_al_handle_t h_al;
\r
200 AL_ENTER( AL_DBG_MAD_POOL );
\r
202 CL_ASSERT( p_obj );
\r
203 h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );
\r
205 /* Get the AL instance of this MAD pool. */
\r
206 p_obj = h_pool->obj.p_parent_obj;
\r
207 h_al = PARENT_STRUCT( p_obj, ib_al_t, obj );
\r
209 /* Deregister this MAD pool from all protection domains. */
\r
210 al_dereg_pool( h_al, h_pool );
\r
212 AL_EXIT( AL_DBG_MAD_POOL );
\r
222 IN al_obj_t* p_obj )
\r
224 ib_pool_handle_t h_pool;
\r
226 CL_ASSERT( p_obj );
\r
227 h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );
\r
229 ExDeleteNPagedLookasideList( &h_pool->mad_send_pool );
\r
230 ExDeleteNPagedLookasideList( &h_pool->mad_rmpp_pool );
\r
231 ExDeleteNPagedLookasideList( &h_pool->mad_stack );
\r
232 destroy_al_obj( &h_pool->obj );
\r
239 * Destory a MAD pool.
\r
242 ib_destroy_mad_pool(
\r
243 IN const ib_pool_handle_t h_pool )
\r
245 cl_list_item_t* p_array_item;
\r
249 AL_ENTER( AL_DBG_MAD_POOL );
\r
251 if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )
\r
253 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
\r
254 return IB_INVALID_HANDLE;
\r
257 /* Verify that all send handles and MAD elements are in pool. */
\r
258 cl_spinlock_acquire( &h_pool->obj.lock );
\r
259 busy = ( h_pool->obj.ref_cnt > 1 );
\r
260 for( p_array_item = cl_qlist_head( &h_pool->obj.obj_list );
\r
261 p_array_item != cl_qlist_end( &h_pool->obj.obj_list ) && !busy;
\r
262 p_array_item = cl_qlist_next( p_array_item ) )
\r
264 p_obj = PARENT_STRUCT( p_array_item, al_obj_t, pool_item );
\r
265 busy = ( p_obj->ref_cnt > 1 );
\r
267 cl_spinlock_release( &h_pool->obj.lock );
\r
269 /* Return an error if the pool is busy. */
\r
272 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
273 ("h_pool (0x%016I64x) is busy!.\n", (LONG64)h_pool) );
\r
274 return IB_RESOURCE_BUSY;
\r
277 ref_al_obj( &h_pool->obj );
\r
278 h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
\r
280 AL_EXIT( AL_DBG_MAD_POOL );
\r
287 * Register a MAD pool with a protection domain.
\r
291 IN const ib_pool_handle_t h_pool,
\r
292 IN const ib_pd_handle_t h_pd,
\r
293 OUT ib_pool_key_t* const pp_pool_key )
\r
295 ib_api_status_t status;
\r
297 AL_ENTER( AL_DBG_MAD_POOL );
\r
299 if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )
\r
301 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
\r
302 return IB_INVALID_HANDLE;
\r
304 /* Alias keys require an alias PD. */
\r
305 if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
\r
307 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
\r
308 return IB_INVALID_PD_HANDLE;
\r
311 status = reg_mad_pool( h_pool, h_pd, pp_pool_key );
\r
312 /* Release the reference taken in init_al_obj. */
\r
313 if( status == IB_SUCCESS )
\r
314 deref_al_obj( &(*pp_pool_key)->obj );
\r
316 AL_EXIT( AL_DBG_MAD_POOL );
\r
323 IN const ib_pool_handle_t h_pool,
\r
324 IN const ib_pd_handle_t h_pd,
\r
325 OUT ib_pool_key_t* const pp_pool_key )
\r
327 al_pool_key_t* p_pool_key;
\r
328 ib_al_handle_t h_al;
\r
329 ib_api_status_t status;
\r
330 al_key_type_t key_type;
\r
332 AL_ENTER( AL_DBG_MAD_POOL );
\r
336 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
337 return IB_INVALID_PARAMETER;
\r
340 /* Set the type of key to create. */
\r
341 if( h_pd->type != IB_PDT_ALIAS )
\r
342 key_type = AL_KEY_NORMAL;
\r
344 key_type = AL_KEY_ALIAS;
\r
346 /* Allocate a pool key structure. */
\r
347 p_pool_key = cl_zalloc( sizeof( al_pool_key_t ) );
\r
349 return IB_INSUFFICIENT_MEMORY;
\r
351 /* Initialize the pool key. */
\r
352 construct_al_obj( &p_pool_key->obj, AL_OBJ_TYPE_H_POOL_KEY );
\r
353 p_pool_key->type = key_type;
\r
354 p_pool_key->h_pool = h_pool;
\r
356 /* Initialize the pool key object. */
\r
357 status = init_al_obj( &p_pool_key->obj, p_pool_key, TRUE,
\r
358 __destroying_pool_key, __cleanup_pool_key, __free_pool_key );
\r
359 if( status != IB_SUCCESS )
\r
361 __free_pool_key( &p_pool_key->obj );
\r
363 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
364 ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
\r
368 /* Register the pool on the protection domain. */
\r
369 if( key_type == AL_KEY_NORMAL )
\r
371 ib_phys_create_t phys_create;
\r
372 ib_phys_range_t phys_range;
\r
376 /* Register all of physical memory. */
\r
377 phys_create.length = 0xFFFFFFFFFFFFFFFF;
\r
378 phys_create.num_ranges = 1;
\r
379 phys_create.range_array = &phys_range;
\r
380 phys_create.buf_offset = 0;
\r
381 phys_create.hca_page_size = PAGE_SIZE;
\r
382 phys_create.access_ctrl = IB_AC_LOCAL_WRITE;
\r
383 phys_range.base_addr = 0;
\r
384 phys_range.size = 0xFFFFFFFFFFFFFFFF;
\r
386 status = ib_reg_phys( h_pd, &phys_create, &vaddr,
\r
387 &p_pool_key->lkey, &rkey, &p_pool_key->h_mr );
\r
388 if( status != IB_SUCCESS )
\r
390 p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL );
\r
391 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
392 ("ib_reg_phys returned %s\n", ib_get_err_str( status )) );
\r
396 /* Chain the pool key onto the pool. */
\r
397 cl_spinlock_acquire( &h_pool->obj.lock );
\r
398 cl_qlist_insert_tail( &h_pool->key_list, &p_pool_key->pool_item );
\r
399 cl_spinlock_release( &h_pool->obj.lock );
\r
403 * Attach to the pool after we register the memory so that PD destruction
\r
404 * will cleanup the pool key before its memory region.
\r
406 status = attach_al_obj( &h_pd->obj, &p_pool_key->obj );
\r
407 if( status != IB_SUCCESS )
\r
409 p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL );
\r
411 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
412 ("attach_al_obj returned %s\n", ib_get_err_str(status)) );
\r
416 /* From the PD, get the AL handle of the pool_key. */
\r
417 h_al = h_pd->obj.h_al;
\r
419 /* Add this pool_key to the AL instance. */
\r
420 al_insert_key( h_al, p_pool_key );
\r
422 ref_al_obj( &h_pool->obj );
\r
425 * Take a reference on the global pool_key for this CA, if it exists.
\r
426 * Note that the pool_key does not exist for the global MAD pool in
\r
427 * user-mode, as that MAD pool never registers memory on a PD.
\r
429 /* TODO: Is the pool_key check here needed since this is a kernel-only implementation? */
\r
430 if( key_type == AL_KEY_ALIAS && h_pd->obj.p_ci_ca->pool_key )
\r
432 ref_al_obj( &h_pd->obj.p_ci_ca->pool_key->obj );
\r
433 p_pool_key->pool_key = h_pd->obj.p_ci_ca->pool_key;
\r
436 /* Return the pool key. */
\r
437 *pp_pool_key = (ib_pool_key_t)p_pool_key;
\r
439 AL_EXIT( AL_DBG_MAD_POOL );
\r
445 * The destroying callback releases the memory registration. This is needed
\r
446 * to maintain the destroy semantics, where the pool key's destruction is
\r
447 * async, but the MAD registrations are sync. This means that all memory
\r
448 * registered on a pool key is deregistered before the pool key leaves the
\r
452 __destroying_pool_key(
\r
453 IN al_obj_t* p_obj )
\r
455 al_pool_key_t* p_pool_key;
\r
457 CL_ASSERT( p_obj );
\r
458 p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );
\r
460 /* Remove this pool_key from the AL instance. */
\r
461 al_remove_key( p_pool_key );
\r
463 p_pool_key->lkey = 0;
\r
468 * Release all references on objects that were needed by the pool key.
\r
471 __cleanup_pool_key(
\r
472 IN al_obj_t* p_obj )
\r
474 cl_list_item_t *p_list_item, *p_next_item;
\r
475 ib_mad_element_t *p_mad_element_list, *p_last_mad_element;
\r
476 al_mad_element_t *p_mad;
\r
477 ib_api_status_t status;
\r
478 al_pool_key_t* p_pool_key;
\r
480 CL_ASSERT( p_obj );
\r
481 p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );
\r
483 CL_ASSERT( !p_pool_key->mad_cnt );
\r
485 if( p_pool_key->h_mr )
\r
486 ib_dereg_mr( p_pool_key->h_mr );
\r
488 /* Search for any outstanding MADs associated with the given pool key. */
\r
489 if( p_pool_key->mad_cnt )
\r
491 p_mad_element_list = p_last_mad_element = NULL;
\r
493 cl_spinlock_acquire( &p_pool_key->obj.h_al->obj.lock );
\r
494 for( p_list_item = cl_qlist_head( &p_pool_key->obj.h_al->mad_list );
\r
495 p_list_item != cl_qlist_end( &p_pool_key->obj.h_al->mad_list );
\r
496 p_list_item = p_next_item )
\r
498 p_next_item = cl_qlist_next( p_list_item );
\r
499 p_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, al_item );
\r
501 if( p_mad->pool_key != p_pool_key ) continue;
\r
503 /* Build the list of MADs to be returned to pool. */
\r
504 if( p_last_mad_element )
\r
505 p_last_mad_element->p_next = &p_mad->element;
\r
507 p_mad_element_list = &p_mad->element;
\r
509 p_last_mad_element = &p_mad->element;
\r
510 p_last_mad_element->p_next = NULL;
\r
512 cl_spinlock_release( &p_pool_key->obj.h_al->obj.lock );
\r
514 /* Return any outstanding MADs to the pool. */
\r
515 if( p_mad_element_list )
\r
517 status = ib_put_mad( p_mad_element_list );
\r
518 if( status != IB_SUCCESS )
\r
520 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
521 ("ib_put_mad failed with status %s, continuing.\n",
\r
522 ib_get_err_str(status)) );
\r
528 * Remove the pool key from the pool to prevent further registrations
\r
529 * against this pool.
\r
531 if( p_pool_key->type == AL_KEY_NORMAL )
\r
533 cl_spinlock_acquire( &p_pool_key->h_pool->obj.lock );
\r
534 cl_qlist_remove_item( &p_pool_key->h_pool->key_list,
\r
535 &p_pool_key->pool_item );
\r
536 cl_spinlock_release( &p_pool_key->h_pool->obj.lock );
\r
539 deref_al_obj( &p_pool_key->h_pool->obj );
\r
540 p_pool_key->h_pool = NULL;
\r
541 if( p_pool_key->pool_key )
\r
542 deref_al_obj( &p_pool_key->pool_key->obj );
\r
552 IN al_obj_t* p_obj )
\r
554 al_pool_key_t* p_pool_key;
\r
556 CL_ASSERT( p_obj );
\r
557 p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );
\r
559 destroy_al_obj( &p_pool_key->obj );
\r
560 cl_free( p_pool_key );
\r
565 * Deregister a MAD pool from a protection domain. Only normal pool_keys
\r
566 * can be destroyed using this routine.
\r
570 IN const ib_pool_key_t pool_key )
\r
572 ib_api_status_t status;
\r
574 AL_ENTER( AL_DBG_MAD_POOL );
\r
576 if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) )
\r
578 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
579 return IB_INVALID_PARAMETER;
\r
582 ref_al_obj( &pool_key->obj );
\r
583 status = dereg_mad_pool( pool_key, AL_KEY_NORMAL );
\r
585 if( status != IB_SUCCESS )
\r
586 deref_al_obj( &pool_key->obj );
\r
588 AL_EXIT( AL_DBG_MAD_POOL );
\r
595 * Deregister a MAD pool from a protection domain.
\r
599 IN const ib_pool_key_t pool_key,
\r
600 IN const al_key_type_t expected_type )
\r
602 AL_ENTER( AL_DBG_MAD_POOL );
\r
604 if( pool_key->type != expected_type )
\r
606 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
607 return IB_INVALID_PARAMETER;
\r
610 /* Check mad_cnt to see if MADs are still outstanding. */
\r
611 //if( pool_key->mad_cnt )
\r
613 // AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_MAD_POOL, ("IB_RESOURCE_BUSY\n") );
\r
614 // return IB_RESOURCE_BUSY;
\r
617 pool_key->obj.pfn_destroy( &pool_key->obj, NULL );
\r
619 AL_EXIT( AL_DBG_MAD_POOL );
\r
626 * Obtain a MAD element from the pool.
\r
628 static ib_api_status_t
\r
630 IN const ib_pool_key_t pool_key,
\r
631 OUT al_mad_element_t** pp_mad_element )
\r
633 mad_item_t* p_mad_item;
\r
636 AL_ENTER( AL_DBG_MAD_POOL );
\r
638 CL_ASSERT( pool_key );
\r
639 CL_ASSERT( pp_mad_element );
\r
641 /* Obtain a MAD item from the stack. */
\r
642 p_mad_item = (mad_item_t*)ExAllocateFromNPagedLookasideList(
\r
643 &pool_key->h_pool->mad_stack );
\r
645 return IB_INSUFFICIENT_RESOURCES;
\r
647 p_mad_item->pool_key = pool_key;
\r
649 if( pool_key->type == AL_KEY_NORMAL )
\r
650 lkey = pool_key->lkey;
\r
652 lkey = pool_key->pool_key->lkey;
\r
654 CL_ASSERT( ADDRESS_AND_SIZE_TO_SPAN_PAGES(
\r
655 p_mad_item->al_mad_element.mad_buf, MAD_BLOCK_GRH_SIZE ) == 1 );
\r
657 /* Clear the element. */
\r
658 cl_memclr( &p_mad_item->al_mad_element, sizeof(al_mad_element_t) );
\r
660 /* Initialize the receive data segment information. */
\r
661 p_mad_item->al_mad_element.grh_ds.vaddr =
\r
662 cl_get_physaddr( p_mad_item->al_mad_element.mad_buf );
\r
663 p_mad_item->al_mad_element.grh_ds.length = MAD_BLOCK_GRH_SIZE;
\r
664 p_mad_item->al_mad_element.grh_ds.lkey = lkey;
\r
666 /* Initialize the send data segment information. */
\r
667 p_mad_item->al_mad_element.mad_ds.vaddr =
\r
668 p_mad_item->al_mad_element.grh_ds.vaddr + sizeof(ib_grh_t);
\r
669 p_mad_item->al_mad_element.mad_ds.length = MAD_BLOCK_SIZE;
\r
670 p_mad_item->al_mad_element.mad_ds.lkey = lkey;
\r
672 /* Initialize grh */
\r
673 p_mad_item->al_mad_element.element.p_grh =
\r
674 (ib_grh_t*)p_mad_item->al_mad_element.mad_buf;
\r
676 /* Hold a reference on the pool key while a MAD element is removed. */
\r
677 ref_al_obj( &pool_key->obj );
\r
678 cl_atomic_inc( &pool_key->mad_cnt );
\r
680 p_mad_item->al_mad_element.pool_key = (ib_pool_key_t)pool_key;
\r
681 /* Return the MAD element. */
\r
682 *pp_mad_element = &p_mad_item->al_mad_element;
\r
684 AL_EXIT( AL_DBG_MAD_POOL );
\r
691 * Return a MAD element to the pool.
\r
695 IN al_mad_element_t* p_mad_element )
\r
697 mad_item_t* p_mad_item;
\r
698 ib_pool_key_t pool_key;
\r
700 CL_ASSERT( p_mad_element );
\r
701 p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );
\r
702 pool_key = p_mad_item->pool_key;
\r
703 CL_ASSERT( pool_key );
\r
704 CL_ASSERT( pool_key->h_pool );
\r
706 /* Clear the MAD buffer. */
\r
707 cl_memclr( p_mad_element->mad_buf, MAD_BLOCK_GRH_SIZE );
\r
708 p_mad_element->element.p_next = NULL;
\r
710 /* Return the MAD element to the pool. */
\r
711 ExFreeToNPagedLookasideList( &pool_key->h_pool->mad_stack, p_mad_item );
\r
713 cl_atomic_dec( &pool_key->mad_cnt );
\r
714 deref_al_obj( &pool_key->obj );
\r
719 ib_mad_send_handle_t
\r
721 IN const al_mad_element_t *p_mad_element )
\r
723 mad_item_t* p_mad_item;
\r
724 mad_send_t *p_mad_send;
\r
726 CL_ASSERT( p_mad_element );
\r
728 /* Get a handle to the pool. */
\r
729 p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );
\r
730 CL_ASSERT( p_mad_item->pool_key );
\r
731 CL_ASSERT( p_mad_item->pool_key->h_pool );
\r
733 p_mad_send = ExAllocateFromNPagedLookasideList(
\r
734 &p_mad_item->pool_key->h_pool->mad_send_pool );
\r
738 p_mad_send->mad_send.canceled = FALSE;
\r
739 p_mad_send->mad_send.p_send_mad = NULL;
\r
740 p_mad_send->mad_send.p_resp_mad = NULL;
\r
741 p_mad_send->mad_send.h_av = NULL;
\r
742 p_mad_send->mad_send.retry_cnt = 0;
\r
743 p_mad_send->mad_send.retry_time = 0;
\r
744 p_mad_send->mad_send.delay = 0;
\r
745 p_mad_send->h_pool = p_mad_item->pool_key->h_pool;
\r
747 ref_al_obj( &p_mad_item->pool_key->h_pool->obj );
\r
748 return &p_mad_send->mad_send;
\r
755 IN ib_mad_send_handle_t h_mad_send )
\r
757 mad_send_t *p_mad_send;
\r
758 ib_pool_handle_t h_pool;
\r
760 p_mad_send = PARENT_STRUCT( h_mad_send, mad_send_t, mad_send );
\r
761 h_pool = p_mad_send->h_pool;
\r
763 ExFreeToNPagedLookasideList( &h_pool->mad_send_pool, p_mad_send );
\r
764 deref_al_obj( &h_pool->obj );
\r
771 IN const al_mad_element_t *p_mad_element )
\r
773 mad_item_t *p_mad_item;
\r
774 mad_rmpp_t *p_mad_rmpp;
\r
776 CL_ASSERT( p_mad_element );
\r
778 /* Get a handle to the pool. */
\r
779 p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );
\r
780 CL_ASSERT( p_mad_item->pool_key );
\r
781 CL_ASSERT( p_mad_item->pool_key->h_pool );
\r
783 p_mad_rmpp = ExAllocateFromNPagedLookasideList(
\r
784 &p_mad_item->pool_key->h_pool->mad_rmpp_pool );
\r
788 p_mad_rmpp->h_pool = p_mad_item->pool_key->h_pool;
\r
790 ref_al_obj( &p_mad_item->pool_key->h_pool->obj );
\r
791 return &p_mad_rmpp->mad_rmpp;
\r
798 IN al_mad_rmpp_t* h_mad_rmpp )
\r
800 mad_rmpp_t *p_mad_rmpp;
\r
801 ib_pool_handle_t h_pool;
\r
803 p_mad_rmpp = PARENT_STRUCT( h_mad_rmpp, mad_rmpp_t, mad_rmpp );
\r
805 h_pool = p_mad_rmpp->h_pool;
\r
807 ExFreeToNPagedLookasideList( &h_pool->mad_rmpp_pool, p_mad_rmpp );
\r
808 deref_al_obj( &h_pool->obj );
\r
815 IN const ib_pool_key_t pool_key,
\r
816 IN const size_t buf_size,
\r
817 OUT ib_mad_element_t **pp_mad_element )
\r
819 al_mad_element_t* p_mad;
\r
820 ib_api_status_t status;
\r
822 AL_ENTER( AL_DBG_MAD_POOL );
\r
824 if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) )
\r
826 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
827 return IB_INVALID_PARAMETER;
\r
829 if( !buf_size || !pp_mad_element )
\r
831 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
832 return IB_INVALID_PARAMETER;
\r
835 status = __get_mad_element( pool_key, &p_mad );
\r
836 if( status != IB_SUCCESS )
\r
838 AL_EXIT( AL_DBG_MAD_POOL );
\r
842 /* Set the user accessible buffer. */
\r
843 if( buf_size <= MAD_BLOCK_SIZE )
\r
845 /* Use the send buffer for 256 byte MADs. */
\r
846 p_mad->element.p_mad_buf = (ib_mad_t*)(p_mad->mad_buf + sizeof(ib_grh_t));
\r
848 else if( buf_size >= 0xFFFFFFFF )
\r
850 __put_mad_element( p_mad );
\r
851 return IB_INVALID_SETTING;
\r
855 /* Allocate a new buffer for the MAD. */
\r
856 p_mad->p_al_mad_buf = cl_zalloc( buf_size );
\r
857 if( !p_mad->p_al_mad_buf )
\r
859 __put_mad_element( p_mad );
\r
860 AL_EXIT( AL_DBG_MAD_POOL );
\r
861 return IB_INSUFFICIENT_MEMORY;
\r
863 p_mad->element.p_mad_buf = p_mad->p_al_mad_buf;
\r
865 p_mad->element.size = (uint32_t)buf_size;
\r
867 /* Track the MAD element with the requesting AL instance. */
\r
868 al_insert_mad( pool_key->h_al, p_mad );
\r
870 /* Return the MAD element to the client. */
\r
871 *pp_mad_element = &p_mad->element;
\r
873 AL_EXIT( AL_DBG_MAD_POOL );
\r
881 IN const ib_mad_element_t* p_mad_element_list )
\r
883 al_mad_element_t* p_mad;
\r
885 if( !p_mad_element_list )
\r
887 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
\r
888 return IB_INVALID_PARAMETER;
\r
891 while( p_mad_element_list )
\r
893 p_mad = PARENT_STRUCT( p_mad_element_list, al_mad_element_t, element );
\r
894 p_mad_element_list = p_mad_element_list->p_next;
\r
896 /* Deallocate any buffers allocated for the user. */
\r
897 if( p_mad->p_al_mad_buf )
\r
899 cl_free( p_mad->p_al_mad_buf );
\r
900 p_mad->p_al_mad_buf = NULL;
\r
903 /* See if the MAD has already been returned to the MAD pool. */
\r
904 CL_ASSERT( p_mad->h_al );
\r
906 /* Remove the MAD element from the owning AL instance. */
\r
907 al_remove_mad( p_mad );
\r
909 /* Return the MAD element to the pool. */
\r
910 __put_mad_element( p_mad );
\r
/*
 * NOTE(review): damaged definition — the function's return-type and name
 * lines (original lines ~914-918) were lost in the text extraction, so it
 * cannot be reconstructed here without inventing a symbol name.  What the
 * visible body shows: takes a single (non-list; asserts !p_next) MAD
 * element, frees any user-allocated buffer, detaches the element from its
 * owning AL instance's mad_list directly (dropping the AL object reference
 * and clearing h_al, i.e. inlining what al_remove_mad does for ib_put_mad
 * above), then returns the element to the lookaside stack via
 * __put_mad_element.  Recover the signature from the original
 * al_mad_pool.c before rebuilding.
 */
919 IN const ib_mad_element_t* p_mad_element )

921 al_mad_element_t* p_mad;

923 CL_ASSERT( p_mad_element );
924 CL_ASSERT( !p_mad_element->p_next );

926 p_mad = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );

928 /* Deallocate any buffers allocated for the user. */
929 if( p_mad->p_al_mad_buf )

931 cl_free( p_mad->p_al_mad_buf );
932 p_mad->p_al_mad_buf = NULL;

935 /* See if the MAD has already been returned to the MAD pool. */
936 CL_ASSERT( p_mad->h_al );

938 /* Remove the MAD element from the owning AL instance. */
939 cl_qlist_remove_item( &p_mad->h_al->mad_list, &p_mad->al_item );
940 deref_al_obj( &p_mad->h_al->obj );
941 p_mad->h_al = NULL;

943 /* Return the MAD element to the pool. */
944 __put_mad_element( p_mad );
\r
/*
 * Resize the data buffer associated with a MAD element.
 *
 * NOTE(review): damaged definition — the return-type/name lines are lost
 * in extraction AND the function is truncated at the end of the visible
 * chunk (the final return/closing brace lie past the last visible line),
 * so it is left as-is rather than reconstructed.  What the visible body
 * shows: grow-only resize (asserts buf_size > current size), capped below
 * 0xFFFFFFFF because element.size is stored as uint32_t; allocates a new
 * buffer, copies the old contents, zeroes the tail, swaps it in, frees any
 * previous user-allocated buffer, and records the new buffer as
 * p_al_mad_buf so ib_put_mad will free it.  Recover the signature and
 * tail from the original al_mad_pool.c before rebuilding.
 */
950 * Resize the data buffer associated with a MAD element.

954 OUT ib_mad_element_t *p_mad_element,

955 IN const size_t buf_size )

957 al_mad_element_t *p_al_element;

958 ib_mad_t *p_new_buf;

960 CL_ASSERT( p_mad_element );

962 /* We only support growing the buffer for now. */

963 CL_ASSERT( buf_size > p_mad_element->size );

965 /* Cap the size. */

966 if( buf_size >= 0xFFFFFFFF )

967 return IB_INVALID_SETTING;

969 p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );

971 /* Allocate a new buffer. */

972 p_new_buf = cl_malloc( buf_size );

974 return IB_INSUFFICIENT_MEMORY;

976 /* Copy the existing buffer's data into the new buffer. */

977 cl_memcpy( p_new_buf, p_mad_element->p_mad_buf, p_mad_element->size );

978 cl_memclr( (uint8_t*)p_new_buf + p_mad_element->size,

979 buf_size - p_mad_element->size );

981 /* Update the MAD element to use the new buffer. */

982 p_mad_element->p_mad_buf = p_new_buf;

983 p_mad_element->size = (uint32_t)buf_size;

985 /* Free any old buffer. */

986 if( p_al_element->p_al_mad_buf )

987 cl_free( p_al_element->p_al_mad_buf );

988 p_al_element->p_al_mad_buf = p_new_buf;
\r