/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <iba/ib_al.h>

#include "al_debug.h"
#if defined(EVENT_TRACING)
#include "al_mr.tmh"
#endif	/* EVENT_TRACING */

#include "al_mr.h"
#include "al_pd.h"
#include "al_res_mgr.h"
#include "al_verbs.h"

#include "ib_common.h"

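/*
 * AL memory region support: quick-pool constructor/destructor callbacks
 * and lifetime management for Mellanox fast memory regions (FMRs), plus
 * creation and (currently disabled) sharing of memory registrations
 * identified by a shmid.
 */
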
static void
__cleanup_mlnx_fmr(
	IN				struct _al_obj				*p_obj );

static void
__return_mlnx_fmr(
	IN				al_obj_t					*p_obj );

static al_shmid_t*
__create_shmid(
	IN		const	int							shmid );

static void
__free_shmid(
	IN				struct _al_obj				*p_obj );

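/*
 * mlnx_fmr_ctor/mlnx_fmr_dtor below are complib quick-pool callbacks:
 * the ctor runs once per pooled object to construct and initialize the
 * embedded AL object, and the dtor runs when the pool itself is torn
 * down.  A minimal sketch of how such a pool is set up (the pool name
 * and sizes here are hypothetical; the AL resource manager owns the
 * real pool):
 *
 *	cl_qpool_t	fmr_pool;
 *
 *	cl_qpool_init( &fmr_pool, 16, 0, 4, sizeof(mlnx_fmr_t),
 *		mlnx_fmr_ctor, mlnx_fmr_dtor, NULL );
 */
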
cl_status_t
mlnx_fmr_ctor(
	IN				void* const					p_object,
	IN				void*						context,
		OUT			cl_pool_item_t** const		pp_pool_item )
{
	ib_api_status_t			status;
	mlnx_fmr_handle_t		h_fmr;

	UNUSED_PARAM( context );

	h_fmr = (mlnx_fmr_handle_t)p_object;
	cl_memclr( h_fmr, sizeof(mlnx_fmr_t) );

	construct_al_obj( &h_fmr->obj, AL_OBJ_TYPE_H_FMR );
	status = init_al_obj( &h_fmr->obj, NULL, FALSE, NULL,
		__cleanup_mlnx_fmr, __return_mlnx_fmr );
	if( status != IB_SUCCESS )
	{
		return CL_ERROR;
	}

	*pp_pool_item = &((mlnx_fmr_handle_t)p_object)->obj.pool_item;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &h_fmr->obj );

	return CL_SUCCESS;
}

void
mlnx_fmr_dtor(
	IN		const	cl_pool_item_t* const		p_pool_item,
	IN				void*						context )
{
	al_obj_t				*p_obj;

	UNUSED_PARAM( context );

	p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item );

	/*
	 * The FMR is being totally destroyed.  Modify the free_cb to destroy
	 * the AL object.
	 */
	p_obj->pfn_free = (al_pfn_free_t)destroy_al_obj;
	ref_al_obj( p_obj );
	p_obj->pfn_destroy( p_obj, NULL );
}

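/*
 * During normal operation a released FMR is recycled: __return_mlnx_fmr
 * resets the AL object and returns the tracking structure to the pool.
 * The dtor above runs only when the pool itself is being destroyed, so
 * it redirects pfn_free to destroy_al_obj to actually free the object
 * instead of recycling it.
 */
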
static void
__cleanup_mlnx_fmr(
	IN				struct _al_obj				*p_obj )
{
	ib_api_status_t			status;
	mlnx_fmr_handle_t		h_fmr;

	CL_ASSERT( p_obj );
	h_fmr = PARENT_STRUCT( p_obj, mlnx_fmr_t, obj );

	/* Deregister the memory, if it was registered. */
	if( verbs_check_mlnx_fmr( h_fmr ) )
	{
		status = verbs_destroy_mlnx_fmr( h_fmr );
		CL_ASSERT( status == IB_SUCCESS );

		h_fmr->h_ci_fmr = NULL;
	}
}

static void
__return_mlnx_fmr(
	IN				al_obj_t					*p_obj )
{
	mlnx_fmr_handle_t		h_fmr;

	h_fmr = PARENT_STRUCT( p_obj, mlnx_fmr_t, obj );
	reset_al_obj( p_obj );
	put_mlnx_fmr( h_fmr );
}

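/*
 * The entry points below implement the FMR life cycle: create once,
 * then map/unmap sets of physical pages cheaply, and finally destroy.
 * A minimal usage sketch (error handling elided; the page array, its
 * length, and the create parameters are hypothetical):
 *
 *	mlnx_fmr_create_t	fmr_create;	// caller fills in limits/access
 *	mlnx_fmr_handle_t	h_fmr;
 *	uint64_t			vaddr = 0;
 *	net32_t				lkey, rkey;
 *	uint64_t			pages[8];	// physical page addresses
 *
 *	mlnx_create_fmr( h_pd, &fmr_create, &h_fmr );
 *	mlnx_map_phys_fmr( h_fmr, pages, 8, &vaddr, &lkey, &rkey );
 *	// ... post work requests using lkey/rkey ...
 *	mlnx_unmap_fmr( h_fmr );
 *	mlnx_destroy_fmr( h_fmr );
 */
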
ib_api_status_t
mlnx_create_fmr(
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	mlnx_fmr_create_t* const	p_fmr_create,
		OUT			mlnx_fmr_handle_t* const	ph_fmr )
{
	mlnx_fmr_handle_t		h_fmr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
		return IB_INVALID_PD_HANDLE;
	}

	if( !p_fmr_create || !ph_fmr )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Get an FMR tracking structure. */
	h_fmr = alloc_mlnx_fmr();
	if( !h_fmr )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to allocate memory handle\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	status = attach_al_obj( &h_pd->obj, &h_fmr->obj );
	if( status != IB_SUCCESS )
	{
		h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Register the memory region. */
	status = verbs_create_mlnx_fmr( h_pd, p_fmr_create, h_fmr );
	if( status != IB_SUCCESS )
	{
		h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to register memory: %s\n", ib_get_err_str(status)) );
		return status;
	}

	*ph_fmr = h_fmr;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &(*ph_fmr)->obj );

	AL_EXIT( AL_DBG_MR );
	return IB_SUCCESS;
}

ib_api_status_t
mlnx_map_phys_fmr(
	IN		const	mlnx_fmr_handle_t			h_fmr,
	IN		const	uint64_t* const				paddr_list,
	IN		const	int							list_len,
	IN	OUT			uint64_t* const				p_vaddr,
		OUT			net32_t* const				p_lkey,
		OUT			net32_t* const				p_rkey )
{
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
		return IB_INVALID_FMR_HANDLE;
	}

	if( !paddr_list || !p_vaddr || !p_lkey || !p_rkey )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	ref_al_obj( &h_fmr->obj );

	/* Map the FMR to the given list of physical pages. */
	status = verbs_map_phys_mlnx_fmr( h_fmr, paddr_list, list_len, p_vaddr,
		p_lkey, p_rkey );
	if( status != IB_SUCCESS )
	{
		/* TODO: is any additional cleanup needed on mapping failure? */
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to map FMR: %s\n", ib_get_err_str(status)) );
	}

	deref_al_obj( &h_fmr->obj );

	AL_EXIT( AL_DBG_MR );
	return status;
}

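/*
 * Note the ref_al_obj/deref_al_obj bracketing above (repeated in the
 * unmap and destroy paths below): it pins the FMR's AL object across
 * the verbs call so a concurrent destroy cannot free it mid-operation.
 */
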
ib_api_status_t
mlnx_unmap_fmr(
	IN		const	mlnx_fmr_handle_t			h_fmr )
{
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
		return IB_INVALID_FMR_HANDLE;
	}

	ref_al_obj( &h_fmr->obj );
	status = verbs_unmap_mlnx_fmr( h_fmr );
	deref_al_obj( &h_fmr->obj );

	AL_EXIT( AL_DBG_MR );
	return status;
}

ib_api_status_t
mlnx_destroy_fmr(
	IN		const	mlnx_fmr_handle_t			h_fmr )
{
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
		return IB_INVALID_FMR_HANDLE;
	}

	if( !verbs_check_mlnx_fmr( h_fmr ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
		return IB_INVALID_FMR_HANDLE;
	}

	ref_al_obj( &h_fmr->obj );

	/* FMRs are destroyed synchronously. */
	status = verbs_destroy_mlnx_fmr( h_fmr );

	if( status == IB_SUCCESS )
	{
		h_fmr->h_ci_fmr = NULL;
		/* We're good to destroy the object. */
		h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
	}
	else
		deref_al_obj( &h_fmr->obj );

	AL_EXIT( AL_DBG_MR );
	return status;
}

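/*
 * Shared memory registration (shmid) support.  An al_shmid_t tracks all
 * memory registrations that share the same backing memory, so a later
 * registration can be cloned from an existing one instead of pinning
 * and translating the pages again.
 */
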
ib_api_status_t
ib_create_shmid(
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	int							shmid,
	IN		const	ib_mr_create_t* const		p_mr_create,
		OUT			net32_t* const				p_lkey,
		OUT			net32_t* const				p_rkey,
		OUT			ib_mr_handle_t* const		ph_mr )
{
	ib_api_status_t			status;
	cl_status_t				cl_status;
	net32_t					lkey;
	net32_t					rkey;
	ib_mr_handle_t			h_mr;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
		return IB_INVALID_PD_HANDLE;
	}

	if( !p_mr_create || !p_lkey || !p_rkey || !ph_mr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Register the memory region. */
	status = ib_reg_mem( h_pd, p_mr_create, &lkey, &rkey, &h_mr );
	if( status != IB_SUCCESS )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to register memory: %s\n", ib_get_err_str(status)) );
		return status;
	}

	/* Create the shmid tracking structure. */
	h_mr->p_shmid = __create_shmid( shmid );
	if( !h_mr->p_shmid )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to allocate shmid\n") );
		ib_dereg_mr( h_mr );
		return IB_INSUFFICIENT_MEMORY;
	}

	/*
	 * Record that the memory region is associated with this shmid.  The
	 * insertion should automatically succeed since the list has a minimum
	 * size of one.
	 */
	ref_al_obj( &h_mr->p_shmid->obj );
	cl_status = cl_list_insert_head( &h_mr->p_shmid->mr_list, h_mr );
	CL_ASSERT( cl_status == CL_SUCCESS );

	/* Add the shmid to the CI CA for tracking. */
	add_shmid( h_pd->obj.p_ci_ca, h_mr->p_shmid );

	/* Return the results. */
	*p_lkey = lkey;
	*p_rkey = rkey;
	*ph_mr = h_mr;

	AL_EXIT( AL_DBG_MR );
	return IB_SUCCESS;
}

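/*
 * Sketch of the intended sharing flow (error handling elided; note that
 * reg_shmid below is currently stubbed out, so the second half is not
 * yet functional):
 *
 *	net32_t			lkey, rkey;
 *	ib_mr_handle_t	h_mr;
 *	uint64_t		vaddr = 0;
 *
 *	// First registrant publishes the region under a shmid.
 *	ib_create_shmid( h_pd, shmid, &mr_create, &lkey, &rkey, &h_mr );
 *
 *	// A cooperating process would then share the registration.
 *	ib_reg_shmid( h_pd, shmid, &mr_create, &vaddr, &lkey, &rkey, &h_mr );
 */
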
/*
 * Allocate a new structure to track memory registrations shared across
 * processes.
 */
static al_shmid_t*
__create_shmid(
	IN		const	int							shmid )
{
	al_shmid_t				*p_shmid;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	/* Allocate the shmid structure. */
	p_shmid = cl_zalloc( sizeof( al_shmid_t ) );
	if( !p_shmid )
	{
		return NULL;
	}

	/* Construct the shmid structure. */
	construct_al_obj( &p_shmid->obj, AL_OBJ_TYPE_H_MR );
	cl_list_construct( &p_shmid->mr_list );

	/* Initialize the shmid structure. */
	status = init_al_obj( &p_shmid->obj, p_shmid, TRUE,
		NULL, NULL, __free_shmid );
	if( status != IB_SUCCESS )
	{
		__free_shmid( &p_shmid->obj );
		return NULL;
	}

	cl_status = cl_list_init( &p_shmid->mr_list, 1 );
	if( cl_status != CL_SUCCESS )
	{
		p_shmid->obj.pfn_destroy( &p_shmid->obj, NULL );
		return NULL;
	}

	p_shmid->id = shmid;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &p_shmid->obj );

	return p_shmid;
}

static void
__free_shmid(
	IN				struct _al_obj				*p_obj )
{
	al_shmid_t				*p_shmid;

	p_shmid = PARENT_STRUCT( p_obj, al_shmid_t, obj );

	CL_ASSERT( cl_is_list_empty( &p_shmid->mr_list ) );

	cl_list_destroy( &p_shmid->mr_list );
	destroy_al_obj( p_obj );
	cl_free( p_shmid );
}

ib_api_status_t
ib_reg_shmid(
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	ib_shmid_t					shmid,
	IN		const	ib_mr_create_t* const		p_mr_create,
	IN	OUT			uint64_t* const				p_vaddr,
		OUT			net32_t* const				p_lkey,
		OUT			net32_t* const				p_rkey,
		OUT			ib_mr_handle_t* const		ph_mr )
{
	return reg_shmid( h_pd, shmid, p_mr_create, p_vaddr, p_lkey, p_rkey, ph_mr );
}

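/*
 * reg_shmid is the worker behind ib_reg_shmid.  Its original body is
 * compiled out below; the stub consumes its parameters and fails, so
 * registration by shmid is effectively disabled in this build.
 */
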
ib_api_status_t
reg_shmid(
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	ib_shmid_t					shmid,
	IN		const	ib_mr_create_t* const		p_mr_create,
	IN	OUT			uint64_t* const				p_vaddr,
		OUT			net32_t* const				p_lkey,
		OUT			net32_t* const				p_rkey,
		OUT			ib_mr_handle_t* const		ph_mr )
{
	UNUSED_PARAM( h_pd );
	UNUSED_PARAM( shmid );
	UNUSED_PARAM( p_mr_create );
	UNUSED_PARAM( p_vaddr );
	UNUSED_PARAM( p_lkey );
	UNUSED_PARAM( p_rkey );
	UNUSED_PARAM( ph_mr );
	return IB_UNSUPPORTED;

#if 0
	ib_api_status_t			status;
	cl_status_t				cl_status;
	al_shmid_t				*p_shmid;
	uint64_t				vaddr;
	net32_t					lkey, rkey;
	ib_access_t				access_ctrl;
	ib_mr_handle_t			h_mr, h_reg_mr;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
		return IB_INVALID_PD_HANDLE;
	}

	if( !p_vaddr || !p_lkey || !p_rkey || !ph_mr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* See if we can acquire the registered memory region. */
	status = acquire_shmid( h_pd->obj.p_ci_ca, shmid, &p_shmid );
	if( status != IB_SUCCESS )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("shmid not found: %s\n", ib_get_err_str(status)) );
		return IB_NOT_FOUND;
	}

	/* Lock down the shmid to prevent deregistrations while we register. */
	cl_spinlock_acquire( &p_shmid->obj.lock );

	/*
	 * There's a chance that, after we acquired the shmid, all existing
	 * registrations were deregistered.
	 */
	if( cl_is_list_empty( &p_shmid->mr_list ) )
	{
		/* There are no registrations left to share. */
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("shmid not found\n") );
		cl_spinlock_release( &p_shmid->obj.lock );
		release_shmid( p_shmid );
		return IB_NOT_FOUND;
	}

	/* Get a handle to an existing registered memory region. */
	h_reg_mr = cl_list_obj( cl_list_head( &p_shmid->mr_list ) );

	/* BUGBUG: This release is not safe, since h_reg_mr can be deregistered. */
	cl_spinlock_release( &p_shmid->obj.lock );
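	/*
	 * The race flagged above: once the lock is dropped, another thread
	 * may deregister (and free) h_reg_mr before ib_reg_shared runs.  A
	 * fix would hold a reference on h_reg_mr across the call, or keep
	 * the lock held until the new registration is inserted in the list.
	 */
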
	/* Register the memory region. */
	vaddr = *p_vaddr;
	access_ctrl = p_mr_create->access_ctrl;
	status = ib_reg_shared( h_reg_mr, h_pd, access_ctrl, &vaddr,
		&lkey, &rkey, &h_mr );
	if( status != IB_SUCCESS )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to register shared memory: 0x%0I64x %s\n",
			vaddr, ib_get_err_str(status)) );
		release_shmid( p_shmid );
		return status;
	}

	cl_spinlock_acquire( &p_shmid->obj.lock );

	/* Track the registration with the shmid structure. */
	cl_status = cl_list_insert_head( &p_shmid->mr_list, h_mr );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("insertion into shmid list failed\n") );
		cl_spinlock_release( &p_shmid->obj.lock );
		release_shmid( p_shmid );
		return ib_convert_cl_status( cl_status );
	}

	cl_spinlock_release( &p_shmid->obj.lock );

	/* Return the results. */
	h_mr->p_shmid = p_shmid;
	*p_vaddr = vaddr;
	*p_lkey = lkey;
	*p_rkey = rkey;
	*ph_mr = h_mr;

	AL_EXIT( AL_DBG_MR );
	return IB_SUCCESS;
#endif	/* 0 */
}