/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: hca_memory.c 46 2005-05-30 17:55:53Z sleybo $
 */

#include "hca_utils.h"
#include "mthca_dev.h"

#if defined(EVENT_TRACING)
#include "hca_memory.tmh"
#endif

/*
 * Memory Management Verbs.
 */
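
/*
 * mlnx_register_mr
 *
 * Registers a virtually-addressed memory region on protection domain h_pd
 * and returns its local (lkey) and remote (rkey) access keys. Remote write
 * or atomic access requires local write permission as well, per the
 * InfiniBand spec.
 *
 * Illustrative caller sketch ('my_pd', 'buf' and 'len' are hypothetical):
 *
 *	ib_mr_create_t	mr_create;
 *	net32_t		lkey, rkey;
 *	ib_mr_handle_t	h_mr;
 *
 *	mr_create.vaddr = buf;
 *	mr_create.length = len;
 *	mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;
 *	status = mlnx_register_mr( my_pd, &mr_create, &lkey, &rkey, &h_mr, FALSE );
 */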
ib_api_status_t
mlnx_register_mr (
	IN		const	ib_pd_handle_t		h_pd,
	IN		const	ib_mr_create_t		*p_mr_create,
	OUT			net32_t* const		p_lkey,
	OUT			net32_t* const		p_rkey,
	OUT			ib_mr_handle_t		*ph_mr,
	IN			boolean_t		um_call )
{
	ib_api_status_t		status;
	int err;
	struct ib_mr *mr_p;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	PREP_IBDEV_FOR_PRINT(ib_pd_p->device)
	HCA_ENTER(HCA_DBG_MEMORY);

	// sanity checks
	if( !cl_is_blockable() ) {
		status = IB_UNSUPPORTED;
		goto err_unsupported;
	}
	if (!p_mr_create || 0 == p_mr_create->length) {
		HCA_PRINT(TRACE_LEVEL_WARNING, HCA_DBG_MEMORY,
			("invalid attributes\n"));
		status = IB_INVALID_PARAMETER;
		goto err_invalid_parm;
	}
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if ((p_mr_create->access_ctrl & (IB_AC_RDMA_WRITE | IB_AC_ATOMIC)) &&
		!(p_mr_create->access_ctrl & IB_AC_LOCAL_WRITE)) {
		HCA_PRINT(TRACE_LEVEL_WARNING, HCA_DBG_MEMORY,
			("invalid access rights\n"));
		status = IB_INVALID_PERMISSION;
		goto err_invalid_access;
	}
	// register mr
	mr_p = ibv_reg_mr(ib_pd_p, map_qp_ibal_acl(p_mr_create->access_ctrl),
		p_mr_create->vaddr, p_mr_create->length,
		(uint64_t)(ULONG_PTR)(void*)p_mr_create->vaddr, um_call );
	if (IS_ERR(mr_p)) {
		err = PTR_ERR(mr_p);
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
			("ibv_reg_mr failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_reg_mr;
	}
	// return the results
	*p_lkey = mr_p->lkey;
	*p_rkey = cl_hton32( mr_p->rkey );
	if (ph_mr)	*ph_mr = (ib_mr_handle_t)mr_p;
	status = IB_SUCCESS;

err_reg_mr:
err_invalid_access:
err_invalid_parm:
err_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
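
/*
 * mlnx_register_pmr
 *
 * Registers a physically-addressed region described by an array of
 * (addr, size) ranges. A length of (uint64_t)-1 is a convention for
 * "all of physical memory", in which case a DMA MR is created instead.
 * In livefish (diagnostic) mode no HW access is possible, so a dummy
 * MR object is fabricated and returned.
 */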
ib_api_status_t
mlnx_register_pmr (
	IN		const	ib_pd_handle_t		h_pd,
	IN		const	ib_phys_create_t* const	p_pmr_create,
	IN OUT			uint64_t* const		p_vaddr,
	OUT			net32_t* const		p_lkey,
	OUT			net32_t* const		p_rkey,
	OUT			ib_mr_handle_t* const	ph_mr,
	IN			boolean_t		um_call )
{
	ib_api_status_t		status;
	int err;
	struct ib_mr *mr_p;
	struct ib_phys_buf *buffer_list;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	PREP_IBDEV_FOR_PRINT(ib_pd_p->device)
	UNUSED_PARAM( um_call );

	HCA_ENTER(HCA_DBG_MEMORY);

	// livefish mode (no HW access): return a dummy MR object
	if (mthca_is_livefish(to_mdev(ib_pd_p->device))) {
		mr_p = kzalloc(sizeof *mr_p, GFP_KERNEL);
		if (!mr_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto err_no_mem;
		}
		mr_p->device = ib_pd_p->device;
		mr_p->pd = ib_pd_p;
		goto done;
	}
	// sanity checks
	if( !cl_is_blockable() ) {
		status = IB_UNSUPPORTED;
		goto err_unsupported;
	}
	if (!p_vaddr || !p_pmr_create ||
		0 == p_pmr_create->length ) {
		status = IB_INVALID_PARAMETER;
		goto err_invalid_parm;
	}
	// prepare parameters
	buffer_list = (void*)p_pmr_create->range_array;
	//NB: p_pmr_create->buf_offset is not used, i.e. the region is assumed to be page-aligned
	//NB: p_pmr_create->hca_page_size is not used, i.e. it is assumed to be always the same
	// register pmr
	if (p_pmr_create->length == (uint64_t)-1i64)
		mr_p = ibv_get_dma_mr( ib_pd_p,
			map_qp_ibal_acl(p_pmr_create->access_ctrl) );
	else
		mr_p = ibv_reg_phys_mr(ib_pd_p, buffer_list, p_pmr_create->num_ranges,
			map_qp_ibal_acl(p_pmr_create->access_ctrl), p_vaddr );
	if (IS_ERR(mr_p)) {
		err = PTR_ERR(mr_p);
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
			("mthca_reg_phys_mr failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_reg_phys_mr;
	}
done:
	// return the results
	if (ph_mr)	*ph_mr = (ib_mr_handle_t)mr_p;
	*p_lkey = mr_p->lkey;
	*p_rkey = cl_hton32( mr_p->rkey );
	//NB: p_vaddr was not changed
	status = IB_SUCCESS;

err_reg_phys_mr:
err_invalid_parm:
err_unsupported:
err_no_mem:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
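
/*
 * The following MR verbs (query, modify, shared registration) are not
 * implemented by this driver; each logs an error and returns IB_UNSUPPORTED.
 */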
ib_api_status_t
mlnx_query_mr (
	IN		const	ib_mr_handle_t		h_mr,
	OUT			ib_mr_attr_t		*p_mr_query )
{
	UNREFERENCED_PARAMETER(h_mr);
	UNREFERENCED_PARAMETER(p_mr_query);
	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mlnx_query_mr not implemented\n"));
	return IB_UNSUPPORTED;
}
ib_api_status_t
mlnx_modify_mr (
	IN		const	ib_mr_handle_t		h_mr,
	IN		const	ib_mr_mod_t		mem_modify_req,
	IN		const	ib_mr_create_t		*p_mr_create,
	OUT			uint32_t		*p_lkey,
	OUT			uint32_t		*p_rkey,
	IN		const	ib_pd_handle_t		h_pd OPTIONAL,
	IN			boolean_t		um_call )
{
	UNREFERENCED_PARAMETER(h_mr);
	UNREFERENCED_PARAMETER(mem_modify_req);
	UNREFERENCED_PARAMETER(p_mr_create);
	UNREFERENCED_PARAMETER(p_lkey);
	UNREFERENCED_PARAMETER(p_rkey);
	UNREFERENCED_PARAMETER(h_pd);
	UNREFERENCED_PARAMETER(um_call);
	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mlnx_modify_mr not implemented\n"));
	return IB_UNSUPPORTED;
}
ib_api_status_t
mlnx_modify_pmr (
	IN		const	ib_mr_handle_t		h_mr,
	IN		const	ib_mr_mod_t		mem_modify_req,
	IN		const	ib_phys_create_t* const	p_pmr_create,
	IN OUT			uint64_t* const		p_vaddr,
	OUT			uint32_t* const		p_lkey,
	OUT			uint32_t* const		p_rkey,
	IN		const	ib_pd_handle_t		h_pd OPTIONAL,
	IN			boolean_t		um_call )
{
	UNREFERENCED_PARAMETER(h_mr);
	UNREFERENCED_PARAMETER(mem_modify_req);
	UNREFERENCED_PARAMETER(p_pmr_create);
	UNREFERENCED_PARAMETER(p_vaddr);
	UNREFERENCED_PARAMETER(p_lkey);
	UNREFERENCED_PARAMETER(p_rkey);
	UNREFERENCED_PARAMETER(h_pd);
	UNREFERENCED_PARAMETER(um_call);
	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mlnx_modify_pmr not implemented\n"));
	return IB_UNSUPPORTED;
}
ib_api_status_t
mlnx_register_smr (
	IN		const	ib_mr_handle_t		h_mr,
	IN		const	ib_pd_handle_t		h_pd,
	IN		const	ib_access_t		access_ctrl,
	IN OUT			uint64_t* const		p_vaddr,
	OUT			net32_t* const		p_lkey,
	OUT			net32_t* const		p_rkey,
	OUT			ib_mr_handle_t* const	ph_mr,
	IN			boolean_t		um_call )
{
	UNREFERENCED_PARAMETER(h_mr);
	UNREFERENCED_PARAMETER(h_pd);
	UNREFERENCED_PARAMETER(access_ctrl);
	UNREFERENCED_PARAMETER(p_vaddr);
	UNREFERENCED_PARAMETER(p_lkey);
	UNREFERENCED_PARAMETER(p_rkey);
	UNREFERENCED_PARAMETER(ph_mr);
	UNREFERENCED_PARAMETER(um_call);
	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mlnx_register_smr not implemented\n"));
	return IB_UNSUPPORTED;
}
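
/*
 * mlnx_deregister_mr
 *
 * Releases a memory region. For a livefish-mode MR the object was only
 * allocated in mlnx_register_pmr, so it is simply freed without touching HW.
 */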
ib_api_status_t
mlnx_deregister_mr (
	IN	const	ib_mr_handle_t		h_mr)
{
	ib_api_status_t		status;
	int err;
	struct ib_mr *ib_mr = (struct ib_mr *)h_mr;
	PREP_IBDEV_FOR_PRINT(ib_mr->device)

	HCA_ENTER(HCA_DBG_SHIM);

	// livefish mode: the MR was only simulated, so just free it
	if (mthca_is_livefish(to_mdev(ib_mr->device))) {
		kfree(ib_mr);
		goto done;
	}

	// sanity checks
	if( !cl_is_blockable() ) {
		status = IB_UNSUPPORTED;
		goto err_unsupported;
	}

	// deregister
	err = ibv_dereg_mr((struct ib_mr *)h_mr);
	if (err) {
		status = errno_to_iberr(err);
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
			("mthca_dereg_mr failed (%d)\n", status));
		goto err_dereg_mr;
	}

done:
	status = IB_SUCCESS;

err_dereg_mr:
err_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
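
/*
 * Fast Memory Region (FMR) verbs. An FMR is allocated once and can then be
 * (re)mapped to a physical page list cheaply. The expected lifecycle,
 * sketched with hypothetical variables:
 *
 *	mlnx_alloc_fmr( h_pd, &fmr_create, &h_fmr );	// allocate once
 *	mlnx_map_phys_fmr( h_fmr, page_list, n, &va, &lkey, &rkey );
 *	...						// use lkey/rkey in work requests
 *	mlnx_unmap_fmr( h_fmr );			// invalidate the mapping
 *	mlnx_dealloc_fmr( h_fmr );			// free the FMR
 *
 * Note the current max_maps == 1 limitation below, pending an FMR pool
 * implementation like Gen2's.
 */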
ib_api_status_t
mlnx_alloc_fmr (
	IN		const	ib_pd_handle_t		h_pd,
	IN		const	ib_fmr_create_t		*p_fmr_create,
	OUT			ib_fmr_handle_t* const	ph_fmr
	)
{
	ib_api_status_t		status;
	int err;
	struct ib_fmr *fmr_p;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	struct ib_fmr_attr fmr_attr;
	PREP_IBDEV_FOR_PRINT(ib_pd_p->device)

	HCA_ENTER(HCA_DBG_MEMORY);
	// sanity checks
	if( !cl_is_blockable() ) {
		status = IB_UNSUPPORTED;
		goto err_unsupported;
	}
	if (!p_fmr_create ) {
		status = IB_INVALID_PARAMETER;
		goto err_invalid_parm;
	}
	//TODO: temporary limitation, until something like Gen2's FMR_POOL is implemented
	if (p_fmr_create->max_maps != 1) {
		status = IB_INVALID_PARAMETER;
		goto err_invalid_parm;
	}
	// prepare parameters
	RtlZeroMemory(&fmr_attr, sizeof(struct ib_fmr_attr));
	fmr_attr.max_maps = p_fmr_create->max_maps;
	fmr_attr.max_pages = p_fmr_create->max_pages;
	fmr_attr.page_shift = p_fmr_create->page_size;
	// allocate the fast memory region
	fmr_p = ibv_alloc_fmr(ib_pd_p,
		map_qp_ibal_acl(p_fmr_create->access_ctrl), &fmr_attr);
	if (IS_ERR(fmr_p)) {
		err = PTR_ERR(fmr_p);
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
			("mthca_alloc_fmr failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_fmr;
	}

	// return the results
	if (ph_fmr)	*ph_fmr = (ib_fmr_handle_t)fmr_p;
	status = IB_SUCCESS;

err_alloc_fmr:
err_invalid_parm:
err_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
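
/*
 * mlnx_map_phys_fmr
 *
 * Maps an allocated FMR to the given list of physical page addresses.
 * The requested virtual address is page-aligned before mapping.
 */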
ib_api_status_t
mlnx_map_phys_fmr (
	IN		const	ib_fmr_handle_t		h_fmr,
	IN		const	void* __ptr64		page_list,
	IN		const	int			list_len,
	IN OUT			void** __ptr64 const	p_vaddr,
	OUT			net32_t* const		p_lkey,
	OUT			net32_t* const		p_rkey
	)
{
	ib_api_status_t		status;
	int err;
	struct ib_fmr *ib_fmr = (struct ib_fmr *)h_fmr;
	void *vaddr = PAGE_ALIGN(*p_vaddr);
	uint64_t *paddr_list = (uint64_t *__ptr64)page_list;
	PREP_IBDEV_FOR_PRINT(ib_fmr->device)

	HCA_ENTER(HCA_DBG_MEMORY);
	// sanity checks
	if( !cl_is_blockable() ) {
		status = IB_UNSUPPORTED;
		goto err_unsupported;
	}

	// map the FMR to the given page list
	err = ibv_map_phys_fmr(ib_fmr, paddr_list, list_len, (uint64_t)(ULONG_PTR)vaddr);
	if (err) {
		status = errno_to_iberr(err);
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
			("ibv_map_phys_fmr failed (%d) for fmr %p\n", err, h_fmr));
		goto err_map_phys_fmr;
	}
	// return the results
	*p_vaddr = vaddr;
	*p_lkey = ib_fmr->lkey;
	*p_rkey = ib_fmr->rkey;

	status = IB_SUCCESS;

err_map_phys_fmr:
err_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
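
/*
 * mlnx_unmap_fmr
 *
 * Invalidates the current mapping of an FMR. The underlying ibv_unmap_fmr
 * operates on a list of FMRs, so a single-entry list is built here.
 */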
ib_api_status_t
mlnx_unmap_fmr (
	IN	const	ib_fmr_handle_t		h_fmr)
{
	ib_api_status_t		status;
	int err;
	struct ib_fmr *ib_fmr = (struct ib_fmr *)h_fmr;
	struct list_head fmr_list;
	PREP_IBDEV_FOR_PRINT(ib_fmr->device)

	HCA_ENTER(HCA_DBG_MEMORY);

	// sanity checks
	if( !cl_is_blockable() ) {
		status = IB_UNSUPPORTED;
		goto err_unsupported;
	}
	INIT_LIST_HEAD(&fmr_list);
	list_add_tail(&ib_fmr->list, &fmr_list);
	err = ibv_unmap_fmr(&fmr_list);
	if (err) {
		status = errno_to_iberr(err);
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
			("ibv_unmap_fmr failed (%d) for fmr %p\n", err, h_fmr));
		goto err_unmap_fmr;
	}

	status = IB_SUCCESS;

err_unmap_fmr:
err_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
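
/*
 * mlnx_dealloc_fmr
 *
 * Frees an FMR that is no longer mapped.
 */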
ib_api_status_t
mlnx_dealloc_fmr (
	IN		ib_fmr_handle_t	const	h_fmr
	)
{
	ib_api_status_t		status;
	int err;
	struct ib_fmr *fmr = (struct ib_fmr *)h_fmr;
	PREP_IBDEV_FOR_PRINT(fmr->device)

	HCA_ENTER(HCA_DBG_MEMORY);

	// sanity checks
	if( !cl_is_blockable() ) {
		status = IB_UNSUPPORTED;
		goto err_unsupported;
	}
	// deallocate the FMR
	err = ibv_dealloc_fmr((struct ib_fmr *)h_fmr);
	if (err) {
		status = errno_to_iberr(err);
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
			("ibv_dealloc_fmr failed (%d) for fmr %p\n", err, h_fmr));
		goto err_dealloc_fmr;
	}

	status = IB_SUCCESS;

err_dealloc_fmr:
err_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
/*
 * Memory Window Verbs.
 */
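
/*
 * Memory windows are not supported by this driver; all MW verbs below
 * log an error and return IB_UNSUPPORTED.
 */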
ib_api_status_t
mlnx_create_mw (
	IN		const	ib_pd_handle_t		h_pd,
	OUT			net32_t* const		p_rkey,
	OUT			ib_mw_handle_t		*ph_mw,
	IN OUT			ci_umv_buf_t		*p_umv_buf )
{
	UNREFERENCED_PARAMETER(h_pd);
	UNREFERENCED_PARAMETER(p_rkey);
	UNREFERENCED_PARAMETER(ph_mw);
	UNREFERENCED_PARAMETER(p_umv_buf);
	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mlnx_create_mw not implemented\n"));
	return IB_UNSUPPORTED;
}
ib_api_status_t
mlnx_query_mw (
	IN		const	ib_mw_handle_t		h_mw,
	OUT			ib_pd_handle_t		*ph_pd,
	OUT			net32_t* const		p_rkey,
	IN OUT			ci_umv_buf_t		*p_umv_buf )
{
	UNREFERENCED_PARAMETER(h_mw);
	UNREFERENCED_PARAMETER(ph_pd);
	UNREFERENCED_PARAMETER(p_rkey);
	UNREFERENCED_PARAMETER(p_umv_buf);
	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mlnx_query_mw not implemented\n"));
	return IB_UNSUPPORTED;
}
ib_api_status_t
mlnx_destroy_mw (
	IN	const	ib_mw_handle_t		h_mw)
{
	UNREFERENCED_PARAMETER(h_mw);
	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mlnx_destroy_mw not implemented\n"));
	return IB_UNSUPPORTED;
}
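
/*
 * Interface registration: fill the CI dispatch table with the memory
 * verbs above. The livefish variant exposes only the subset that works
 * without an operational HCA (physical registration and deregistration).
 */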
void
mlnx_memory_if(
	IN OUT			ci_interface_t		*p_interface )
{
	p_interface->register_mr = mlnx_register_mr;
	p_interface->register_pmr = mlnx_register_pmr;
	p_interface->query_mr = mlnx_query_mr;
	p_interface->modify_mr = mlnx_modify_mr;
	p_interface->modify_pmr = mlnx_modify_pmr;
	p_interface->register_smr = mlnx_register_smr;
	p_interface->deregister_mr = mlnx_deregister_mr;

	p_interface->alloc_fmr = mlnx_alloc_fmr;
	p_interface->map_phys_fmr = mlnx_map_phys_fmr;
	p_interface->unmap_fmr = mlnx_unmap_fmr;
	p_interface->dealloc_fmr = mlnx_dealloc_fmr;

	p_interface->create_mw = mlnx_create_mw;
	p_interface->query_mw = mlnx_query_mw;
	p_interface->destroy_mw = mlnx_destroy_mw;
}
void
mlnx_memory_if_livefish(
	IN OUT			ci_interface_t		*p_interface )
{
	p_interface->register_pmr = mlnx_register_pmr;
	p_interface->deregister_mr = mlnx_deregister_mr;
}