\r
if( p_src->p_inout_buf )\r
{\r
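+		/* p_inout_buf is now a 64-bit scalar rather than a pointer (presumably so\r
+		 * the struct layout matches between 32-bit user processes and the 64-bit\r
+		 * kernel); every use below casts through ULONG_PTR to recover a pointer. */\r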
- if( p_src->input_size && \r
- cl_check_for_read( p_src->p_inout_buf, (size_t)p_src->input_size )\r
+ if( p_src->input_size && cl_check_for_read(\r
+ (void*)(ULONG_PTR)p_src->p_inout_buf, (size_t)p_src->input_size )\r
!= CL_SUCCESS )\r
{\r
/* user-supplied memory area not readable */\r
return IB_INVALID_PERMISSION;\r
}\r
- if( p_src->output_size &&\r
- cl_check_for_write( p_src->p_inout_buf, (size_t)p_src->output_size )\r
+ if( p_src->output_size && cl_check_for_write(\r
+ (void*)(ULONG_PTR)p_src->p_inout_buf, (size_t)p_src->output_size )\r
!= CL_SUCCESS )\r
{\r
			/* user-supplied memory area not writeable */\r
			return IB_INVALID_PERMISSION;\r
		}\r
	}\r
\r
	/* Copy the umv_buf structure. */\r
*p_dest = *p_src;\r
if( p_src->p_inout_buf )\r
- p_dest->p_inout_buf = (void*)(p_dest + 1);\r
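+			/* the data area follows the header in the same allocation; store its address as a 64-bit value */\r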
+ p_dest->p_inout_buf = (ULONG_PTR)(p_dest + 1);\r
\r
/* Setup the buffer - either we have an input or output buffer */\r
if( p_src->input_size )\r
{\r
- if( cl_copy_from_user( p_dest->p_inout_buf, p_src->p_inout_buf,\r
+ if( cl_copy_from_user( (void*)(ULONG_PTR)p_dest->p_inout_buf,\r
+ (void*)(ULONG_PTR)p_src->p_inout_buf,\r
(size_t)p_src->input_size ) != CL_SUCCESS )\r
{\r
cl_free( p_dest );\r
\r
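+	/* copy back no more than the smaller of the two buffers can hold */\r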
out_size = MIN( p_dest->output_size, p_src->output_size );\r
\r
- if( cl_copy_to_user( p_dest->p_inout_buf, p_src->p_inout_buf,\r
+ if( cl_copy_to_user(\r
+ (void*)(ULONG_PTR)p_dest->p_inout_buf,\r
+ (void*)(ULONG_PTR)p_src->p_inout_buf,\r
out_size ) != CL_SUCCESS )\r
{\r
p_dest->output_size = 0;\r
\r
static inline int from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)\r
{\r
- RtlCopyMemory(dest, p_umv_buf->p_inout_buf, len);\r
+ RtlCopyMemory(dest, (void*)(ULONG_PTR)p_umv_buf->p_inout_buf, len);\r
return 0;\r
}\r
\r
		p_umv_buf->output_size = 0;\r
return -EFAULT;\r
}\r
- RtlCopyMemory(p_umv_buf->p_inout_buf, src, len);\r
+ RtlCopyMemory( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf, src, len);\r
p_umv_buf->status = IB_SUCCESS;\r
p_umv_buf->output_size = (uint32_t)len;\r
return 0;\r
/*\r
* Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
* Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
*\r
* This software is available to you under the OpenIB.org BSD license\r
* below:\r
int err;\r
struct ib_mr *p_ib_mr;\r
struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;\r
- ci_umv_buf_t umv_buf = { 0, 0, 0, 0, NULL };\r
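+	/* p_inout_buf is a scalar field now, so a plain 0 replaces the NULL initializer */\r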
+ ci_umv_buf_t umv_buf = { 0, 0, 0, 0, 0 };\r
\r
HCA_ENTER(HCA_DBG_MEMORY);\r
\r
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $
- */
-
-#include "precomp.h"
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "pd.tmh"
-#endif
-
-
-/* Protection domains */
-
-ib_api_status_t
-mlnx_allocate_pd (
- IN const ib_ca_handle_t h_ca,
- IN const ib_pd_type_t type,
- OUT ib_pd_handle_t *ph_pd,
- IN OUT ci_umv_buf_t *p_umv_buf )
-{
- ib_api_status_t status;
- struct ib_device *p_ibdev;
- struct ib_ucontext *p_uctx;
- struct ib_pd *p_ib_pd;
- struct ib_udata udata;
- struct ibv_alloc_pd_resp *p_resp = NULL;
- int err;
-
- //TODO: how are we to use it ?
- UNREFERENCED_PARAMETER(type);
-
- HCA_ENTER(HCA_DBG_PD);
-
- if( p_umv_buf ) {
- p_uctx = (struct ib_ucontext *)h_ca;
- p_ibdev = p_uctx->device;
-
- if( p_umv_buf->command ) {
- // sanity checks
- if (p_umv_buf->output_size < sizeof(struct ibv_alloc_pd_resp) ||
- !p_umv_buf->p_inout_buf) {
- status = IB_INVALID_PARAMETER;
- goto err_alloc_pd;
- }
-
- // prepare user parameters
- p_resp = (struct ibv_alloc_pd_resp*)(void*)p_umv_buf->p_inout_buf;
- INIT_UDATA(&udata, NULL, &p_resp->pdn,
- 0, sizeof(p_resp->pdn));
- }
- else {
- u32 pdn;
- INIT_UDATA(&udata, NULL, &pdn,
- 0, sizeof(pdn));
- }
- }
- else {
- mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;
- p_ibdev = hca2ibdev(p_hca);
- p_uctx = NULL;
- }
-
- // create PD
- p_ib_pd = p_ibdev->alloc_pd(p_ibdev, p_uctx, &udata);
-
- if (IS_ERR(p_ib_pd)){
- err = PTR_ERR(p_ib_pd);
- status = errno_to_iberr(err);
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,
- ("ibv_alloc_pd failed (%#x)\n", status));
- goto err_alloc_pd;
- }
- else {
- p_ib_pd->device = p_ibdev;
- p_ib_pd->p_uctx = p_uctx;
- atomic_set(&p_ib_pd->usecnt, 0);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_PD ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n",
- ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));
- }
-
- // complete user response
- if (p_umv_buf && p_umv_buf->command) {
- p_resp->pd_handle = (u64)(ULONG_PTR)p_ib_pd;
- }
-
- // return the result
- if (ph_pd) *ph_pd = (ib_pd_handle_t)p_ib_pd;
-
- status = IB_SUCCESS;
-
-err_alloc_pd:
- if (p_umv_buf && p_umv_buf->command)
- p_umv_buf->status = status;
- HCA_EXIT(HCA_DBG_PD);
- return status;
-}
-
-ib_api_status_t
-mlnx_deallocate_pd (
- IN ib_pd_handle_t h_pd)
-{
- ib_api_status_t status;
- int err;
- struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;
-
- HCA_ENTER( HCA_DBG_PD);
-
- HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_PD,
- ("pcs %p\n", PsGetCurrentProcess()));
-
- if (!hca_is_livefish(p_ib_pd->device->x.p_fdo)) {
- if (atomic_read(&p_ib_pd->usecnt)) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_PD,
- ("resources are not released (pdn %d, cnt %d)\n",
- ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt));
- status = IB_RESOURCE_BUSY;
- goto err_dealloc_pd;
- }
- }
-
- err = p_ib_pd->device->dealloc_pd(p_ib_pd);
- if (err) {
- status = errno_to_iberr(err);
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD
- ,("ibv_dealloc_pd failed (%#x)\n", status));
- goto err_dealloc_pd;
- }
- status = IB_SUCCESS;
-
-err_dealloc_pd:
- HCA_EXIT(HCA_DBG_PD);
- return status;
-}
-
-
-void
-mlnx_pd_if(
- IN OUT ci_interface_t *p_interface )
-{
- p_interface->allocate_pd = mlnx_allocate_pd;
- p_interface->deallocate_pd = mlnx_deallocate_pd;
-}
-
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $\r
+ */\r
+\r
+#include "precomp.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "pd.tmh"\r
+#endif\r
+\r
+\r
+/* Protection domains */\r
+\r
+ib_api_status_t\r
+mlnx_allocate_pd (\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN const ib_pd_type_t type,\r
+ OUT ib_pd_handle_t *ph_pd,\r
+ IN OUT ci_umv_buf_t *p_umv_buf )\r
+{\r
+ ib_api_status_t status;\r
+ struct ib_device *p_ibdev;\r
+ struct ib_ucontext *p_uctx;\r
+ struct ib_pd *p_ib_pd;\r
+ struct ib_udata udata;\r
+ struct ibv_alloc_pd_resp *p_resp = NULL;\r
+ int err;\r
+\r
+	//TODO: figure out how the 'type' parameter should be used\r
+ UNREFERENCED_PARAMETER(type);\r
+ \r
+ HCA_ENTER(HCA_DBG_PD);\r
+\r
+ if( p_umv_buf ) {\r
+ p_uctx = (struct ib_ucontext *)h_ca;\r
+ p_ibdev = p_uctx->device;\r
+\r
+ if( p_umv_buf->command ) {\r
+ // sanity checks \r
+ if (p_umv_buf->output_size < sizeof(struct ibv_alloc_pd_resp) ||\r
+ !p_umv_buf->p_inout_buf) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_alloc_pd;\r
+ }\r
+\r
+ // prepare user parameters\r
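+			// (the user address travels as a 64-bit scalar; narrow it back through ULONG_PTR)\r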
+ p_resp = (struct ibv_alloc_pd_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+ INIT_UDATA(&udata, NULL, &p_resp->pdn, \r
+ 0, sizeof(p_resp->pdn));\r
+ }\r
+ else {\r
+ u32 pdn;\r
+ INIT_UDATA(&udata, NULL, &pdn, \r
+ 0, sizeof(pdn));\r
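+			/* NB: pdn goes out of scope before alloc_pd() uses udata; the returned pdn is simply discarded */\r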
+ }\r
+ }\r
+ else {\r
+ mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;\r
+ p_ibdev = hca2ibdev(p_hca);\r
+ p_uctx = NULL;\r
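+		/* NB: udata stays uninitialized on this kernel-only path; alloc_pd() is expected not to touch it when p_uctx is NULL */\r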
+ }\r
+ \r
+ // create PD\r
+ p_ib_pd = p_ibdev->alloc_pd(p_ibdev, p_uctx, &udata);\r
+\r
+ if (IS_ERR(p_ib_pd)){\r
+ err = PTR_ERR(p_ib_pd);\r
+ status = errno_to_iberr(err);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,\r
+ ("ibv_alloc_pd failed (%#x)\n", status));\r
+ goto err_alloc_pd;\r
+ }\r
+ else {\r
+ p_ib_pd->device = p_ibdev;\r
+ p_ib_pd->p_uctx = p_uctx;\r
+ atomic_set(&p_ib_pd->usecnt, 0);\r
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_PD ,("pdn %d, usecnt %d, pd_handle %p, ctx %p \n", \r
+ ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt, p_ib_pd, p_ib_pd->p_uctx));\r
+ }\r
+\r
+ // complete user response\r
+ if (p_umv_buf && p_umv_buf->command) {\r
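+		// hand the kernel PD pointer back to user space as an opaque 64-bit handle\r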
+ p_resp->pd_handle = (u64)(ULONG_PTR)p_ib_pd;\r
+ }\r
+ \r
+ // return the result\r
+ if (ph_pd) *ph_pd = (ib_pd_handle_t)p_ib_pd;\r
+\r
+ status = IB_SUCCESS;\r
+ \r
+err_alloc_pd: \r
+ if (p_umv_buf && p_umv_buf->command) \r
+ p_umv_buf->status = status;\r
+ HCA_EXIT(HCA_DBG_PD);\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_deallocate_pd (\r
+ IN ib_pd_handle_t h_pd)\r
+{\r
+ ib_api_status_t status;\r
+ int err;\r
+ struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;\r
+\r
+ HCA_ENTER( HCA_DBG_PD);\r
+\r
+ HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_PD,\r
+ ("pcs %p\n", PsGetCurrentProcess()));\r
+ \r
+ if (!hca_is_livefish(p_ib_pd->device->x.p_fdo)) {\r
+ if (atomic_read(&p_ib_pd->usecnt)) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_PD,\r
+ ("resources are not released (pdn %d, cnt %d)\n", \r
+ ((struct mlx4_ib_pd*)p_ib_pd)->pdn, p_ib_pd->usecnt));\r
+ status = IB_RESOURCE_BUSY;\r
+ goto err_dealloc_pd;\r
+ } \r
+ }\r
+\r
+ err = p_ib_pd->device->dealloc_pd(p_ib_pd);\r
+ if (err) {\r
+ status = errno_to_iberr(err);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD\r
+ ,("ibv_dealloc_pd failed (%#x)\n", status));\r
+ goto err_dealloc_pd;\r
+ }\r
+ status = IB_SUCCESS;\r
+\r
+err_dealloc_pd:\r
+ HCA_EXIT(HCA_DBG_PD);\r
+ return status;\r
+}\r
+\r
+\r
+void\r
+mlnx_pd_if(\r
+ IN OUT ci_interface_t *p_interface )\r
+{\r
+ p_interface->allocate_pd = mlnx_allocate_pd;\r
+ p_interface->deallocate_pd = mlnx_deallocate_pd;\r
+}\r
+\r
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $
- */
-
-#include "precomp.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "qp.tmh"
-#endif
-
-
-ib_api_status_t
-mlnx_query_qp (
- IN const ib_qp_handle_t h_qp,
- OUT ib_qp_attr_t *p_qp_attr,
- IN OUT ci_umv_buf_t *p_umv_buf )
-{
- ib_api_status_t status;
- struct ib_qp *p_ib_qp = (struct ib_qp *)h_qp;
- struct ib_qp_attr qp_attr;
- struct ib_qp_init_attr qp_init_attr;
- int qp_attr_mask = 0;
- int err;
-
- UNREFERENCED_PARAMETER(p_umv_buf);
-
- HCA_ENTER( HCA_DBG_QP);
-
- // sanity checks
- if (!p_qp_attr) {
- status = IB_INVALID_PARAMETER;
- goto err_parm;
- }
-
- // convert structures
- memset( &qp_attr, 0, sizeof(struct ib_qp_attr) );
- err = p_ib_qp->device->query_qp( p_ib_qp, &qp_attr,
- qp_attr_mask, &qp_init_attr);
- if (err){
- status = errno_to_iberr(err);
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,
- ("ib_query_qp failed (%#x)\n", status));
- goto err_query_qp;
- }
-
- status = from_qp_attr( p_ib_qp, &qp_attr, p_qp_attr );
-
-err_query_qp:
-err_parm:
- HCA_EXIT(HCA_DBG_QP);
- return status;
-}
-
-static ib_api_status_t
-__create_qp (
- IN const ib_pd_handle_t h_pd,
- IN const uint8_t port_num,
- IN const void *qp_uctx,
- IN const ib_qp_create_t *p_create_attr,
- OUT ib_qp_attr_t *p_qp_attr,
- OUT ib_qp_handle_t *ph_qp,
- IN OUT ci_umv_buf_t *p_umv_buf )
-{
- int err;
- ib_api_status_t status;
- struct ib_qp * p_ib_qp;
- struct ib_qp_init_attr qp_init_attr;
- struct ib_ucontext *p_uctx = NULL;
- struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;
- struct ib_device *p_ib_dev = p_ib_pd->device;
- mlnx_hca_t *p_hca = ibdev2hca(p_ib_dev);
- struct ibv_create_qp *p_req = NULL;
-
- HCA_ENTER(HCA_DBG_QP);
-
- if( p_umv_buf && p_umv_buf->command ) {
- // sanity checks
- if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||
- p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||
- !p_umv_buf->p_inout_buf) {
- status = IB_INVALID_PARAMETER;
- goto err_inval_params;
- }
- p_req = (struct ibv_create_qp*)(void*)p_umv_buf->p_inout_buf;
- p_uctx = p_ib_pd->p_uctx;
- }
-
- // prepare the parameters
- RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
- qp_init_attr.event_handler = qp_event_handler;
- qp_init_attr.qp_context = p_hca;
- qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
- qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
- qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;
- if( p_umv_buf && p_umv_buf->command ) {
- qp_init_attr.cap.max_recv_sge = p_req->max_recv_sge;
- qp_init_attr.cap.max_send_sge = p_req->max_send_sge;
- qp_init_attr.cap.max_recv_wr = p_req->max_recv_wr;
- qp_init_attr.cap.max_send_wr = p_req->max_send_wr;
- qp_init_attr.cap.max_inline_data = p_req->max_inline_data;
- }
- else {
- qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;
- qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;
- qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;
- qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;
- qp_init_attr.cap.max_inline_data = 0; /* absent in IBAL */
- }
- qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
- qp_init_attr.qp_type = to_qp_type(p_create_attr->qp_type);
- qp_init_attr.port_num = port_num;
-
- // create qp
- p_ib_qp = ibv_create_qp( p_ib_pd, &qp_init_attr, p_uctx, p_umv_buf );
- if (IS_ERR(p_ib_qp)) {
- err = PTR_ERR(p_ib_qp);
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
- ("ibv_create_qp failed (%d)\n", err));
- status = errno_to_iberr(err);
- goto err_create_qp;
- }
-
- // fill the object
- p_ib_qp->x.ctx = (void*)qp_uctx;
-
- // Query QP to obtain requested attributes
- if (p_qp_attr) {
- status = mlnx_query_qp((ib_qp_handle_t)p_ib_qp, p_qp_attr, p_umv_buf);
- if (status != IB_SUCCESS)
- goto err_query_qp;
- }
-
- // return the results
- if (ph_qp) *ph_qp = (ib_qp_handle_t)p_ib_qp;
-
- status = IB_SUCCESS;
- goto end;
-
-err_query_qp:
- ib_destroy_qp( p_ib_qp );
-err_create_qp:
-err_inval_params:
-end:
- if (p_umv_buf && p_umv_buf->command)
- p_umv_buf->status = status;
- if (status != IB_SUCCESS)
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
- ("completes with ERROR status %x\n", status));
- HCA_EXIT(HCA_DBG_QP);
- return status;
-}
-
-ib_api_status_t
-mlnx_create_spl_qp (
- IN const ib_pd_handle_t h_pd,
- IN const uint8_t port_num,
- IN const void *qp_uctx,
- IN const ib_qp_create_t *p_create_attr,
- OUT ib_qp_attr_t *p_qp_attr,
- OUT ib_qp_handle_t *ph_qp )
-{
- ib_api_status_t status;
-
- HCA_ENTER(HCA_DBG_SHIM);
-
- status = __create_qp( h_pd, port_num,
- qp_uctx, p_create_attr, p_qp_attr, ph_qp, NULL );
-
- if (status != IB_SUCCESS)
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
- ("completes with ERROR status %x\n", status));
- HCA_EXIT(HCA_DBG_QP);
- return status;
-}
-
-ib_api_status_t
-mlnx_create_qp (
- IN const ib_pd_handle_t h_pd,
- IN const void *qp_uctx,
- IN const ib_qp_create_t *p_create_attr,
- OUT ib_qp_attr_t *p_qp_attr,
- OUT ib_qp_handle_t *ph_qp,
- IN OUT ci_umv_buf_t *p_umv_buf )
-{
- ib_api_status_t status;
-
- //NB: algorithm of mthca_alloc_sqp() requires port_num
- // PRM states, that special pares are created in couples, so
- // looks like we can put here port_num = 1 always
- uint8_t port_num = 1;
-
- HCA_ENTER(HCA_DBG_QP);
-
- status = __create_qp( h_pd, port_num,
- qp_uctx, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );
-
- if (status != IB_SUCCESS)
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
- ("completes with ERROR status %x\n", status));
- HCA_EXIT(HCA_DBG_QP);
- return status;
-}
-
-ib_api_status_t
-mlnx_modify_qp (
- IN const ib_qp_handle_t h_qp,
- IN const ib_qp_mod_t *p_modify_attr,
- OUT ib_qp_attr_t *p_qp_attr OPTIONAL,
- IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL )
-{
- int err;
- ib_api_status_t status;
- struct ib_qp_attr qp_attr;
- int qp_attr_mask;
- struct ib_qp *p_ib_qp = (struct ib_qp *)h_qp;
-
- HCA_ENTER(HCA_DBG_QP);
-
- // sanity checks
- if( p_umv_buf && p_umv_buf->command ) {
- // sanity checks
- if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||
- !p_umv_buf->p_inout_buf) {
- status = IB_INVALID_PARAMETER;
- goto err_inval_params;
- }
- }
-
- // fill parameters
- status = to_qp_attr( p_ib_qp, from_qp_type(p_ib_qp->qp_type),
- p_modify_attr, &qp_attr, &qp_attr_mask );
- if (status == IB_NOT_DONE)
- goto query_qp;
- if (status != IB_SUCCESS )
- goto err_mode_unsupported;
-
- // modify QP
- err = p_ib_qp->device->modify_qp( p_ib_qp, &qp_attr, qp_attr_mask, NULL);
- if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
- ("ibv_modify_qp failed (%d)\n", err));
- status = errno_to_iberr(err);
- goto err_modify_qp;
- }
-
- // Query QP to obtain requested attributes
-query_qp:
- if (p_qp_attr) {
- status = mlnx_query_qp ((ib_qp_handle_t)p_ib_qp, p_qp_attr, p_umv_buf);
- if (status != IB_SUCCESS)
- goto err_query_qp;
- }
-
- if( p_umv_buf && p_umv_buf->command ) {
- struct ibv_modify_qp_resp resp;
- resp.attr_mask = qp_attr_mask;
- resp.qp_state = qp_attr.qp_state;
- err = to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));
- if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("to_umv_buf failed (%d)\n", err));
- status = errno_to_iberr(err);
- goto err_copy;
- }
- }
-
- status = IB_SUCCESS;
-
-err_copy:
-err_query_qp:
-err_modify_qp:
-err_mode_unsupported:
-err_inval_params:
- if (p_umv_buf && p_umv_buf->command)
- p_umv_buf->status = status;
- if (status != IB_SUCCESS) {
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
- ("completes with ERROR status %x\n", status));
- }
- HCA_EXIT(HCA_DBG_QP);
- return status;
-}
-
-ib_api_status_t
-mlnx_ndi_modify_qp (
- IN const ib_qp_handle_t h_qp,
- IN const ib_qp_mod_t *p_modify_attr,
- OUT ib_qp_attr_t *p_qp_attr OPTIONAL,
- IN const uint32_t buf_size,
- IN uint8_t* const p_outbuf)
-{
- ci_umv_buf_t umv_buf;
- ib_api_status_t status;
- struct ibv_modify_qp_resp resp;
- void *buf = &resp;
-
- HCA_ENTER(HCA_DBG_QP);
-
- /* imitate umv_buf */
- umv_buf.command = TRUE; /* special case for NDI. Usually it's TRUE */
- umv_buf.input_size = 0;
- umv_buf.output_size = sizeof(struct ibv_modify_qp_resp);
- umv_buf.p_inout_buf = buf;
-
- status = mlnx_modify_qp ( h_qp, p_modify_attr, p_qp_attr, &umv_buf );
-
- if (status == IB_SUCCESS) {
- cl_memclr( p_outbuf, buf_size );
- *p_outbuf = resp.qp_state;
- }
-
- HCA_EXIT(HCA_DBG_QP);
- return status;
-}
-
-ib_api_status_t
-mlnx_destroy_qp (
- IN const ib_qp_handle_t h_qp,
- IN const uint64_t timewait )
-{
- ib_api_status_t status;
- int err;
- struct ib_qp *p_ib_qp = (struct ib_qp *)h_qp;
-
- UNUSED_PARAM( timewait );
-
- HCA_ENTER( HCA_DBG_QP);
-
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,
- ("qpnum %#x, pcs %p\n", p_ib_qp->qp_num, PsGetCurrentProcess()) );
-
- err = ib_destroy_qp( p_ib_qp );
- if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
- ("ibv_destroy_qp failed (%d)\n", err));
- status = errno_to_iberr(err);
- goto err_destroy_qp;
- }
-
- status = IB_SUCCESS;
-
-err_destroy_qp:
- if (status != IB_SUCCESS)
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
- ("completes with ERROR status %x\n", status));
- HCA_EXIT(HCA_DBG_QP);
- return status;
-}
-
-void
-mlnx_qp_if(
- IN OUT ci_interface_t *p_interface )
-{
- p_interface->create_qp = mlnx_create_qp;
- p_interface->create_spl_qp = mlnx_create_spl_qp;
- p_interface->modify_qp = mlnx_modify_qp;
- p_interface->ndi_modify_qp = mlnx_ndi_modify_qp;
- p_interface->query_qp = mlnx_query_qp;
- p_interface->destroy_qp = mlnx_destroy_qp;
-}
-
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $\r
+ */\r
+\r
+#include "precomp.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "qp.tmh"\r
+#endif\r
+\r
+\r
+ib_api_status_t\r
+mlnx_query_qp (\r
+ IN const ib_qp_handle_t h_qp,\r
+ OUT ib_qp_attr_t *p_qp_attr,\r
+ IN OUT ci_umv_buf_t *p_umv_buf )\r
+{\r
+ ib_api_status_t status;\r
+ struct ib_qp *p_ib_qp = (struct ib_qp *)h_qp;\r
+ struct ib_qp_attr qp_attr;\r
+ struct ib_qp_init_attr qp_init_attr;\r
+ int qp_attr_mask = 0;\r
+ int err;\r
+\r
+ UNREFERENCED_PARAMETER(p_umv_buf);\r
+ \r
+ HCA_ENTER( HCA_DBG_QP);\r
+\r
+ // sanity checks\r
+ if (!p_qp_attr) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_parm;\r
+ }\r
+\r
+	// query the QP and convert the returned structures\r
+ memset( &qp_attr, 0, sizeof(struct ib_qp_attr) );\r
+ err = p_ib_qp->device->query_qp( p_ib_qp, &qp_attr, \r
+ qp_attr_mask, &qp_init_attr);\r
+ if (err){\r
+ status = errno_to_iberr(err);\r
+		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
+ ("ib_query_qp failed (%#x)\n", status));\r
+ goto err_query_qp;\r
+ }\r
+\r
+ status = from_qp_attr( p_ib_qp, &qp_attr, p_qp_attr );\r
+\r
+err_query_qp:\r
+err_parm:\r
+ HCA_EXIT(HCA_DBG_QP);\r
+ return status;\r
+}\r
+\r
+static ib_api_status_t\r
+__create_qp (\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const uint8_t port_num,\r
+ IN const void *qp_uctx,\r
+ IN const ib_qp_create_t *p_create_attr,\r
+ OUT ib_qp_attr_t *p_qp_attr,\r
+ OUT ib_qp_handle_t *ph_qp,\r
+ IN OUT ci_umv_buf_t *p_umv_buf )\r
+{\r
+ int err;\r
+ ib_api_status_t status;\r
+ struct ib_qp * p_ib_qp;\r
+ struct ib_qp_init_attr qp_init_attr;\r
+ struct ib_ucontext *p_uctx = NULL;\r
+ struct ib_pd *p_ib_pd = (struct ib_pd *)h_pd;\r
+ struct ib_device *p_ib_dev = p_ib_pd->device;\r
+ mlnx_hca_t *p_hca = ibdev2hca(p_ib_dev);\r
+ struct ibv_create_qp *p_req = NULL;\r
+ \r
+ HCA_ENTER(HCA_DBG_QP);\r
+\r
+ if( p_umv_buf && p_umv_buf->command ) {\r
+ // sanity checks \r
+ if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||\r
+ p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||\r
+ !p_umv_buf->p_inout_buf) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_inval_params;\r
+ }\r
+ p_req = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+ p_uctx = p_ib_pd->p_uctx;\r
+ }\r
+\r
+ // prepare the parameters\r
+ RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
+ qp_init_attr.event_handler = qp_event_handler;\r
+ qp_init_attr.qp_context = p_hca;\r
+ qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
+ qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
+ qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;\r
+ if( p_umv_buf && p_umv_buf->command ) {\r
+ qp_init_attr.cap.max_recv_sge = p_req->max_recv_sge;\r
+ qp_init_attr.cap.max_send_sge = p_req->max_send_sge;\r
+ qp_init_attr.cap.max_recv_wr = p_req->max_recv_wr;\r
+ qp_init_attr.cap.max_send_wr = p_req->max_send_wr;\r
+ qp_init_attr.cap.max_inline_data = p_req->max_inline_data;\r
+ }\r
+ else {\r
+ qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
+ qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
+ qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
+ qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;\r
+ qp_init_attr.cap.max_inline_data = 0; /* absent in IBAL */\r
+ }\r
+ qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;\r
+ qp_init_attr.qp_type = to_qp_type(p_create_attr->qp_type);\r
+ qp_init_attr.port_num = port_num;\r
+\r
+ // create qp \r
+ p_ib_qp = ibv_create_qp( p_ib_pd, &qp_init_attr, p_uctx, p_umv_buf );\r
+ if (IS_ERR(p_ib_qp)) {\r
+ err = PTR_ERR(p_ib_qp);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP,\r
+ ("ibv_create_qp failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_create_qp;\r
+ }\r
+\r
+ // fill the object\r
+ p_ib_qp->x.ctx = (void*)qp_uctx;\r
+\r
+ // Query QP to obtain requested attributes\r
+ if (p_qp_attr) {\r
+ status = mlnx_query_qp((ib_qp_handle_t)p_ib_qp, p_qp_attr, p_umv_buf);\r
+ if (status != IB_SUCCESS)\r
+ goto err_query_qp;\r
+ }\r
+ \r
+ // return the results\r
+ if (ph_qp) *ph_qp = (ib_qp_handle_t)p_ib_qp;\r
+\r
+ status = IB_SUCCESS;\r
+ goto end;\r
+\r
+err_query_qp:\r
+ ib_destroy_qp( p_ib_qp );\r
+err_create_qp:\r
+err_inval_params:\r
+end:\r
+ if (p_umv_buf && p_umv_buf->command) \r
+ p_umv_buf->status = status;\r
+ if (status != IB_SUCCESS)\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
+ ("completes with ERROR status %x\n", status));\r
+ HCA_EXIT(HCA_DBG_QP);\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_create_spl_qp (\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const uint8_t port_num,\r
+ IN const void *qp_uctx,\r
+ IN const ib_qp_create_t *p_create_attr,\r
+ OUT ib_qp_attr_t *p_qp_attr,\r
+ OUT ib_qp_handle_t *ph_qp )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+ status = __create_qp( h_pd, port_num,\r
+ qp_uctx, p_create_attr, p_qp_attr, ph_qp, NULL );\r
+ \r
+ if (status != IB_SUCCESS)\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
+ ("completes with ERROR status %x\n", status));\r
+ HCA_EXIT(HCA_DBG_QP);\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_create_qp (\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const void *qp_uctx,\r
+ IN const ib_qp_create_t *p_create_attr,\r
+ OUT ib_qp_attr_t *p_qp_attr,\r
+ OUT ib_qp_handle_t *ph_qp,\r
+ IN OUT ci_umv_buf_t *p_umv_buf )\r
+{\r
+ ib_api_status_t status;\r
+\r
+	//NB: the algorithm of mthca_alloc_sqp() requires port_num.\r
+	// The PRM states that special QPs are created in pairs, so\r
+	// it looks like we can always use port_num = 1 here.\r
+ uint8_t port_num = 1;\r
+\r
+ HCA_ENTER(HCA_DBG_QP);\r
+\r
+ status = __create_qp( h_pd, port_num,\r
+ qp_uctx, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );\r
+ \r
+ if (status != IB_SUCCESS)\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
+ ("completes with ERROR status %x\n", status));\r
+ HCA_EXIT(HCA_DBG_QP);\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_modify_qp (\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN const ib_qp_mod_t *p_modify_attr,\r
+ OUT ib_qp_attr_t *p_qp_attr OPTIONAL,\r
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL )\r
+{\r
+ int err;\r
+ ib_api_status_t status;\r
+ struct ib_qp_attr qp_attr;\r
+ int qp_attr_mask;\r
+ struct ib_qp *p_ib_qp = (struct ib_qp *)h_qp;\r
+\r
+ HCA_ENTER(HCA_DBG_QP);\r
+\r
+ // sanity checks\r
+ if( p_umv_buf && p_umv_buf->command ) {\r
+ // sanity checks \r
+ if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||\r
+ !p_umv_buf->p_inout_buf) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_inval_params;\r
+ }\r
+ }\r
+ \r
+ // fill parameters \r
+ status = to_qp_attr( p_ib_qp, from_qp_type(p_ib_qp->qp_type), \r
+ p_modify_attr, &qp_attr, &qp_attr_mask );\r
+ if (status == IB_NOT_DONE)\r
+ goto query_qp;\r
+ if (status != IB_SUCCESS ) \r
+ goto err_mode_unsupported;\r
+\r
+ // modify QP\r
+ err = p_ib_qp->device->modify_qp( p_ib_qp, &qp_attr, qp_attr_mask, NULL);\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,\r
+ ("ibv_modify_qp failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_modify_qp;\r
+ }\r
+\r
+ // Query QP to obtain requested attributes\r
+query_qp: \r
+ if (p_qp_attr) {\r
+ status = mlnx_query_qp ((ib_qp_handle_t)p_ib_qp, p_qp_attr, p_umv_buf);\r
+ if (status != IB_SUCCESS)\r
+ goto err_query_qp;\r
+ }\r
+ \r
+ if( p_umv_buf && p_umv_buf->command ) {\r
+ struct ibv_modify_qp_resp resp;\r
+ resp.attr_mask = qp_attr_mask;\r
+ resp.qp_state = qp_attr.qp_state;\r
+ err = to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("to_umv_buf failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_copy;\r
+ }\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+\r
+err_copy: \r
+err_query_qp:\r
+err_modify_qp: \r
+err_mode_unsupported:\r
+err_inval_params:\r
+ if (p_umv_buf && p_umv_buf->command) \r
+ p_umv_buf->status = status;\r
+ if (status != IB_SUCCESS) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
+ ("completes with ERROR status %x\n", status));\r
+ }\r
+ HCA_EXIT(HCA_DBG_QP);\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_ndi_modify_qp (\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN const ib_qp_mod_t *p_modify_attr,\r
+ OUT ib_qp_attr_t *p_qp_attr OPTIONAL,\r
+ IN const uint32_t buf_size,\r
+ IN uint8_t* const p_outbuf)\r
+{\r
+ ci_umv_buf_t umv_buf;\r
+ ib_api_status_t status;\r
+ struct ibv_modify_qp_resp resp;\r
+ void *buf = &resp;\r
+\r
+ HCA_ENTER(HCA_DBG_QP);\r
+\r
+ /* imitate umv_buf */\r
+	umv_buf.command = TRUE;	/* special case for NDI: imitate a user-mode call, where command is normally TRUE */\r
+ umv_buf.input_size = 0;\r
+ umv_buf.output_size = sizeof(struct ibv_modify_qp_resp);\r
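+	/* a kernel buffer works here too: its address is simply stored in the 64-bit scalar field */\r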
+ umv_buf.p_inout_buf = (ULONG_PTR)buf;\r
+\r
+ status = mlnx_modify_qp ( h_qp, p_modify_attr, p_qp_attr, &umv_buf );\r
+\r
+ if (status == IB_SUCCESS) {\r
+ cl_memclr( p_outbuf, buf_size );\r
+ *p_outbuf = resp.qp_state;\r
+ }\r
+\r
+ HCA_EXIT(HCA_DBG_QP);\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_qp (\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN const uint64_t timewait )\r
+{\r
+ ib_api_status_t status;\r
+ int err;\r
+ struct ib_qp *p_ib_qp = (struct ib_qp *)h_qp;\r
+\r
+ UNUSED_PARAM( timewait );\r
+\r
+ HCA_ENTER( HCA_DBG_QP);\r
+\r
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,\r
+ ("qpnum %#x, pcs %p\n", p_ib_qp->qp_num, PsGetCurrentProcess()) );\r
+\r
+ err = ib_destroy_qp( p_ib_qp );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
+ ("ibv_destroy_qp failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_destroy_qp;\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+\r
+err_destroy_qp:\r
+ if (status != IB_SUCCESS)\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
+ ("completes with ERROR status %x\n", status));\r
+ HCA_EXIT(HCA_DBG_QP);\r
+ return status;\r
+}\r
+\r
+void\r
+mlnx_qp_if(\r
+ IN OUT ci_interface_t *p_interface )\r
+{\r
+ p_interface->create_qp = mlnx_create_qp;\r
+ p_interface->create_spl_qp = mlnx_create_spl_qp;\r
+ p_interface->modify_qp = mlnx_modify_qp;\r
+ p_interface->ndi_modify_qp = mlnx_ndi_modify_qp;\r
+ p_interface->query_qp = mlnx_query_qp;\r
+ p_interface->destroy_qp = mlnx_destroy_qp;\r
+}\r
+\r
/*\r
* Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
* Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
*\r
* This software is available to you under the OpenIB.org BSD license\r
* below:\r
\r
if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf ) {\r
// prepare user parameters\r
- p_req = (struct ibv_create_cq*)(void*)p_umv_buf->p_inout_buf;\r
- p_resp = (struct ibv_create_cq_resp*)(void*)\r
+ p_req = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR)\r
p_umv_buf->p_inout_buf;\r
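+	// INIT_UDATA(&udata, inbuf, outbuf, inlen, outlen) just records the user\r
+	// request/response buffers and their sizes for the core verbs code\r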
INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->cqn, \r
sizeof(struct mlx4_ib_create_cq), sizeof(struct mlx4_ib_create_cq_resp));\r
\r
if ( p_uctx && p_umv_buf && p_umv_buf->command ) {\r
// prepare user parameters\r
- p_req = (struct ibv_create_qp*)(void*)p_umv_buf->p_inout_buf;\r
- p_resp = (struct ibv_create_qp_resp*)(void*)p_umv_buf->p_inout_buf;\r
+ p_req = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
INIT_UDATA(&udata, &p_req->buf_addr, NULL, \r
sizeof(struct mlx4_ib_create_qp), 0);\r
}\r
\r
if ( p_uctx && p_umv_buf && p_umv_buf->p_inout_buf) {\r
// prepare user parameters\r
- p_req = (struct ibv_create_srq*)(void*)p_umv_buf->p_inout_buf;\r
- p_resp = (struct ibv_create_srq_resp*)(void*)p_umv_buf->p_inout_buf;\r
+ p_req = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
INIT_UDATA(&udata, &p_req->buf_addr, &p_resp->srqn, \r
sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp));\r
}\r
goto err_alloc_ucontext;\r
}\r
p_muctx = to_mucontext(p_uctx);\r
- p_uresp = (struct ibv_get_context_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ p_uresp = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
// fill the rest of ib_ucontext fields \r
p_uctx->device = p_ibdev;\r
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: hca_verbs.c 2073 2007-11-13 11:38:40Z leonid $
- */
-
-#include "precomp.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "vp.tmh"
-#endif
-
-static ib_api_status_t
-mlnx_um_open(
- IN const ib_ca_handle_t h_ca,
- IN OUT ci_umv_buf_t* const p_umv_buf,
- OUT ib_ca_handle_t* const ph_um_ca )
-{
- ib_api_status_t status;
- mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;
- PFDO_DEVICE_DATA p_fdo = hca2fdo(p_hca);
- struct ib_device *p_ibdev = hca2ibdev(p_hca);
- struct ib_ucontext *p_uctx;
- struct ibv_get_context_resp *p_uresp;
-
- HCA_ENTER(HCA_DBG_SHIM);
-
- // sanity check
- ASSERT( p_umv_buf );
- if( !p_umv_buf->command )
- { // no User Verb Provider
- p_uctx = cl_zalloc( sizeof(struct ib_ucontext) );
- if( !p_uctx )
- {
- status = IB_INSUFFICIENT_MEMORY;
- goto err_alloc_ucontext;
- }
- /* Copy the dev info. */
- p_uctx->device = p_ibdev;
- p_umv_buf->output_size = 0;
- status = IB_SUCCESS;
- goto done;
- }
-
- // sanity check
- if ( p_umv_buf->output_size < sizeof(struct ibv_get_context_resp) ||
- !p_umv_buf->p_inout_buf) {
- status = IB_INVALID_PARAMETER;
- goto err_inval_params;
- }
-
- status = ibv_um_open( p_ibdev, p_umv_buf, &p_uctx );
- if (!NT_SUCCESS(status)) {
- goto end;
- }
-
- // fill more parameters for user (sanity checks are in mthca_alloc_ucontext)
- p_uresp = (struct ibv_get_context_resp *)(void*)p_umv_buf->p_inout_buf;
- p_uresp->vend_id = (uint32_t)p_fdo->bus_ib_ifc.pdev->ven_id;
- p_uresp->dev_id = (uint16_t)p_fdo->bus_ib_ifc.pdev->dev_id;
- p_uresp->max_qp_wr = hca2mdev(p_hca)->caps.max_wqes;
- p_uresp->max_cqe = hca2mdev(p_hca)->caps.max_cqes;
- p_uresp->max_sge = min( hca2mdev(p_hca)->caps.max_sq_sg,
- hca2mdev(p_hca)->caps.max_rq_sg );
-
-done:
- // fill the rest of ib_ucontext_ex fields
- atomic_set(&p_uctx->x.usecnt, 0);
- p_uctx->x.va = p_uctx->x.p_mdl = NULL;
- p_uctx->x.fw_if_open = FALSE;
- mutex_init( &p_uctx->x.mutex );
-
- // chain user context to the device
- spin_lock( &p_fdo->uctx_lock );
- cl_qlist_insert_tail( &p_fdo->uctx_list, &p_uctx->x.list_item );
- cl_atomic_inc(&p_fdo->usecnt);
- spin_unlock( &p_fdo->uctx_lock );
-
- // return the result
- if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_uctx;
-
- status = IB_SUCCESS;
- goto end;
-
-err_inval_params:
-err_alloc_ucontext:
-end:
- if (p_umv_buf && p_umv_buf->command)
- p_umv_buf->status = status;
- if (status != IB_SUCCESS)
- {
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,
- ("completes with ERROR status %x\n", status));
- }
- HCA_EXIT(HCA_DBG_SHIM);
- return status;
-}
-
-
-static void
-mlnx_um_close(
- IN ib_ca_handle_t h_ca,
- IN ib_ca_handle_t h_um_ca )
-{
- struct ib_ucontext *p_uctx = (struct ib_ucontext *)h_um_ca;
- PFDO_DEVICE_DATA p_fdo = p_uctx->device->x.p_fdo;
-
- UNUSED_PARAM(h_ca);
-
- if ( !hca_is_livefish(p_fdo))
- unmap_crspace_for_all(p_uctx);
- spin_lock( &p_fdo->uctx_lock );
- cl_qlist_remove_item( &p_fdo->uctx_list, &p_uctx->x.list_item );
- cl_atomic_dec(&p_fdo->usecnt);
- spin_unlock( &p_fdo->uctx_lock );
- if( !p_uctx->x.uar.kva)
- cl_free( h_um_ca ); // no User Verb Provider
- else
- ibv_um_close(p_uctx);
-#if 0
- // TODO: replace where pa_cash.c is found
- pa_cash_print();
-#endif
- return;
-}
-
-
-ib_api_status_t
-mlnx_local_mad (
- IN const ib_ca_handle_t h_ca,
- IN const uint8_t port_num,
- IN const ib_av_attr_t* p_av_attr,
- IN const ib_mad_t *p_mad_in,
- OUT ib_mad_t *p_mad_out )
-{
- int err;
- ib_api_status_t status = IB_SUCCESS;
- mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;
- PFDO_DEVICE_DATA p_fdo = hca2fdo(p_hca);
- struct ib_device *p_ibdev = p_fdo->bus_ib_ifc.p_ibdev;
- //TODO: do we need use flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY) ?
- int mad_flags = 0;
- //TODO: do we need use grh ?
- struct ib_grh *p_grh = NULL;
- ib_wc_t *p_wc = NULL;
-
- HCA_ENTER(HCA_DBG_MAD);
-
- // sanity checks
- if (port_num > 2) {
- status = IB_INVALID_PARAMETER;
- goto err_port_num;
- }
-
- if (p_av_attr){
- p_wc = cl_zalloc(sizeof(ib_wc_t));
- if(!p_wc){
- status = IB_INSUFFICIENT_MEMORY ;
- goto err_wc_alloc;
- }
- //Copy part of the attributes need to fill the mad extended fields in mellanox devices
- p_wc->recv.ud.remote_lid = p_av_attr->dlid;
- p_wc->recv.ud.remote_sl = p_av_attr->sl;
- p_wc->recv.ud.path_bits = p_av_attr->path_bits;
- p_wc->recv.ud.recv_opt = p_av_attr->grh_valid ? IB_RECV_OPT_GRH_VALID : 0;
-
- if(p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID){
- p_grh = cl_zalloc(sizeof(struct _ib_grh));
- if(!p_grh){
- status = IB_INSUFFICIENT_MEMORY ;
- goto err_grh_alloc;
- }
- p_grh->version_tclass_flow = p_av_attr->grh.ver_class_flow;
- p_grh->hop_limit = p_av_attr->grh.hop_limit;
- cl_memcpy( &p_grh->sgid, &p_av_attr->grh.src_gid, sizeof(p_grh->sgid) );
- cl_memcpy( &p_grh->dgid, &p_av_attr->grh.dest_gid, sizeof(p_grh->dgid) );
- // TODO: no direct analogue in IBAL (seems like it is from rmpp)
- p_grh->paylen = 0;
- p_grh->next_hdr = 0;
- }
-
-
- }
-
- HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD,
- ("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",
- (uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class,
- (uint32_t)((ib_smp_t *)p_mad_in)->method,
- (uint32_t)((ib_smp_t *)p_mad_in)->attr_id,
- (uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,
- (uint32_t)((ib_smp_t *)p_mad_in)->hop_count));
-
- // process mad
- err = p_ibdev->process_mad( p_ibdev, mad_flags, (uint8_t)port_num,
- p_wc, p_grh, (struct ib_mad*)p_mad_in, (struct ib_mad*)p_mad_out);
- if (!err) {
- HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD,
- ("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x",
- p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ));
- status = IB_ERROR;
- goto err_process_mad;
- }
-
- if( (p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ||
- p_mad_in->mgmt_class == IB_MCLASS_SUBN_LID) &&
- p_mad_in->attr_id == IB_MAD_ATTR_PORT_INFO )
- {
- ib_port_info_t *p_pi_in, *p_pi_out;
-
- if( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )
- {
- p_pi_in = (ib_port_info_t*)
- ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in );
- p_pi_out = (ib_port_info_t*)
- ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out );
- }
- else
- {
- p_pi_in = (ib_port_info_t*)(p_mad_in + 1);
- p_pi_out = (ib_port_info_t*)(p_mad_out + 1);
- }
-
- /* Work around FW bug 33958 */
- p_pi_out->subnet_timeout &= 0x7F;
- if( p_mad_in->method == IB_MAD_METHOD_SET )
- p_pi_out->subnet_timeout |= (p_pi_in->subnet_timeout & 0x80);
- }
-
- /* Modify direction for Direct MAD */
- if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )
- p_mad_out->status |= IB_SMP_DIRECTION;
-
-
-err_process_mad:
- if(p_grh)
- cl_free(p_grh);
-err_grh_alloc:
- if(p_wc)
- cl_free(p_wc);
-err_wc_alloc:
-err_port_num:
- if (status != IB_SUCCESS)
- {
- HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MAD,
- ("completes with ERROR status %x\n", status));
- }
- HCA_EXIT(HCA_DBG_MAD);
- return status;
-}
-
-
-void
-setup_ci_interface(
- IN const ib_net64_t ca_guid,
- IN const int is_livefish,
- IN OUT ci_interface_t *p_interface )
-{
- cl_memclr(p_interface, sizeof(*p_interface));
-
- /* Guid of the CA. */
- p_interface->guid = ca_guid;
-
- /* Version of this interface. */
- p_interface->version = VERBS_VERSION;
-
- /* UVP name */
- cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);
-
- HCA_PRINT(TRACE_LEVEL_VERBOSE , HCA_DBG_SHIM ,("UVP filename %s\n", p_interface->libname));
-
- /* The real interface. */
- mlnx_pd_if(p_interface);
- p_interface->um_open_ca = mlnx_um_open;
- p_interface->um_close_ca = mlnx_um_close;
- p_interface->vendor_call = fw_access_ctrl;
-
- if (is_livefish) {
- mlnx_ca_if_livefish(p_interface);
- mlnx_mr_if_livefish(p_interface);
- }
- else {
- mlnx_ca_if(p_interface);
- mlnx_av_if(p_interface);
- mlnx_srq_if(p_interface);
- mlnx_qp_if(p_interface);
- mlnx_cq_if(p_interface);
- mlnx_mr_if(p_interface);
- mlnx_direct_if(p_interface);
- mlnx_mcast_if(p_interface);
- p_interface->local_mad = mlnx_local_mad;
- }
-
- return;
-}
-
-
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_verbs.c 2073 2007-11-13 11:38:40Z leonid $\r
+ */\r
+\r
+#include "precomp.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "vp.tmh"\r
+#endif\r
+\r
+static ib_api_status_t\r
+mlnx_um_open(\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN OUT ci_umv_buf_t* const p_umv_buf,\r
+ OUT ib_ca_handle_t* const ph_um_ca )\r
+{\r
+ ib_api_status_t status;\r
+ mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;\r
+ PFDO_DEVICE_DATA p_fdo = hca2fdo(p_hca);\r
+ struct ib_device *p_ibdev = hca2ibdev(p_hca);\r
+ struct ib_ucontext *p_uctx;\r
+ struct ibv_get_context_resp *p_uresp;\r
+\r
+ HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+ // sanity check\r
+ ASSERT( p_umv_buf );\r
+ if( !p_umv_buf->command )\r
+ { // no User Verb Provider\r
+ p_uctx = cl_zalloc( sizeof(struct ib_ucontext) );\r
+ if( !p_uctx )\r
+ {\r
+ status = IB_INSUFFICIENT_MEMORY;\r
+ goto err_alloc_ucontext;\r
+ }\r
+ /* Copy the dev info. */\r
+ p_uctx->device = p_ibdev;\r
+ p_umv_buf->output_size = 0;\r
+ status = IB_SUCCESS;\r
+ goto done;\r
+ }\r
+\r
+ // sanity check\r
+ if ( p_umv_buf->output_size < sizeof(struct ibv_get_context_resp) ||\r
+ !p_umv_buf->p_inout_buf) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_inval_params;\r
+ }\r
+\r
+ status = ibv_um_open( p_ibdev, p_umv_buf, &p_uctx );\r
+ if (!NT_SUCCESS(status)) {\r
+ goto end;\r
+ }\r
+ \r
+ // fill more parameters for user (sanity checks are in mthca_alloc_ucontext) \r
+ p_uresp = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
+ p_uresp->vend_id = (uint32_t)p_fdo->bus_ib_ifc.pdev->ven_id;\r
+ p_uresp->dev_id = (uint16_t)p_fdo->bus_ib_ifc.pdev->dev_id;\r
+ p_uresp->max_qp_wr = hca2mdev(p_hca)->caps.max_wqes;\r
+ p_uresp->max_cqe = hca2mdev(p_hca)->caps.max_cqes;\r
+ p_uresp->max_sge = min( hca2mdev(p_hca)->caps.max_sq_sg,\r
+ hca2mdev(p_hca)->caps.max_rq_sg );\r
+\r
+done:\r
+ // fill the rest of ib_ucontext_ex fields \r
+ atomic_set(&p_uctx->x.usecnt, 0);\r
+ p_uctx->x.va = p_uctx->x.p_mdl = NULL;\r
+ p_uctx->x.fw_if_open = FALSE;\r
+ mutex_init( &p_uctx->x.mutex );\r
+\r
+ // chain user context to the device\r
+ spin_lock( &p_fdo->uctx_lock );\r
+ cl_qlist_insert_tail( &p_fdo->uctx_list, &p_uctx->x.list_item );\r
+ cl_atomic_inc(&p_fdo->usecnt);\r
+ spin_unlock( &p_fdo->uctx_lock );\r
+ \r
+ // return the result\r
+ if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_uctx;\r
+\r
+ status = IB_SUCCESS;\r
+ goto end;\r
+\r
+err_inval_params:\r
+err_alloc_ucontext:\r
+end:\r
+ if (p_umv_buf && p_umv_buf->command) \r
+ p_umv_buf->status = status;\r
+ if (status != IB_SUCCESS) \r
+ {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
+ ("completes with ERROR status %x\n", status));\r
+ }\r
+ HCA_EXIT(HCA_DBG_SHIM);\r
+ return status;\r
+}\r
+\r
+\r
+static void\r
+mlnx_um_close(\r
+ IN ib_ca_handle_t h_ca,\r
+ IN ib_ca_handle_t h_um_ca )\r
+{\r
+ struct ib_ucontext *p_uctx = (struct ib_ucontext *)h_um_ca;\r
+ PFDO_DEVICE_DATA p_fdo = p_uctx->device->x.p_fdo;\r
+\r
+ UNUSED_PARAM(h_ca);\r
+ \r
+ if ( !hca_is_livefish(p_fdo))\r
+ unmap_crspace_for_all(p_uctx);\r
+ spin_lock( &p_fdo->uctx_lock );\r
+ cl_qlist_remove_item( &p_fdo->uctx_list, &p_uctx->x.list_item );\r
+ cl_atomic_dec(&p_fdo->usecnt);\r
+ spin_unlock( &p_fdo->uctx_lock );\r
+ if( !p_uctx->x.uar.kva)\r
+ cl_free( h_um_ca ); // no User Verb Provider\r
+ else \r
+ ibv_um_close(p_uctx);\r
+#if 0\r
+ // TODO: replace where pa_cash.c is found\r
+ pa_cash_print();\r
+#endif\r
+ return;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+mlnx_local_mad (\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN const uint8_t port_num,\r
+ IN const ib_av_attr_t* p_av_attr,\r
+ IN const ib_mad_t *p_mad_in,\r
+ OUT ib_mad_t *p_mad_out )\r
+{\r
+ int err;\r
+ ib_api_status_t status = IB_SUCCESS;\r
+ mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;\r
+ PFDO_DEVICE_DATA p_fdo = hca2fdo(p_hca);\r
+ struct ib_device *p_ibdev = p_fdo->bus_ib_ifc.p_ibdev;\r
+	//TODO: do we need to use the flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY)?\r
+	int mad_flags = 0;\r
+	//TODO: do we need to use the GRH?\r
+ struct ib_grh *p_grh = NULL;\r
+ ib_wc_t *p_wc = NULL;\r
+\r
+ HCA_ENTER(HCA_DBG_MAD);\r
+\r
+ // sanity checks\r
+ if (port_num > 2) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_port_num;\r
+ }\r
+\r
+ if (p_av_attr){\r
+ p_wc = cl_zalloc(sizeof(ib_wc_t));\r
+ if(!p_wc){\r
+ status = IB_INSUFFICIENT_MEMORY ;\r
+ goto err_wc_alloc;\r
+ }\r
+		//Copy the subset of the attributes needed to fill the MAD extended fields on Mellanox devices\r
+ p_wc->recv.ud.remote_lid = p_av_attr->dlid;\r
+ p_wc->recv.ud.remote_sl = p_av_attr->sl;\r
+ p_wc->recv.ud.path_bits = p_av_attr->path_bits;\r
+ p_wc->recv.ud.recv_opt = p_av_attr->grh_valid ? IB_RECV_OPT_GRH_VALID : 0;\r
+\r
+ if(p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID){\r
+ p_grh = cl_zalloc(sizeof(struct _ib_grh));\r
+ if(!p_grh){\r
+ status = IB_INSUFFICIENT_MEMORY ;\r
+ goto err_grh_alloc;\r
+ }\r
+ p_grh->version_tclass_flow = p_av_attr->grh.ver_class_flow;\r
+ p_grh->hop_limit = p_av_attr->grh.hop_limit;\r
+ cl_memcpy( &p_grh->sgid, &p_av_attr->grh.src_gid, sizeof(p_grh->sgid) );\r
+ cl_memcpy( &p_grh->dgid, &p_av_attr->grh.dest_gid, sizeof(p_grh->dgid) );\r
+ // TODO: no direct analogue in IBAL (seems like it is from rmpp)\r
+ p_grh->paylen = 0;\r
+ p_grh->next_hdr = 0;\r
+ }\r
+ \r
+\r
+ }\r
+\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD, \r
+ ("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",\r
+ (uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class, \r
+ (uint32_t)((ib_smp_t *)p_mad_in)->method, \r
+ (uint32_t)((ib_smp_t *)p_mad_in)->attr_id, \r
+ (uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,\r
+ (uint32_t)((ib_smp_t *)p_mad_in)->hop_count));\r
+\r
+ // process mad\r
+ err = p_ibdev->process_mad( p_ibdev, mad_flags, (uint8_t)port_num, \r
+ p_wc, p_grh, (struct ib_mad*)p_mad_in, (struct ib_mad*)p_mad_out);\r
+ if (!err) {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD, \r
+ ("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x",\r
+ p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ));\r
+ status = IB_ERROR;\r
+ goto err_process_mad;\r
+ }\r
+ \r
+ if( (p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
+ p_mad_in->mgmt_class == IB_MCLASS_SUBN_LID) &&\r
+ p_mad_in->attr_id == IB_MAD_ATTR_PORT_INFO )\r
+ {\r
+ ib_port_info_t *p_pi_in, *p_pi_out;\r
+\r
+ if( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+ {\r
+ p_pi_in = (ib_port_info_t*)\r
+ ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in );\r
+ p_pi_out = (ib_port_info_t*)\r
+ ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out );\r
+ }\r
+ else\r
+ {\r
+ p_pi_in = (ib_port_info_t*)(p_mad_in + 1);\r
+ p_pi_out = (ib_port_info_t*)(p_mad_out + 1);\r
+ }\r
+\r
+ /* Work around FW bug 33958 */\r
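+		/* mask off the high bit the FW may set; on a SET, echo back the bit the requester supplied */\r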
+ p_pi_out->subnet_timeout &= 0x7F;\r
+ if( p_mad_in->method == IB_MAD_METHOD_SET )\r
+ p_pi_out->subnet_timeout |= (p_pi_in->subnet_timeout & 0x80);\r
+ }\r
+\r
+ /* Modify direction for Direct MAD */\r
+ if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )\r
+ p_mad_out->status |= IB_SMP_DIRECTION;\r
+\r
+\r
+err_process_mad:\r
+ if(p_grh)\r
+ cl_free(p_grh);\r
+err_grh_alloc:\r
+ if(p_wc)\r
+ cl_free(p_wc);\r
+err_wc_alloc:\r
+err_port_num: \r
+ if (status != IB_SUCCESS)\r
+ {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MAD,\r
+ ("completes with ERROR status %x\n", status));\r
+ }\r
+ HCA_EXIT(HCA_DBG_MAD);\r
+ return status;\r
+}\r
+ \r
+\r
+void\r
+setup_ci_interface(\r
+ IN const ib_net64_t ca_guid,\r
+ IN const int is_livefish,\r
+ IN OUT ci_interface_t *p_interface )\r
+{\r
+ cl_memclr(p_interface, sizeof(*p_interface));\r
+\r
+ /* Guid of the CA. */\r
+ p_interface->guid = ca_guid;\r
+\r
+ /* Version of this interface. */\r
+ p_interface->version = VERBS_VERSION;\r
+\r
+ /* UVP name */\r
+ cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);\r
+\r
+ HCA_PRINT(TRACE_LEVEL_VERBOSE , HCA_DBG_SHIM ,("UVP filename %s\n", p_interface->libname));\r
+\r
+ /* The real interface. */\r
+ mlnx_pd_if(p_interface);\r
+ p_interface->um_open_ca = mlnx_um_open;\r
+ p_interface->um_close_ca = mlnx_um_close;\r
+ p_interface->vendor_call = fw_access_ctrl;\r
+\r
+ if (is_livefish) {\r
+ mlnx_ca_if_livefish(p_interface);\r
+ mlnx_mr_if_livefish(p_interface);\r
+ }\r
+ else { \r
+ mlnx_ca_if(p_interface);\r
+ mlnx_av_if(p_interface);\r
+ mlnx_srq_if(p_interface);\r
+ mlnx_qp_if(p_interface);\r
+ mlnx_cq_if(p_interface);\r
+ mlnx_mr_if(p_interface);\r
+ mlnx_direct_if(p_interface);\r
+ mlnx_mcast_if(p_interface);\r
+ p_interface->local_mad = mlnx_local_mad;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+\r
/*\r
* Copyright (c) 2007 Cisco, Inc. All rights reserved.\r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
*\r
- * This software is available to you under a choice of one of two\r
- * licenses. You may choose to be licensed under the terms of the GNU\r
- * General Public License (GPL) Version 2, available from the file\r
- * COPYING in the main directory of this source tree, or the\r
- * OpenIB.org BSD license below:\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
*\r
* Redistribution and use in source and binary forms, with or\r
* without modification, are permitted provided that the following\r
{\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) );\r
+ p_umv_buf->p_inout_buf =\r
+ (ULONG_PTR)cl_zalloc( sizeof(struct ibv_get_context_resp) );\r
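+		/* kept as a 64-bit scalar; the check below still catches a failed cl_zalloc, which yields 0 */\r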
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
\r
CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
\r
- p_resp = p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_get_context_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
if (IB_SUCCESS == ioctl_status)\r
{\r
*/\r
if ( p_ca_attr != NULL )\r
{\r
- p_umv_buf->p_inout_buf = cl_malloc(byte_count);\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc(byte_count);\r
if ( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_RESOURCES;\r
\r
if (context->p_hca_attr)\r
cl_free(context->p_hca_attr);\r
- context->p_hca_attr = p_umv_buf->p_inout_buf;\r
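+	/* adopt the buffer as the cached CA attributes; the cleanup path below frees it only when it is not adopted */\r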
+ context->p_hca_attr = (ib_ca_attr_t*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
cl_memcpy( context->p_hca_attr, p_ca_attr, p_ca_attr->size );\r
__fixup_ca_attr( context->p_hca_attr, p_ca_attr );\r
\r
}\r
else if (p_umv_buf->p_inout_buf) \r
{\r
- cl_free(p_umv_buf->p_inout_buf);\r
+ cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );\r
}\r
}\r
\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_malloc( sizeof(struct ibv_alloc_pd_resp) );\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( sizeof(struct ibv_alloc_pd_resp) );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
\r
CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
\r
- p_resp = p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_alloc_pd_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
if (IB_SUCCESS == ioctl_status)\r
{\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_malloc( size );\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( size );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);\r
p_umv_buf->command = TRUE;\r
\r
- p_create_cq = p_umv_buf->p_inout_buf;\r
+ p_create_cq = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
// Mlx4 code:\r
\r
cl_free(cq);\r
err_cq:\r
err_cqe_size:\r
- cl_free(p_umv_buf->p_inout_buf);\r
+ cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );\r
err_umv_buf:\r
end:\r
return status;\r
\r
CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
\r
- p_resp = p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
if (IB_SUCCESS == ioctl_status)\r
{\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_malloc( size ); \r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( size ); \r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);\r
p_umv_buf->command = TRUE;\r
\r
- p_create_srq = p_umv_buf->p_inout_buf;\r
+ p_create_srq = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
// Mlx4 code:\r
\r
err_lock:\r
cl_free(srq);\r
err_alloc_srq:\r
- cl_free(p_umv_buf->p_inout_buf);\r
+ cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );\r
err_params: err_memory:\r
end:\r
return status;\r
\r
CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
\r
- p_resp = p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
if (IB_SUCCESS == ioctl_status)\r
{\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_malloc(size);\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc(size);\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);\r
p_umv_buf->command = TRUE;\r
\r
- p_create_qp = p_umv_buf->p_inout_buf;\r
+ p_create_qp = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
/* convert attributes */\r
memset( &attr, 0, sizeof(attr) );\r
err_alloc_qp_buff:\r
cl_free(qp); \r
err_alloc_qp:\r
- cl_free(p_umv_buf->p_inout_buf);\r
+ cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );\r
err_memory:\r
end:\r
return status;\r
\r
CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
\r
- p_resp = p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
if (IB_SUCCESS == ioctl_status)\r
{\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_malloc(sizeof(struct ibv_modify_qp_resp));\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc(sizeof(struct ibv_modify_qp_resp));\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
\r
CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
\r
- p_resp = p_umv_buf->p_inout_buf;\r
+ p_resp = (struct ibv_modify_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
if (IB_SUCCESS == ioctl_status) \r
{\r
umv_buf.command = 1;\r
umv_buf.input_size = umv_buf.status = 0;\r
umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp);\r
- umv_buf.p_inout_buf = &resp;\r
+ umv_buf.p_inout_buf = (ULONG_PTR)&resp;\r
//NB: Pay attention ! Ucontext parameter is important here:\r
// when it is present (i.e. - for user space) - mthca_alloc_pd won't create MR\r
p_context->pd = ibv_alloc_pd(ib_dev, p_context, &umv_buf);\r
}\r
\r
// fill more parameters for user (sanity checks are in mthca_alloc_ucontext)\r
- uresp_p = (struct ibv_get_context_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ uresp_p = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
uresp_p->uar_addr = (uint64_t)(UINT_PTR)p_context->user_uar;\r
uresp_p->pd_handle = resp.pd_handle;\r
uresp_p->pdn = resp.pdn;\r
umv_buf.command = TRUE; /* special case for NDI. Usually it's TRUE */\r
umv_buf.input_size = 0;\r
umv_buf.output_size = sizeof(struct ibv_modify_qp_resp);\r
- umv_buf.p_inout_buf = buf;\r
+ umv_buf.p_inout_buf = (ULONG_PTR)buf;\r
\r
status = mlnx_modify_qp ( h_qp, p_modify_attr, p_qp_attr, &umv_buf );\r
\r
\r
// for user call we need also allocate MR\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- struct ibv_create_ah *create_ah = (struct ibv_create_ah *)(void*)p_umv_buf->p_inout_buf;\r
+ struct ibv_create_ah *create_ah = (struct ibv_create_ah *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
// create region; destroy will be done on dealloc_pd\r
ib_mr = ibv_reg_mr( \r
\r
/* fill obligatory fields */\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ create_ah_resp = (struct ibv_create_ah_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
create_ah_resp->user_handle = user_handle;\r
}\r
\r
\r
// fill results for user\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
create_ah_resp->start = start;\r
create_ah_resp->mr.lkey = ib_mr->lkey;\r
create_ah_resp->mr.rkey = ib_mr->rkey;\r
\r
// for user call we need also allocate MR\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- struct ibv_create_srq *create_srp = (struct ibv_create_srq *)(void*)p_umv_buf->p_inout_buf;\r
+ struct ibv_create_srq *create_srp = (struct ibv_create_srq *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
// create region\r
ib_mr = ibv_reg_mr( \r
\r
/* fill obligatory fields */\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- create_srq_resp = (struct ibv_create_srq_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ create_srq_resp = (struct ibv_create_srq_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
create_srq_resp->user_handle = user_handle;\r
}\r
\r
\r
// for user call we need also allocate MR\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- struct ibv_create_qp *create_qp = (struct ibv_create_qp *)(void*)p_umv_buf->p_inout_buf;\r
+ struct ibv_create_qp *create_qp = (struct ibv_create_qp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
// create region\r
ib_mr = ibv_reg_mr( \r
// fill results for user\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
struct mthca_qp *qp = (struct mthca_qp *)ib_qp;\r
- struct ibv_create_qp_resp *create_qp_resp = (struct ibv_create_qp_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ struct ibv_create_qp_resp *create_qp_resp = (struct ibv_create_qp_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
ib_qp->ib_mr = ib_mr;\r
create_qp_resp->qpn = ib_qp->qp_num;\r
create_qp_resp->user_handle = user_handle;\r
\r
// for user call we need also allocate MR\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- struct ibv_create_cq *create_cq = (struct ibv_create_cq *)(void*)p_umv_buf->p_inout_buf;\r
+ struct ibv_create_cq *create_cq = (struct ibv_create_cq *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
\r
// create region\r
ib_mr = ibv_reg_mr( \r
\r
// fill results\r
if (context && p_umv_buf && p_umv_buf->p_inout_buf) {\r
- struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
cq->ib_mr = ib_mr;\r
create_cq_resp->user_handle = user_handle;\r
create_cq_resp->mr.lkey = ib_mr->lkey;\r
if (err)\r
goto err_free;\r
\r
- if (context ) {\r
- struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(void*)p_umv_buf->p_inout_buf;\r
+ if (context) {\r
+ struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
create_cq_resp->cqn = cq->cqn;\r
}\r
\r
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#ifndef MTHCA_PROVIDER_H
-#define MTHCA_PROVIDER_H
-
-#include <ib_verbs.h>
-#include <ib_pack.h>
-#include <iba/ib_ci.h>
-
-typedef uint32_t mthca_mpt_access_t;
-#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)
-#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13)
-#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12)
-#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11)
-#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10)
-
-union mthca_buf {
- struct scatterlist direct;
- struct scatterlist *page_list;
-};
-
-struct mthca_uar {
- PFN_NUMBER pfn;
- int index;
-};
-
-struct mthca_user_db_table;
-
-struct mthca_ucontext {
- struct ib_ucontext ibucontext;
- struct mthca_uar uar;
- struct mthca_user_db_table *db_tab;
- // for user UAR
- PMDL mdl;
- PVOID kva;
- SIZE_T uar_size;
-};
-
-struct mthca_mtt;
-
-struct mthca_mr {
- //NB: the start of this structure is to be equal to mlnx_mro_t !
- //NB: the structure was not inserted here for not to mix driver and provider structures
- struct ib_mr ibmr;
- struct mthca_mtt *mtt;
- int iobuf_used;
- mt_iobuf_t iobuf;
- void *secure_handle;
-};
-
-struct mthca_fmr {
- struct ib_fmr ibfmr;
- struct ib_fmr_attr attr;
- struct mthca_mtt *mtt;
- int maps;
- union {
- struct {
- struct mthca_mpt_entry __iomem *mpt;
- u64 __iomem *mtts;
- } tavor;
- struct {
- struct mthca_mpt_entry *mpt;
- __be64 *mtts;
- } arbel;
- } mem;
-};
-
-struct mthca_pd {
- struct ib_pd ibpd;
- u32 pd_num;
- atomic_t sqp_count;
- struct mthca_mr ntmr;
- int privileged;
-};
-
-struct mthca_eq {
- struct mthca_dev *dev;
- int eqn;
- int eq_num;
- u32 eqn_mask;
- u32 cons_index;
- u16 msi_x_vector;
- u16 msi_x_entry;
- int have_irq;
- int nent;
- struct scatterlist *page_list;
- struct mthca_mr mr;
- KDPC dpc; /* DPC for MSI-X interrupts */
- spinlock_t lock; /* spinlock for simult DPCs */
-};
-
-struct mthca_av;
-
-enum mthca_ah_type {
- MTHCA_AH_ON_HCA,
- MTHCA_AH_PCI_POOL,
- MTHCA_AH_KMALLOC
-};
-
-struct mthca_ah {
- struct ib_ah ibah;
- enum mthca_ah_type type;
- u32 key;
- struct mthca_av *av;
- dma_addr_t avdma;
-};
-
-/*
- * Quick description of our CQ/QP locking scheme:
- *
- * We have one global lock that protects dev->cq/qp_table. Each
- * struct mthca_cq/qp also has its own lock. An individual qp lock
- * may be taken inside of an individual cq lock. Both cqs attached to
- * a qp may be locked, with the send cq locked first. No other
- * nesting should be done.
- *
- * Each struct mthca_cq/qp also has an atomic_t ref count. The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
- *
- * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
- * destroy function to sleep on.
- *
- * This means that access from the consumer API requires nothing but
- * taking the struct's lock.
- *
- * Access because of a completion event should go as follows:
- * - lock cq/qp_table and look up struct
- * - increment ref count in struct
- * - drop cq/qp_table lock
- * - lock struct, do your thing, and unlock struct
- * - decrement ref count; if zero, wake up waiters
- *
- * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
- * - wait_event until ref count is zero
- *
- * It is the consumer's responsibilty to make sure that no QP
- * operations (WQE posting or state modification) are pending when the
- * QP is destroyed. Also, the consumer must make sure that calls to
- * qp_modify are serialized.
- *
- * Possible optimizations (wait for profile data to see if/where we
- * have locks bouncing between CPUs):
- * - split cq/qp table lock into n separate (cache-aligned) locks,
- * indexed (say) by the page in the table
- * - split QP struct lock into three (one for common info, one for the
- * send queue and one for the receive queue)
- */
-//TODO: check correctness of the above requirement: "It is the consumer's responsibilty to make sure that no QP
-// operations (WQE posting or state modification) are pending when the QP is destroyed"
-
-struct mthca_cq {
- struct ib_cq ibcq;
- void *cq_context; // leo: for IBAL shim
- spinlock_t lock;
- atomic_t refcount;
- int cqn;
- u32 cons_index;
- int is_direct;
- int is_kernel;
-
- /* Next fields are Arbel only */
- int set_ci_db_index;
- __be32 *set_ci_db;
- int arm_db_index;
- __be32 *arm_db;
- int arm_sn;
- int u_arm_db_index;
- int *p_u_arm_sn;
-
- union mthca_buf queue;
- struct mthca_mr mr;
- wait_queue_head_t wait;
- KMUTEX mutex;
-};
-
-struct mthca_srq {
- struct ib_srq ibsrq;
- spinlock_t lock;
- atomic_t refcount;
- int srqn;
- int max;
- int max_gs;
- int wqe_shift;
- int first_free;
- int last_free;
- u16 counter; /* Arbel only */
- int db_index; /* Arbel only */
- __be32 *db; /* Arbel only */
- void *last;
-
- int is_direct;
- u64 *wrid;
- union mthca_buf queue;
- struct mthca_mr mr;
-
- wait_queue_head_t wait;
- KMUTEX mutex;
- void *srq_context;
-};
-
-struct mthca_wq {
- spinlock_t lock;
- int max;
- unsigned next_ind;
- unsigned last_comp;
- unsigned head;
- unsigned tail;
- void *last;
- int max_gs;
- int wqe_shift;
-
- int db_index; /* Arbel only */
- __be32 *db;
-};
-
-struct mthca_qp {
- struct ib_qp ibqp;
- void *qp_context; // leo: for IBAL shim
- //TODO: added just because absense of ibv_query_qp
- // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr;
- struct ib_qp_init_attr qp_init_attr; // leo: for query_qp
- atomic_t refcount;
- u32 qpn;
- int is_direct;
- u8 transport;
- u8 state;
- u8 atomic_rd_en;
- u8 resp_depth;
-
- struct mthca_mr mr;
-
- struct mthca_wq rq;
- struct mthca_wq sq;
- enum ib_sig_type sq_policy;
- int send_wqe_offset;
- int max_inline_data;
-
- u64 *wrid;
- union mthca_buf queue;
-
- wait_queue_head_t wait;
- KMUTEX mutex;
-};
-
-struct mthca_sqp {
- struct mthca_qp qp;
- int port;
- int pkey_index;
- u32 qkey;
- u32 send_psn;
- struct ib_ud_header ud_header;
- struct scatterlist sg;
-};
-
-static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
-{
- return container_of(ibucontext, struct mthca_ucontext, ibucontext);
-}
-
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct mthca_fmr, ibfmr);
-}
-
-static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct mthca_mr, ibmr);
-}
-
-static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct mthca_pd, ibpd);
-}
-
-static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct mthca_ah, ibah);
-}
-
-static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct mthca_cq, ibcq);
-}
-
-static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
-{
- return container_of(ibsrq, struct mthca_srq, ibsrq);
-}
-
-static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct mthca_qp, ibqp);
-}
-
-static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
-{
- return container_of(qp, struct mthca_sqp, qp);
-}
-
-static inline uint8_t start_port(struct ib_device *device)
-{
- return device->node_type == IB_NODE_SWITCH ? 0 : 1;
-}
-
-static inline uint8_t end_port(struct ib_device *device)
-{
- return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
-}
-
-static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)
-{
- RtlCopyMemory(dest, p_umv_buf->p_inout_buf, len);
- return 0;
-}
-
-static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)
-{
- if (p_umv_buf->output_size < len) {
- p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
- p_umv_buf->output_size = 0;
- return -EFAULT;
- }
- RtlCopyMemory(p_umv_buf->p_inout_buf, src, len);
- p_umv_buf->status = IB_SUCCESS;
- p_umv_buf->output_size = (uint32_t)len;
- return 0;
-}
-
-
-
-// API
-int mthca_query_device(struct ib_device *ibdev,
- struct ib_device_attr *props);
-
-int mthca_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props);
-
-int mthca_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props);
-
-struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- ci_umv_buf_t* const p_umv_buf);
-
-int mthca_dealloc_pd(struct ib_pd *pd);
-
-int mthca_dereg_mr(struct ib_mr *mr);
-
-int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr);
-
-struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
- ci_umv_buf_t* const p_umv_buf);
-
-int mthca_dealloc_ucontext(struct ib_ucontext *context);
-
-struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);
-
-int mthca_poll_cq_list(
- IN struct ib_cq *ibcq,
- IN OUT ib_wc_t** const pp_free_wclist,
- OUT ib_wc_t** const pp_done_wclist );
-
-
-#endif /* MTHCA_PROVIDER_H */
+/*\r
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.\r
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.\r
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses. You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+#ifndef MTHCA_PROVIDER_H\r
+#define MTHCA_PROVIDER_H\r
+\r
+#include <ib_verbs.h>\r
+#include <ib_pack.h>\r
+#include <iba/ib_ci.h>\r
+\r
+typedef uint32_t mthca_mpt_access_t;\r
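+/* Access-permission bits as laid out in an MPT (memory protection table) entry. */\r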
+#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)\r
+#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13)\r
+#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12)\r
+#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11)\r
+#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10)\r
+\r
+union mthca_buf {\r
+ struct scatterlist direct;\r
+ struct scatterlist *page_list;\r
+};\r
+\r
+struct mthca_uar {\r
+ PFN_NUMBER pfn;\r
+ int index;\r
+};\r
+\r
+struct mthca_user_db_table;\r
+\r
+struct mthca_ucontext {\r
+ struct ib_ucontext ibucontext;\r
+ struct mthca_uar uar;\r
+ struct mthca_user_db_table *db_tab;\r
+ // for user UAR \r
+ PMDL mdl;\r
+ PVOID kva;\r
+ SIZE_T uar_size; \r
+};\r
+\r
+struct mthca_mtt;\r
+\r
+struct mthca_mr {\r
+	//NB: the start of this structure must be identical to mlnx_mro_t !\r
+	//NB: that structure was not inserted here so as not to mix driver and provider structures\r
+ struct ib_mr ibmr;\r
+ struct mthca_mtt *mtt;\r
+ int iobuf_used;\r
+ mt_iobuf_t iobuf;\r
+ void *secure_handle;\r
+};\r
+\r
+struct mthca_fmr {\r
+ struct ib_fmr ibfmr;\r
+ struct ib_fmr_attr attr;\r
+ struct mthca_mtt *mtt;\r
+ int maps;\r
+ union {\r
+ struct {\r
+ struct mthca_mpt_entry __iomem *mpt;\r
+ u64 __iomem *mtts;\r
+ } tavor;\r
+ struct {\r
+ struct mthca_mpt_entry *mpt;\r
+ __be64 *mtts;\r
+ } arbel;\r
+ } mem;\r
+};\r
+\r
+struct mthca_pd {\r
+ struct ib_pd ibpd;\r
+ u32 pd_num;\r
+ atomic_t sqp_count;\r
+ struct mthca_mr ntmr;\r
+ int privileged;\r
+};\r
+\r
+struct mthca_eq {\r
+ struct mthca_dev *dev;\r
+ int eqn;\r
+ int eq_num;\r
+ u32 eqn_mask;\r
+ u32 cons_index;\r
+ u16 msi_x_vector;\r
+ u16 msi_x_entry;\r
+ int have_irq;\r
+ int nent;\r
+ struct scatterlist *page_list;\r
+ struct mthca_mr mr;\r
+ KDPC dpc; /* DPC for MSI-X interrupts */\r
+ spinlock_t lock; /* spinlock for simult DPCs */\r
+};\r
+\r
+struct mthca_av;\r
+\r
+enum mthca_ah_type {\r
+ MTHCA_AH_ON_HCA,\r
+ MTHCA_AH_PCI_POOL,\r
+ MTHCA_AH_KMALLOC\r
+};\r
+\r
+struct mthca_ah {\r
+ struct ib_ah ibah;\r
+ enum mthca_ah_type type;\r
+ u32 key;\r
+ struct mthca_av *av;\r
+ dma_addr_t avdma;\r
+};\r
+\r
+/*\r
+ * Quick description of our CQ/QP locking scheme:\r
+ *\r
+ * We have one global lock that protects dev->cq/qp_table. Each\r
+ * struct mthca_cq/qp also has its own lock. An individual qp lock\r
+ * may be taken inside of an individual cq lock. Both cqs attached to\r
+ * a qp may be locked, with the send cq locked first. No other\r
+ * nesting should be done.\r
+ *\r
+ * Each struct mthca_cq/qp also has an atomic_t ref count. The\r
+ * pointer from the cq/qp_table to the struct counts as one reference.\r
+ * This reference also is good for access through the consumer API, so\r
+ * modifying the CQ/QP etc doesn't need to take another reference.\r
+ * Access because of a completion being polled does need a reference.\r
+ *\r
+ * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the\r
+ * destroy function to sleep on.\r
+ *\r
+ * This means that access from the consumer API requires nothing but\r
+ * taking the struct's lock.\r
+ *\r
+ * Access because of a completion event should go as follows:\r
+ * - lock cq/qp_table and look up struct\r
+ * - increment ref count in struct\r
+ * - drop cq/qp_table lock\r
+ * - lock struct, do your thing, and unlock struct\r
+ * - decrement ref count; if zero, wake up waiters\r
+ *\r
+ * To destroy a CQ/QP, we can do the following:\r
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock\r
+ * - decrement ref count\r
+ * - wait_event until ref count is zero\r
+ *\r
+ * It is the consumer's responsibility to make sure that no QP\r
+ * operations (WQE posting or state modification) are pending when the\r
+ * QP is destroyed. Also, the consumer must make sure that calls to\r
+ * qp_modify are serialized.\r
+ *\r
+ * Possible optimizations (wait for profile data to see if/where we\r
+ * have locks bouncing between CPUs):\r
+ * - split cq/qp table lock into n separate (cache-aligned) locks,\r
+ * indexed (say) by the page in the table\r
+ * - split QP struct lock into three (one for common info, one for the\r
+ * send queue and one for the receive queue)\r
+ */\r
+//TODO: check correctness of the above requirement: "It is the consumer's responsibility to make sure that no QP\r
+// operations (WQE posting or state modification) are pending when the QP is destroyed"\r
+\r
+struct mthca_cq {\r
+ struct ib_cq ibcq;\r
+ void *cq_context; // leo: for IBAL shim\r
+ spinlock_t lock;\r
+ atomic_t refcount;\r
+ int cqn;\r
+ u32 cons_index;\r
+ int is_direct;\r
+ int is_kernel;\r
+\r
+ /* Next fields are Arbel only */\r
+ int set_ci_db_index;\r
+ __be32 *set_ci_db;\r
+ int arm_db_index;\r
+ __be32 *arm_db;\r
+ int arm_sn;\r
+ int u_arm_db_index;\r
+ int *p_u_arm_sn;\r
+\r
+ union mthca_buf queue;\r
+ struct mthca_mr mr;\r
+ wait_queue_head_t wait;\r
+ KMUTEX mutex;\r
+};\r
+\r
+struct mthca_srq {\r
+ struct ib_srq ibsrq;\r
+ spinlock_t lock;\r
+ atomic_t refcount;\r
+ int srqn;\r
+ int max;\r
+ int max_gs;\r
+ int wqe_shift;\r
+ int first_free;\r
+ int last_free;\r
+ u16 counter; /* Arbel only */\r
+ int db_index; /* Arbel only */\r
+ __be32 *db; /* Arbel only */\r
+ void *last;\r
+\r
+ int is_direct;\r
+ u64 *wrid;\r
+ union mthca_buf queue;\r
+ struct mthca_mr mr;\r
+\r
+ wait_queue_head_t wait;\r
+ KMUTEX mutex;\r
+ void *srq_context; \r
+};\r
+\r
+struct mthca_wq {\r
+ spinlock_t lock;\r
+ int max;\r
+ unsigned next_ind;\r
+ unsigned last_comp;\r
+ unsigned head;\r
+ unsigned tail;\r
+ void *last;\r
+ int max_gs;\r
+ int wqe_shift;\r
+\r
+ int db_index; /* Arbel only */\r
+ __be32 *db;\r
+};\r
+\r
+struct mthca_qp {\r
+ struct ib_qp ibqp;\r
+ void *qp_context; // leo: for IBAL shim\r
+	//TODO: added only because ibv_query_qp is absent;\r
+	// later this may be worth replacing with struct ib_qp_attr qp_attr;\r
+ struct ib_qp_init_attr qp_init_attr; // leo: for query_qp\r
+ atomic_t refcount;\r
+ u32 qpn;\r
+ int is_direct;\r
+ u8 transport;\r
+ u8 state;\r
+ u8 atomic_rd_en;\r
+ u8 resp_depth;\r
+\r
+ struct mthca_mr mr;\r
+\r
+ struct mthca_wq rq;\r
+ struct mthca_wq sq;\r
+ enum ib_sig_type sq_policy;\r
+ int send_wqe_offset;\r
+ int max_inline_data;\r
+\r
+ u64 *wrid;\r
+ union mthca_buf queue;\r
+\r
+ wait_queue_head_t wait;\r
+ KMUTEX mutex;\r
+};\r
+\r
+struct mthca_sqp {\r
+ struct mthca_qp qp;\r
+ int port;\r
+ int pkey_index;\r
+ u32 qkey;\r
+ u32 send_psn;\r
+ struct ib_ud_header ud_header;\r
+ struct scatterlist sg;\r
+};\r
+\r
+static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)\r
+{\r
+ return container_of(ibucontext, struct mthca_ucontext, ibucontext);\r
+}\r
+\r
+static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibfmr)\r
+{\r
+ return container_of(ibfmr, struct mthca_fmr, ibfmr);\r
+}\r
+\r
+static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)\r
+{\r
+ return container_of(ibmr, struct mthca_mr, ibmr);\r
+}\r
+\r
+static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)\r
+{\r
+ return container_of(ibpd, struct mthca_pd, ibpd);\r
+}\r
+\r
+static inline struct mthca_ah *to_mah(struct ib_ah *ibah)\r
+{\r
+ return container_of(ibah, struct mthca_ah, ibah);\r
+}\r
+\r
+static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)\r
+{\r
+ return container_of(ibcq, struct mthca_cq, ibcq);\r
+}\r
+\r
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)\r
+{\r
+ return container_of(ibsrq, struct mthca_srq, ibsrq);\r
+}\r
+\r
+static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)\r
+{\r
+ return container_of(ibqp, struct mthca_qp, ibqp);\r
+}\r
+\r
+static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)\r
+{\r
+ return container_of(qp, struct mthca_sqp, qp);\r
+}\r
+\r
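+/* A switch exposes only management port 0; a CA numbers its physical\r
+ * ports 1..phys_port_cnt. */\r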
+static inline uint8_t start_port(struct ib_device *device)\r
+{\r
+ return device->node_type == IB_NODE_SWITCH ? 0 : 1;\r
+}\r
+\r
+static inline uint8_t end_port(struct ib_device *device)\r
+{\r
+ return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;\r
+}\r
+\r
+static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)\r
+{\r
+ RtlCopyMemory(dest, (void*)(ULONG_PTR)p_umv_buf->p_inout_buf, len);\r
+ return 0;\r
+}\r
+\r
+static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)\r
+{\r
+ if (p_umv_buf->output_size < len) {\r
+ p_umv_buf->status = IB_INSUFFICIENT_MEMORY;\r
+ p_umv_buf->output_size = 0;\r
+ return -EFAULT;\r
+ }\r
+ RtlCopyMemory((void*)(ULONG_PTR)p_umv_buf->p_inout_buf, src, len);\r
+ p_umv_buf->status = IB_SUCCESS;\r
+ p_umv_buf->output_size = (uint32_t)len;\r
+ return 0;\r
+}\r
+\r
+\r
+\r
+// API\r
+int mthca_query_device(struct ib_device *ibdev,\r
+ struct ib_device_attr *props);\r
+\r
+int mthca_query_port(struct ib_device *ibdev,\r
+ u8 port, struct ib_port_attr *props);\r
+\r
+int mthca_modify_port(struct ib_device *ibdev,\r
+ u8 port, int port_modify_mask,\r
+ struct ib_port_modify *props);\r
+\r
+struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,\r
+ struct ib_ucontext *context,\r
+ ci_umv_buf_t* const p_umv_buf);\r
+\r
+int mthca_dealloc_pd(struct ib_pd *pd);\r
+\r
+int mthca_dereg_mr(struct ib_mr *mr);\r
+\r
+int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr);\r
+\r
+struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,\r
+ ci_umv_buf_t* const p_umv_buf);\r
+\r
+int mthca_dealloc_ucontext(struct ib_ucontext *context);\r
+\r
+struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);\r
+\r
+int mthca_poll_cq_list(\r
+ IN struct ib_cq *ibcq, \r
+ IN OUT ib_wc_t** const pp_free_wclist,\r
+ OUT ib_wc_t** const pp_done_wclist );\r
+\r
+\r
+#endif /* MTHCA_PROVIDER_H */\r
\r
// allocate parameters\r
if( !p_umv_buf->p_inout_buf ) {\r
- p_umv_buf->p_inout_buf = cl_zalloc( size );\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_zalloc( size );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
{\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) );\r
+ p_umv_buf->p_inout_buf =\r
+ (ULONG_PTR)cl_zalloc( sizeof(struct ibv_get_context_resp) );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
*/\r
if ( p_ca_attr != NULL )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc(byte_count);\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_zalloc(byte_count);\r
if ( !p_umv_buf->p_inout_buf )\r
{\r
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,\r
byte_count && !h_uvp_ca->p_hca_attr )\r
{\r
CL_ASSERT( byte_count >= p_ca_attr->size );\r
- h_uvp_ca->p_hca_attr = p_umv_buf->p_inout_buf;\r
+ h_uvp_ca->p_hca_attr = (ib_ca_attr_t*)(ULONG_PTR)p_umv_buf->p_inout_buf;\r
ib_copy_ca_attr( h_uvp_ca->p_hca_attr, p_ca_attr );\r
}\r
else if (p_umv_buf->p_inout_buf) \r
{\r
- cl_free (p_umv_buf->p_inout_buf);\r
+ cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );\r
}\r
\r
UVP_EXIT(UVP_DBG_SHIM);\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc( size );\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_zalloc( size );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
goto end;\r
\r
err_alloc_cq:\r
- cl_free(p_umv_buf->p_inout_buf);\r
+ cl_free((void*)(ULONG_PTR)p_umv_buf->p_inout_buf);\r
err_memory:\r
end:\r
UVP_EXIT(UVP_DBG_CQ);\r
/*\r
* Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
*\r
* This software is available to you under the OpenIB.org BSD license\r
* below:\r
mlnx_ual_hobul_t *p_hobul;\r
} mlnx_ual_pd_info_t;\r
\r
-typedef struct _ib_mw\r
-{\r
-TO_LONG_PTR( ib_pd_handle_t , h_uvp_pd) ; \r
- uint32_t rkey;\r
-} mlnx_ual_mw_info_t;\r
-\r
\r
UVP_ENTER(UVP_DBG_SHIM);\r
CL_ASSERT(p_umv_buf);\r
- p_umv_buf->p_inout_buf = NULL;;\r
+	p_umv_buf->p_inout_buf = 0;\r
p_umv_buf->input_size = 0;\r
p_umv_buf->output_size = 0;\r
p_umv_buf->command = TRUE;\r
UVP_ENTER(UVP_DBG_SHIM);\r
\r
CL_ASSERT(p_umv_buf);\r
- p_umv_buf->p_inout_buf = NULL;;\r
+ p_umv_buf->p_inout_buf = 0;\r
p_umv_buf->input_size = 0;\r
p_umv_buf->output_size = 0;\r
\r
\r
UVP_ENTER(UVP_DBG_SHIM);\r
CL_ASSERT(p_umv_buf);\r
- p_umv_buf->p_inout_buf = NULL;;\r
+ p_umv_buf->p_inout_buf = 0;\r
p_umv_buf->input_size = 0;\r
p_umv_buf->output_size = 0;\r
\r
{\r
UVP_ENTER(UVP_DBG_SHIM);\r
CL_ASSERT(p_umv_buf);\r
- p_umv_buf->p_inout_buf = NULL;;\r
+ p_umv_buf->p_inout_buf = 0;\r
p_umv_buf->input_size = 0;\r
p_umv_buf->output_size = 0;\r
\r
{\r
UVP_ENTER(UVP_DBG_SHIM);\r
CL_ASSERT(p_umv_buf);\r
- p_umv_buf->p_inout_buf = NULL;;\r
+ p_umv_buf->p_inout_buf = 0;\r
p_umv_buf->input_size = 0;\r
p_umv_buf->output_size = 0;\r
\r
{\r
UVP_ENTER(UVP_DBG_SHIM);\r
CL_ASSERT(p_umv_buf);\r
- p_umv_buf->p_inout_buf = NULL;;\r
+ p_umv_buf->p_inout_buf = 0;\r
p_umv_buf->input_size = 0;\r
p_umv_buf->output_size = 0;\r
\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_alloc_pd_resp) );\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_zalloc( sizeof(struct ibv_alloc_pd_resp) );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc( size );\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_zalloc( size );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
goto end;\r
\r
err_alloc_qp:\r
- cl_free(p_umv_buf->p_inout_buf);\r
+ cl_free((void*)(ULONG_PTR)p_umv_buf->p_inout_buf);\r
err_memory:\r
end:\r
UVP_EXIT(UVP_DBG_QP);\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_modify_qp_resp) );\r
+ p_umv_buf->p_inout_buf =\r
+ (ULONG_PTR)cl_zalloc( sizeof(struct ibv_modify_qp_resp) );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
\r
if( !p_umv_buf->p_inout_buf )\r
{\r
- p_umv_buf->p_inout_buf = cl_zalloc( size );\r
+ p_umv_buf->p_inout_buf = (ULONG_PTR)cl_zalloc( size );\r
if( !p_umv_buf->p_inout_buf )\r
{\r
status = IB_INSUFFICIENT_MEMORY;\r
err_lock:\r
cl_free(srq);\r
err_alloc_srq:\r
- cl_free(p_umv_buf->p_inout_buf);\r
+ cl_free((void*)(ULONG_PTR)p_umv_buf->p_inout_buf);\r
err_memory:\r
err_params:\r
end:\r
-/*
- * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#ifndef MTHCA_H
-#define MTHCA_H
-
-#include <cl_spinlock.h>
-#include <mlnx_uvp_verbs.h>
-#include <arch.h>
-#include "mlnx_uvp_debug.h"
-
-#define PFX "mthca: "
-
-enum mthca_hca_type {
- MTHCA_TAVOR,
- MTHCA_ARBEL,
- MTHCA_LIVEFISH
-};
-
-enum {
- MTHCA_CQ_ENTRY_SIZE = 0x20,
- MTHCA_BYTES_PER_ATOMIC_COMPL = 0x8
-};
-
-enum {
- MTHCA_QP_TABLE_BITS = 8,
- MTHCA_QP_TABLE_SIZE = 1 << MTHCA_QP_TABLE_BITS,
- MTHCA_QP_TABLE_MASK = MTHCA_QP_TABLE_SIZE - 1
-};
-
-enum {
- MTHCA_DB_REC_PAGE_SIZE = 4096,
- MTHCA_DB_REC_PER_PAGE = MTHCA_DB_REC_PAGE_SIZE / 8
-};
-
-enum mthca_db_type {
- MTHCA_DB_TYPE_INVALID = 0x0,
- MTHCA_DB_TYPE_CQ_SET_CI = 0x1,
- MTHCA_DB_TYPE_CQ_ARM = 0x2,
- MTHCA_DB_TYPE_SQ = 0x3,
- MTHCA_DB_TYPE_RQ = 0x4,
- MTHCA_DB_TYPE_SRQ = 0x5,
- MTHCA_DB_TYPE_GROUP_SEP = 0x7
-};
-
-enum mthca_wr_opcode {
- MTHCA_OPCODE_NOP = 0x00,
- MTHCA_OPCODE_RDMA_WRITE = 0x08,
- MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09,
- MTHCA_OPCODE_SEND = 0x0a,
- MTHCA_OPCODE_SEND_IMM = 0x0b,
- MTHCA_OPCODE_RDMA_READ = 0x10,
- MTHCA_OPCODE_ATOMIC_CS = 0x11,
- MTHCA_OPCODE_ATOMIC_FA = 0x12,
- MTHCA_OPCODE_BIND_MW = 0x18,
- MTHCA_OPCODE_INVALID = 0xff
-};
-
-struct mthca_ah_page;
-
-struct mthca_db_table;
-
-struct mthca_context {
- struct ibv_context ibv_ctx;
- void *uar;
- cl_spinlock_t uar_lock;
- struct mthca_db_table *db_tab;
- struct ibv_pd *pd;
- struct {
- struct mthca_qp **table;
- int refcnt;
- } qp_table[MTHCA_QP_TABLE_SIZE];
- HANDLE qp_table_mutex;
- int num_qps;
- int qp_table_shift;
- int qp_table_mask;
- enum mthca_hca_type hca_type;
-};
-
-struct mthca_pd {
- struct ibv_pd ibv_pd;
- struct mthca_ah_page *ah_list;
- HANDLE ah_mutex;
- uint32_t pdn;
-};
-
-struct mthca_cq {
- struct ibv_cq ibv_cq;
- void *buf;
- cl_spinlock_t lock;
- struct ibv_mr mr;
- uint32_t cqn;
- uint32_t cons_index;
-
- /* Next fields are mem-free only */
- int set_ci_db_index;
- uint32_t *set_ci_db;
- int arm_db_index;
- uint32_t *arm_db;
- int u_arm_db_index;
- uint32_t *p_u_arm_sn;
-};
-
-struct mthca_srq {
- struct ibv_srq ibv_srq;
- void *buf;
- void *last;
- cl_spinlock_t lock;
- struct ibv_mr mr;
- uint64_t *wrid;
- uint32_t srqn;
- int max;
- int max_gs;
- int wqe_shift;
- int first_free;
- int last_free;
- int buf_size;
-
- /* Next fields are mem-free only */
- int db_index;
- uint32_t *db;
- uint16_t counter;
-};
-
-struct mthca_wq {
- cl_spinlock_t lock;
- int max;
- unsigned next_ind;
- unsigned last_comp;
- unsigned head;
- unsigned tail;
- void *last;
- int max_gs;
- int wqe_shift;
-
- /* Next fields are mem-free only */
- int db_index;
- uint32_t *db;
-};
-
-struct mthca_qp {
- struct ibv_qp ibv_qp;
- uint8_t *buf;
- uint64_t *wrid;
- int send_wqe_offset;
- int max_inline_data;
- int buf_size;
- struct mthca_wq sq;
- struct mthca_wq rq;
- struct ibv_mr mr;
- int sq_sig_all;
-};
-
-struct mthca_av {
- uint32_t port_pd;
- uint8_t reserved1;
- uint8_t g_slid;
- uint16_t dlid;
- uint8_t reserved2;
- uint8_t gid_index;
- uint8_t msg_sr;
- uint8_t hop_limit;
- uint32_t sl_tclass_flowlabel;
- uint32_t dgid[4];
-};
-
-struct mthca_ah {
- struct mthca_av *av;
- ib_av_attr_t av_attr;
-TO_LONG_PTR( ib_pd_handle_t , h_uvp_pd) ;
- struct mthca_ah_page *page;
- uint32_t key;
- int in_kernel;
-};
-
-#pragma warning( disable : 4200)
-struct mthca_ah_page {
- struct mthca_ah_page *prev, *next;
- void *buf;
- struct ibv_mr mr;
- int use_cnt;
- unsigned free[0];
-};
-#pragma warning( default : 4200)
-
-
-static inline uintptr_t db_align(uint32_t *db)
-{
- return (uintptr_t) db & ~((uintptr_t) MTHCA_DB_REC_PAGE_SIZE - 1);
-}
-
-#define to_mxxx(xxx, type) \
- ((struct mthca_##type *) \
- ((uint8_t *) ib##xxx - offsetof(struct mthca_##type, ibv_##xxx)))
-
-static inline struct mthca_context *to_mctx(struct ibv_context *ibctx)
-{
- return to_mxxx(ctx, context);
-}
-
-static inline struct mthca_pd *to_mpd(struct ibv_pd *ibpd)
-{
- return to_mxxx(pd, pd);
-}
-
-static inline struct mthca_cq *to_mcq(struct ibv_cq *ibcq)
-{
- return to_mxxx(cq, cq);
-}
-
-static inline struct mthca_srq *to_msrq(struct ibv_srq *ibsrq)
-{
- return to_mxxx(srq, srq);
-}
-
-static inline struct mthca_qp *to_mqp(struct ibv_qp *ibqp)
-{
- return to_mxxx(qp, qp);
-}
-
-static inline int mthca_is_memfree(struct ibv_context *ibctx)
-{
- return to_mctx(ibctx)->hca_type == MTHCA_ARBEL;
-}
-
-int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type,
- uint32_t **db);
-void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn);
-void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index);
-struct mthca_db_table *mthca_alloc_db_tab(int uarc_size);
-void mthca_free_db_tab(struct mthca_db_table *db_tab);
-
-int mthca_query_device(struct ibv_context *context,
- struct ibv_device_attr *attr);
-int mthca_query_port(struct ibv_context *context, uint8_t port,
- struct ibv_port_attr *attr);
-
- struct ibv_pd *mthca_alloc_pd(struct ibv_context *context,
- struct ibv_alloc_pd_resp *resp_p);
-
-int mthca_free_pd(struct ibv_pd *pd);
-
-struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *cqe,
- struct ibv_create_cq *req);
-struct ibv_cq *mthca_create_cq_post(struct ibv_context *context,
- struct ibv_create_cq_resp *resp);
-int mthca_destroy_cq(struct ibv_cq *cq);
-int mthca_poll_cq(struct ibv_cq *cq, int ne, struct _ib_wc *wc);
-int mthca_poll_cq_list(struct ibv_cq *ibcq,
- struct _ib_wc** const pp_free_wclist,
- struct _ib_wc** const pp_done_wclist );
-int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited);
-int mthca_arbel_arm_cq(struct ibv_cq *cq, int solicited);
-void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn,
- struct mthca_srq *srq);
-void mthca_init_cq_buf(struct mthca_cq *cq, int nent);
-
-struct ibv_srq *mthca_create_srq(struct ibv_pd *pd,
- struct ibv_srq_init_attr *attr);
-int mthca_modify_srq(struct ibv_srq *srq,
- struct ibv_srq_attr *attr,
- enum ibv_srq_attr_mask mask);
-int mthca_destroy_srq(struct ibv_srq *srq);
-int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
- struct mthca_srq *srq);
-void mthca_free_srq_wqe(struct mthca_srq *srq, int ind);
-int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
- struct _ib_recv_wr *wr,
- struct _ib_recv_wr **bad_wr);
-int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq,
- struct _ib_recv_wr *wr,
- struct _ib_recv_wr **bad_wr);
-struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
- struct ibv_qp_init_attr *attr, struct ibv_create_qp *req);
-struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd,
- struct ibv_create_qp_resp *resp);
-int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
- enum ibv_qp_attr_mask attr_mask);
-void mthca_destroy_qp_pre(struct ibv_qp *qp);
-void mthca_destroy_qp_post(struct ibv_qp *qp, int ret);
-void mthca_init_qp_indices(struct mthca_qp *qp);
-int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
- struct _ib_send_wr **bad_wr);
-int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
- struct _ib_recv_wr **bad_wr);
-int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
- struct _ib_send_wr **bad_wr);
-int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
- struct _ib_recv_wr **bad_wr);
-int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
- ib_qp_type_t type, struct mthca_qp *qp);
-struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn);
-int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp);
-void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn);
-int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
- int index, int *dbd, uint32_t *new_wqe);
-int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
- struct mthca_ah *ah, struct ibv_create_ah_resp *resp);
-void mthca_free_av(struct mthca_ah *ah);
-int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
-int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
-struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p);
-void mthca_free_context(struct ibv_context *ibctx);
-
-#endif /* MTHCA_H */
+/*\r
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.\r
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.\r
+ * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses. You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+#ifndef MTHCA_H\r
+#define MTHCA_H\r
+\r
+#include <cl_spinlock.h>\r
+#include <mlnx_uvp_verbs.h>\r
+#include <arch.h>\r
+#include "mlnx_uvp_debug.h"\r
+\r
+#define PFX "mthca: "\r
+\r
+enum mthca_hca_type {\r
+ MTHCA_TAVOR,\r
+ MTHCA_ARBEL,\r
+ MTHCA_LIVEFISH\r
+};\r
+\r
+enum {\r
+ MTHCA_CQ_ENTRY_SIZE = 0x20,\r
+ MTHCA_BYTES_PER_ATOMIC_COMPL = 0x8\r
+};\r
+\r
+enum {\r
+ MTHCA_QP_TABLE_BITS = 8,\r
+ MTHCA_QP_TABLE_SIZE = 1 << MTHCA_QP_TABLE_BITS,\r
+ MTHCA_QP_TABLE_MASK = MTHCA_QP_TABLE_SIZE - 1\r
+};\r
+\r
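+/* Doorbell records are 8 bytes each, so a 4KB page holds 512 of them. */\r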
+enum {\r
+ MTHCA_DB_REC_PAGE_SIZE = 4096,\r
+ MTHCA_DB_REC_PER_PAGE = MTHCA_DB_REC_PAGE_SIZE / 8\r
+};\r
+\r
+enum mthca_db_type {\r
+ MTHCA_DB_TYPE_INVALID = 0x0,\r
+ MTHCA_DB_TYPE_CQ_SET_CI = 0x1,\r
+ MTHCA_DB_TYPE_CQ_ARM = 0x2,\r
+ MTHCA_DB_TYPE_SQ = 0x3,\r
+ MTHCA_DB_TYPE_RQ = 0x4,\r
+ MTHCA_DB_TYPE_SRQ = 0x5,\r
+ MTHCA_DB_TYPE_GROUP_SEP = 0x7\r
+};\r
+\r
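+/* Hardware opcodes for posted work requests. */\r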
+enum mthca_wr_opcode {\r
+ MTHCA_OPCODE_NOP = 0x00,\r
+ MTHCA_OPCODE_RDMA_WRITE = 0x08,\r
+ MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09,\r
+ MTHCA_OPCODE_SEND = 0x0a,\r
+ MTHCA_OPCODE_SEND_IMM = 0x0b,\r
+ MTHCA_OPCODE_RDMA_READ = 0x10,\r
+ MTHCA_OPCODE_ATOMIC_CS = 0x11,\r
+ MTHCA_OPCODE_ATOMIC_FA = 0x12,\r
+ MTHCA_OPCODE_BIND_MW = 0x18,\r
+ MTHCA_OPCODE_INVALID = 0xff\r
+};\r
+\r
+struct mthca_ah_page;\r
+\r
+struct mthca_db_table;\r
+\r
+struct mthca_context {\r
+ struct ibv_context ibv_ctx;\r
+ void *uar;\r
+ cl_spinlock_t uar_lock;\r
+ struct mthca_db_table *db_tab;\r
+ struct ibv_pd *pd;\r
+ struct {\r
+ struct mthca_qp **table;\r
+ int refcnt;\r
+ } qp_table[MTHCA_QP_TABLE_SIZE];\r
+ HANDLE qp_table_mutex;\r
+ int num_qps;\r
+ int qp_table_shift;\r
+ int qp_table_mask;\r
+ enum mthca_hca_type hca_type;\r
+};\r
+\r
+struct mthca_pd {\r
+ struct ibv_pd ibv_pd;\r
+ struct mthca_ah_page *ah_list;\r
+ HANDLE ah_mutex;\r
+ uint32_t pdn;\r
+};\r
+\r
+struct mthca_cq {\r
+ struct ibv_cq ibv_cq;\r
+ void *buf;\r
+ cl_spinlock_t lock;\r
+ struct ibv_mr mr;\r
+ uint32_t cqn;\r
+ uint32_t cons_index;\r
+\r
+ /* Next fields are mem-free only */\r
+ int set_ci_db_index;\r
+ uint32_t *set_ci_db;\r
+ int arm_db_index;\r
+ uint32_t *arm_db;\r
+ int u_arm_db_index;\r
+ uint32_t *p_u_arm_sn;\r
+};\r
+\r
+struct mthca_srq {\r
+ struct ibv_srq ibv_srq;\r
+ void *buf;\r
+ void *last;\r
+ cl_spinlock_t lock;\r
+ struct ibv_mr mr;\r
+ uint64_t *wrid;\r
+ uint32_t srqn;\r
+ int max;\r
+ int max_gs;\r
+ int wqe_shift;\r
+ int first_free;\r
+ int last_free;\r
+ int buf_size;\r
+\r
+ /* Next fields are mem-free only */\r
+ int db_index;\r
+ uint32_t *db;\r
+ uint16_t counter;\r
+};\r
+\r
+struct mthca_wq {\r
+ cl_spinlock_t lock;\r
+ int max;\r
+ unsigned next_ind;\r
+ unsigned last_comp;\r
+ unsigned head;\r
+ unsigned tail;\r
+ void *last;\r
+ int max_gs;\r
+ int wqe_shift;\r
+\r
+ /* Next fields are mem-free only */\r
+ int db_index;\r
+ uint32_t *db;\r
+};\r
+\r
+struct mthca_qp {\r
+ struct ibv_qp ibv_qp;\r
+ uint8_t *buf;\r
+ uint64_t *wrid;\r
+ int send_wqe_offset;\r
+ int max_inline_data;\r
+ int buf_size;\r
+ struct mthca_wq sq;\r
+ struct mthca_wq rq;\r
+ struct ibv_mr mr;\r
+ int sq_sig_all;\r
+};\r
+\r
+struct mthca_av {\r
+ uint32_t port_pd;\r
+ uint8_t reserved1;\r
+ uint8_t g_slid;\r
+ uint16_t dlid;\r
+ uint8_t reserved2;\r
+ uint8_t gid_index;\r
+ uint8_t msg_sr;\r
+ uint8_t hop_limit;\r
+ uint32_t sl_tclass_flowlabel;\r
+ uint32_t dgid[4];\r
+};\r
+\r
+struct mthca_ah {\r
+ struct mthca_av *av;\r
+ ib_av_attr_t av_attr;\r
+ ib_pd_handle_t h_uvp_pd;\r
+ struct mthca_ah_page *page;\r
+ uint32_t key;\r
+ int in_kernel;\r
+};\r
+\r
+#pragma warning( disable : 4200)\r
+struct mthca_ah_page {\r
+ struct mthca_ah_page *prev, *next;\r
+ void *buf;\r
+ struct ibv_mr mr;\r
+ int use_cnt;\r
+ unsigned free[0];\r
+};\r
+#pragma warning( default : 4200)\r
+\r
+\r
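+/* Mask a doorbell-record pointer down to the base of its 4KB record page. */\r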
+static inline uintptr_t db_align(uint32_t *db)\r
+{\r
+ return (uintptr_t) db & ~((uintptr_t) MTHCA_DB_REC_PAGE_SIZE - 1);\r
+}\r
+\r
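+/* container_of idiom: recover the enclosing mthca_##type structure from a\r
+ * pointer to its embedded ibv_##xxx member. */\r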
+#define to_mxxx(xxx, type) \\r
+ ((struct mthca_##type *) \\r
+ ((uint8_t *) ib##xxx - offsetof(struct mthca_##type, ibv_##xxx)))\r
+\r
+static inline struct mthca_context *to_mctx(struct ibv_context *ibctx)\r
+{\r
+ return to_mxxx(ctx, context);\r
+}\r
+\r
+static inline struct mthca_pd *to_mpd(struct ibv_pd *ibpd)\r
+{\r
+ return to_mxxx(pd, pd);\r
+}\r
+\r
+static inline struct mthca_cq *to_mcq(struct ibv_cq *ibcq)\r
+{\r
+ return to_mxxx(cq, cq);\r
+}\r
+\r
+static inline struct mthca_srq *to_msrq(struct ibv_srq *ibsrq)\r
+{\r
+ return to_mxxx(srq, srq);\r
+}\r
+\r
+static inline struct mthca_qp *to_mqp(struct ibv_qp *ibqp)\r
+{\r
+ return to_mxxx(qp, qp);\r
+}\r
+\r
+static inline int mthca_is_memfree(struct ibv_context *ibctx)\r
+{\r
+ return to_mctx(ibctx)->hca_type == MTHCA_ARBEL;\r
+}\r
+\r
+int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type,\r
+ uint32_t **db);\r
+void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn);\r
+void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index);\r
+struct mthca_db_table *mthca_alloc_db_tab(int uarc_size);\r
+void mthca_free_db_tab(struct mthca_db_table *db_tab);\r
+\r
+int mthca_query_device(struct ibv_context *context,\r
+ struct ibv_device_attr *attr);\r
+int mthca_query_port(struct ibv_context *context, uint8_t port,\r
+ struct ibv_port_attr *attr);\r
+\r
+struct ibv_pd *mthca_alloc_pd(struct ibv_context *context,\r
+		struct ibv_alloc_pd_resp *resp_p);\r
+\r
+int mthca_free_pd(struct ibv_pd *pd);\r
+\r
+struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *cqe,\r
+ struct ibv_create_cq *req);\r
+struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, \r
+ struct ibv_create_cq_resp *resp);\r
+int mthca_destroy_cq(struct ibv_cq *cq);\r
+int mthca_poll_cq(struct ibv_cq *cq, int ne, struct _ib_wc *wc);\r
+int mthca_poll_cq_list(struct ibv_cq *ibcq, \r
+ struct _ib_wc** const pp_free_wclist,\r
+ struct _ib_wc** const pp_done_wclist );\r
+int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited);\r
+int mthca_arbel_arm_cq(struct ibv_cq *cq, int solicited);\r
+void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn,\r
+ struct mthca_srq *srq);\r
+void mthca_init_cq_buf(struct mthca_cq *cq, int nent);\r
+\r
+struct ibv_srq *mthca_create_srq(struct ibv_pd *pd,\r
+ struct ibv_srq_init_attr *attr);\r
+int mthca_modify_srq(struct ibv_srq *srq,\r
+ struct ibv_srq_attr *attr,\r
+ enum ibv_srq_attr_mask mask);\r
+int mthca_destroy_srq(struct ibv_srq *srq);\r
+int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,\r
+ struct mthca_srq *srq);\r
+void mthca_free_srq_wqe(struct mthca_srq *srq, int ind);\r
+int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,\r
+ struct _ib_recv_wr *wr,\r
+ struct _ib_recv_wr **bad_wr);\r
+int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq,\r
+ struct _ib_recv_wr *wr,\r
+ struct _ib_recv_wr **bad_wr);\r
+struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, \r
+ struct ibv_qp_init_attr *attr, struct ibv_create_qp *req);\r
+struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, \r
+ struct ibv_create_qp_resp *resp);\r
+int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,\r
+ enum ibv_qp_attr_mask attr_mask);\r
+void mthca_destroy_qp_pre(struct ibv_qp *qp);\r
+void mthca_destroy_qp_post(struct ibv_qp *qp, int ret);\r
+void mthca_init_qp_indices(struct mthca_qp *qp);\r
+int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,\r
+ struct _ib_send_wr **bad_wr);\r
+int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,\r
+ struct _ib_recv_wr **bad_wr);\r
+int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,\r
+ struct _ib_send_wr **bad_wr);\r
+int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,\r
+ struct _ib_recv_wr **bad_wr);\r
+int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,\r
+ ib_qp_type_t type, struct mthca_qp *qp);\r
+struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn);\r
+int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp);\r
+void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn);\r
+int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,\r
+ int index, int *dbd, uint32_t *new_wqe);\r
+int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,\r
+ struct mthca_ah *ah, struct ibv_create_ah_resp *resp);\r
+void mthca_free_av(struct mthca_ah *ah);\r
+int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);\r
+int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);\r
+struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p);\r
+void mthca_free_context(struct ibv_context *ibctx);\r
+\r
+#endif /* MTHCA_H */\r
uint32_t status;\r
uint32_t input_size;\r
uint32_t output_size;\r
- TO_LONG_PTR(void* , p_inout_buf) ;\r
+ uint64_t p_inout_buf;\r
} ci_umv_buf_t;\r
/******/\r
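The typedef above is the pivot of the whole patch: p_inout_buf is now a fixed-width uint64_t rather than a pointer (4 bytes in a 32-bit process, 8 in the kernel), so user mode and kernel agree on the ci_umv_buf_t layout and every access gains a (void*)(ULONG_PTR) cast. A minimal sketch of the round trip, assuming the Windows DDK's ULONG_PTR; the two helper names are hypothetical and not part of the patch:

/* Hypothetical helpers illustrating the cast pattern used throughout
 * the patch; a sketch, not code from the tree. */

static void sketch_set_umv_buf( ci_umv_buf_t *p_buf, void *p_user_buf )
{
	/* Widen through ULONG_PTR so a 32-bit pointer is zero-extended,
	 * never sign-extended, into the 64-bit field. */
	p_buf->p_inout_buf = (uint64_t)(ULONG_PTR)p_user_buf;
}

static void* sketch_get_umv_buf( const ci_umv_buf_t *p_buf )
{
	/* Narrow back the same way before dereferencing; ULONG_PTR is
	 * pointer-sized, so nothing is lost on a 64-bit kernel. */
	return (void*)(ULONG_PTR)p_buf->p_inout_buf;
}

Keeping the field at 64 bits regardless of process bitness is what lets a WOW64 (32-bit) consumer share the same structures as the 64-bit kernel driver, which is why the patch also drops the old TO_LONG_PTR() annotation and replaces NULL initializers of the field with plain 0.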
\r