[IBAL, HW, IPOIB] Remove VOID_PTR64.
author leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Sun, 13 Jul 2008 10:28:11 +0000 (10:28 +0000)
committer leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Sun, 13 Jul 2008 10:28:11 +0000 (10:28 +0000)
Also fixed GPL license and missing copyright issues.
Signed-off-by: Fab Tillier <ftillier@microsoft.com>
git-svn-id: svn://openib.tc.cornell.edu/gen1/trunk@1380 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86
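
For context: VOID_PTR64 annotated pointer-typed handle fields and casts so that IOCTL structures shared between 32-bit user mode and the 64-bit kernel kept a fixed 64-bit layout. Handles cross the boundary as plain 64-bit values converted with HDL_TO_PTR, so the annotation adds nothing and each cast collapses to an ordinary pointer cast. A minimal sketch of the removed pattern, assuming a definition along these lines (a hypothetical reconstruction, not the verbatim inc/iba/ib_types.h text):

	/* Hypothetical reconstruction of the deleted macro -- illustration only. */
	#if defined( _WIN64 )
	#define VOID_PTR64			/* pointers are natively 64-bit */
	#else
	#define VOID_PTR64	__ptr64		/* MSVC modifier forcing 64-bit pointer width */
	#endif

	/* Before this commit: */
	p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t VOID_PTR64)HDL_TO_PTR(p_pnp_rec->h_pnp->obj.hdl);
	/* After: */
	p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t)HDL_TO_PTR(p_pnp_rec->h_pnp->obj.hdl);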

24 files changed:
core/al/kernel/al_proxy.c
core/al/kernel/al_proxy_ndi.c
core/al/kernel/al_proxy_subnet.c
core/al/kernel/al_proxy_verbs.c
core/al/user/ual_cm_cep.c
core/al/user/ual_mcast.c
core/al/user/ual_mgr.c
core/al/user/ual_mw.c
core/al/user/ual_qp.c
hw/mlx4/kernel/hca/ca.c
hw/mlx4/user/hca/verbs.c
hw/mthca/kernel/hca_data.h
hw/mthca/kernel/hca_mcast.c
hw/mthca/kernel/hca_memory.c
hw/mthca/kernel/hca_verbs.c
hw/mthca/kernel/mthca_mad.c
hw/mthca/user/mlnx_ual_av.c
hw/mthca/user/mlnx_ual_ca.c
hw/mthca/user/mlnx_ual_cq.c
hw/mthca/user/mlnx_ual_pd.c
hw/mthca/user/mlnx_ual_qp.c
hw/mthca/user/mlnx_ual_srq.c
inc/iba/ib_types.h
ulp/ipoib/kernel/ipoib_driver.c

diff --git a/core/al/kernel/al_proxy.c b/core/al/kernel/al_proxy.c
index bb15047..12809bb 100644
@@ -776,7 +776,7 @@ __proxy_pnp_cb(
                break;\r
        }\r
 \r
-       p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t VOID_PTR64)HDL_TO_PTR(p_pnp_rec->h_pnp->obj.hdl);\r
+       p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t)HDL_TO_PTR(p_pnp_rec->h_pnp->obj.hdl);\r
        p_pnp_rec->h_pnp->obj.hdl_valid = TRUE;\r
 \r
        hdl =\r
diff --git a/core/al/kernel/al_proxy_ndi.c b/core/al/kernel/al_proxy_ndi.c
index 4927638..4a94740 100644
@@ -387,7 +387,7 @@ __ndi_rep_cm(
        }\r
 \r
        /* Get and validate QP handle */\r
-       h_qp = (ib_qp_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al, p_rep->h_qp, AL_OBJ_TYPE_H_QP );\r
+       h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al, p_rep->h_qp, AL_OBJ_TYPE_H_QP );\r
        if( !h_qp )\r
        {\r
                cl_status = CL_INVALID_HANDLE;\r
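
These hunks show both directions of the handle marshaling that VOID_PTR64 used to decorate. Outbound (kernel to user), an object's 64-bit table index obj.hdl is placed in the pointer-typed handle field via HDL_TO_PTR; inbound (user to kernel), al_hdl_ref resolves the 64-bit value back to a referenced, type-checked object. A sketch of the inbound idiom with an assumed signature (the real declaration lives in core/al and may differ):

	/* Assumed shape of the AL handle-table lookup -- illustration only. */
	void* al_hdl_ref( ib_al_handle_t h_al, uint64_t hdl, uint32_t type );

	h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al, p_rep->h_qp, AL_OBJ_TYPE_H_QP );
	if( !h_qp )
		cl_status = CL_INVALID_HANDLE;	/* stale index or object-type mismatch */
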
diff --git a/core/al/kernel/al_proxy_subnet.c b/core/al/kernel/al_proxy_subnet.c
index 3935553..3c05a35 100644
@@ -697,7 +697,7 @@ __proxy_mad_recv_cb(
        cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context = mad_svc_context;\r
        cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.elem_size = p_mad_element->size;\r
        cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad =\r
-               (ib_mad_element_t* VOID_PTR64)p_mad_element->send_context1;\r
+               (ib_mad_element_t*)p_mad_element->send_context1;\r
 \r
        /*\r
         * If we're already closing the device - do not queue a callback, since\r
diff --git a/core/al/kernel/al_proxy_verbs.c b/core/al/kernel/al_proxy_verbs.c
index 57df4d2..58e0f7d 100644
@@ -347,7 +347,7 @@ proxy_ca_err_cb(
        cb_info.rec_type = CA_ERROR_REC;\r
        /* Return the Proxy's open_ca handle and the user's context */\r
        cb_info.ioctl_rec.event_rec = *p_err_rec;\r
-       cb_info.ioctl_rec.event_rec.handle.h_ca = (ib_ca_handle_t VOID_PTR64)HDL_TO_PTR(h_ca->obj.hdl);\r
+       cb_info.ioctl_rec.event_rec.handle.h_ca = (ib_ca_handle_t)HDL_TO_PTR(h_ca->obj.hdl);\r
 \r
        /* The proxy handle must be valid now. */\r
        if( !h_ca->obj.hdl_valid )\r
@@ -982,7 +982,7 @@ proxy_srq_err_cb(
        cb_info.rec_type = SRQ_ERROR_REC;\r
        /* Return the Proxy's SRQ handle and the user's context */\r
        cb_info.ioctl_rec.event_rec = *p_err_rec;\r
-       cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t VOID_PTR64) HDL_TO_PTR(h_srq->obj.hdl);\r
+       cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t) HDL_TO_PTR(h_srq->obj.hdl);\r
 \r
        /* The proxy handle must be valid now. */\r
        if( !h_srq->obj.hdl_valid )\r
@@ -1286,7 +1286,7 @@ proxy_qp_err_cb(
        cb_info.rec_type = QP_ERROR_REC;\r
        /* Return the Proxy's QP handle and the user's context */\r
        cb_info.ioctl_rec.event_rec = *p_err_rec;\r
-       cb_info.ioctl_rec.event_rec.handle.h_qp = (ib_qp_handle_t VOID_PTR64)HDL_TO_PTR(h_qp->obj.hdl);\r
+       cb_info.ioctl_rec.event_rec.handle.h_qp = (ib_qp_handle_t)HDL_TO_PTR(h_qp->obj.hdl);\r
 \r
        /* The proxy handle must be valid now. */\r
        if( !h_qp->obj.hdl_valid )\r
@@ -1337,14 +1337,14 @@ proxy_create_qp(
        }\r
 \r
        /* Validate handles. */\r
-       h_pd = (ib_pd_handle_t VOID_PTR64)\r
+       h_pd = (ib_pd_handle_t)\r
                al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
-       h_sq_cq = (ib_cq_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al,\r
+       h_sq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,\r
                (uint64_t)p_ioctl->in.qp_create.h_sq_cq, AL_OBJ_TYPE_H_CQ );\r
-       h_rq_cq = (ib_cq_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al,\r
+       h_rq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,\r
                (uint64_t)p_ioctl->in.qp_create.h_rq_cq, AL_OBJ_TYPE_H_CQ );\r
        if (p_ioctl->in.qp_create.h_srq) {\r
-               h_srq = (ib_srq_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al,\r
+               h_srq = (ib_srq_handle_t)al_hdl_ref( p_context->h_al,\r
                        (uint64_t)p_ioctl->in.qp_create.h_srq, AL_OBJ_TYPE_H_SRQ );\r
                if( !h_srq)\r
                {\r
@@ -1486,25 +1486,25 @@ proxy_query_qp(
                if( p_ioctl->out.attr.h_pd )\r
                {\r
                        p_ioctl->out.attr.h_pd =\r
-                               (ib_pd_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl);\r
+                               (ib_pd_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl);\r
                }\r
 \r
                if( p_ioctl->out.attr.h_sq_cq )\r
                {\r
                        p_ioctl->out.attr.h_sq_cq =\r
-                               (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_sq_cq->obj.hdl);\r
+                               (ib_cq_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_sq_cq->obj.hdl);\r
                }\r
 \r
                if( p_ioctl->out.attr.h_rq_cq )\r
                {\r
                        p_ioctl->out.attr.h_rq_cq =\r
-                               (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_rq_cq->obj.hdl);\r
+                               (ib_cq_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_rq_cq->obj.hdl);\r
                }\r
 \r
                if( p_ioctl->out.attr.h_srq )\r
                {\r
                        p_ioctl->out.attr.h_srq =\r
-                               (ib_srq_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_srq->obj.hdl);\r
+                               (ib_srq_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_srq->obj.hdl);\r
                }\r
        }\r
        else\r
@@ -2045,7 +2045,7 @@ proxy_cq_err_cb(
        cb_info.rec_type = CQ_ERROR_REC;\r
        /* Return the Proxy's cq handle and the user's context */\r
        cb_info.ioctl_rec.event_rec = *p_err_rec;\r
-       cb_info.ioctl_rec.event_rec.handle.h_cq = (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(h_cq->obj.hdl);\r
+       cb_info.ioctl_rec.event_rec.handle.h_cq = (ib_cq_handle_t)HDL_TO_PTR(h_cq->obj.hdl);\r
 \r
        /* The proxy handle must be valid now. */\r
        if( !h_cq->obj.hdl_valid )\r
@@ -2358,7 +2358,7 @@ proxy_post_send(
                if( h_qp->type == IB_QPT_UNRELIABLE_DGRM )\r
                {\r
                        /* Validate the AV handle for UD */\r
-                       h_av = (ib_av_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al,\r
+                       h_av = (ib_av_handle_t)al_hdl_ref( p_context->h_al,\r
                                (uint64_t)p_wr[i].dgrm.ud.h_av, AL_OBJ_TYPE_H_AV );\r
                        if( !h_av )\r
                        {\r
@@ -3041,7 +3041,7 @@ proxy_query_mr(
        {\r
                /* Replace the pd handle with proxy's handle */\r
                p_ioctl->out.attr.h_pd =\r
-                       (ib_pd_handle_t VOID_PTR64)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl);\r
+                       (ib_pd_handle_t)HDL_TO_PTR(p_ioctl->out.attr.h_pd->obj.hdl);\r
        }\r
        else\r
        {\r
@@ -3492,7 +3492,7 @@ proxy_bind_mw(
        }\r
 \r
        /* Validate MR handle */\r
-       h_mr = (ib_mr_handle_t VOID_PTR64)al_hdl_ref( p_context->h_al,\r
+       h_mr = (ib_mr_handle_t)al_hdl_ref( p_context->h_al,\r
                (uint64_t)p_ioctl->in.mw_bind.h_mr, AL_OBJ_TYPE_H_MR );\r
        if( !h_mr )\r
        {\r
diff --git a/core/al/user/ual_cm_cep.c b/core/al/user/ual_cm_cep.c
index e404fff..07b14fe 100644
@@ -522,7 +522,7 @@ al_cep_pre_req(
     cl_memclr(&ioctl, sizeof(ioctl));\r
        ioctl.in.cid = cid;\r
        ioctl.in.cm_req = *p_cm_req;\r
-       ioctl.in.cm_req.h_qp = (ib_qp_handle_t VOID_PTR64) HDL_TO_PTR(p_cm_req->h_qp->obj.hdl);\r
+       ioctl.in.cm_req.h_qp = (ib_qp_handle_t) HDL_TO_PTR(p_cm_req->h_qp->obj.hdl);\r
        ioctl.in.paths[0] = *(p_cm_req->p_primary_path);\r
        if( p_cm_req->p_alt_path )\r
                ioctl.in.paths[1] = *(p_cm_req->p_alt_path);\r
@@ -651,7 +651,7 @@ al_cep_pre_rep(
        ioctl.in.context = context;\r
        ioctl.in.cid = cid;\r
        ioctl.in.cm_rep = *p_cm_rep;\r
-       ioctl.in.cm_rep.h_qp = (ib_qp_handle_t VOID_PTR64)HDL_TO_PTR(p_cm_rep->h_qp->obj.hdl);\r
+       ioctl.in.cm_rep.h_qp = (ib_qp_handle_t)HDL_TO_PTR(p_cm_rep->h_qp->obj.hdl);\r
        /* Copy private data, if any. */\r
        if( p_cm_rep->p_rep_pdata )\r
        {\r
@@ -1010,7 +1010,7 @@ al_cep_lap(
     cl_memclr(&ioctl,sizeof (ioctl));\r
        ioctl.cid = cid;\r
        ioctl.cm_lap = *p_cm_lap;\r
-       ioctl.cm_lap.h_qp = (ib_qp_handle_t VOID_PTR64) HDL_TO_PTR(p_cm_lap->h_qp->obj.hdl);\r
+       ioctl.cm_lap.h_qp = (ib_qp_handle_t) HDL_TO_PTR(p_cm_lap->h_qp->obj.hdl);\r
        ioctl.alt_path = *(p_cm_lap->p_alt_path);\r
        /* Copy private data, if any. */\r
        if( p_cm_lap->p_lap_pdata )\r
@@ -1066,7 +1066,7 @@ al_cep_pre_apr(
     cl_memclr(&ioctl, sizeof (ioctl));\r
        ioctl.in.cid = cid;\r
        ioctl.in.cm_apr = *p_cm_apr;\r
-       ioctl.in.cm_apr.h_qp = (ib_qp_handle_t VOID_PTR64)HDL_TO_PTR(p_cm_apr->h_qp->obj.hdl);\r
+       ioctl.in.cm_apr.h_qp = (ib_qp_handle_t)HDL_TO_PTR(p_cm_apr->h_qp->obj.hdl);\r
        if( p_cm_apr->p_info )\r
        {\r
                if( p_cm_apr->info_length > IB_APR_INFO_SIZE )\r
diff --git a/core/al/user/ual_mcast.c b/core/al/user/ual_mcast.c
index cda009e..01253bd 100644
@@ -99,7 +99,7 @@ ual_attach_mcast(
                status = ioctl_buf.out.status;\r
                if( status == IB_SUCCESS ){\r
                        h_mcast->obj.hdl = ioctl_buf.out.h_attach;\r
-                       h_mcast->h_ci_mcast = (ib_mcast_handle_t VOID_PTR64) HDL_TO_PTR(ioctl_buf.out.h_attach);\r
+                       h_mcast->h_ci_mcast = (ib_mcast_handle_t) HDL_TO_PTR(ioctl_buf.out.h_attach);\r
                }\r
        }\r
 \r
diff --git a/core/al/user/ual_mgr.c b/core/al/user/ual_mgr.c
index ea7b04f..e5f7c90 100644
@@ -646,7 +646,7 @@ __process_misc_cb(
                /* We got a send completion. */\r
                ib_mad_element_t                        *p_element;\r
 \r
-               ib_mad_svc_handle_t VOID_PTR64                  h_mad_svc = (ib_mad_svc_handle_t VOID_PTR64)\r
+               ib_mad_svc_handle_t                     h_mad_svc = (ib_mad_svc_handle_t)\r
                        p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context;\r
 \r
                /* Copy the data to the user's element. */\r
@@ -682,7 +682,7 @@ __process_misc_cb(
                ib_mad_t                                *p_mad_buf = NULL;\r
                ib_grh_t                                *p_grh = NULL;\r
 \r
-               h_mad_svc = (ib_mad_svc_handle_t VOID_PTR64)\r
+               h_mad_svc = (ib_mad_svc_handle_t)\r
                        p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context;\r
 \r
                p_send_mad =\r
diff --git a/core/al/user/ual_mw.c b/core/al/user/ual_mw.c
index 03ba1d7..0d5dc23 100644
@@ -280,7 +280,7 @@ ual_bind_mw(
        mw_ioctl.in.h_mw = h_mw->obj.hdl;\r
        mw_ioctl.in.h_qp = h_qp->obj.hdl;\r
        mw_ioctl.in.mw_bind = *p_mw_bind;\r
-       mw_ioctl.in.mw_bind.h_mr = (ib_mr_handle_t VOID_PTR64) HDL_TO_PTR(p_mw_bind->h_mr->obj.hdl);\r
+       mw_ioctl.in.mw_bind.h_mr = (ib_mr_handle_t) HDL_TO_PTR(p_mw_bind->h_mr->obj.hdl);\r
 \r
        cl_status = do_al_dev_ioctl( UAL_BIND_MW,\r
                &mw_ioctl.in, sizeof(mw_ioctl.in), &mw_ioctl.out, sizeof(mw_ioctl.out),\r
diff --git a/core/al/user/ual_qp.c b/core/al/user/ual_qp.c
index 4cb94c1..b7996b8 100644
@@ -314,12 +314,12 @@ ual_create_qp(
        qp_ioctl.in.h_pd = h_pd->obj.hdl;\r
        qp_ioctl.in.qp_create = *p_qp_create;\r
        qp_ioctl.in.qp_create.h_rq_cq =\r
-               (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_qp_create->h_rq_cq->obj.hdl);\r
+               (ib_cq_handle_t)HDL_TO_PTR(p_qp_create->h_rq_cq->obj.hdl);\r
        qp_ioctl.in.qp_create.h_sq_cq =\r
-               (ib_cq_handle_t VOID_PTR64)HDL_TO_PTR(p_qp_create->h_sq_cq->obj.hdl);\r
+               (ib_cq_handle_t)HDL_TO_PTR(p_qp_create->h_sq_cq->obj.hdl);\r
        if (p_qp_create->h_srq)\r
                qp_ioctl.in.qp_create.h_srq =\r
-                       (ib_srq_handle_t VOID_PTR64)HDL_TO_PTR(p_qp_create->h_srq->obj.hdl);\r
+                       (ib_srq_handle_t)HDL_TO_PTR(p_qp_create->h_srq->obj.hdl);\r
        qp_ioctl.in.context = h_qp;\r
        qp_ioctl.in.ev_notify = (h_qp->pfn_event_cb != NULL) ? TRUE : FALSE;\r
 \r
diff --git a/hw/mlx4/kernel/hca/ca.c b/hw/mlx4/kernel/hca/ca.c
index f8b1419..8ce4413 100644
-/*
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $
- */
-
-#include "precomp.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "ca.tmh"
-#endif
-
-ib_api_status_t
-mlnx_open_ca (
-       IN              const   ib_net64_t                                      ca_guid, // IN  const char *                ca_name,
-       IN              const   ci_completion_cb_t                      pfn_completion_cb,
-       IN              const   ci_async_event_cb_t                     pfn_async_event_cb,
-       IN              const   void*const                                      ca_context,
-               OUT                     ib_ca_handle_t                          *ph_ca)
-{
-       mlnx_hca_t                              *p_hca;
-       ib_api_status_t status = IB_NOT_FOUND;
-       struct ib_device *p_ibdev;
-
-       HCA_ENTER(HCA_DBG_SHIM);
-       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,
-               ("context 0x%p\n", ca_context));
-
-       // find CA object
-       p_hca = mlnx_hca_from_guid( ca_guid );
-       if( !p_hca ) {
-               if (status != IB_SUCCESS) 
-               {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
-                       ("completes with ERROR status IB_NOT_FOUND\n"));
-               }
-               HCA_EXIT(HCA_DBG_SHIM);
-               return IB_NOT_FOUND;
-       }
-
-       p_ibdev = hca2ibdev(p_hca);
-
-       if (hca_is_livefish(hca2fdo(p_hca))) 
-               goto done;
-
-       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,
-               ("context 0x%p\n", ca_context));
-       status = mlnx_set_cb(p_hca,
-               pfn_completion_cb,
-               pfn_async_event_cb,
-               ca_context);
-       if (IB_SUCCESS != status) {
-               goto err_set_cb;
-       }
-
-       
-       //TODO: do we need something for kernel users ?
-
-       // Return pointer to HCA object
-done:  
-       if (ph_ca) *ph_ca = (ib_ca_handle_t)p_hca;
-       status =  IB_SUCCESS;
-
-//err_mad_cache:
-err_set_cb:
-       if (status != IB_SUCCESS)
-       {
-               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,
-                       ("completes with ERROR status %x\n", status));
-       }
-       HCA_EXIT(HCA_DBG_SHIM);
-       return status;
-}
-
-ib_api_status_t
-mlnx_query_ca (
-       IN              const   ib_ca_handle_t                          h_ca,
-               OUT                     ib_ca_attr_t                            *p_ca_attr,
-       IN      OUT                     uint32_t                                        *p_byte_count,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       int i;
-       int err;
-       ib_api_status_t         status;
-       uint32_t                        size, required_size;
-       int                                     port_num, num_ports;
-       uint32_t                        num_gids, num_pkeys;
-       uint32_t                        num_page_sizes = 1; // TBD: what is actually supported
-       uint8_t                         *last_p;
-       struct ib_device_attr props;
-       struct ib_port_attr  *hca_ports = NULL;
-       mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;
-       struct ib_device *p_ibdev = hca2ibdev(p_hca);
-       
-       
-       HCA_ENTER(HCA_DBG_SHIM);
-
-       // sanity checks
-       if( p_umv_buf && p_umv_buf->command ) {
-                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));
-                       p_umv_buf->status = status = IB_UNSUPPORTED;
-                       goto err_user_unsupported;
-       }
-
-       if( !cl_is_blockable() ) {
-                       status = IB_UNSUPPORTED;
-                       goto err_unsupported;
-       }
-
-       if (NULL == p_byte_count) {
-               status = IB_INVALID_PARAMETER;
-               goto err_byte_count;
-       }
-
-       // query the device
-       if ( hca_is_livefish(hca2fdo(p_hca)) ) {
-               struct pci_dev *pdev = hca2pdev(p_hca);
-               props.max_pd = 1;
-               props.vendor_id = pdev->ven_id;
-               props.vendor_part_id = pdev->dev_id;
-               err = 0;
-       }
-       else 
-               err = p_ibdev->query_device(p_ibdev, &props);
-       if (err) {
-               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
-                       ("ib_query_device failed (%d)\n",err));
-               status = errno_to_iberr(err);
-               goto err_query_device;
-       }
-       
-       // allocate array for port properties
-       num_ports = p_ibdev->phys_port_cnt;   /* Number of physical ports of the HCA */
-       if ( num_ports )
-               if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {
-                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_alloc_ports;
-               }
-
-       // start calculation of ib_ca_attr_t full size
-       num_gids = 0;
-       num_pkeys = 0;
-       required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
-               PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +
-               PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+
-               PTR_ALIGN(MLX4_BOARD_ID_LEN)+
-               PTR_ALIGN(sizeof(uplink_info_t));       /* uplink info */
-       
-       // get port properties
-       for (port_num = 0; port_num <= (end_port(p_ibdev) - start_port(p_ibdev)); ++port_num) {
-               // request
-               err = p_ibdev->query_port(p_ibdev, (u8)(port_num + start_port(p_ibdev)), &hca_ports[port_num]);
-               if (err) {
-                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));
-                       status = errno_to_iberr(err);
-                       goto err_query_port;
-               }
-
-               // calculate GID table size
-               num_gids  = hca_ports[port_num].gid_tbl_len;
-               size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);
-               required_size += size;
-
-               // calculate pkeys table size
-               num_pkeys = hca_ports[port_num].pkey_tbl_len;
-               size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);
-               required_size += size;
-       }
-
-       // resource sufficiency check
-       if (NULL == p_ca_attr || *p_byte_count < required_size) {
-               *p_byte_count = required_size;
-               status = IB_INSUFFICIENT_MEMORY;
-               if ( p_ca_attr != NULL) {
-                       HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
-                               ("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));
-               }
-               goto err_insuff_mem;
-       }
-
-       // Space is sufficient - setup table pointers
-       last_p = (uint8_t*)p_ca_attr;
-       last_p += PTR_ALIGN(sizeof(*p_ca_attr));
-
-       p_ca_attr->p_page_size = (uint32_t*)last_p;
-       last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));
-
-       p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
-       last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));
-
-       for (port_num = 0; port_num < num_ports; port_num++) {
-               p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
-               size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
-               last_p += size;
-
-               p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;
-               size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);
-               last_p += size;
-       }
-       
-       //copy vendor specific data
-       cl_memcpy(last_p,hca2mdev(p_hca)->board_id, MLX4_BOARD_ID_LEN);
-       last_p += PTR_ALIGN(MLX4_BOARD_ID_LEN);
-       *(uplink_info_t*)last_p = hca2pdev(p_hca)->uplink_info;
-       last_p += PTR_ALIGN(sizeof(uplink_info_t));     /* uplink info */
-       
-       // Separate the loops to ensure that table pointers are always setup
-       for (port_num = 0; port_num < num_ports; port_num++) {
-
-               // get pkeys, using cache
-               for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {
-                       err = p_ibdev->x.get_cached_pkey( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i,
-                               &p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );
-                       if (err) {
-                               status = errno_to_iberr(err);
-                               HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
-                                       ("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",
-                                       err, port_num + start_port(p_ibdev), i));
-                               goto err_get_pkey;
-                       }
-               }
-               
-               // get gids, using cache
-               for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {
-                       union ib_gid * VOID_PTR64  gid = (union ib_gid     *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];
-                       err = p_ibdev->x.get_cached_gid( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i, (union ib_gid *)gid );
-                       //TODO: do we need to convert gids to little endian
-                       if (err) {
-                               status = errno_to_iberr(err);
-                               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
-                                       ("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",
-                                       err, port_num + start_port(p_ibdev), i));
-                               goto err_get_gid;
-                       }
-               }
-
-               HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num));
-               HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,
-                       (" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", 
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14],
-                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15]));
-       }
-
-       // set result size
-       p_ca_attr->size = required_size;
-       CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );
-       HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n",
-               required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) ));
-       
-       // !!! GID/PKEY tables must be queried before this call !!!
-       from_hca_cap(p_ibdev, &props, hca_ports, p_ca_attr);
-
-       status = IB_SUCCESS;
-
-err_get_gid:
-err_get_pkey:
-err_insuff_mem:
-err_query_port:
-       if (hca_ports)
-               cl_free(hca_ports);
-err_alloc_ports:
-err_query_device:
-err_byte_count:        
-err_unsupported:
-err_user_unsupported:
-       if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
-               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
-               ("completes with ERROR status %x\n", status));
-       HCA_EXIT(HCA_DBG_SHIM);
-       return status;
-}
-
-ib_api_status_t
-mlnx_modify_ca (
-       IN              const   ib_ca_handle_t                          h_ca,
-       IN              const   uint8_t                                         port_num,
-       IN              const   ib_ca_mod_t                                     modca_cmd,
-       IN              const   ib_port_attr_mod_t                      *p_port_attr)
-{
-#define SET_CAP_MOD(al_mask, al_fld, ib)               \
-               if (modca_cmd & al_mask) {      \
-                       if (p_port_attr->cap.##al_fld)          \
-                               props.set_port_cap_mask |= ib;  \
-                       else            \
-                               props.clr_port_cap_mask |= ib;  \
-               }
-
-       ib_api_status_t status;
-       int err;
-       struct ib_port_modify props;
-       int port_modify_mask = 0;
-       mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;
-       struct ib_device *p_ibdev = hca2ibdev(p_hca);
-
-       HCA_ENTER(HCA_DBG_SHIM);
-
-       //sanity check
-       if( !cl_is_blockable() ) {
-                       status = IB_UNSUPPORTED;
-                       goto err_unsupported;
-       }
-       
-       if (port_num < start_port(p_ibdev) || port_num > end_port(p_ibdev)) {
-               status = IB_INVALID_PORT;
-               goto err_port;
-       }
-
-       // prepare parameters
-       RtlZeroMemory(&props, sizeof(props));
-       SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);
-       SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);
-       SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);
-       SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);
-       if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) 
-               port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;
-       
-       // modify port
-       err = p_ibdev->modify_port(p_ibdev, port_num, port_modify_mask, &props );
-       if (err) {
-               status = errno_to_iberr(err);
-               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_modify_port failed (%d) \n",err));
-               goto err_modify_port;
-       }
-
-       status =        IB_SUCCESS;
-
-err_modify_port:
-err_port:
-err_unsupported:
-       if (status != IB_SUCCESS)
-       {
-               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
-                       ("completes with ERROR status %x\n", status));
-       }
-       HCA_EXIT(HCA_DBG_SHIM);
-       return status;
-}
-
-ib_api_status_t
-mlnx_close_ca (
-       IN                              ib_ca_handle_t                          h_ca)
-{
-       mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;
-       HCA_ENTER(HCA_DBG_SHIM);
-       
-
-       if (hca_is_livefish(hca2fdo(p_hca))) 
-               goto done;
-
-       mlnx_reset_cb(p_hca);
-
-done:
-       HCA_EXIT(HCA_DBG_SHIM);
-       
-       return IB_SUCCESS;
-}
-
-       
-
-void
-mlnx_ca_if(
-       IN      OUT                     ci_interface_t                          *p_interface )
-{
-       p_interface->open_ca = mlnx_open_ca;
-       p_interface->modify_ca = mlnx_modify_ca; 
-       p_interface->query_ca = mlnx_query_ca;
-       p_interface->close_ca = mlnx_close_ca;
-}
-
-void
-mlnx_ca_if_livefish(
-       IN      OUT                     ci_interface_t                          *p_interface )
-{
-       p_interface->open_ca = mlnx_open_ca;
-       p_interface->query_ca = mlnx_query_ca;
-       p_interface->close_ca = mlnx_close_ca;
-}
-
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: al.c 1611 2006-08-20 14:48:55Z sleybo $\r
+ */\r
+\r
+#include "precomp.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "ca.tmh"\r
+#endif\r
+\r
+ib_api_status_t\r
+mlnx_open_ca (\r
+       IN              const   ib_net64_t                                      ca_guid, // IN  const char *                ca_name,\r
+       IN              const   ci_completion_cb_t                      pfn_completion_cb,\r
+       IN              const   ci_async_event_cb_t                     pfn_async_event_cb,\r
+       IN              const   void*const                                      ca_context,\r
+               OUT                     ib_ca_handle_t                          *ph_ca)\r
+{\r
+       mlnx_hca_t                              *p_hca;\r
+       ib_api_status_t status = IB_NOT_FOUND;\r
+       struct ib_device *p_ibdev;\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
+               ("context 0x%p\n", ca_context));\r
+\r
+       // find CA object\r
+       p_hca = mlnx_hca_from_guid( ca_guid );\r
+       if( !p_hca ) {\r
+               if (status != IB_SUCCESS) \r
+               {\r
+                       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
+                       ("completes with ERROR status IB_NOT_FOUND\n"));\r
+               }\r
+               HCA_EXIT(HCA_DBG_SHIM);\r
+               return IB_NOT_FOUND;\r
+       }\r
+\r
+       p_ibdev = hca2ibdev(p_hca);\r
+\r
+       if (hca_is_livefish(hca2fdo(p_hca))) \r
+               goto done;\r
+\r
+       HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
+               ("context 0x%p\n", ca_context));\r
+       status = mlnx_set_cb(p_hca,\r
+               pfn_completion_cb,\r
+               pfn_async_event_cb,\r
+               ca_context);\r
+       if (IB_SUCCESS != status) {\r
+               goto err_set_cb;\r
+       }\r
+\r
+       \r
+       //TODO: do we need something for kernel users ?\r
+\r
+       // Return pointer to HCA object\r
+done:  \r
+       if (ph_ca) *ph_ca = (ib_ca_handle_t)p_hca;\r
+       status =  IB_SUCCESS;\r
+\r
+//err_mad_cache:\r
+err_set_cb:\r
+       if (status != IB_SUCCESS)\r
+       {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
+                       ("completes with ERROR status %x\n", status));\r
+       }\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_ca (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+               OUT                     ib_ca_attr_t                            *p_ca_attr,\r
+       IN      OUT                     uint32_t                                        *p_byte_count,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       int i;\r
+       int err;\r
+       ib_api_status_t         status;\r
+       uint32_t                        size, required_size;\r
+       int                                     port_num, num_ports;\r
+       uint32_t                        num_gids, num_pkeys;\r
+       uint32_t                        num_page_sizes = 1; // TBD: what is actually supported\r
+       uint8_t                         *last_p;\r
+       struct ib_device_attr props;\r
+       struct ib_port_attr  *hca_ports = NULL;\r
+       mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;\r
+       struct ib_device *p_ibdev = hca2ibdev(p_hca);\r
+       \r
+       \r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       // sanity checks\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));\r
+                       p_umv_buf->status = status = IB_UNSUPPORTED;\r
+                       goto err_user_unsupported;\r
+       }\r
+\r
+       if( !cl_is_blockable() ) {\r
+                       status = IB_UNSUPPORTED;\r
+                       goto err_unsupported;\r
+       }\r
+\r
+       if (NULL == p_byte_count) {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_byte_count;\r
+       }\r
+\r
+       // query the device\r
+       if ( hca_is_livefish(hca2fdo(p_hca)) ) {\r
+               struct pci_dev *pdev = hca2pdev(p_hca);\r
+               props.max_pd = 1;\r
+               props.vendor_id = pdev->ven_id;\r
+               props.vendor_part_id = pdev->dev_id;\r
+               err = 0;\r
+       }\r
+       else \r
+               err = p_ibdev->query_device(p_ibdev, &props);\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, \r
+                       ("ib_query_device failed (%d)\n",err));\r
+               status = errno_to_iberr(err);\r
+               goto err_query_device;\r
+       }\r
+       \r
+       // allocate array for port properties\r
+       num_ports = p_ibdev->phys_port_cnt;   /* Number of physical ports of the HCA */\r
+       if ( num_ports )\r
+               if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_alloc_ports;\r
+               }\r
+\r
+       // start calculation of ib_ca_attr_t full size\r
+       num_gids = 0;\r
+       num_pkeys = 0;\r
+       required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +\r
+               PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +\r
+               PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+\r
+               PTR_ALIGN(MLX4_BOARD_ID_LEN)+\r
+               PTR_ALIGN(sizeof(uplink_info_t));       /* uplink info */\r
+       \r
+       // get port properties\r
+       for (port_num = 0; port_num <= (end_port(p_ibdev) - start_port(p_ibdev)); ++port_num) {\r
+               // request\r
+               err = p_ibdev->query_port(p_ibdev, (u8)(port_num + start_port(p_ibdev)), &hca_ports[port_num]);\r
+               if (err) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));\r
+                       status = errno_to_iberr(err);\r
+                       goto err_query_port;\r
+               }\r
+\r
+               // calculate GID table size\r
+               num_gids  = hca_ports[port_num].gid_tbl_len;\r
+               size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);\r
+               required_size += size;\r
+\r
+               // calculate pkeys table size\r
+               num_pkeys = hca_ports[port_num].pkey_tbl_len;\r
+               size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);\r
+               required_size += size;\r
+       }\r
+\r
+       // resource sufficiency check\r
+       if (NULL == p_ca_attr || *p_byte_count < required_size) {\r
+               *p_byte_count = required_size;\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               if ( p_ca_attr != NULL) {\r
+                       HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, \r
+                               ("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));\r
+               }\r
+               goto err_insuff_mem;\r
+       }\r
+\r
+       // Space is sufficient - setup table pointers\r
+       last_p = (uint8_t*)p_ca_attr;\r
+       last_p += PTR_ALIGN(sizeof(*p_ca_attr));\r
+\r
+       p_ca_attr->p_page_size = (uint32_t*)last_p;\r
+       last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));\r
+\r
+       p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;\r
+       last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));\r
+\r
+       for (port_num = 0; port_num < num_ports; port_num++) {\r
+               p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;\r
+               size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);\r
+               last_p += size;\r
+\r
+               p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;\r
+               size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);\r
+               last_p += size;\r
+       }\r
+       \r
+       //copy vendor specific data\r
+       cl_memcpy(last_p,hca2mdev(p_hca)->board_id, MLX4_BOARD_ID_LEN);\r
+       last_p += PTR_ALIGN(MLX4_BOARD_ID_LEN);\r
+       *(uplink_info_t*)last_p = hca2pdev(p_hca)->uplink_info;\r
+       last_p += PTR_ALIGN(sizeof(uplink_info_t));     /* uplink info */\r
+       \r
+       // Separate the loops to ensure that table pointers are always setup\r
+       for (port_num = 0; port_num < num_ports; port_num++) {\r
+\r
+               // get pkeys, using cache\r
+               for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {\r
+                       err = p_ibdev->x.get_cached_pkey( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i,\r
+                               &p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );\r
+                       if (err) {\r
+                               status = errno_to_iberr(err);\r
+                               HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, \r
+                                       ("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",\r
+                                       err, port_num + start_port(p_ibdev), i));\r
+                               goto err_get_pkey;\r
+                       }\r
+               }\r
+               \r
+               // get gids, using cache\r
+               for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {\r
+                       union ib_gid * gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];\r
+                       err = p_ibdev->x.get_cached_gid( p_ibdev, (u8)(port_num + start_port(p_ibdev)), i, (union ib_gid *)gid );\r
+                       //TODO: do we need to convert gids to little endian\r
+                       if (err) {\r
+                               status = errno_to_iberr(err);\r
+                               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, \r
+                                       ("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",\r
+                                       err, port_num + start_port(p_ibdev), i));\r
+                               goto err_get_gid;\r
+                       }\r
+               }\r
+\r
+               HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num));\r
+               HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,\r
+                       (" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", \r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14],\r
+                       p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15]));\r
+       }\r
+\r
+       // set result size\r
+       p_ca_attr->size = required_size;\r
+       CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );\r
+       HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n",\r
+               required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) ));\r
+       \r
+       // !!! GID/PKEY tables must be queried before this call !!!\r
+       from_hca_cap(p_ibdev, &props, hca_ports, p_ca_attr);\r
+\r
+       status = IB_SUCCESS;\r
+\r
+err_get_gid:\r
+err_get_pkey:\r
+err_insuff_mem:\r
+err_query_port:\r
+       if (hca_ports)\r
+               cl_free(hca_ports);\r
+err_alloc_ports:\r
+err_query_device:\r
+err_byte_count:        \r
+err_unsupported:\r
+err_user_unsupported:\r
+       if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )\r
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+               ("completes with ERROR status %x\n", status));\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_modify_ca (\r
+       IN              const   ib_ca_handle_t                          h_ca,\r
+       IN              const   uint8_t                                         port_num,\r
+       IN              const   ib_ca_mod_t                                     modca_cmd,\r
+       IN              const   ib_port_attr_mod_t                      *p_port_attr)\r
+{\r
+#define SET_CAP_MOD(al_mask, al_fld, ib)               \\r
+               if (modca_cmd & al_mask) {      \\r
+                       if (p_port_attr->cap.##al_fld)          \\r
+                               props.set_port_cap_mask |= ib;  \\r
+                       else            \\r
+                               props.clr_port_cap_mask |= ib;  \\r
+               }\r
+\r
+       ib_api_status_t status;\r
+       int err;\r
+       struct ib_port_modify props;\r
+       int port_modify_mask = 0;\r
+       mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;\r
+       struct ib_device *p_ibdev = hca2ibdev(p_hca);\r
+\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+       //sanity check\r
+       if( !cl_is_blockable() ) {\r
+                       status = IB_UNSUPPORTED;\r
+                       goto err_unsupported;\r
+       }\r
+       \r
+       if (port_num < start_port(p_ibdev) || port_num > end_port(p_ibdev)) {\r
+               status = IB_INVALID_PORT;\r
+               goto err_port;\r
+       }\r
+\r
+       // prepare parameters\r
+       RtlZeroMemory(&props, sizeof(props));\r
+       SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);\r
+       SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);\r
+       SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);\r
+       SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);\r
+       if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) \r
+               port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;\r
+       \r
+       // modify port\r
+       err = p_ibdev->modify_port(p_ibdev, port_num, port_modify_mask, &props );\r
+       if (err) {\r
+               status = errno_to_iberr(err);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_modify_port failed (%d) \n",err));\r
+               goto err_modify_port;\r
+       }\r
+\r
+       status =        IB_SUCCESS;\r
+\r
+err_modify_port:\r
+err_port:\r
+err_unsupported:\r
+       if (status != IB_SUCCESS)\r
+       {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+                       ("completes with ERROR status %x\n", status));\r
+       }\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_close_ca (\r
+       IN                              ib_ca_handle_t                          h_ca)\r
+{\r
+       mlnx_hca_t *p_hca = (mlnx_hca_t *)h_ca;\r
+       HCA_ENTER(HCA_DBG_SHIM);\r
+       \r
+\r
+       if (hca_is_livefish(hca2fdo(p_hca))) \r
+               goto done;\r
+\r
+       mlnx_reset_cb(p_hca);\r
+\r
+done:\r
+       HCA_EXIT(HCA_DBG_SHIM);\r
+       \r
+       return IB_SUCCESS;\r
+}\r
+\r
+       \r
+\r
+void\r
+mlnx_ca_if(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->open_ca = mlnx_open_ca;\r
+       p_interface->modify_ca = mlnx_modify_ca; \r
+       p_interface->query_ca = mlnx_query_ca;\r
+       p_interface->close_ca = mlnx_close_ca;\r
+}\r
+\r
+void\r
+mlnx_ca_if_livefish(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->open_ca = mlnx_open_ca;\r
+       p_interface->query_ca = mlnx_query_ca;\r
+       p_interface->close_ca = mlnx_close_ca;\r
+}\r
+\r
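
mlnx_query_ca above follows the usual size-query convention: called with a NULL or undersized attribute buffer, it writes the required size to *p_byte_count and returns IB_INSUFFICIENT_MEMORY, so callers probe once, allocate, and retry. A hypothetical caller sketch (names assumed, illustration only):

	uint32_t	byte_count = 0;
	ib_ca_attr_t	*p_attr = NULL;
	ib_api_status_t	status;

	/* First pass: learn the size of the variable-length attribute blob. */
	status = mlnx_query_ca( h_ca, NULL, &byte_count, NULL );
	if( status == IB_INSUFFICIENT_MEMORY )
	{
		p_attr = (ib_ca_attr_t*)cl_zalloc( byte_count );
		if( p_attr )	/* second pass: fill the buffer */
			status = mlnx_query_ca( h_ca, p_attr, &byte_count, NULL );
	}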
diff --git a/hw/mlx4/user/hca/verbs.c b/hw/mlx4/user/hca/verbs.c
index 9a16c63..be05909 100644
-/*
- * Copyright (c) 2007 Cisco, Inc.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "mlx4.h"
-#include "verbs.h"
-#include "mx_abi.h"
-#include "wqe.h"
-#include "mlx4_debug.h"
-
-#if defined(EVENT_TRACING)
-#include "verbs.tmh"
-#endif
-
-ib_api_status_t
-mlx4_pre_open_ca (
-       IN              const   ib_net64_t                              ca_guid,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_ca_handle_t                  *ph_uvp_ca )
-{
-       struct ibv_context      *context;
-       ib_api_status_t status = IB_SUCCESS;
-
-       UNREFERENCED_PARAMETER(ca_guid);
-
-       context = mlx4_alloc_context();
-       if (!context) {
-               status = IB_INSUFFICIENT_MEMORY;                
-               goto end;
-       }
-       
-       if( p_umv_buf )
-       {
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) );
-                       if( !p_umv_buf->p_inout_buf )
-                       {
-                               status = IB_INSUFFICIENT_MEMORY;
-                               goto end;
-                       }
-               }
-               p_umv_buf->input_size = 0;
-               p_umv_buf->output_size = sizeof(struct ibv_get_context_resp);
-               p_umv_buf->command = TRUE;
-       }
-
-       *ph_uvp_ca = (ib_ca_handle_t)context;
-
-end:   
-       return status;
-}
-
-ib_api_status_t
-mlx4_post_open_ca (
-       IN              const   ib_net64_t                              ca_guid,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN      OUT                     ib_ca_handle_t                  *ph_uvp_ca,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_get_context_resp *p_resp;
-       struct ibv_context *context = (struct ibv_context *)*ph_uvp_ca;
-       ib_api_status_t status = IB_SUCCESS;
-
-       UNREFERENCED_PARAMETER(ca_guid);
-
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-       
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               if (!mlx4_fill_context(context, p_resp))
-               {
-                       status = IB_INSUFFICIENT_RESOURCES;
-                       goto end;
-               }
-       }
-
-end:
-       cl_free(p_resp);
-       return status;
-}
-
-ib_api_status_t
-mlx4_pre_query_ca (
-       IN                              ib_ca_handle_t                  h_uvp_ca,
-       IN                              ib_ca_attr_t                            *p_ca_attr,
-       IN                              size_t                                  byte_count,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       ib_api_status_t status = IB_SUCCESS;
-
-       UNREFERENCED_PARAMETER(h_uvp_ca);
-
-       /* Note that query_ca calls *always* get their attributes from the kernel.
-        *
-        * Assume if user buffer is valid then byte_cnt is valid too 
-        * so we can preallocate ca attr buffer for post ioctl data saving
-        *
-        * Note that we squirrel the buffer away into the umv_buf and only
-        * set it into the HCA if the query is successful.
-        */
-       if ( p_ca_attr != NULL )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc(byte_count);
-               if ( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_RESOURCES;
-                       goto end;
-               }
-       }
-
-end:
-       return status;
-}
-
-void
-__fixup_ca_attr(
-       IN                              ib_ca_attr_t* const                     p_dest,
-       IN              const   ib_ca_attr_t* const                     p_src )
-{
-       uint8_t         i;
-       uintn_t         offset = (uintn_t)p_dest - (uintn_t)p_src;
-       ib_port_attr_t                  *p_tmp_port_attr = NULL;
-
-       CL_ASSERT( p_dest );
-       CL_ASSERT( p_src );
-
-       /* Fix up the pointers to point within the destination buffer. */
-       p_dest->p_page_size =
-               (uint32_t* VOID_PTR64)(((uint8_t* VOID_PTR64)p_dest->p_page_size) + offset);
-
-       p_tmp_port_attr =
-               (ib_port_attr_t* VOID_PTR64)(((uint8_t* VOID_PTR64)p_dest->p_port_attr) + offset);
-
-       /* Fix up each port attribute's gid and pkey table pointers. */
-       for( i = 0; i < p_dest->num_ports; i++ )
-       {
-               p_tmp_port_attr[i].p_gid_table = (ib_gid_t* VOID_PTR64)
-                       (((uint8_t* VOID_PTR64)p_tmp_port_attr[i].p_gid_table) + offset);
-
-               p_tmp_port_attr[i].p_pkey_table =(ib_net16_t* VOID_PTR64)
-                       (((uint8_t* VOID_PTR64)p_tmp_port_attr[i].p_pkey_table) + offset);
-       }
-       p_dest->p_port_attr = p_tmp_port_attr;
-}
-
-void
-mlx4_post_query_ca (
-       IN                              ib_ca_handle_t                  h_uvp_ca,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN                              ib_ca_attr_t                            *p_ca_attr,
-       IN                              size_t                                  byte_count,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_context *context = (struct ibv_context *)h_uvp_ca;
-       
-       CL_ASSERT(context && p_umv_buf);
-
-       if ( ioctl_status == IB_SUCCESS && p_ca_attr && byte_count)
-       {
-               CL_ASSERT( byte_count >= p_ca_attr->size );
-
-               pthread_mutex_lock(&context->mutex);
-
-               if (context->p_hca_attr)
-                       cl_free(context->p_hca_attr);
-               context->p_hca_attr = p_umv_buf->p_inout_buf;
-               cl_memcpy( context->p_hca_attr, p_ca_attr, p_ca_attr->size );
-               __fixup_ca_attr( context->p_hca_attr, p_ca_attr );
-               
-               pthread_mutex_unlock(&context->mutex);
-       }
-       else if (p_umv_buf->p_inout_buf) 
-       {
-               cl_free(p_umv_buf->p_inout_buf);
-       }
-}
-
-ib_api_status_t
-mlx4_post_close_ca (
-       IN                      ib_ca_handle_t                          h_uvp_ca,
-       IN                      ib_api_status_t                         ioctl_status )
-{
-       struct ibv_context *context = (struct ibv_context *)h_uvp_ca;
-
-       CL_ASSERT(context);
-
-       if (IB_SUCCESS == ioctl_status)
-               mlx4_free_context(context);
-
-       return IB_SUCCESS;
-}
-
-ib_api_status_t
-mlx4_pre_alloc_pd (
-       IN              const   ib_ca_handle_t                  h_uvp_ca,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_pd_handle_t                  *ph_uvp_pd )
-{
-       struct mlx4_pd *pd;
-       struct ibv_context *context = (struct ibv_context *)h_uvp_ca;
-       ib_api_status_t status = IB_SUCCESS;
-
-       CL_ASSERT(context && p_umv_buf);
-
-       if( !p_umv_buf->p_inout_buf )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc( sizeof(struct ibv_alloc_pd_resp) );
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto end;
-               }
-       }
-       p_umv_buf->input_size = 0;
-       p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp);
-       p_umv_buf->command = TRUE;
-
-       // Mlx4 code:
-
-       pd = cl_malloc(sizeof *pd);
-       if (!pd) {
-               status = IB_INSUFFICIENT_MEMORY;                
-               goto end;
-       }
-
-       pd->ibv_pd.context = context;
-
-       *ph_uvp_pd = (ib_pd_handle_t)&pd->ibv_pd;
-       
-end:
-       return status;
-}
-
-void
-mlx4_post_alloc_pd (
-       IN                              ib_ca_handle_t                  h_uvp_ca,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN      OUT                     ib_pd_handle_t                  *ph_uvp_pd,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_pd                   *pd = (struct ibv_pd *)*ph_uvp_pd;
-       struct ibv_alloc_pd_resp        *p_resp;
-
-       UNREFERENCED_PARAMETER(h_uvp_ca);
-       
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               // Mlx4 code:
-               
-               pd->handle = p_resp->pd_handle;
-               to_mpd(pd)->pdn = p_resp->pdn;
-       }
-       else
-       {
-               cl_free(to_mpd(pd));
-       }
-       
-       cl_free(p_resp);
-       return;
-}
-
-void
-mlx4_post_free_pd (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN                              ib_api_status_t                 ioctl_status )
-{
-       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
-
-       CL_ASSERT(pd);
-
-       if (IB_SUCCESS == ioctl_status)
-               cl_free(to_mpd(pd));
-}
-
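-/*
- * Round a requested queue size up to the next power of two, as the HCA
- * requires. Illustrative example: a request for 100 entries yields 128.
- */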
-static int __align_queue_size(int req)
-{
-       int nent;
-
-       for (nent = 1; nent < req; nent <<= 1)
-               ; /* nothing */
-
-       return nent;
-}
-
-ib_api_status_t
-mlx4_pre_create_cq (
-       IN              const   ib_ca_handle_t                  h_uvp_ca,
-       IN      OUT             uint32_t* const                 p_size,
-       IN      OUT             ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_cq_handle_t                  *ph_uvp_cq )
-{
-       struct mlx4_cq          *cq;
-       struct ibv_create_cq    *p_create_cq;
-       struct ibv_context              *context = (struct ibv_context *)h_uvp_ca;
-       ib_api_status_t         status = IB_SUCCESS;
-       int size = max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) );
-
-       CL_ASSERT(h_uvp_ca && p_umv_buf);
-
-       if( !p_umv_buf->p_inout_buf )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc( size );
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_umv_buf;
-               }
-       }
-       p_umv_buf->input_size = sizeof(struct ibv_create_cq);
-       p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);
-       p_umv_buf->command = TRUE;
-
-       p_create_cq = p_umv_buf->p_inout_buf;
-
-       // Mlx4 code:
-       
-       /* Sanity check CQ size before proceeding */
-       if (*p_size > 0x3fffff) {
-               status = IB_INVALID_CQ_SIZE;
-               goto err_cqe_size;
-       }
-
-       cq = cl_malloc(sizeof *cq);
-       if (!cq) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_cq;
-       }
-
-       if (cl_spinlock_init(&cq->lock)) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_lock;
-       }
-
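-	/*
-	 * One CQE is reserved for the driver, so round the total (request + 1)
-	 * up to a power of two here; the consumer-visible size reported below
-	 * (p_create_cq->cqe) is that power of two minus one.
-	 */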
-       *p_size = __align_queue_size(*p_size + 1);
-
-	if (mlx4_alloc_buf(&cq->buf, *p_size * MLX4_CQ_ENTRY_SIZE,
-						context->page_size))
-	{
-		status = IB_INSUFFICIENT_MEMORY;
-		goto err_alloc_buf;
-	}
-
-       cq->ibv_cq.context = context;
-       cq->cons_index = 0;
-               
-       cq->set_ci_db  = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
-	if (!cq->set_ci_db)
-	{
-		status = IB_INSUFFICIENT_MEMORY;
-		goto err_alloc_db;
-	}
-
-       cq->arm_db = cq->set_ci_db + 1;
-       *cq->arm_db = 0;
-       cq->arm_sn = 1;
-       *cq->set_ci_db = 0;
-
-       p_create_cq->buf_addr = (uintptr_t) cq->buf.buf;
-       p_create_cq->db_addr  = (uintptr_t) cq->set_ci_db;
-       p_create_cq->arm_sn_addr  = (uintptr_t) &cq->arm_sn;
-       p_create_cq->cqe = --(*p_size);
-
-       *ph_uvp_cq = (ib_cq_handle_t)&cq->ibv_cq;
-       goto end;
-
-err_alloc_db:
-       mlx4_free_buf(&cq->buf);
-err_alloc_buf:
-       cl_spinlock_destroy(&cq->lock);
-err_lock:
-       cl_free(cq);
-err_cq:
-err_cqe_size:
-       cl_free(p_umv_buf->p_inout_buf);
-err_umv_buf:
-end:
-       return status;
-}
-
-void
-mlx4_post_create_cq (
-       IN              const   ib_ca_handle_t                  h_uvp_ca,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN              const   uint32_t                                        size,
-       IN      OUT                     ib_cq_handle_t                  *ph_uvp_cq,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_cq                           *cq = (struct ibv_cq *)*ph_uvp_cq;
-       struct ibv_create_cq_resp       *p_resp;
-
-       UNREFERENCED_PARAMETER(h_uvp_ca);
-       UNREFERENCED_PARAMETER(size);
-       
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               // Mlx4 code:
-               
-               to_mcq(cq)->cqn = p_resp->cqn;
-               cq->cqe                 = p_resp->cqe;
-               cq->handle              = p_resp->cq_handle;
-       }
-       else
-       {
-               mlx4_post_destroy_cq (*ph_uvp_cq, IB_SUCCESS);
-       }
-       
-       cl_free(p_resp);
-       return;
-}
-
-ib_api_status_t
-mlx4_pre_query_cq (
-       IN              const   ib_cq_handle_t                  h_uvp_cq,
-               OUT                     uint32_t* const                 p_size,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;
-
-       UNREFERENCED_PARAMETER(p_umv_buf);
-       
-       *p_size = cq->cqe;
-
-       return IB_VERBS_PROCESSING_DONE;
-}
-
-void
-mlx4_post_destroy_cq (
-       IN              const   ib_cq_handle_t                  h_uvp_cq,
-       IN                              ib_api_status_t                 ioctl_status )
-{
-       struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;
-
-       CL_ASSERT(cq);
-
-       if (IB_SUCCESS == ioctl_status) {
-               mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
-               mlx4_free_buf(&to_mcq(cq)->buf);
-
-               cl_spinlock_destroy(&to_mcq(cq)->lock);
-               cl_free(to_mcq(cq));
-       }
-}
-
-ib_api_status_t  
-mlx4_pre_create_srq (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN              const   ib_srq_attr_t                           *p_srq_attr,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_srq_handle_t                 *ph_uvp_srq )
-{
-       struct mlx4_srq *srq;
-       struct ibv_create_srq *p_create_srq;
-       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
-       ib_api_status_t status = IB_SUCCESS;
-       size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );
-
-       CL_ASSERT(p_umv_buf);
-
-       if( !p_umv_buf->p_inout_buf )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc( size ); 
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_memory;
-               }
-       }
-       p_umv_buf->input_size = sizeof(struct ibv_create_srq);
-       p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
-       p_umv_buf->command = TRUE;
-
-       p_create_srq = p_umv_buf->p_inout_buf;
-       
-       // Mlx4 code:
-
-       /* Sanity check SRQ size before proceeding */
-       if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)
-       {
-               status = IB_INVALID_PARAMETER;
-               goto err_params;
-       }
-
-       srq = cl_malloc(sizeof *srq);
-       if (!srq) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_alloc_srq;
-       }
-
-       if (cl_spinlock_init(&srq->lock)) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_lock;
-       }
-
-       srq->ibv_srq.pd                 = pd;
-       srq->ibv_srq.context    = pd->context;
-       
-       srq->max        = __align_queue_size(p_srq_attr->max_wr + 1);
-       srq->max_gs  = p_srq_attr->max_sge;
-       srq->counter    = 0;
-
-       if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))
-       {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_alloc_buf;
-       }
-
-       srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
-	if (!srq->db)
-	{
-		status = IB_INSUFFICIENT_MEMORY;
-		goto err_alloc_db;
-	}
-
-       *srq->db = 0;
-       
-       // fill the parameters for ioctl
-       p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;
-       p_create_srq->db_addr  = (uintptr_t) srq->db;
-       p_create_srq->pd_handle = pd->handle;
-       p_create_srq->max_wr = p_srq_attr->max_wr;
-       p_create_srq->max_sge = p_srq_attr->max_sge;
-       p_create_srq->srq_limit = p_srq_attr->srq_limit;
-
-       *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;
-       goto end;
-
-err_alloc_db:
-       cl_free(srq->wrid);
-       mlx4_free_buf(&srq->buf);
-err_alloc_buf:
-       cl_spinlock_destroy(&srq->lock);
-err_lock:
-       cl_free(srq);
-err_alloc_srq:
-       cl_free(p_umv_buf->p_inout_buf);
-err_params:
-err_memory:
-end:
-       return status;
-}
-
-void
-mlx4_post_create_srq (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN      OUT                     ib_srq_handle_t                 *ph_uvp_srq,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_srq *ibsrq = (struct ibv_srq *)*ph_uvp_srq;
-       struct mlx4_srq *srq = to_msrq(ibsrq);
-       struct ibv_create_srq_resp *p_resp;
-
-       UNREFERENCED_PARAMETER(h_uvp_pd);
-       
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-       
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               // Mlx4 code:
-
-               srq->srqn       = p_resp->srqn;
-               ibsrq->handle   = p_resp->srq_handle;
-               
-               srq->max                = p_resp->max_wr;
-               srq->max_gs     = p_resp->max_sge;
-       }
-       else
-       {
-               mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS);
-       }
-
-       cl_free(p_resp);
-       return;
-}
-
-ib_api_status_t
-mlx4_pre_destroy_srq (
-       IN              const   ib_srq_handle_t                 h_uvp_srq )
-{
-#ifdef XRC_SUPPORT
-       struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq;
-       struct mlx4_srq *srq = to_msrq(ibsrq);
-       struct mlx4_cq *mcq = NULL;
-       
-       if (ibsrq->xrc_cq)
-       {
-               /* is an xrc_srq */
-               mcq = to_mcq(ibsrq->xrc_cq);
-               mlx4_cq_clean(mcq, 0, srq);
-               cl_spinlock_acquire(&mcq->lock);
-               mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn);
-               cl_spinlock_release(&mcq->lock);
-       }
-#else
-       UNUSED_PARAM(h_uvp_srq);
-#endif 
-       return IB_SUCCESS;
-}
-
-void
-mlx4_post_destroy_srq (
-       IN              const   ib_srq_handle_t                 h_uvp_srq,
-       IN                              ib_api_status_t                 ioctl_status )
-{
-       struct ibv_srq          *ibsrq = (struct ibv_srq *)h_uvp_srq;
-       struct mlx4_srq *srq = to_msrq(ibsrq);
-       
-       CL_ASSERT(srq);
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db);
-               cl_free(srq->wrid);
-               mlx4_free_buf(&srq->buf);
-               cl_spinlock_destroy(&srq->lock);
-               cl_free(srq);
-       }
-       else
-       {
-#ifdef XRC_SUPPORT
-               if (ibsrq->xrc_cq) {
-                       /* is an xrc_srq */
-                       struct mlx4_cq  *mcq = to_mcq(ibsrq->xrc_cq);
-                       cl_spinlock_acquire(&mcq->lock);
-                       mlx4_store_xrc_srq(to_mctx(ibsrq->context), srq->srqn, srq);
-                       cl_spinlock_release(&mcq->lock);
-               }
-#endif         
-       }
-}
-
-static enum ibv_qp_type
-__to_qp_type(ib_qp_type_t type)
-{
-       switch (type) {
-       case IB_QPT_RELIABLE_CONN: return IBV_QPT_RC;
-       case IB_QPT_UNRELIABLE_CONN: return IBV_QPT_UC;
-       case IB_QPT_UNRELIABLE_DGRM: return IBV_QPT_UD;
-#ifdef XRC_SUPPORT
-       //case IB_QPT_XRC_CONN: return IBV_QPT_XRC;
-#endif 
-       default: return IBV_QPT_RC;
-       }
-}
-
-ib_api_status_t
-mlx4_pre_create_qp (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN              const   ib_qp_create_t                  *p_create_attr,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_qp_handle_t                  *ph_uvp_qp )
-{
-       struct ibv_pd                   *pd = (struct ibv_pd *)h_uvp_pd;
-       struct mlx4_context     *context = to_mctx(pd->context);
-       struct mlx4_qp          *qp;
-       struct ibv_create_qp    *p_create_qp;
-       struct ibv_qp_init_attr attr;
-       ib_api_status_t                 status = IB_SUCCESS;
-       int size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );
-
-       CL_ASSERT(p_umv_buf);
-
-       if( !p_umv_buf->p_inout_buf )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc(size);
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_memory;
-               }
-       }
-       p_umv_buf->input_size = sizeof(struct ibv_create_qp);
-       p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);
-       p_umv_buf->command = TRUE;
-
-       p_create_qp = p_umv_buf->p_inout_buf;
-       
-       /* convert attributes */
-       memset( &attr, 0, sizeof(attr) );
-       attr.send_cq                            = (struct ibv_cq *)p_create_attr->h_sq_cq;
-       attr.recv_cq                            = (struct ibv_cq *)p_create_attr->h_rq_cq;
-       attr.srq                                        = (struct ibv_srq*)p_create_attr->h_srq;
-       attr.cap.max_send_wr            = p_create_attr->sq_depth;
-       attr.cap.max_recv_wr            = p_create_attr->rq_depth;
-       attr.cap.max_send_sge           = p_create_attr->sq_sge;
-       attr.cap.max_recv_sge           = p_create_attr->rq_sge;
-       attr.cap.max_inline_data        = p_create_attr->sq_max_inline;
-       attr.qp_type                            = __to_qp_type(p_create_attr->qp_type);
-       attr.sq_sig_all                         = p_create_attr->sq_signaled;
-
-       // Mlx4 code:
-       
-       /* Sanity check QP size before proceeding */
-       if (attr.cap.max_send_wr    > (uint32_t) context->max_qp_wr ||
-           attr.cap.max_recv_wr     > (uint32_t) context->max_qp_wr ||
-           attr.cap.max_send_sge   > (uint32_t) context->max_sge   ||
-           attr.cap.max_recv_sge   > (uint32_t) context->max_sge   ||
-           attr.cap.max_inline_data > 1024)
-       {
-               status = IB_INVALID_PARAMETER;
-               goto end;
-       }
-
-       qp = cl_malloc(sizeof *qp);
-       if (!qp) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_alloc_qp;
-       }
-
-       mlx4_calc_sq_wqe_size(&attr.cap, attr.qp_type, qp);
-
-       /*
-        * We need to leave 2 KB + 1 WQE of headroom in the SQ to
-        * allow HW to prefetch.
-        */
-       qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
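-	/* e.g. with a 64-byte send WQE stride (wqe_shift == 6) the line above
-	 * reserves (2048 >> 6) + 1 = 33 spare WQEs of prefetch headroom. */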
-       qp->sq.wqe_cnt = __align_queue_size(attr.cap.max_send_wr + qp->sq_spare_wqes);
-       qp->rq.wqe_cnt = __align_queue_size(attr.cap.max_recv_wr);
-
-       if (attr.srq || attr.qp_type == IBV_QPT_XRC)
-               attr.cap.max_recv_wr = qp->rq.wqe_cnt = 0;
-       else 
-       {
-               if (attr.cap.max_recv_sge < 1)
-                       attr.cap.max_recv_sge = 1;
-               if (attr.cap.max_recv_wr < 1)
-                       attr.cap.max_recv_wr = 1;
-       }
-
-	if (mlx4_alloc_qp_buf(pd, &attr.cap, attr.qp_type, qp))
-	{
-		status = IB_INSUFFICIENT_MEMORY;
-		goto err_alloc_qp_buff;
-	}
-
-       mlx4_init_qp_indices(qp);
-
-       if (cl_spinlock_init(&qp->sq.lock)) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_spinlock_sq;
-       }
-       if (cl_spinlock_init(&qp->rq.lock)) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_spinlock_rq;
-       }
-
-       // fill qp fields
-       if (!attr.srq && attr.qp_type != IBV_QPT_XRC) {
-               qp->db = mlx4_alloc_db(context, MLX4_DB_TYPE_RQ);
-               if (!qp->db) {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_db;
-               }
-
-               *qp->db = 0;
-       }
-       if (attr.sq_sig_all)
-               qp->sq_signal_bits = cl_hton32(MLX4_WQE_CTRL_CQ_UPDATE);
-       else
-               qp->sq_signal_bits = 0;
-
-       // fill the rest of qp fields
-       qp->ibv_qp.pd = pd;
-       qp->ibv_qp.context= pd->context;
-       qp->ibv_qp.send_cq = attr.send_cq;
-       qp->ibv_qp.recv_cq = attr.recv_cq;
-       qp->ibv_qp.srq = attr.srq;
-       qp->ibv_qp.state = IBV_QPS_RESET;
-       qp->ibv_qp.qp_type = attr.qp_type;
-
-       // fill request fields
-       p_create_qp->buf_addr = (uintptr_t) qp->buf.buf;
-       if (!attr.srq && attr.qp_type != IBV_QPT_XRC)
-               p_create_qp->db_addr = (uintptr_t) qp->db;
-       else
-               p_create_qp->db_addr = 0;
-
-       p_create_qp->pd_handle = pd->handle;
-       p_create_qp->send_cq_handle = attr.send_cq->handle;
-       p_create_qp->recv_cq_handle = attr.recv_cq->handle;
-       p_create_qp->srq_handle = attr.qp_type == IBV_QPT_XRC ?
-               (attr.xrc_domain ? attr.xrc_domain->handle : 0) :
-               (attr.srq ? attr.srq->handle : 0);
-
-       p_create_qp->max_send_wr = attr.cap.max_send_wr;
-       p_create_qp->max_recv_wr = attr.cap.max_recv_wr;
-       p_create_qp->max_send_sge = attr.cap.max_send_sge;
-       p_create_qp->max_recv_sge = attr.cap.max_recv_sge;
-       p_create_qp->max_inline_data = attr.cap.max_inline_data;
-       p_create_qp->sq_sig_all = (uint8_t)attr.sq_sig_all;
-       p_create_qp->qp_type = attr.qp_type;
-       p_create_qp->is_srq = (uint8_t)(attr.qp_type == IBV_QPT_XRC ?
-                                                                       !!attr.xrc_domain : !!attr.srq);
-
-       p_create_qp->log_sq_stride   = (uint8_t)qp->sq.wqe_shift;
-       for (p_create_qp->log_sq_bb_count = 0;
-            qp->sq.wqe_cnt > 1 << p_create_qp->log_sq_bb_count;
-            ++p_create_qp->log_sq_bb_count)
-               ; /* nothing */
-       p_create_qp->sq_no_prefetch = 0;
-
-       *ph_uvp_qp = (ib_qp_handle_t)&qp->ibv_qp;
-       goto end;
-
-err_db:
-       cl_spinlock_destroy(&qp->rq.lock);
-err_spinlock_rq:
-       cl_spinlock_destroy(&qp->sq.lock);
-err_spinlock_sq:
-       cl_free(qp->sq.wrid);
-       if (qp->rq.wqe_cnt)
-		cl_free(qp->rq.wrid);
-       mlx4_free_buf(&qp->buf);
-err_alloc_qp_buff:
-       cl_free(qp);    
-err_alloc_qp:
-       cl_free(p_umv_buf->p_inout_buf);
-err_memory:
-end:
-       return status;
-}
-
-ib_api_status_t
-mlx4_post_create_qp (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN                              ib_api_status_t                         ioctl_status,
-       IN      OUT             ib_qp_handle_t                  *ph_uvp_qp,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct mlx4_qp                  *qp = (struct mlx4_qp *)*ph_uvp_qp;
-       struct ibv_pd                   *pd = (struct ibv_pd *)h_uvp_pd;
-       struct ibv_context                      *context = pd->context;
-       struct ibv_create_qp_resp       *p_resp;
-       ib_api_status_t status = IB_SUCCESS;
-               
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-       
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               // Mlx4 code:
-               
-               struct ibv_qp_cap       cap;
-               
-               cap.max_recv_sge                = p_resp->max_recv_sge;
-               cap.max_send_sge                = p_resp->max_send_sge;
-               cap.max_recv_wr         = p_resp->max_recv_wr;
-               cap.max_send_wr         = p_resp->max_send_wr;
-               cap.max_inline_data     = p_resp->max_inline_data;
-               
-               qp->ibv_qp.handle               = p_resp->qp_handle;
-               qp->ibv_qp.qp_num       = p_resp->qpn;
-               
-               qp->rq.wqe_cnt  = cap.max_recv_wr;
-               qp->rq.max_gs   = cap.max_recv_sge;
-
-               /* adjust rq maxima to not exceed reported device maxima */
-               cap.max_recv_wr = min((uint32_t) to_mctx(context)->max_qp_wr, cap.max_recv_wr);
-               cap.max_recv_sge = min((uint32_t) to_mctx(context)->max_sge, cap.max_recv_sge);
-
-               qp->rq.max_post = cap.max_recv_wr;
-               //qp->rq.max_gs = cap.max_recv_sge;  - RIB : add this ?
-               mlx4_set_sq_sizes(qp, &cap, qp->ibv_qp.qp_type);
-
-               qp->doorbell_qpn    = cl_hton32(qp->ibv_qp.qp_num << 8);
-
-               if (mlx4_store_qp(to_mctx(context), qp->ibv_qp.qp_num, qp))
-               {
-                       mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);
-                       status = IB_INSUFFICIENT_MEMORY;
-               }
-               MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, 
-                       ("qpn %#x, buf %p, db_rec %p, sq %d:%d, rq %d:%d\n", 
-                       qp->ibv_qp.qp_num, qp->buf.buf, qp->db,
-                       qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); 
-       }
-       else
-       {
-               mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);
-       }
-
-       cl_free(p_resp);
-       return status;
-}
-
-ib_api_status_t
-mlx4_pre_modify_qp (
-       IN              const   ib_qp_handle_t                  h_uvp_qp,
-       IN              const   ib_qp_mod_t                             *p_modify_attr,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       ib_api_status_t status = IB_SUCCESS;
-
-       UNREFERENCED_PARAMETER(h_uvp_qp);
-       UNREFERENCED_PARAMETER(p_modify_attr);
-
-       CL_ASSERT(p_umv_buf);
-
-       if( !p_umv_buf->p_inout_buf )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc(sizeof(struct ibv_modify_qp_resp));
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_memory;
-               }
-       }
-       p_umv_buf->input_size = 0;
-       p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp);
-       p_umv_buf->command = TRUE;
-       
-err_memory:
-       return status;
-}
-
-void
-mlx4_post_query_qp (
-       IN                              ib_qp_handle_t                          h_uvp_qp,
-       IN                              ib_api_status_t                         ioctl_status,
-       IN      OUT                     ib_qp_attr_t                            *p_query_attr,
-       IN      OUT                     ci_umv_buf_t                                    *p_umv_buf )
-{
-       struct mlx4_qp *qp = (struct mlx4_qp *)h_uvp_qp;
-
-       UNREFERENCED_PARAMETER(p_umv_buf);
-
-       if(IB_SUCCESS == ioctl_status)
-       {
-               p_query_attr->sq_max_inline = qp->max_inline_data;
-               p_query_attr->sq_sge            = qp->sq.max_gs;
-               p_query_attr->sq_depth          = qp->sq.max_post;
-               p_query_attr->rq_sge            = qp->rq.max_gs;
-               p_query_attr->rq_depth          = qp->rq.max_post;
-       }
-}
-
-void
-mlx4_post_modify_qp (
-       IN              const   ib_qp_handle_t                  h_uvp_qp,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_qp                           *qp = (struct ibv_qp *)h_uvp_qp;
-       struct ibv_modify_qp_resp       *p_resp;
-
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status) 
-       {
-               // Mlx4 code:
-               
-               if (qp->state == IBV_QPS_RESET &&
-                   p_resp->attr_mask & IBV_QP_STATE &&
-                   p_resp->qp_state == IBV_QPS_INIT)
-               {
-                       mlx4_qp_init_sq_ownership(to_mqp(qp));
-               }
-
-               if (p_resp->attr_mask & IBV_QP_STATE) {
-                       qp->state = p_resp->qp_state;
-               }
-
-               if (p_resp->attr_mask & IBV_QP_STATE &&
-                   p_resp->qp_state == IBV_QPS_RESET)
-               {
-                       mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
-                                               qp->srq ? to_msrq(qp->srq) : NULL);
-                       if (qp->send_cq != qp->recv_cq)
-                               mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
-
-                       mlx4_init_qp_indices(to_mqp(qp));
-                       if (!qp->srq && qp->qp_type != IBV_QPT_XRC)
-                               *to_mqp(qp)->db = 0;
-               }
-       }
-
-       cl_free (p_resp);
-       return;
-}
-
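-/*
- * A QP may use two distinct CQs; taking both CQ locks in a fixed order
- * (lower CQN first) keeps concurrent callers from deadlocking against
- * each other.
- */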
-static void
-__mlx4_lock_cqs(struct ibv_qp *qp)
-{
-       struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
-       struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);
-
-       if (send_cq == recv_cq)
-               cl_spinlock_acquire(&send_cq->lock);
-       else if (send_cq->cqn < recv_cq->cqn) {
-               cl_spinlock_acquire(&send_cq->lock);
-               cl_spinlock_acquire(&recv_cq->lock);
-       } else {
-               cl_spinlock_acquire(&recv_cq->lock);
-               cl_spinlock_acquire(&send_cq->lock);
-       }
-}
-
-static void
-__mlx4_unlock_cqs(struct ibv_qp *qp)
-{
-       struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
-       struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);
-
-       if (send_cq == recv_cq)
-               cl_spinlock_release(&send_cq->lock);
-       else if (send_cq->cqn < recv_cq->cqn) {
-               cl_spinlock_release(&recv_cq->lock);
-               cl_spinlock_release(&send_cq->lock);
-       } else {
-               cl_spinlock_release(&send_cq->lock);
-               cl_spinlock_release(&recv_cq->lock);
-       }
-}
-
-ib_api_status_t
-mlx4_pre_destroy_qp (
-       IN              const   ib_qp_handle_t                  h_uvp_qp )
-{
-       struct ibv_qp *qp = (struct ibv_qp*)h_uvp_qp;
-
-       mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
-                               qp->srq ? to_msrq(qp->srq) : NULL);
-       if (qp->send_cq != qp->recv_cq)
-               mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
-
-       __mlx4_lock_cqs(qp);
-       mlx4_clear_qp(to_mctx(qp->context), qp->qp_num);
-       __mlx4_unlock_cqs(qp);
-
-       return IB_SUCCESS;
-}
-
-void
-mlx4_post_destroy_qp (
-       IN              const   ib_qp_handle_t                  h_uvp_qp,
-       IN                              ib_api_status_t                 ioctl_status )
-{
-       struct ibv_qp* ibqp = (struct ibv_qp *)h_uvp_qp;
-       struct mlx4_qp* qp = to_mqp(ibqp);
-       
-       CL_ASSERT(h_uvp_qp);
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC)
-                       mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);
-
-               cl_spinlock_destroy(&qp->sq.lock);
-               cl_spinlock_destroy(&qp->rq.lock);
-
-               MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, 
-                       ("qpn %#x, buf %p, sq %d:%d, rq %d:%d\n", qp->ibv_qp.qp_num, qp->buf.buf, 
-                       qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); 
-               cl_free(qp->sq.wrid);
-               if (qp->rq.wqe_cnt)
-                       cl_free(qp->rq.wrid);
-               mlx4_free_buf(&qp->buf);
-               cl_free(qp);
-       }
-       else
-       {
-               __mlx4_lock_cqs(ibqp);
-               mlx4_store_qp(to_mctx(ibqp->context), ibqp->qp_num, qp);
-               __mlx4_unlock_cqs(ibqp);                
-       }
-}
-
-void
-mlx4_nd_modify_qp (
-       IN              const   ib_qp_handle_t                  h_uvp_qp,
-               OUT                     void**                                  pp_outbuf,
-               OUT                     DWORD*                                  p_size )
-{
-       struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;
-
-       *(uint32_t**)pp_outbuf = (uint32_t*)&ibv_qp->state;
-       *p_size = sizeof(ibv_qp->state);
-}
-
-static ib_qp_state_t __from_qp_state(enum ibv_qp_state state)
-{
-       switch (state) {
-               case IBV_QPS_RESET: return IB_QPS_RESET;
-               case IBV_QPS_INIT: return IB_QPS_INIT;
-               case IBV_QPS_RTR: return IB_QPS_RTR;
-               case IBV_QPS_RTS: return IB_QPS_RTS;
-               case IBV_QPS_SQD: return IB_QPS_SQD;
-               case IBV_QPS_SQE: return IB_QPS_SQERR;
-               case IBV_QPS_ERR: return IB_QPS_ERROR;
-               default: return IB_QPS_TIME_WAIT;
-	}
-}
-
-uint32_t
-mlx4_nd_get_qp_state (
-       IN              const   ib_qp_handle_t                  h_uvp_qp )
-{
-       struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;
-
-       return __from_qp_state(ibv_qp->state);
-}
-
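-/*
- * Find the index of the given source GID in the port's GID table; if no
- * entry matches, index 0 (the port's primary GID) is used.
- */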
-static uint8_t
-__gid_to_index_lookup (
-       IN                      ib_ca_attr_t                                    *p_ca_attr,
-       IN                      uint8_t                                         port_num,
-       IN                      uint8_t                                         *raw_gid )
-{
-       ib_gid_t *p_gid_table = NULL;
-       uint8_t i, index = 0;
-       uint16_t num_gids;
-
-       p_gid_table = p_ca_attr->p_port_attr[port_num-1].p_gid_table;
-       CL_ASSERT (p_gid_table);
-
-       num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids;
-
-       for (i = 0; i < num_gids; i++)
-       {
-		if (!cl_memcmp (raw_gid, p_gid_table[i].raw, 16))
-               {
-                       index = i;
-                       break;
-               }
-       }
-       return index;
-}
-
-static enum ibv_rate __to_rate(uint8_t rate)
-{
-       if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IBV_RATE_2_5_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_5_GBS) return IBV_RATE_5_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_10_GBS) return IBV_RATE_10_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_20_GBS) return IBV_RATE_20_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_30_GBS) return IBV_RATE_30_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_40_GBS) return IBV_RATE_40_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_60_GBS) return IBV_RATE_60_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_80_GBS) return IBV_RATE_80_GBPS;
-       if (rate == IB_PATH_RECORD_RATE_120_GBS) return IBV_RATE_120_GBPS;
-       return IBV_RATE_MAX;
-}
-
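-/*
- * The GRH's first dword packs version:4, traffic class:8, flow label:20.
- * Illustrative example: a host-order value 0x6AB12345 yields version 6,
- * traffic class 0xAB and flow label 0x12345 (the flow label is returned
- * still in network byte order).
- */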
-inline void 
-__grh_get_ver_class_flow(
-       IN              const   ib_net32_t                                      ver_class_flow,
-               OUT                     uint8_t* const                          p_ver OPTIONAL,
-               OUT                     uint8_t* const                          p_tclass OPTIONAL,
-               OUT                     net32_t* const                          p_flow_lbl OPTIONAL )
-{
-       ib_net32_t tmp_ver_class_flow;
-
-       tmp_ver_class_flow = cl_ntoh32( ver_class_flow );
-
-       if (p_ver)
-               *p_ver = (uint8_t)(tmp_ver_class_flow >> 28);
-
-       if (p_tclass)
-               *p_tclass = (uint8_t)(tmp_ver_class_flow >> 20);
-
-       if (p_flow_lbl)
-               *p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF ));
-}
-
-static ib_api_status_t
-__to_ah (
-       IN                              ib_ca_attr_t                            *p_ca_attr,
-       IN              const   ib_av_attr_t                            *p_av_attr,
-               OUT                     struct ibv_ah_attr                      *p_attr )
-{
-       if (p_av_attr->port_num == 0 || 
-               p_av_attr->port_num > p_ca_attr->num_ports) {
-               MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_AV ,
-                       (" invalid port number specified (%d)\n",p_av_attr->port_num));
-               return IB_INVALID_PORT;
-       }
-
-       p_attr->port_num = p_av_attr->port_num;
-       p_attr->sl = p_av_attr->sl;
-       p_attr->dlid = cl_ntoh16 (p_av_attr->dlid);
-       p_attr->static_rate = __to_rate(p_av_attr->static_rate);
-       p_attr->src_path_bits = p_av_attr->path_bits;
-                       
-       /* For global destination or Multicast address:*/
-       if (p_av_attr->grh_valid)
-       {
-               p_attr->is_global               = TRUE;
-               p_attr->grh.hop_limit   = p_av_attr->grh.hop_limit;
-               __grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL,
-                                                               &p_attr->grh.traffic_class, &p_attr->grh.flow_label );
-               p_attr->grh.sgid_index  = __gid_to_index_lookup (p_ca_attr, p_av_attr->port_num,
-                                                                                                       (uint8_t *) p_av_attr->grh.src_gid.raw); 
-               cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, 16);
-       }
-       else
-       {
-               p_attr->is_global = FALSE;
-       }
-       return IB_SUCCESS;
-} 
-
-static void
-__set_av_params(struct mlx4_ah *ah, struct ibv_pd *pd, struct ibv_ah_attr *attr)
-{
-       ah->av.port_pd = cl_hton32(to_mpd(pd)->pdn | (attr->port_num << 24));
-       ah->av.g_slid  = attr->src_path_bits;
-       ah->av.dlid    = cl_hton16(attr->dlid);
-       if (attr->static_rate) {
-               ah->av.stat_rate = (uint8_t)(attr->static_rate + MLX4_STAT_RATE_OFFSET);
-               /* XXX check rate cap? */
-       }
-       ah->av.sl_tclass_flowlabel = cl_hton32(attr->sl << 28);
-       if (attr->is_global)
-       {
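-		/* bit 7 of g_slid marks a global (GRH-carrying) address; the
-		 * low bits still hold the source path bits set above. */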
-               ah->av.g_slid |= 0x80;
-               ah->av.gid_index = attr->grh.sgid_index;
-               ah->av.hop_limit = attr->grh.hop_limit;
-               ah->av.sl_tclass_flowlabel |=
-                       cl_hton32((attr->grh.traffic_class << 20) |
-                                   attr->grh.flow_label);
-               cl_memcpy(ah->av.dgid, attr->grh.dgid.raw, 16);
-       }
-}
-
-ib_api_status_t
-mlx4_pre_create_ah (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN              const   ib_av_attr_t                            *p_av_attr,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_av_handle_t                  *ph_uvp_av )
-{
-       struct mlx4_ah *ah;
-       struct ibv_ah_attr attr;
-       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
-       ib_api_status_t status = IB_SUCCESS;
-       
-       UNREFERENCED_PARAMETER(p_umv_buf);
-
-       if (pd->context->p_hca_attr == NULL) {
-               status = IB_ERROR;
-               goto end;
-       }
-
-	// sanity check the port before allocating anything
-	if (p_av_attr->port_num == 0 ||
-		p_av_attr->port_num > pd->context->p_hca_attr->num_ports)
-	{
-		status = IB_INVALID_PORT;
-		goto end;
-	}
-
-	ah = cl_malloc(sizeof *ah);
-	if (!ah) {
-		status = IB_INSUFFICIENT_MEMORY;
-		goto end;
-	}
-
-	// convert parameters 
-	cl_memset(&attr, 0, sizeof(attr));
-	status = __to_ah(pd->context->p_hca_attr, p_av_attr, &attr);
-	if (status)
-	{
-		cl_free(ah);
-		goto end;
-	}
-
-       ah->ibv_ah.pd = pd;
-       ah->ibv_ah.context = pd->context;
-       cl_memcpy(&ah->ibv_ah.av_attr, p_av_attr, sizeof (ib_av_attr_t));
-
-       cl_memset(&ah->av, 0, sizeof ah->av);
-       __set_av_params(ah, pd, &attr);
-
-       *ph_uvp_av = (ib_av_handle_t)&ah->ibv_ah;
-       status = IB_VERBS_PROCESSING_DONE;
-
-end:
-       return status;
-}
-
-ib_api_status_t
-mlx4_pre_query_ah (
-       IN              const   ib_av_handle_t                  h_uvp_av,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       UNREFERENCED_PARAMETER(h_uvp_av);
-       UNREFERENCED_PARAMETER(p_umv_buf);
-       
-       return IB_VERBS_PROCESSING_DONE;
-}
-
-void
-mlx4_post_query_ah (
-       IN              const   ib_av_handle_t                  h_uvp_av,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN      OUT                     ib_av_attr_t                            *p_addr_vector,
-       IN      OUT                     ib_pd_handle_t                  *ph_pd,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;
-
-       UNREFERENCED_PARAMETER(p_umv_buf);
-
-       CL_ASSERT(h_uvp_av && p_addr_vector);
-
-       if (ioctl_status == IB_SUCCESS)
-       {
-               cl_memcpy(p_addr_vector, &ah->av_attr, sizeof(ib_av_attr_t));
-               if (ph_pd)
-                       *ph_pd = (ib_pd_handle_t)ah->pd;
-       }
-}
-
-ib_api_status_t
-mlx4_pre_modify_ah (
-       IN              const   ib_av_handle_t                  h_uvp_av,
-       IN              const   ib_av_attr_t                            *p_addr_vector,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;
-       struct ibv_ah_attr attr;
-       ib_api_status_t status;
-
-       UNREFERENCED_PARAMETER(p_umv_buf);
-       
-       CL_ASSERT (h_uvp_av);
-
-       status = __to_ah(ah->context->p_hca_attr, p_addr_vector, &attr);
-       if (status)
-               return status;
-
-       __set_av_params(to_mah(ah), ah->pd, &attr);
-       cl_memcpy(&ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t));
-       
-       return IB_VERBS_PROCESSING_DONE;
-}
-
-ib_api_status_t
-mlx4_pre_destroy_ah (
-       IN              const   ib_av_handle_t                  h_uvp_av )
-{
-       struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;
-       
-       CL_ASSERT(ah);
-       
-       cl_free(to_mah(ah));
-       
-       return IB_VERBS_PROCESSING_DONE;
-}
-
-#ifdef XRC_SUPPORT
-ib_api_status_t  
-mlx4_pre_create_xrc_srq (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN              const   ib_xrcd_handle_t                        h_uvp_xrcd,
-       IN              const   ib_srq_attr_t                           *p_srq_attr,
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_srq_handle_t                 *ph_uvp_srq )
-{
-       struct mlx4_srq *srq;
-       struct ibv_create_srq *p_create_srq;
-       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
-       struct ibv_xrc_domain *xrc_domain = (struct ibv_xrc_domain *)h_uvp_xrcd;
-       ib_api_status_t status = IB_SUCCESS;
-       size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );
-
-       CL_ASSERT(p_umv_buf);
-
-       if( !p_umv_buf->p_inout_buf )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc( size ); 
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_memory;
-               }
-       }
-       p_umv_buf->input_size = sizeof(struct ibv_create_srq);
-       p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
-       p_umv_buf->command = TRUE;
-
-       p_create_srq = p_umv_buf->p_inout_buf;
-       
-       // Mlx4 code:
-
-       /* Sanity check SRQ size before proceeding */
-       if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)
-       {
-               status = IB_INVALID_PARAMETER;
-               goto err_params;
-       }
-
-       srq = cl_malloc(sizeof *srq);
-       if (!srq) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_alloc_srq;
-       }
-
-       if (cl_spinlock_init(&srq->lock)) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_lock;
-       }
-
-       srq->ibv_srq.pd                 = pd;
-       srq->ibv_srq.context    = pd->context;
-       
-       srq->max        = __align_queue_size(p_srq_attr->max_wr + 1);
-       srq->max_gs  = p_srq_attr->max_sge;
-       srq->counter    = 0;
-
-       if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))
-       {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_alloc_buf;
-       }
-
-       srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
-	if (!srq->db)
-	{
-		status = IB_INSUFFICIENT_MEMORY;
-		goto err_alloc_db;
-	}
-
-       *srq->db = 0;
-       
-       // fill the parameters for ioctl
-       p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;
-       p_create_srq->db_addr  = (uintptr_t) srq->db;
-       p_create_srq->pd_handle = pd->handle;
-       p_create_srq->max_wr = p_srq_attr->max_wr;
-       p_create_srq->max_sge = p_srq_attr->max_sge;
-       p_create_srq->srq_limit = p_srq_attr->srq_limit;
-
-       *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;
-       goto end;
-
-err_alloc_db:
-       cl_free(srq->wrid);
-       mlx4_free_buf(&srq->buf);
-err_alloc_buf:
-       cl_spinlock_destroy(&srq->lock);
-err_lock:
-       cl_free(srq);
-err_alloc_srq:
-       cl_free(p_umv_buf->p_inout_buf);
-err_params:
-err_memory:
-end:
-       return status;
-}
-
-ib_api_status_t  
-mlx4_post_create_xrc_srq (
-       IN              const   ib_pd_handle_t                  h_uvp_pd,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN      OUT                     ib_srq_handle_t                 *ph_uvp_srq,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct mlx4_srq *srq = (struct mlx4_srq *)*ph_uvp_srq;
-       struct ibv_create_srq_resp *p_resp;
-       ib_api_status_t status = IB_SUCCESS;
-
-       UNREFERENCED_PARAMETER(h_uvp_pd);
-       
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-       
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               // Mlx4 code:
-
-               srq->ibv_srq.xrc_srq_num        = srq->srqn = p_resp->srqn;
-               srq->ibv_srq.handle             = p_resp->srq_handle;
-
-               srq->max                = p_resp->max_wr;
-               srq->max_gs     = p_resp->max_sge;
-               
-		if (mlx4_store_xrc_srq(to_mctx(srq->ibv_srq.context), srq->ibv_srq.xrc_srq_num, srq))
-               {
-                       mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS);
-                       status = IB_INSUFFICIENT_MEMORY;
-               }       
-       }
-       else
-       {
-               mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS);
-       }
-
-       cl_free( p_resp );
-       return status;
-}
-
-ib_api_status_t
-mlx4_pre_open_xrc_domain (
-       IN              const   ib_ca_handle_t                  h_uvp_ca,
-       IN              const   uint32_t                                        oflag,
-       IN      OUT             ci_umv_buf_t                            *p_umv_buf,
-               OUT                     ib_xrcd_handle_t                        *ph_uvp_xrcd )
-{
-       struct mlx4_xrc_domain *xrcd;
-       struct ibv_context * context = (struct ibv_context *)h_uvp_ca;
-       struct ibv_open_xrc_domain      *p_open_xrcd;
-       ib_api_status_t status = IB_SUCCESS;
-       int size = max( sizeof(struct ibv_open_xrc_domain), sizeof(struct ibv_open_xrc_domain_resp) );
-
-       CL_ASSERT(h_uvp_ca && p_umv_buf);
-
-       if( !p_umv_buf->p_inout_buf )
-       {
-               p_umv_buf->p_inout_buf = cl_malloc( size );
-               if( !p_umv_buf->p_inout_buf )
-               {
-                       status = IB_INSUFFICIENT_MEMORY;
-                       goto err_umv_buf;
-               }
-       }
-       p_umv_buf->input_size = sizeof(struct ibv_open_xrc_domain);
-       p_umv_buf->output_size = sizeof(struct ibv_open_xrc_domain_resp);
-       p_umv_buf->command = TRUE;
-
-       p_open_xrcd = p_umv_buf->p_inout_buf;
-
-       // Mlx4 code:
-
-       xrcd = cl_malloc(sizeof *xrcd);
-       if (!xrcd) {
-               status = IB_INSUFFICIENT_MEMORY;
-               goto err_xrc;
-       }
-
-       xrcd->ibv_xrcd.context = context;
-       
-       p_open_xrcd->oflags = oflag;
-
-       *ph_uvp_xrcd = (struct ibv_xrc_domain *)&xrcd->ibv_xrcd;
-       goto end;
-
-err_xrc:
-       cl_free(p_umv_buf->p_inout_buf);
-err_umv_buf:
-end:
-       return status;
-}
-
-void
-mlx4_post_open_xrc_domain (
-       IN              const   ib_ca_handle_t                  h_uvp_ca,
-       IN                              ib_api_status_t                 ioctl_status,
-       IN      OUT                     ib_xrcd_handle_t                        *ph_uvp_xrcd,
-       IN                              ci_umv_buf_t                            *p_umv_buf )
-{
-       struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)*ph_uvp_xrcd;
-       struct ibv_open_xrc_domain_resp *p_resp;
-
-       UNREFERENCED_PARAMETER(h_uvp_ca);
-       
-       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);
-
-       p_resp = p_umv_buf->p_inout_buf;
-
-       if (IB_SUCCESS == ioctl_status)
-       {
-               // Mlx4 code:
-               
-               xrcd->handle = p_resp->xrcd_handle;
-               to_mxrcd(xrcd)->xrcdn = p_resp->xrcdn;
-       }
-       else
-       {
-               cl_free(to_mxrcd(xrcd));
-       }
-       
-       cl_free(p_resp);
-       return;
-}
-
-void
-mlx4_post_close_xrc_domain (
-       IN              const   ib_xrcd_handle_t                        h_uvp_xrcd,
-       IN                              ib_api_status_t                 ioctl_status )
-{
-	struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)h_uvp_xrcd;
-
-	CL_ASSERT(xrcd);
-
-	if (IB_SUCCESS == ioctl_status) {
-		cl_free(to_mxrcd(xrcd));
-	}
-}
-#endif
+/*\r
+ * Copyright (c) 2007 Cisco, Inc.  All rights reserved.\r
+ *\r
+ * This software is available to you under a choice of one of two\r
+ * licenses.  You may choose to be licensed under the terms of the GNU\r
+ * General Public License (GPL) Version 2, available from the file\r
+ * COPYING in the main directory of this source tree, or the\r
+ * OpenIB.org BSD license below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ */\r
+\r
+#include "mlx4.h"\r
+#include "verbs.h"\r
+#include "mx_abi.h"\r
+#include "wqe.h"\r
+#include "mlx4_debug.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#include "verbs.tmh"\r
+#endif\r
+\r
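+/*\r
+ * Note on structure: each verb below is split into a pre_ call, which\r
+ * allocates the user-mode object and fills the private ci_umv_buf_t\r
+ * exchanged with the kernel ioctl, and a post_ call, which completes the\r
+ * object from the ioctl response on success or rolls the allocation back\r
+ * on failure.\r
+ */\r
+\r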
+ib_api_status_t\r
+mlx4_pre_open_ca (\r
+       IN              const   ib_net64_t                              ca_guid,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_ca_handle_t                  *ph_uvp_ca )\r
+{\r
+       struct ibv_context      *context;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+\r
+       UNREFERENCED_PARAMETER(ca_guid);\r
+\r
+       context = mlx4_alloc_context();\r
+       if (!context) {\r
+               status = IB_INSUFFICIENT_MEMORY;                \r
+               goto end;\r
+       }\r
+       \r
+       if( p_umv_buf )\r
+       {\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) );\r
+                       if( !p_umv_buf->p_inout_buf )\r
+                       {\r
+                               status = IB_INSUFFICIENT_MEMORY;\r
+                               goto end;\r
+                       }\r
+               }\r
+               p_umv_buf->input_size = 0;\r
+               p_umv_buf->output_size = sizeof(struct ibv_get_context_resp);\r
+               p_umv_buf->command = TRUE;\r
+       }\r
+\r
+       *ph_uvp_ca = (ib_ca_handle_t)context;\r
+\r
+end:   \r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_post_open_ca (\r
+       IN              const   ib_net64_t                              ca_guid,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN      OUT                     ib_ca_handle_t                  *ph_uvp_ca,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_get_context_resp *p_resp;\r
+       struct ibv_context *context = (struct ibv_context *)*ph_uvp_ca;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+\r
+       UNREFERENCED_PARAMETER(ca_guid);\r
+\r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+       \r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               if (!mlx4_fill_context(context, p_resp))\r
+               {\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+                       goto end;\r
+               }\r
+       }\r
+\r
+end:\r
+       cl_free(p_resp);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_query_ca (\r
+       IN                              ib_ca_handle_t                  h_uvp_ca,\r
+       IN                              ib_ca_attr_t                            *p_ca_attr,\r
+       IN                              size_t                                  byte_count,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       ib_api_status_t status = IB_SUCCESS;\r
+\r
+       UNREFERENCED_PARAMETER(h_uvp_ca);\r
+\r
+	/* Note that query_ca calls *always* get their attributes from the kernel.\r
+	 *\r
+	 * Assume that if the user buffer is valid then byte_count is valid too,\r
+	 * so we can preallocate the CA attribute buffer here and fill it with\r
+	 * the post-ioctl data.\r
+	 *\r
+	 * Note that we squirrel the buffer away in the umv_buf and only\r
+	 * set it in the HCA context if the query succeeds.\r
+	 */\r
+       if ( p_ca_attr != NULL )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc(byte_count);\r
+               if ( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+                       goto end;\r
+               }\r
+       }\r
+\r
+end:\r
+       return status;\r
+}\r
+\r
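+/*\r
+ * The CA attributes arrive as one contiguous buffer whose embedded\r
+ * pointers still refer into the source buffer, so shift each one by\r
+ * (p_dest - p_src). Illustrative example: with p_src == 0x1000,\r
+ * p_dest == 0x5000 and p_src->p_page_size == 0x1040, the fixed-up\r
+ * pointer becomes 0x5040.\r
+ */\r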
+void\r
+__fixup_ca_attr(\r
+       IN                              ib_ca_attr_t* const                     p_dest,\r
+       IN              const   ib_ca_attr_t* const                     p_src )\r
+{\r
+       uint8_t         i;\r
+       uintn_t         offset = (uintn_t)p_dest - (uintn_t)p_src;\r
+       ib_port_attr_t                  *p_tmp_port_attr = NULL;\r
+\r
+       CL_ASSERT( p_dest );\r
+       CL_ASSERT( p_src );\r
+\r
+       /* Fix up the pointers to point within the destination buffer. */\r
+       p_dest->p_page_size =\r
+               (uint32_t*)(((uint8_t*)p_dest->p_page_size) + offset);\r
+\r
+       p_tmp_port_attr =\r
+               (ib_port_attr_t*)(((uint8_t*)p_dest->p_port_attr) + offset);\r
+\r
+       /* Fix up each port attribute's gid and pkey table pointers. */\r
+       for( i = 0; i < p_dest->num_ports; i++ )\r
+       {\r
+               p_tmp_port_attr[i].p_gid_table = (ib_gid_t*)\r
+                       (((uint8_t*)p_tmp_port_attr[i].p_gid_table) + offset);\r
+\r
+               p_tmp_port_attr[i].p_pkey_table =(ib_net16_t*)\r
+                       (((uint8_t*)p_tmp_port_attr[i].p_pkey_table) + offset);\r
+       }\r
+       p_dest->p_port_attr = p_tmp_port_attr;\r
+}\r
+\r
+void\r
+mlx4_post_query_ca (\r
+       IN                              ib_ca_handle_t                  h_uvp_ca,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN                              ib_ca_attr_t                            *p_ca_attr,\r
+       IN                              size_t                                  byte_count,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_context *context = (struct ibv_context *)h_uvp_ca;\r
+       \r
+       CL_ASSERT(context && p_umv_buf);\r
+\r
+       if ( ioctl_status == IB_SUCCESS && p_ca_attr && byte_count)\r
+       {\r
+               CL_ASSERT( byte_count >= p_ca_attr->size );\r
+\r
+               pthread_mutex_lock(&context->mutex);\r
+\r
+               if (context->p_hca_attr)\r
+                       cl_free(context->p_hca_attr);\r
+               context->p_hca_attr = p_umv_buf->p_inout_buf;\r
+               cl_memcpy( context->p_hca_attr, p_ca_attr, p_ca_attr->size );\r
+               __fixup_ca_attr( context->p_hca_attr, p_ca_attr );\r
+               \r
+               pthread_mutex_unlock(&context->mutex);\r
+       }\r
+       else if (p_umv_buf->p_inout_buf) \r
+       {\r
+               cl_free(p_umv_buf->p_inout_buf);\r
+       }\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_post_close_ca (\r
+       IN                      ib_ca_handle_t                          h_uvp_ca,\r
+       IN                      ib_api_status_t                         ioctl_status )\r
+{\r
+       struct ibv_context *context = (struct ibv_context *)h_uvp_ca;\r
+\r
+       CL_ASSERT(context);\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+               mlx4_free_context(context);\r
+\r
+       return IB_SUCCESS;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_alloc_pd (\r
+       IN              const   ib_ca_handle_t                  h_uvp_ca,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_pd_handle_t                  *ph_uvp_pd )\r
+{\r
+       struct mlx4_pd *pd;\r
+       struct ibv_context *context = (struct ibv_context *)h_uvp_ca;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+\r
+       CL_ASSERT(context && p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc( sizeof(struct ibv_alloc_pd_resp) );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto end;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = 0;\r
+       p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       // Mlx4 code:\r
+\r
+       pd = cl_malloc(sizeof *pd);\r
+       if (!pd) {\r
+               status = IB_INSUFFICIENT_MEMORY;                \r
+               goto end;\r
+       }\r
+\r
+       pd->ibv_pd.context = context;\r
+\r
+       *ph_uvp_pd = (ib_pd_handle_t)&pd->ibv_pd;\r
+       \r
+end:\r
+       return status;\r
+}\r
+\r
+void\r
+mlx4_post_alloc_pd (\r
+       IN                              ib_ca_handle_t                  h_uvp_ca,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN      OUT                     ib_pd_handle_t                  *ph_uvp_pd,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_pd                   *pd = (struct ibv_pd *)*ph_uvp_pd;\r
+       struct ibv_alloc_pd_resp        *p_resp;\r
+\r
+\r
+       UNREFERENCED_PARAMETER(h_uvp_ca);\r
+       \r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+\r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               // Mlx4 code:\r
+               \r
+               pd->handle = p_resp->pd_handle;\r
+               to_mpd(pd)->pdn = p_resp->pdn;\r
+       }\r
+       else\r
+       {\r
+               cl_free(to_mpd(pd));\r
+       }\r
+       \r
+       cl_free(p_resp);\r
+       return;\r
+}\r
+\r
+void\r
+mlx4_post_free_pd (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN                              ib_api_status_t                 ioctl_status )\r
+{\r
+       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;\r
+\r
+       CL_ASSERT(pd);\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+               cl_free(to_mpd(pd));\r
+}\r
+\r
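+/*\r
+ * Round a requested queue depth up to the next power of two (e.g. a\r
+ * request of 5 yields 8); the CQ, SRQ and QP rings below are sized on\r
+ * power-of-two boundaries.\r
+ */\r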
+static int __align_queue_size(int req)\r
+{\r
+       int nent;\r
+\r
+       for (nent = 1; nent < req; nent <<= 1)\r
+               ; /* nothing */\r
+\r
+       return nent;\r
+}\r
+\r
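+/*\r
+ * Pre-ioctl step of CQ creation: size-check the request, build the\r
+ * user-mode CQ (ring buffer, doorbell records, lock) and fill the\r
+ * ibv_create_cq request for the kernel.\r
+ */\r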
+ib_api_status_t\r
+mlx4_pre_create_cq (\r
+       IN              const   ib_ca_handle_t                  h_uvp_ca,\r
+       IN      OUT             uint32_t* const                 p_size,\r
+       IN      OUT             ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_cq_handle_t                  *ph_uvp_cq )\r
+{\r
+       struct mlx4_cq          *cq;\r
+       struct ibv_create_cq    *p_create_cq;\r
+       struct ibv_context              *context = (struct ibv_context *)h_uvp_ca;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       int size = max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) );\r
+\r
+       CL_ASSERT(h_uvp_ca && p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc( size );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_umv_buf;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = sizeof(struct ibv_create_cq);\r
+       p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       p_create_cq = p_umv_buf->p_inout_buf;\r
+\r
+       // Mlx4 code:\r
+       \r
+       /* Sanity check CQ size before proceeding */\r
+       if (*p_size > 0x3fffff) {\r
+               status = IB_INVALID_CQ_SIZE;\r
+               goto err_cqe_size;\r
+       }\r
+\r
+       cq = cl_malloc(sizeof *cq);\r
+       if (!cq) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_cq;\r
+       }\r
+\r
+       if (cl_spinlock_init(&cq->lock)) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_lock;\r
+       }\r
+\r
+       *p_size = __align_queue_size(*p_size + 1);\r
+\r
+       if (mlx4_alloc_buf(&cq->buf, *p_size * MLX4_CQ_ENTRY_SIZE, \r
+                                               context->page_size)) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_buf;\r
+       }\r
+\r
+       cq->ibv_cq.context = context;\r
+       cq->cons_index = 0;\r
+               \r
+       cq->set_ci_db  = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);\r
+       if (!cq->set_ci_db) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_db;\r
+       }\r
+\r
+       cq->arm_db = cq->set_ci_db + 1;\r
+       *cq->arm_db = 0;\r
+       cq->arm_sn = 1;\r
+       *cq->set_ci_db = 0;\r
+\r
+       p_create_cq->buf_addr = (uintptr_t) cq->buf.buf;\r
+       p_create_cq->db_addr  = (uintptr_t) cq->set_ci_db;\r
+       p_create_cq->arm_sn_addr  = (uintptr_t) &cq->arm_sn;\r
+       p_create_cq->cqe = --(*p_size);\r
+\r
+       *ph_uvp_cq = (ib_cq_handle_t)&cq->ibv_cq;\r
+       goto end;\r
+\r
+err_alloc_db:\r
+       mlx4_free_buf(&cq->buf);\r
+err_alloc_buf:\r
+       cl_spinlock_destroy(&cq->lock);\r
+err_lock:\r
+       cl_free(cq);\r
+err_cq:\r
+err_cqe_size:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_umv_buf:\r
+end:\r
+       return status;\r
+}\r
+\r
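+/*\r
+ * Post-ioctl step of CQ creation: on success take the CQ number and\r
+ * handle from the kernel response; on failure release everything the\r
+ * pre step allocated.\r
+ */\r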
+void\r
+mlx4_post_create_cq (\r
+       IN              const   ib_ca_handle_t                  h_uvp_ca,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN              const   uint32_t                                        size,\r
+       IN      OUT                     ib_cq_handle_t                  *ph_uvp_cq,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_cq                           *cq = (struct ibv_cq *)*ph_uvp_cq;\r
+       struct ibv_create_cq_resp       *p_resp;\r
+\r
+       UNREFERENCED_PARAMETER(h_uvp_ca);\r
+       UNREFERENCED_PARAMETER(size);\r
+       \r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+\r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               // Mlx4 code:\r
+               \r
+               to_mcq(cq)->cqn = p_resp->cqn;\r
+               cq->cqe                 = p_resp->cqe;\r
+               cq->handle              = p_resp->cq_handle;\r
+       }\r
+       else\r
+       {\r
+               mlx4_post_destroy_cq (*ph_uvp_cq, IB_SUCCESS);\r
+       }\r
+       \r
+       cl_free(p_resp);\r
+       return;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_query_cq (\r
+       IN              const   ib_cq_handle_t                  h_uvp_cq,\r
+               OUT                     uint32_t* const                 p_size,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;\r
+\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       \r
+       *p_size = cq->cqe;\r
+\r
+       return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
+void\r
+mlx4_post_destroy_cq (\r
+       IN              const   ib_cq_handle_t                  h_uvp_cq,\r
+       IN                              ib_api_status_t                 ioctl_status )\r
+{\r
+       struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;\r
+\r
+       CL_ASSERT(cq);\r
+\r
+       if (IB_SUCCESS == ioctl_status) {\r
+               mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);\r
+               mlx4_free_buf(&to_mcq(cq)->buf);\r
+\r
+               cl_spinlock_destroy(&to_mcq(cq)->lock);\r
+               cl_free(to_mcq(cq));\r
+       }\r
+}\r
+\r
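+/*\r
+ * Pre-ioctl step of SRQ creation: same pattern as the CQ path above -\r
+ * validate the requested sizes, build the user-mode SRQ and fill the\r
+ * ibv_create_srq request for the kernel.\r
+ */\r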
+ib_api_status_t  \r
+mlx4_pre_create_srq (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN              const   ib_srq_attr_t                           *p_srq_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_srq_handle_t                 *ph_uvp_srq )\r
+{\r
+       struct mlx4_srq *srq;\r
+       struct ibv_create_srq *p_create_srq;\r
+       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc( size ); \r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = sizeof(struct ibv_create_srq);\r
+       p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       p_create_srq = p_umv_buf->p_inout_buf;\r
+       \r
+       // Mlx4 code:\r
+\r
+       /* Sanity check SRQ size before proceeding */\r
+       if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)\r
+       {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_params;\r
+       }\r
+\r
+       srq = cl_malloc(sizeof *srq);\r
+       if (!srq) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_srq;\r
+       }\r
+\r
+       if (cl_spinlock_init(&srq->lock)) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_lock;\r
+       }\r
+\r
+       srq->ibv_srq.pd                 = pd;\r
+       srq->ibv_srq.context    = pd->context;\r
+       \r
+       srq->max        = __align_queue_size(p_srq_attr->max_wr + 1);\r
+       srq->max_gs  = p_srq_attr->max_sge;\r
+       srq->counter    = 0;\r
+\r
+       if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))\r
+       {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_buf;\r
+       }\r
+\r
+       srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);\r
+       if (!srq->db) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_db;\r
+       }\r
+\r
+       *srq->db = 0;\r
+       \r
+       // fill the parameters for ioctl\r
+       p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;\r
+       p_create_srq->db_addr  = (uintptr_t) srq->db;\r
+       p_create_srq->pd_handle = pd->handle;\r
+       p_create_srq->max_wr = p_srq_attr->max_wr;\r
+       p_create_srq->max_sge = p_srq_attr->max_sge;\r
+       p_create_srq->srq_limit = p_srq_attr->srq_limit;\r
+\r
+       *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;\r
+       goto end;\r
+\r
+err_alloc_db:\r
+       cl_free(srq->wrid);\r
+       mlx4_free_buf(&srq->buf);\r
+err_alloc_buf:\r
+       cl_spinlock_destroy(&srq->lock);\r
+err_lock:\r
+       cl_free(srq);\r
+err_alloc_srq:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_params:\r
+err_memory:\r
+end:\r
+       return status;\r
+}\r
+\r
+void\r
+mlx4_post_create_srq (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN      OUT                     ib_srq_handle_t                 *ph_uvp_srq,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_srq *ibsrq = (struct ibv_srq *)*ph_uvp_srq;\r
+       struct mlx4_srq *srq = to_msrq(ibsrq);\r
+       struct ibv_create_srq_resp *p_resp;\r
+\r
+       UNREFERENCED_PARAMETER(h_uvp_pd);\r
+       \r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+       \r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               // Mlx4 code:\r
+\r
+               srq->srqn       = p_resp->srqn;\r
+               ibsrq->handle   = p_resp->srq_handle;\r
+               \r
+               srq->max                = p_resp->max_wr;\r
+               srq->max_gs     = p_resp->max_sge;\r
+       }\r
+       else\r
+       {\r
+               mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS);\r
+       }\r
+\r
+       cl_free(p_resp);\r
+       return;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_destroy_srq (\r
+       IN              const   ib_srq_handle_t                 h_uvp_srq )\r
+{\r
+#ifdef XRC_SUPPORT\r
+       struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq;\r
+       struct mlx4_srq *srq = to_msrq(ibsrq);\r
+       struct mlx4_cq *mcq = NULL;\r
+       \r
+       if (ibsrq->xrc_cq)\r
+       {\r
+               /* is an xrc_srq */\r
+               mcq = to_mcq(ibsrq->xrc_cq);\r
+               mlx4_cq_clean(mcq, 0, srq);\r
+               cl_spinlock_acquire(&mcq->lock);\r
+               mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn);\r
+               cl_spinlock_release(&mcq->lock);\r
+       }\r
+#else\r
+       UNUSED_PARAM(h_uvp_srq);\r
+#endif \r
+       return IB_SUCCESS;\r
+}\r
+\r
+void\r
+mlx4_post_destroy_srq (\r
+       IN              const   ib_srq_handle_t                 h_uvp_srq,\r
+       IN                              ib_api_status_t                 ioctl_status )\r
+{\r
+       struct ibv_srq          *ibsrq = (struct ibv_srq *)h_uvp_srq;\r
+       struct mlx4_srq *srq = to_msrq(ibsrq);\r
+       \r
+       CL_ASSERT(srq);\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db);\r
+               cl_free(srq->wrid);\r
+               mlx4_free_buf(&srq->buf);\r
+               cl_spinlock_destroy(&srq->lock);\r
+               cl_free(srq);\r
+       }\r
+       else\r
+       {\r
+#ifdef XRC_SUPPORT\r
+               if (ibsrq->xrc_cq) {\r
+                       /* is an xrc_srq */\r
+                       struct mlx4_cq  *mcq = to_mcq(ibsrq->xrc_cq);\r
+                       cl_spinlock_acquire(&mcq->lock);\r
+                       mlx4_store_xrc_srq(to_mctx(ibsrq->context), srq->srqn, srq);\r
+                       cl_spinlock_release(&mcq->lock);\r
+               }\r
+#endif         \r
+       }\r
+}\r
+\r
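+/* Map IBAL QP types onto the libibverbs-style enum used by the mlx4 code. */\r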
+static enum ibv_qp_type\r
+__to_qp_type(ib_qp_type_t type)\r
+{\r
+       switch (type) {\r
+       case IB_QPT_RELIABLE_CONN: return IBV_QPT_RC;\r
+       case IB_QPT_UNRELIABLE_CONN: return IBV_QPT_UC;\r
+       case IB_QPT_UNRELIABLE_DGRM: return IBV_QPT_UD;\r
+#ifdef XRC_SUPPORT\r
+       //case IB_QPT_XRC_CONN: return IBV_QPT_XRC;\r
+#endif \r
+       default: return IBV_QPT_RC;\r
+       }\r
+}\r
+\r
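+/*\r
+ * Pre-ioctl step of QP creation: convert the IBAL create attributes to\r
+ * ibv_qp_init_attr form, size the work queues (including the 2 KB + 1\r
+ * WQE of SQ headroom the HW needs for prefetching), allocate the queue\r
+ * buffers and RQ doorbell, and fill the ibv_create_qp request.\r
+ */\r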
+ib_api_status_t\r
+mlx4_pre_create_qp (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN              const   ib_qp_create_t                  *p_create_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_qp_handle_t                  *ph_uvp_qp )\r
+{\r
+       struct ibv_pd                   *pd = (struct ibv_pd *)h_uvp_pd;\r
+       struct mlx4_context     *context = to_mctx(pd->context);\r
+       struct mlx4_qp          *qp;\r
+       struct ibv_create_qp    *p_create_qp;\r
+       struct ibv_qp_init_attr attr;\r
+       ib_api_status_t                 status = IB_SUCCESS;\r
+       int size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc(size);\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = sizeof(struct ibv_create_qp);\r
+       p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       p_create_qp = p_umv_buf->p_inout_buf;\r
+       \r
+       /* convert attributes */\r
+       memset( &attr, 0, sizeof(attr) );\r
+       attr.send_cq                            = (struct ibv_cq *)p_create_attr->h_sq_cq;\r
+       attr.recv_cq                            = (struct ibv_cq *)p_create_attr->h_rq_cq;\r
+       attr.srq                                        = (struct ibv_srq*)p_create_attr->h_srq;\r
+       attr.cap.max_send_wr            = p_create_attr->sq_depth;\r
+       attr.cap.max_recv_wr            = p_create_attr->rq_depth;\r
+       attr.cap.max_send_sge           = p_create_attr->sq_sge;\r
+       attr.cap.max_recv_sge           = p_create_attr->rq_sge;\r
+       attr.cap.max_inline_data        = p_create_attr->sq_max_inline;\r
+       attr.qp_type                            = __to_qp_type(p_create_attr->qp_type);\r
+       attr.sq_sig_all                         = p_create_attr->sq_signaled;\r
+\r
+       // Mlx4 code:\r
+       \r
+       /* Sanity check QP size before proceeding */\r
+       if (attr.cap.max_send_wr    > (uint32_t) context->max_qp_wr ||\r
+           attr.cap.max_recv_wr     > (uint32_t) context->max_qp_wr ||\r
+           attr.cap.max_send_sge   > (uint32_t) context->max_sge   ||\r
+           attr.cap.max_recv_sge   > (uint32_t) context->max_sge   ||\r
+           attr.cap.max_inline_data > 1024)\r
+       {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto end;\r
+       }\r
+\r
+       qp = cl_malloc(sizeof *qp);\r
+       if (!qp) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_qp;\r
+       }\r
+\r
+       mlx4_calc_sq_wqe_size(&attr.cap, attr.qp_type, qp);\r
+\r
+       /*\r
+        * We need to leave 2 KB + 1 WQE of headroom in the SQ to\r
+        * allow HW to prefetch.\r
+        */\r
+       qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;\r
+       qp->sq.wqe_cnt = __align_queue_size(attr.cap.max_send_wr + qp->sq_spare_wqes);\r
+       qp->rq.wqe_cnt = __align_queue_size(attr.cap.max_recv_wr);\r
+\r
+       if (attr.srq || attr.qp_type == IBV_QPT_XRC)\r
+               attr.cap.max_recv_wr = qp->rq.wqe_cnt = 0;\r
+       else \r
+       {\r
+               if (attr.cap.max_recv_sge < 1)\r
+                       attr.cap.max_recv_sge = 1;\r
+               if (attr.cap.max_recv_wr < 1)\r
+                       attr.cap.max_recv_wr = 1;\r
+       }\r
+\r
+       if (mlx4_alloc_qp_buf(pd, &attr.cap, attr.qp_type, qp)) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_qp_buff;\r
+       }\r
+\r
+       mlx4_init_qp_indices(qp);\r
+\r
+       if (cl_spinlock_init(&qp->sq.lock)) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_spinlock_sq;\r
+       }\r
+       if (cl_spinlock_init(&qp->rq.lock)) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_spinlock_rq;\r
+       }\r
+\r
+       // fill qp fields\r
+       if (!attr.srq && attr.qp_type != IBV_QPT_XRC) {\r
+               qp->db = mlx4_alloc_db(context, MLX4_DB_TYPE_RQ);\r
+               if (!qp->db) {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_db;\r
+               }\r
+\r
+               *qp->db = 0;\r
+       }\r
+       if (attr.sq_sig_all)\r
+               qp->sq_signal_bits = cl_hton32(MLX4_WQE_CTRL_CQ_UPDATE);\r
+       else\r
+               qp->sq_signal_bits = 0;\r
+\r
+       // fill the rest of qp fields\r
+       qp->ibv_qp.pd = pd;\r
+       qp->ibv_qp.context= pd->context;\r
+       qp->ibv_qp.send_cq = attr.send_cq;\r
+       qp->ibv_qp.recv_cq = attr.recv_cq;\r
+       qp->ibv_qp.srq = attr.srq;\r
+       qp->ibv_qp.state = IBV_QPS_RESET;\r
+       qp->ibv_qp.qp_type = attr.qp_type;\r
+\r
+       // fill request fields\r
+       p_create_qp->buf_addr = (uintptr_t) qp->buf.buf;\r
+       if (!attr.srq && attr.qp_type != IBV_QPT_XRC)\r
+               p_create_qp->db_addr = (uintptr_t) qp->db;\r
+       else\r
+               p_create_qp->db_addr = 0;\r
+\r
+       p_create_qp->pd_handle = pd->handle;\r
+       p_create_qp->send_cq_handle = attr.send_cq->handle;\r
+       p_create_qp->recv_cq_handle = attr.recv_cq->handle;\r
+       p_create_qp->srq_handle = attr.qp_type == IBV_QPT_XRC ?\r
+               (attr.xrc_domain ? attr.xrc_domain->handle : 0) :\r
+               (attr.srq ? attr.srq->handle : 0);\r
+\r
+       p_create_qp->max_send_wr = attr.cap.max_send_wr;\r
+       p_create_qp->max_recv_wr = attr.cap.max_recv_wr;\r
+       p_create_qp->max_send_sge = attr.cap.max_send_sge;\r
+       p_create_qp->max_recv_sge = attr.cap.max_recv_sge;\r
+       p_create_qp->max_inline_data = attr.cap.max_inline_data;\r
+       p_create_qp->sq_sig_all = (uint8_t)attr.sq_sig_all;\r
+       p_create_qp->qp_type = attr.qp_type;\r
+       p_create_qp->is_srq = (uint8_t)(attr.qp_type == IBV_QPT_XRC ?\r
+                                                                       !!attr.xrc_domain : !!attr.srq);\r
+\r
+       p_create_qp->log_sq_stride   = (uint8_t)qp->sq.wqe_shift;\r
+       for (p_create_qp->log_sq_bb_count = 0;\r
+            qp->sq.wqe_cnt > 1 << p_create_qp->log_sq_bb_count;\r
+            ++p_create_qp->log_sq_bb_count)\r
+               ; /* nothing */\r
+       p_create_qp->sq_no_prefetch = 0;\r
+\r
+       *ph_uvp_qp = (ib_qp_handle_t)&qp->ibv_qp;\r
+       goto end;\r
+\r
+err_db:\r
+       cl_spinlock_destroy(&qp->rq.lock);\r
+err_spinlock_rq:\r
+       cl_spinlock_destroy(&qp->sq.lock);\r
+err_spinlock_sq:\r
+       cl_free(qp->sq.wrid);\r
+       if (qp->rq.wqe_cnt)\r
+               cl_free(qp->rq.wrid);   /* match the cl_free used on the destroy path */\r
+       mlx4_free_buf(&qp->buf);\r
+err_alloc_qp_buff:\r
+       cl_free(qp);    \r
+err_alloc_qp:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_memory:\r
+end:\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_post_create_qp (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN      OUT             ib_qp_handle_t                  *ph_uvp_qp,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct mlx4_qp                  *qp = (struct mlx4_qp *)*ph_uvp_qp;\r
+       struct ibv_pd                   *pd = (struct ibv_pd *)h_uvp_pd;\r
+       struct ibv_context                      *context = pd->context;\r
+       struct ibv_create_qp_resp       *p_resp;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+               \r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+       \r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               // Mlx4 code:\r
+               \r
+               struct ibv_qp_cap       cap;\r
+               \r
+               cap.max_recv_sge                = p_resp->max_recv_sge;\r
+               cap.max_send_sge                = p_resp->max_send_sge;\r
+               cap.max_recv_wr         = p_resp->max_recv_wr;\r
+               cap.max_send_wr         = p_resp->max_send_wr;\r
+               cap.max_inline_data     = p_resp->max_inline_data;\r
+               \r
+               qp->ibv_qp.handle               = p_resp->qp_handle;\r
+               qp->ibv_qp.qp_num       = p_resp->qpn;\r
+               \r
+               qp->rq.wqe_cnt  = cap.max_recv_wr;\r
+               qp->rq.max_gs   = cap.max_recv_sge;\r
+\r
+               /* adjust rq maxima to not exceed reported device maxima */\r
+               cap.max_recv_wr = min((uint32_t) to_mctx(context)->max_qp_wr, cap.max_recv_wr);\r
+               cap.max_recv_sge = min((uint32_t) to_mctx(context)->max_sge, cap.max_recv_sge);\r
+\r
+               qp->rq.max_post = cap.max_recv_wr;\r
+               //qp->rq.max_gs = cap.max_recv_sge;  - RIB : add this ?\r
+               mlx4_set_sq_sizes(qp, &cap, qp->ibv_qp.qp_type);\r
+\r
+               qp->doorbell_qpn    = cl_hton32(qp->ibv_qp.qp_num << 8);\r
+\r
+               if (mlx4_store_qp(to_mctx(context), qp->ibv_qp.qp_num, qp))\r
+               {\r
+                       /* mlx4_post_destroy_qp() frees the QP; it must not be touched below */\r
+                       mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+               }\r
+               else\r
+               {\r
+                       MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, \r
+                               ("qpn %#x, buf %p, db_rec %p, sq %d:%d, rq %d:%d\n", \r
+                               qp->ibv_qp.qp_num, qp->buf.buf, qp->db,\r
+                               qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); \r
+               }\r
+       }\r
+       else\r
+       {\r
+               mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);\r
+       }\r
+\r
+       cl_free(p_resp);\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_modify_qp (\r
+       IN              const   ib_qp_handle_t                  h_uvp_qp,\r
+       IN              const   ib_qp_mod_t                             *p_modify_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       ib_api_status_t status = IB_SUCCESS;\r
+\r
+       UNREFERENCED_PARAMETER(h_uvp_qp);\r
+       UNREFERENCED_PARAMETER(p_modify_attr);\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc(sizeof(struct ibv_modify_qp_resp));\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = 0;\r
+       p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp);\r
+       p_umv_buf->command = TRUE;\r
+       \r
+err_memory:\r
+       return status;\r
+}\r
+\r
+void\r
+mlx4_post_query_qp (\r
+       IN                              ib_qp_handle_t                          h_uvp_qp,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+       IN      OUT                     ib_qp_attr_t                            *p_query_attr,\r
+       IN      OUT                     ci_umv_buf_t                                    *p_umv_buf )\r
+{\r
+       struct mlx4_qp *qp = (struct mlx4_qp *)h_uvp_qp;\r
+\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+\r
+       if(IB_SUCCESS == ioctl_status)\r
+       {\r
+               p_query_attr->sq_max_inline = qp->max_inline_data;\r
+               p_query_attr->sq_sge            = qp->sq.max_gs;\r
+               p_query_attr->sq_depth          = qp->sq.max_post;\r
+               p_query_attr->rq_sge            = qp->rq.max_gs;\r
+               p_query_attr->rq_depth          = qp->rq.max_post;\r
+       }\r
+}\r
+\r
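+/*\r
+ * Post-ioctl step of modify_qp: track the QP state reported by the\r
+ * kernel; on a transition into RESET, flush the QP's completions from\r
+ * both CQs, rewind the queue indices and clear the RQ doorbell.\r
+ */\r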
+void\r
+mlx4_post_modify_qp (\r
+       IN              const   ib_qp_handle_t                  h_uvp_qp,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_qp                           *qp = (struct ibv_qp *)h_uvp_qp;\r
+       struct ibv_modify_qp_resp       *p_resp;\r
+\r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+\r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status) \r
+       {\r
+               // Mlx4 code:\r
+               \r
+               if (qp->state == IBV_QPS_RESET &&\r
+                   p_resp->attr_mask & IBV_QP_STATE &&\r
+                   p_resp->qp_state == IBV_QPS_INIT)\r
+               {\r
+                       mlx4_qp_init_sq_ownership(to_mqp(qp));\r
+               }\r
+\r
+               if (p_resp->attr_mask & IBV_QP_STATE) {\r
+                       qp->state = p_resp->qp_state;\r
+               }\r
+\r
+               if (p_resp->attr_mask & IBV_QP_STATE &&\r
+                   p_resp->qp_state == IBV_QPS_RESET)\r
+               {\r
+                       mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,\r
+                                               qp->srq ? to_msrq(qp->srq) : NULL);\r
+                       if (qp->send_cq != qp->recv_cq)\r
+                               mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);\r
+\r
+                       mlx4_init_qp_indices(to_mqp(qp));\r
+                       if (!qp->srq && qp->qp_type != IBV_QPT_XRC)\r
+                               *to_mqp(qp)->db = 0;\r
+               }\r
+       }\r
+\r
+       cl_free (p_resp);\r
+       return;\r
+}\r
+\r
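+/*\r
+ * Take both CQ locks in a fixed order (lower CQN first) so two threads\r
+ * working on QPs that share CQs cannot deadlock; the unlock helper\r
+ * below releases them in the reverse order.\r
+ */\r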
+static void\r
+__mlx4_lock_cqs(struct ibv_qp *qp)\r
+{\r
+       struct mlx4_cq *send_cq = to_mcq(qp->send_cq);\r
+       struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);\r
+\r
+       if (send_cq == recv_cq)\r
+               cl_spinlock_acquire(&send_cq->lock);\r
+       else if (send_cq->cqn < recv_cq->cqn) {\r
+               cl_spinlock_acquire(&send_cq->lock);\r
+               cl_spinlock_acquire(&recv_cq->lock);\r
+       } else {\r
+               cl_spinlock_acquire(&recv_cq->lock);\r
+               cl_spinlock_acquire(&send_cq->lock);\r
+       }\r
+}\r
+\r
+static void\r
+__mlx4_unlock_cqs(struct ibv_qp *qp)\r
+{\r
+       struct mlx4_cq *send_cq = to_mcq(qp->send_cq);\r
+       struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);\r
+\r
+       if (send_cq == recv_cq)\r
+               cl_spinlock_release(&send_cq->lock);\r
+       else if (send_cq->cqn < recv_cq->cqn) {\r
+               cl_spinlock_release(&recv_cq->lock);\r
+               cl_spinlock_release(&send_cq->lock);\r
+       } else {\r
+               cl_spinlock_release(&send_cq->lock);\r
+               cl_spinlock_release(&recv_cq->lock);\r
+       }\r
+}\r
+\r
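+/*\r
+ * Pre-ioctl step of destroy_qp: flush the QP's completions and, under\r
+ * both CQ locks, unhook it from the context's QP table so completion\r
+ * processing can no longer resolve it.\r
+ */\r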
+ib_api_status_t\r
+mlx4_pre_destroy_qp (\r
+       IN              const   ib_qp_handle_t                  h_uvp_qp )\r
+{\r
+       struct ibv_qp *qp = (struct ibv_qp*)h_uvp_qp;\r
+\r
+       mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,\r
+                               qp->srq ? to_msrq(qp->srq) : NULL);\r
+       if (qp->send_cq != qp->recv_cq)\r
+               mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);\r
+\r
+       __mlx4_lock_cqs(qp);\r
+       mlx4_clear_qp(to_mctx(qp->context), qp->qp_num);\r
+       __mlx4_unlock_cqs(qp);\r
+\r
+       return IB_SUCCESS;\r
+}\r
+\r
+void\r
+mlx4_post_destroy_qp (\r
+       IN              const   ib_qp_handle_t                  h_uvp_qp,\r
+       IN                              ib_api_status_t                 ioctl_status )\r
+{\r
+       struct ibv_qp* ibqp = (struct ibv_qp *)h_uvp_qp;\r
+       struct mlx4_qp* qp = to_mqp(ibqp);\r
+       \r
+       CL_ASSERT(h_uvp_qp);\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC)\r
+                       mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);\r
+\r
+               cl_spinlock_destroy(&qp->sq.lock);\r
+               cl_spinlock_destroy(&qp->rq.lock);\r
+\r
+               MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP, \r
+                       ("qpn %#x, buf %p, sq %d:%d, rq %d:%d\n", qp->ibv_qp.qp_num, qp->buf.buf, \r
+                       qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail )); \r
+               cl_free(qp->sq.wrid);\r
+               if (qp->rq.wqe_cnt)\r
+                       cl_free(qp->rq.wrid);\r
+               mlx4_free_buf(&qp->buf);\r
+               cl_free(qp);\r
+       }\r
+       else\r
+       {\r
+               __mlx4_lock_cqs(ibqp);\r
+               mlx4_store_qp(to_mctx(ibqp->context), ibqp->qp_num, qp);\r
+               __mlx4_unlock_cqs(ibqp);                \r
+       }\r
+}\r
+\r
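+/*\r
+ * Hand the NetworkDirect path a pointer to the cached QP state word,\r
+ * presumably so the new state can be written back in place after a\r
+ * modify operation.\r
+ */\r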
+void\r
+mlx4_nd_modify_qp (\r
+       IN              const   ib_qp_handle_t                  h_uvp_qp,\r
+               OUT                     void**                                  pp_outbuf,\r
+               OUT                     DWORD*                                  p_size )\r
+{\r
+       struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;\r
+\r
+       *(uint32_t**)pp_outbuf = (uint32_t*)&ibv_qp->state;\r
+       *p_size = sizeof(ibv_qp->state);\r
+}\r
+\r
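+/* Map libibverbs QP states back to IBAL states for the ND query below. */\r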
+static ib_qp_state_t __from_qp_state(enum ibv_qp_state state)\r
+{\r
+       switch (state) {\r
+               case IBV_QPS_RESET: return IB_QPS_RESET;\r
+               case IBV_QPS_INIT: return IB_QPS_INIT;\r
+               case IBV_QPS_RTR: return IB_QPS_RTR;\r
+               case IBV_QPS_RTS: return IB_QPS_RTS;\r
+               case IBV_QPS_SQD: return IB_QPS_SQD;\r
+               case IBV_QPS_SQE: return IB_QPS_SQERR;\r
+               case IBV_QPS_ERR: return IB_QPS_ERROR;\r
+               default: return IB_QPS_TIME_WAIT;\r
+       }\r
+}\r
+\r
+uint32_t\r
+mlx4_nd_get_qp_state (\r
+       IN              const   ib_qp_handle_t                  h_uvp_qp )\r
+{\r
+       struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;\r
+\r
+       return __from_qp_state(ibv_qp->state);\r
+}\r
+\r
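+/*\r
+ * Linear scan of the given port's cached GID table for raw_gid; returns\r
+ * the matching index, falling back to 0 if no entry matches.\r
+ */\r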
+static uint8_t\r
+__gid_to_index_lookup (\r
+       IN                      ib_ca_attr_t                                    *p_ca_attr,\r
+       IN                      uint8_t                                         port_num,\r
+       IN                      uint8_t                                         *raw_gid )\r
+{\r
+       ib_gid_t *p_gid_table = NULL;\r
+       uint8_t i, index = 0;\r
+       uint16_t num_gids;\r
+\r
+       p_gid_table = p_ca_attr->p_port_attr[port_num-1].p_gid_table;\r
+       CL_ASSERT (p_gid_table);\r
+\r
+       num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids;\r
+\r
+       for (i = 0; i < num_gids; i++)\r
+       {\r
+               /* cl_memcmp() has memcmp() semantics: zero means the GIDs match */\r
+               if (!cl_memcmp (raw_gid, p_gid_table[i].raw, 16))\r
+               {\r
+                       index = i;\r
+                       break;\r
+               }\r
+       }\r
+       return index;\r
+}\r
+\r
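+/* Convert an IB path-record static rate code to the ibv_rate enum. */\r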
+static enum ibv_rate __to_rate(uint8_t rate)\r
+{\r
+       if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IBV_RATE_2_5_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_5_GBS) return IBV_RATE_5_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_10_GBS) return IBV_RATE_10_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_20_GBS) return IBV_RATE_20_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_30_GBS) return IBV_RATE_30_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_40_GBS) return IBV_RATE_40_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_60_GBS) return IBV_RATE_60_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_80_GBS) return IBV_RATE_80_GBPS;\r
+       if (rate == IB_PATH_RECORD_RATE_120_GBS) return IBV_RATE_120_GBPS;\r
+       return IBV_RATE_MAX;\r
+}\r
+\r
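+/*\r
+ * Unpack the GRH VersionClassFlow dword: IP version in bits 31-28,\r
+ * traffic class in bits 27-20, flow label in bits 19-0 (the flow label\r
+ * is returned still in network byte order).\r
+ */\r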
+static inline void\r
+__grh_get_ver_class_flow(\r
+       IN              const   ib_net32_t                                      ver_class_flow,\r
+               OUT                     uint8_t* const                          p_ver OPTIONAL,\r
+               OUT                     uint8_t* const                          p_tclass OPTIONAL,\r
+               OUT                     net32_t* const                          p_flow_lbl OPTIONAL )\r
+{\r
+       ib_net32_t tmp_ver_class_flow;\r
+\r
+       tmp_ver_class_flow = cl_ntoh32( ver_class_flow );\r
+\r
+       if (p_ver)\r
+               *p_ver = (uint8_t)(tmp_ver_class_flow >> 28);\r
+\r
+       if (p_tclass)\r
+               *p_tclass = (uint8_t)(tmp_ver_class_flow >> 20);\r
+\r
+       if (p_flow_lbl)\r
+               *p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF ));\r
+}\r
+\r
+static ib_api_status_t\r
+__to_ah (\r
+       IN                              ib_ca_attr_t                            *p_ca_attr,\r
+       IN              const   ib_av_attr_t                            *p_av_attr,\r
+               OUT                     struct ibv_ah_attr                      *p_attr )\r
+{\r
+       if (p_av_attr->port_num == 0 || \r
+               p_av_attr->port_num > p_ca_attr->num_ports) {\r
+               MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_AV ,\r
+                       (" invalid port number specified (%d)\n",p_av_attr->port_num));\r
+               return IB_INVALID_PORT;\r
+       }\r
+\r
+       p_attr->port_num = p_av_attr->port_num;\r
+       p_attr->sl = p_av_attr->sl;\r
+       p_attr->dlid = cl_ntoh16 (p_av_attr->dlid);\r
+       p_attr->static_rate = __to_rate(p_av_attr->static_rate);\r
+       p_attr->src_path_bits = p_av_attr->path_bits;\r
+                       \r
+       /* For global destination or Multicast address:*/\r
+       if (p_av_attr->grh_valid)\r
+       {\r
+               p_attr->is_global               = TRUE;\r
+               p_attr->grh.hop_limit   = p_av_attr->grh.hop_limit;\r
+               __grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL,\r
+                                                               &p_attr->grh.traffic_class, &p_attr->grh.flow_label );\r
+               p_attr->grh.sgid_index  = __gid_to_index_lookup (p_ca_attr, p_av_attr->port_num,\r
+                                                                                                       (uint8_t *) p_av_attr->grh.src_gid.raw); \r
+               cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, 16);\r
+       }\r
+       else\r
+       {\r
+               p_attr->is_global = FALSE;\r
+       }\r
+       return IB_SUCCESS;\r
+} \r
+\r
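+/*\r
+ * Pack the generic ibv_ah_attr into the mlx4 hardware UD address vector:\r
+ * port and PD in port_pd, source path bits in g_slid (bit 0x80 flags a\r
+ * present GRH), and SL/traffic class/flow label folded together into\r
+ * sl_tclass_flowlabel.\r
+ */\r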
+static void\r
+__set_av_params(struct mlx4_ah *ah, struct ibv_pd *pd, struct ibv_ah_attr *attr)\r
+{\r
+       ah->av.port_pd = cl_hton32(to_mpd(pd)->pdn | (attr->port_num << 24));\r
+       ah->av.g_slid  = attr->src_path_bits;\r
+       ah->av.dlid    = cl_hton16(attr->dlid);\r
+       if (attr->static_rate) {\r
+               ah->av.stat_rate = (uint8_t)(attr->static_rate + MLX4_STAT_RATE_OFFSET);\r
+               /* XXX check rate cap? */\r
+       }\r
+       ah->av.sl_tclass_flowlabel = cl_hton32(attr->sl << 28);\r
+       if (attr->is_global)\r
+       {\r
+               ah->av.g_slid |= 0x80;\r
+               ah->av.gid_index = attr->grh.sgid_index;\r
+               ah->av.hop_limit = attr->grh.hop_limit;\r
+               ah->av.sl_tclass_flowlabel |=\r
+                       cl_hton32((attr->grh.traffic_class << 20) |\r
+                                   attr->grh.flow_label);\r
+               cl_memcpy(ah->av.dgid, attr->grh.dgid.raw, 16);\r
+       }\r
+}\r
+\r
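+/*\r
+ * AVs are built entirely in user mode from the cached CA attributes, so\r
+ * the create/query/modify/destroy handlers return\r
+ * IB_VERBS_PROCESSING_DONE and no ioctl is issued.\r
+ */\r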
+ib_api_status_t\r
+mlx4_pre_create_ah (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN              const   ib_av_attr_t                            *p_av_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_av_handle_t                  *ph_uvp_av )\r
+{\r
+       struct mlx4_ah *ah;\r
+       struct ibv_ah_attr attr;\r
+       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       \r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+\r
+       if (pd->context->p_hca_attr == NULL) {\r
+               status = IB_ERROR;\r
+               goto end;\r
+       }\r
+\r
+       // sanity check (before allocating, so an invalid port cannot leak the AV)\r
+       if (p_av_attr->port_num == 0 || \r
+               p_av_attr->port_num > pd->context->p_hca_attr->num_ports)\r
+       {\r
+               status = IB_INVALID_PORT;\r
+               goto end;\r
+       }\r
+\r
+       ah = cl_malloc(sizeof *ah);\r
+       if (!ah) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto end;\r
+       }\r
+\r
+       // convert parameters \r
+       cl_memset(&attr, 0, sizeof(attr));\r
+       status = __to_ah(pd->context->p_hca_attr, p_av_attr, &attr);\r
+       if (status) {\r
+               /* don't leak the AV on a conversion failure */\r
+               cl_free(ah);\r
+               goto end;\r
+       }\r
+\r
+       ah->ibv_ah.pd = pd;\r
+       ah->ibv_ah.context = pd->context;\r
+       cl_memcpy(&ah->ibv_ah.av_attr, p_av_attr, sizeof (ib_av_attr_t));\r
+\r
+       cl_memset(&ah->av, 0, sizeof ah->av);\r
+       __set_av_params(ah, pd, &attr);\r
+\r
+       *ph_uvp_av = (ib_av_handle_t)&ah->ibv_ah;\r
+       status = IB_VERBS_PROCESSING_DONE;\r
+\r
+end:\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_query_ah (\r
+       IN              const   ib_av_handle_t                  h_uvp_av,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       UNREFERENCED_PARAMETER(h_uvp_av);\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       \r
+       return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
+void\r
+mlx4_post_query_ah (\r
+       IN              const   ib_av_handle_t                  h_uvp_av,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN      OUT                     ib_av_attr_t                            *p_addr_vector,\r
+       IN      OUT                     ib_pd_handle_t                  *ph_pd,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;\r
+\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+\r
+       CL_ASSERT(h_uvp_av && p_addr_vector);\r
+\r
+       if (ioctl_status == IB_SUCCESS)\r
+       {\r
+               cl_memcpy(p_addr_vector, &ah->av_attr, sizeof(ib_av_attr_t));\r
+               if (ph_pd)\r
+                       *ph_pd = (ib_pd_handle_t)ah->pd;\r
+       }\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_modify_ah (\r
+       IN              const   ib_av_handle_t                  h_uvp_av,\r
+       IN              const   ib_av_attr_t                            *p_addr_vector,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;\r
+       struct ibv_ah_attr attr;\r
+       ib_api_status_t status;\r
+\r
+       UNREFERENCED_PARAMETER(p_umv_buf);\r
+       \r
+       CL_ASSERT (h_uvp_av);\r
+\r
+       status = __to_ah(ah->context->p_hca_attr, p_addr_vector, &attr);\r
+       if (status)\r
+               return status;\r
+\r
+       __set_av_params(to_mah(ah), ah->pd, &attr);\r
+       cl_memcpy(&ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t));\r
+       \r
+       return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_destroy_ah (\r
+       IN              const   ib_av_handle_t                  h_uvp_av )\r
+{\r
+       struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;\r
+       \r
+       CL_ASSERT(ah);\r
+       \r
+       cl_free(to_mah(ah));\r
+       \r
+       return IB_VERBS_PROCESSING_DONE;\r
+}\r
+\r
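+/*\r
+ * XRC verbs - compiled only when XRC_SUPPORT is defined.  The SRQ pair\r
+ * below mirrors the regular SRQ path, plus registration of the SRQ in\r
+ * the per-context XRC SRQ table.\r
+ */\r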
+#ifdef XRC_SUPPORT\r
+ib_api_status_t  \r
+mlx4_pre_create_xrc_srq (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN              const   ib_xrcd_handle_t                        h_uvp_xrcd,\r
+       IN              const   ib_srq_attr_t                           *p_srq_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_srq_handle_t                 *ph_uvp_srq )\r
+{\r
+       struct mlx4_srq *srq;\r
+       struct ibv_create_srq *p_create_srq;\r
+       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;\r
+       struct ibv_xrc_domain *xrc_domain = (struct ibv_xrc_domain *)h_uvp_xrcd;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );\r
+\r
+       CL_ASSERT(p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc( size ); \r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_memory;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = sizeof(struct ibv_create_srq);\r
+       p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       p_create_srq = p_umv_buf->p_inout_buf;\r
+       \r
+       // Mlx4 code:\r
+\r
+       /* Sanity check SRQ size before proceeding */\r
+       if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)\r
+       {\r
+               status = IB_INVALID_PARAMETER;\r
+               goto err_params;\r
+       }\r
+\r
+       srq = cl_malloc(sizeof *srq);\r
+       if (!srq) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_srq;\r
+       }\r
+\r
+       if (cl_spinlock_init(&srq->lock)) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_lock;\r
+       }\r
+\r
+       srq->ibv_srq.pd                 = pd;\r
+       srq->ibv_srq.context    = pd->context;\r
+       \r
+       srq->max        = __align_queue_size(p_srq_attr->max_wr + 1);\r
+       srq->max_gs  = p_srq_attr->max_sge;\r
+       srq->counter    = 0;\r
+\r
+       if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))\r
+       {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_buf;\r
+       }\r
+\r
+       srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);\r
+       if (!srq->db) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_alloc_db;\r
+       }\r
+\r
+       *srq->db = 0;\r
+       \r
+       // fill the parameters for ioctl\r
+       p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;\r
+       p_create_srq->db_addr  = (uintptr_t) srq->db;\r
+       p_create_srq->pd_handle = pd->handle;\r
+       p_create_srq->max_wr = p_srq_attr->max_wr;\r
+       p_create_srq->max_sge = p_srq_attr->max_sge;\r
+       p_create_srq->srq_limit = p_srq_attr->srq_limit;\r
+\r
+       *ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;\r
+       goto end;\r
+\r
+err_alloc_db:\r
+       cl_free(srq->wrid);\r
+       mlx4_free_buf(&srq->buf);\r
+err_alloc_buf:\r
+       cl_spinlock_destroy(&srq->lock);\r
+err_lock:\r
+       cl_free(srq);\r
+err_alloc_srq:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_params:\r
+err_memory:\r
+end:\r
+       return status;\r
+}\r
+\r
+ib_api_status_t  \r
+mlx4_post_create_xrc_srq (\r
+       IN              const   ib_pd_handle_t                  h_uvp_pd,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN      OUT                     ib_srq_handle_t                 *ph_uvp_srq,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct mlx4_srq *srq = (struct mlx4_srq *)*ph_uvp_srq;\r
+       /* h_uvp_pd is referenced below to reach the device context */\r
+       struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;\r
+       struct ibv_create_srq_resp *p_resp;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       \r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+       \r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               // Mlx4 code:\r
+\r
+               srq->ibv_srq.xrc_srq_num        = srq->srqn = p_resp->srqn;\r
+               srq->ibv_srq.handle             = p_resp->srq_handle;\r
+\r
+               srq->max                = p_resp->max_wr;\r
+               srq->max_gs     = p_resp->max_sge;\r
+               \r
+               if (mlx4_store_xrc_srq(to_mctx(pd->context), srq->ibv_srq.xrc_srq_num, srq))\r
+               {\r
+                       mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS);\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+               }       \r
+       }\r
+       else\r
+       {\r
+               mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS);\r
+       }\r
+\r
+       cl_free( p_resp );\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlx4_pre_open_xrc_domain (\r
+       IN              const   ib_ca_handle_t                  h_uvp_ca,\r
+       IN              const   uint32_t                                        oflag,\r
+       IN      OUT             ci_umv_buf_t                            *p_umv_buf,\r
+               OUT                     ib_xrcd_handle_t                        *ph_uvp_xrcd )\r
+{\r
+       struct mlx4_xrc_domain *xrcd;\r
+       struct ibv_context * context = (struct ibv_context *)h_uvp_ca;\r
+       struct ibv_open_xrc_domain      *p_open_xrcd;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       int size = max( sizeof(struct ibv_open_xrc_domain), sizeof(struct ibv_open_xrc_domain_resp) );\r
+\r
+       CL_ASSERT(h_uvp_ca && p_umv_buf);\r
+\r
+       if( !p_umv_buf->p_inout_buf )\r
+       {\r
+               p_umv_buf->p_inout_buf = cl_malloc( size );\r
+               if( !p_umv_buf->p_inout_buf )\r
+               {\r
+                       status = IB_INSUFFICIENT_MEMORY;\r
+                       goto err_umv_buf;\r
+               }\r
+       }\r
+       p_umv_buf->input_size = sizeof(struct ibv_open_xrc_domain);\r
+       p_umv_buf->output_size = sizeof(struct ibv_open_xrc_domain_resp);\r
+       p_umv_buf->command = TRUE;\r
+\r
+       p_open_xrcd = p_umv_buf->p_inout_buf;\r
+\r
+       // Mlx4 code:\r
+\r
+       xrcd = cl_malloc(sizeof *xrcd);\r
+       if (!xrcd) {\r
+               status = IB_INSUFFICIENT_MEMORY;\r
+               goto err_xrc;\r
+       }\r
+\r
+       xrcd->ibv_xrcd.context = context;\r
+       \r
+       p_open_xrcd->oflags = oflag;\r
+\r
+       *ph_uvp_xrcd = (ib_xrcd_handle_t)&xrcd->ibv_xrcd;\r
+       goto end;\r
+\r
+err_xrc:\r
+       cl_free(p_umv_buf->p_inout_buf);\r
+err_umv_buf:\r
+end:\r
+       return status;\r
+}\r
+\r
+void\r
+mlx4_post_open_xrc_domain (\r
+       IN              const   ib_ca_handle_t                  h_uvp_ca,\r
+       IN                              ib_api_status_t                 ioctl_status,\r
+       IN      OUT                     ib_xrcd_handle_t                        *ph_uvp_xrcd,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf )\r
+{\r
+       struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)*ph_uvp_xrcd;\r
+       struct ibv_open_xrc_domain_resp *p_resp;\r
+\r
+       UNREFERENCED_PARAMETER(h_uvp_ca);\r
+       \r
+       CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);\r
+\r
+       p_resp = p_umv_buf->p_inout_buf;\r
+\r
+       if (IB_SUCCESS == ioctl_status)\r
+       {\r
+               // Mlx4 code:\r
+               \r
+               xrcd->handle = p_resp->xrcd_handle;\r
+               to_mxrcd(xrcd)->xrcdn = p_resp->xrcdn;\r
+       }\r
+       else\r
+       {\r
+               cl_free(to_mxrcd(xrcd));\r
+       }\r
+       \r
+       cl_free(p_resp);\r
+       return;\r
+}\r
+\r
+void\r
+mlx4_post_close_xrc_domain (\r
+       IN              const   ib_xrcd_handle_t                        h_uvp_xrcd,\r
+       IN                              ib_api_status_t                 ioctl_status )\r
+{\r
+       struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)h_uvp_xrcd;\r
+\r
+       CL_ASSERT(xrcd);\r
+\r
+       if (IB_SUCCESS == ioctl_status) {\r
+               cl_free(to_mxrcd(xrcd));\r
+       }\r
+}\r
+#endif\r
index 8ebcda9..91432d8 100644 (file)
@@ -220,7 +220,7 @@ typedef struct _mlnx_hca_t {
 \r
 #ifdef WIN_TO_BE_REMOVED \r
        // removed as it is found in p_ext->cl_ext.p_pdo\r
-       const void* VOID_PTR64  p_dev_obj;              // Driver PDO\r
+       const void*     p_dev_obj;              // Driver PDO\r
 #endif \r
 } mlnx_hca_t;\r
 \r
index f7c744c..7164281 100644 (file)
@@ -114,7 +114,7 @@ mlnx_attach_mcast (
                cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[8] )));\r
        \r
        // return the result\r
-       if (ph_mcast) *ph_mcast = (ib_mcast_handle_t VOID_PTR64)mcast_p;\r
+       if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p;\r
 \r
        status = IB_SUCCESS;\r
        goto end;\r
index 434e5fb..fdb626f 100644 (file)
@@ -101,7 +101,7 @@ mlnx_register_mr (
        // results\r
        *p_lkey = mr_p->lkey;\r
        *p_rkey = cl_hton32( mr_p->rkey );\r
-       if (ph_mr)      *ph_mr = (ib_mr_handle_t VOID_PTR64)mr_p;\r
+       if (ph_mr)      *ph_mr = (ib_mr_handle_t)mr_p;\r
        status = IB_SUCCESS;\r
 \r
 err_reg_mr:\r
@@ -184,7 +184,7 @@ mlnx_register_pmr (
 \r
        // results\r
 done:\r
-       if (ph_mr)      *ph_mr = (ib_mr_handle_t VOID_PTR64)mr_p;\r
+       if (ph_mr)      *ph_mr = (ib_mr_handle_t)mr_p;\r
        *p_lkey = mr_p->lkey;\r
        *p_rkey = cl_hton32( mr_p->rkey );\r
        //NB:  p_vaddr was not changed\r
@@ -375,7 +375,7 @@ mlnx_alloc_fmr(
        }\r
 \r
        // results\r
-       if (ph_fmr)     *ph_fmr = (mlnx_fmr_handle_t VOID_PTR64)fmr_p;\r
+       if (ph_fmr)     *ph_fmr = (mlnx_fmr_handle_t)fmr_p;\r
        status = IB_SUCCESS;\r
 \r
 err_alloc_fmr:\r
index 0ca785d..64d479e 100644 (file)
@@ -260,7 +260,7 @@ mlnx_query_ca (
                \r
                // get gids, using cache\r
                for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {\r
-                       union ib_gid * VOID_PTR64  gid = (union ib_gid     *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];\r
+                       union ib_gid * gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];\r
                        err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid );\r
                        //TODO: do we need to convert gids to little endian\r
                        if (err) {\r
@@ -486,7 +486,7 @@ done:
        cl_spinlock_release( &ext_p->uctx_lock );\r
        \r
        // return the result\r
-       if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t VOID_PTR64)p_context;\r
+       if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context;\r
 \r
        status = IB_SUCCESS;\r
        goto end;\r
@@ -575,7 +575,7 @@ mlnx_allocate_pd (
        }\r
 \r
        // return the result\r
-       if (ph_pd) *ph_pd = (ib_pd_handle_t VOID_PTR64)ib_pd_p;\r
+       if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p;\r
 \r
        status = IB_SUCCESS;\r
        \r
@@ -672,7 +672,7 @@ mlnx_create_av (
        }\r
 \r
        // return the result\r
-       if (ph_av) *ph_av = (ib_av_handle_t VOID_PTR64)ib_av_p;\r
+       if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
 \r
        status = IB_SUCCESS;\r
 \r
@@ -735,7 +735,7 @@ mlnx_query_av (
 #endif\r
 \r
        // results\r
-       *ph_pd = (ib_pd_handle_t VOID_PTR64)ib_ah_p->pd;\r
+       *ph_pd = (ib_pd_handle_t)ib_ah_p->pd;\r
        \r
 err_conv_mthca_av:\r
 err_user_unsupported:\r
@@ -885,7 +885,7 @@ mlnx_create_srq (
        srq_p->srq_context = (void*)srq_context;\r
        \r
        // return the result\r
-       if (ph_srq) *ph_srq = (ib_srq_handle_t VOID_PTR64)srq_p;\r
+       if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p;\r
 \r
        status = IB_SUCCESS;\r
        \r
@@ -1065,13 +1065,13 @@ _create_qp (
 \r
        // Query QP to obtain requested attributes\r
        if (p_qp_attr) {\r
-               status = mlnx_query_qp ((ib_qp_handle_t VOID_PTR64)ib_qp_p, p_qp_attr, p_umv_buf);\r
+               status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
                if (status != IB_SUCCESS)\r
                                goto err_query_qp;\r
        }\r
        \r
        // return the results\r
-       if (ph_qp) *ph_qp = (ib_qp_handle_t VOID_PTR64)ib_qp_p;\r
+       if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;\r
 \r
        status = IB_SUCCESS;\r
        goto end;\r
@@ -1195,7 +1195,7 @@ mlnx_modify_qp (
        // Query QP to obtain requested attributes\r
 query_qp:      \r
        if (p_qp_attr) {\r
-               status = mlnx_query_qp ((ib_qp_handle_t VOID_PTR64)ib_qp_p, p_qp_attr, p_umv_buf);\r
+               status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
                if (status != IB_SUCCESS)\r
                                goto err_query_qp;\r
        }\r
@@ -1282,7 +1282,7 @@ mlnx_query_qp (
        \r
        // fill the structure\r
        //TODO: this function is to be implemented via ibv_query_qp, which is not supported now \r
-       p_qp_attr->h_pd                                         = (ib_pd_handle_t VOID_PTR64)qp_p->ibqp.pd;\r
+       p_qp_attr->h_pd                                         = (ib_pd_handle_t)qp_p->ibqp.pd;\r
        p_qp_attr->qp_type                              = qp_p->ibqp.qp_type;\r
        p_qp_attr->sq_max_inline                = qp_p->qp_init_attr.cap.max_inline_data;\r
        p_qp_attr->sq_depth                             = qp_p->qp_init_attr.cap.max_send_wr;\r
@@ -1290,8 +1290,8 @@ mlnx_query_qp (
        p_qp_attr->sq_sge                                       = qp_p->qp_init_attr.cap.max_send_sge;\r
        p_qp_attr->rq_sge                                       = qp_p->qp_init_attr.cap.max_recv_sge;\r
        p_qp_attr->resp_res                             = qp_p->resp_depth;\r
-       p_qp_attr->h_sq_cq                              = (ib_cq_handle_t VOID_PTR64)qp_p->ibqp.send_cq;\r
-       p_qp_attr->h_rq_cq                              = (ib_cq_handle_t VOID_PTR64)qp_p->ibqp.recv_cq;\r
+       p_qp_attr->h_sq_cq                              = (ib_cq_handle_t)qp_p->ibqp.send_cq;\r
+       p_qp_attr->h_rq_cq                              = (ib_cq_handle_t)qp_p->ibqp.recv_cq;\r
        p_qp_attr->sq_signaled                  = qp_p->sq_policy == IB_SIGNAL_ALL_WR;\r
        p_qp_attr->state                                                = mlnx_qps_to_ibal( qp_p->state );\r
        p_qp_attr->num                                          = cl_hton32(qp_p->ibqp.qp_num);\r
@@ -1422,7 +1422,7 @@ mlnx_create_cq (
 //     *p_size = *p_size;      // return the same value\r
        *p_size = ib_cq_p->cqe;\r
 \r
-       if (ph_cq) *ph_cq = (ib_cq_handle_t VOID_PTR64)cq_p;\r
+       if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;\r
 \r
        status = IB_SUCCESS;\r
        \r
index 2596961..f001805 100644 (file)
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#include <ib_verbs.h>
-#include <ib_mad.h>
-#include <ib_smi.h>
-
-#include "mthca_dev.h"
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "mthca_mad.tmh"
-#endif
-#include "mthca_cmd.h"
-
-enum {
-       MTHCA_VENDOR_CLASS1 = 0x9,
-       MTHCA_VENDOR_CLASS2 = 0xa
-};
-
-struct mthca_trap_mad {
-       struct scatterlist sg;
-};
-
-static void update_sm_ah(struct mthca_dev *dev,
-                        u8 port_num, u16 lid, u8 sl)
-{
-       struct ib_ah *new_ah;
-       struct ib_ah_attr ah_attr;
-       SPIN_LOCK_PREP(lh);
-
-       if (!dev->send_agent[port_num - 1][0])
-               return;
-
-       RtlZeroMemory(&ah_attr, sizeof ah_attr);
-       ah_attr.dlid     = lid;
-       ah_attr.sl       = sl;
-       ah_attr.port_num = port_num;
-
-       new_ah = ibv_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
-                             &ah_attr, NULL, NULL);
-       if (IS_ERR(new_ah))
-               return;
-
-       spin_lock_irqsave(&dev->sm_lock, &lh);
-       if (dev->sm_ah[port_num - 1]) {
-               ibv_destroy_ah(dev->sm_ah[port_num - 1]);
-       }
-       dev->sm_ah[port_num - 1] = new_ah;
-       spin_unlock_irqrestore(&lh);
-}
-
-/*
- * Snoop SM MADs for port info and P_Key table sets, so we can
- * synthesize LID change and P_Key change events.
- */
-static void smp_snoop(struct ib_device *ibdev,
-                     u8 port_num,
-                     struct ib_mad *mad)
-{
-       struct ib_event event;
-
-       if ((mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
-            mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
-           mad->mad_hdr.method     == IB_MGMT_METHOD_SET) {
-               if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
-                       update_sm_ah(to_mdev(ibdev), port_num,
-                                    cl_ntoh16(*(__be16 *) (mad->data + 58)),
-                                    (*(u8 *) (mad->data + 76)) & 0xf);
-
-                       event.device           = ibdev;
-                       event.event            = IB_EVENT_LID_CHANGE;
-                       event.element.port_num = port_num;
-                       ib_dispatch_event(&event);
-               }
-
-               if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
-                       event.device           = ibdev;
-                       event.event            = IB_EVENT_PKEY_CHANGE;
-                       event.element.port_num = port_num;
-                       ib_dispatch_event(&event);
-               }
-       }
-}
-
-static void forward_trap(struct mthca_dev *dev,
-                        u8 port_num,
-                        struct ib_mad *mad)
-{
-       int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
-       struct mthca_trap_mad *tmad;
-       struct ib_sge      gather_list;
-       struct _ib_send_wr wr;
-       struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
-       int ret;
-       SPIN_LOCK_PREP(lh);
-
-       /* fill the template */
-       wr.ds_array = (ib_local_ds_t* VOID_PTR64)(void*)&gather_list;
-       wr.num_ds = 1;
-       wr.wr_type = WR_SEND;
-       wr.send_opt = IB_SEND_OPT_SIGNALED;
-       wr.dgrm.ud.remote_qp = cl_hton32(qpn);
-       wr.dgrm.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0;
-       
-       if (agent) {
-               tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
-               if (!tmad)
-                       return;
-
-               alloc_dma_zmem(dev, sizeof *mad, &tmad->sg);
-               if (!tmad->sg.page) {
-                       kfree(tmad);
-                       return;
-               }
-
-               memcpy(tmad->sg.page, mad, sizeof *mad);
-
-               wr.dgrm.ud.rsvd = (void* VOID_PTR64)&((struct ib_mad *)tmad->sg.page)->mad_hdr;
-               wr.wr_id         = (u64)(ULONG_PTR)tmad;
-               gather_list.addr   = tmad->sg.dma_address;
-               gather_list.length = tmad->sg.length;
-               gather_list.lkey   = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;
-
-               /*
-                * We rely here on the fact that MLX QPs don't use the
-                * address handle after the send is posted (this is
-                * wrong following the IB spec strictly, but we know
-                * it's OK for our devices).
-                */
-               spin_lock_irqsave(&dev->sm_lock, &lh);
-               wr.dgrm.ud.h_av = (ib_av_handle_t VOID_PTR64)dev->sm_ah[port_num - 1];
-               if (wr.dgrm.ud.h_av) {
-                               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,(" ib_post_send_mad not ported \n" ));
-                               ret = -EINVAL;
-               }
-               else
-                       ret = -EINVAL;
-               spin_unlock_irqrestore(&lh);
-
-               if (ret) {
-                       free_dma_mem_map(dev, &tmad->sg, PCI_DMA_BIDIRECTIONAL );
-                       kfree(tmad);
-               }
-       }
-}
-
-int mthca_process_mad(struct ib_device *ibdev,
-                     int mad_flags,
-                     u8 port_num,
-                     struct _ib_wc *in_wc,
-                     struct _ib_grh *in_grh,
-                     struct ib_mad *in_mad,
-                     struct ib_mad *out_mad)
-{
-       int err;
-       u8 status;
-       u16 slid = in_wc ? in_wc->recv.ud.remote_lid : cl_ntoh16(IB_LID_PERMISSIVE);
-
-       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("in: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x\n",
-               (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method, 
-               (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod, 
-               (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid ));
-
-       /* Forward locally generated traps to the SM */
-       if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
-           slid == 0) {
-               forward_trap(to_mdev(ibdev), port_num, in_mad);
-               HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Not sent, but locally forwarded\n"));
-               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-       }
-
-       /*
-        * Only handle SM gets, sets and trap represses for SM class
-        *
-        * Only handle PMA and Mellanox vendor-specific class gets and
-        * sets for other classes.
-        */
-       if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
-           in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-
-               if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
-                   in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
-                   in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS) {
-                       HCA_PRINT( TRACE_LEVEL_VERBOSE,HCA_DBG_MAD,(" Skip some methods. Nothing done !\n"));
-                       return IB_MAD_RESULT_SUCCESS;
-               }
-
-               /*
-                * Don't process SMInfo queries or vendor-specific
-                * MADs -- the SMA can't handle them.
-                */
-               if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
-                   ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
-                    IB_SMP_ATTR_VENDOR_MASK)) {
-                       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip SMInfo queries or vendor-specific MADs. Nothing done !\n"));
-                       return IB_MAD_RESULT_SUCCESS;
-               }
-       } 
-       else {
-               if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
-                  in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||
-                  in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
-
-                       if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
-                           in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET) {
-                               HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip some management methods. Nothing done !\n"));
-                               return IB_MAD_RESULT_SUCCESS;
-                       }
-               } 
-               else {
-                       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip IB_MGMT_CLASS_PERF_MGMT et al. Nothing done !\n"));
-                       return IB_MAD_RESULT_SUCCESS;
-               }       
-       }
-
-       // send MAD
-       err = mthca_MAD_IFC(to_mdev(ibdev),
-                           mad_flags & IB_MAD_IGNORE_MKEY,
-                           mad_flags & IB_MAD_IGNORE_BKEY,
-                           port_num, in_wc, in_grh, in_mad, out_mad,
-                           &status);
-       if (err) {
-               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC failed\n"));
-               return IB_MAD_RESULT_FAILURE;
-       }
-       if (status == MTHCA_CMD_STAT_BAD_PKT)
-               return IB_MAD_RESULT_SUCCESS;
-       if (status) {
-               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC returned status %02x\n", status));
-               return IB_MAD_RESULT_FAILURE;
-       }
-
-       if (!out_mad->mad_hdr.status)
-               smp_snoop(ibdev, port_num, in_mad);
-
-       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD,("out: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x, Status %x\n",
-               (u32)out_mad->mad_hdr.mgmt_class, (u32)out_mad->mad_hdr.method, 
-               (u32)out_mad->mad_hdr.attr_id, out_mad->mad_hdr.attr_mod, 
-               (u32)out_mad->mad_hdr.class_specific, out_mad->mad_hdr.tid,
-               (u32)out_mad->mad_hdr.status ));
-
-       if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) {
-               /* no response for trap repress */
-               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-       }
-
-       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static void send_handler(struct ib_mad_agent *agent,
-                        struct ib_mad_send_wc *mad_send_wc)
-{
-       struct mthca_trap_mad *tmad =
-               (void *) (ULONG_PTR) mad_send_wc->wr_id;
-
-       free_dma_mem_map(agent->device->mdev, &tmad->sg, PCI_DMA_BIDIRECTIONAL );
-       kfree(tmad);
-}
+/*\r
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.\r
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.\r
+ * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ *     Redistribution and use in source and binary forms, with or\r
+ *     without modification, are permitted provided that the following\r
+ *     conditions are met:\r
+ *\r
+ *      - Redistributions of source code must retain the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer.\r
+ *\r
+ *      - Redistributions in binary form must reproduce the above\r
+ *        copyright notice, this list of conditions and the following\r
+ *        disclaimer in the documentation and/or other materials\r
+ *        provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+#include <ib_verbs.h>\r
+#include <ib_mad.h>\r
+#include <ib_smi.h>\r
+\r
+#include "mthca_dev.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "mthca_mad.tmh"\r
+#endif\r
+#include "mthca_cmd.h"\r
+\r
+enum {\r
+       MTHCA_VENDOR_CLASS1 = 0x9,\r
+       MTHCA_VENDOR_CLASS2 = 0xa\r
+};\r
+\r
+struct mthca_trap_mad {\r
+       struct scatterlist sg;\r
+};\r
+\r
+static void update_sm_ah(struct mthca_dev *dev,\r
+                        u8 port_num, u16 lid, u8 sl)\r
+{\r
+       struct ib_ah *new_ah;\r
+       struct ib_ah_attr ah_attr;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       if (!dev->send_agent[port_num - 1][0])\r
+               return;\r
+\r
+       RtlZeroMemory(&ah_attr, sizeof ah_attr);\r
+       ah_attr.dlid     = lid;\r
+       ah_attr.sl       = sl;\r
+       ah_attr.port_num = port_num;\r
+\r
+       new_ah = ibv_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,\r
+                             &ah_attr, NULL, NULL);\r
+       if (IS_ERR(new_ah))\r
+               return;\r
+\r
+       spin_lock_irqsave(&dev->sm_lock, &lh);\r
+       if (dev->sm_ah[port_num - 1]) {\r
+               ibv_destroy_ah(dev->sm_ah[port_num - 1]);\r
+       }\r
+       dev->sm_ah[port_num - 1] = new_ah;\r
+       spin_unlock_irqrestore(&lh);\r
+}\r
+\r
+/*\r
+ * Snoop SM MADs for port info and P_Key table sets, so we can\r
+ * synthesize LID change and P_Key change events.\r
+ */\r
+static void smp_snoop(struct ib_device *ibdev,\r
+                     u8 port_num,\r
+                     struct ib_mad *mad)\r
+{\r
+       struct ib_event event;\r
+\r
+       if ((mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_LID_ROUTED ||\r
+            mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&\r
+           mad->mad_hdr.method     == IB_MGMT_METHOD_SET) {\r
+               if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {\r
+                       update_sm_ah(to_mdev(ibdev), port_num,\r
+                                    cl_ntoh16(*(__be16 *) (mad->data + 58)),\r
+                                    (*(u8 *) (mad->data + 76)) & 0xf);\r
+\r
+                       event.device           = ibdev;\r
+                       event.event            = IB_EVENT_LID_CHANGE;\r
+                       event.element.port_num = port_num;\r
+                       ib_dispatch_event(&event);\r
+               }\r
+\r
+               if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {\r
+                       event.device           = ibdev;\r
+                       event.event            = IB_EVENT_PKEY_CHANGE;\r
+                       event.element.port_num = port_num;\r
+                       ib_dispatch_event(&event);\r
+               }\r
+       }\r
+}\r
+\r
+static void forward_trap(struct mthca_dev *dev,\r
+                        u8 port_num,\r
+                        struct ib_mad *mad)\r
+{\r
+       int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;\r
+       struct mthca_trap_mad *tmad;\r
+       struct ib_sge      gather_list;\r
+       struct _ib_send_wr wr;\r
+       struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];\r
+       int ret;\r
+       SPIN_LOCK_PREP(lh);\r
+\r
+       /* fill the template */\r
+       wr.ds_array = (ib_local_ds_t*)(void*)&gather_list;\r
+       wr.num_ds = 1;\r
+       wr.wr_type = WR_SEND;\r
+       wr.send_opt = IB_SEND_OPT_SIGNALED;\r
+       wr.dgrm.ud.remote_qp = cl_hton32(qpn);\r
+       wr.dgrm.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0;\r
+       \r
+       if (agent) {\r
+               tmad = kmalloc(sizeof *tmad, GFP_KERNEL);\r
+               if (!tmad)\r
+                       return;\r
+\r
+               alloc_dma_zmem(dev, sizeof *mad, &tmad->sg);\r
+               if (!tmad->sg.page) {\r
+                       kfree(tmad);\r
+                       return;\r
+               }\r
+\r
+               memcpy(tmad->sg.page, mad, sizeof *mad);\r
+\r
+               wr.dgrm.ud.rsvd = (void*)&((struct ib_mad *)tmad->sg.page)->mad_hdr;\r
+               wr.wr_id         = (u64)(ULONG_PTR)tmad;\r
+               gather_list.addr   = tmad->sg.dma_address;\r
+               gather_list.length = tmad->sg.length;\r
+               gather_list.lkey   = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;\r
+\r
+               /*\r
+                * We rely here on the fact that MLX QPs don't use the\r
+                * address handle after the send is posted (this is\r
+                * wrong following the IB spec strictly, but we know\r
+                * it's OK for our devices).\r
+                */\r
+               spin_lock_irqsave(&dev->sm_lock, &lh);\r
+               wr.dgrm.ud.h_av = (ib_av_handle_t)dev->sm_ah[port_num - 1];\r
+               if (wr.dgrm.ud.h_av) {\r
+                               HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,(" ib_post_send_mad not ported \n" ));\r
+                               ret = -EINVAL;\r
+               }\r
+               else\r
+                       ret = -EINVAL;\r
+               spin_unlock_irqrestore(&lh);\r
+\r
+               if (ret) {\r
+                       free_dma_mem_map(dev, &tmad->sg, PCI_DMA_BIDIRECTIONAL );\r
+                       kfree(tmad);\r
+               }\r
+       }\r
+}\r
+\r
+int mthca_process_mad(struct ib_device *ibdev,\r
+                     int mad_flags,\r
+                     u8 port_num,\r
+                     struct _ib_wc *in_wc,\r
+                     struct _ib_grh *in_grh,\r
+                     struct ib_mad *in_mad,\r
+                     struct ib_mad *out_mad)\r
+{\r
+       int err;\r
+       u8 status;\r
+       u16 slid = in_wc ? in_wc->recv.ud.remote_lid : cl_ntoh16(IB_LID_PERMISSIVE);\r
+\r
+       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("in: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x\n",\r
+               (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method, \r
+               (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod, \r
+               (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid ));\r
+\r
+       /* Forward locally generated traps to the SM */\r
+       if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&\r
+           slid == 0) {\r
+               forward_trap(to_mdev(ibdev), port_num, in_mad);\r
+               HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Not sent, but locally forwarded\n"));\r
+               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;\r
+       }\r
+\r
+       /*\r
+        * Only handle SM gets, sets and trap represses for SM class\r
+        *\r
+        * Only handle PMA and Mellanox vendor-specific class gets and\r
+        * sets for other classes.\r
+        */\r
+       if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||\r
+           in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {\r
+\r
+               if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&\r
+                   in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&\r
+                   in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS) {\r
+                       HCA_PRINT( TRACE_LEVEL_VERBOSE,HCA_DBG_MAD,(" Skip some methods. Nothing done !\n"));\r
+                       return IB_MAD_RESULT_SUCCESS;\r
+               }\r
+\r
+               /*\r
+                * Don't process SMInfo queries or vendor-specific\r
+                * MADs -- the SMA can't handle them.\r
+                */\r
+               if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||\r
+                   ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==\r
+                    IB_SMP_ATTR_VENDOR_MASK)) {\r
+                       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip SMInfo queries or vendor-specific MADs. Nothing done !\n"));\r
+                       return IB_MAD_RESULT_SUCCESS;\r
+               }\r
+       } \r
+       else {\r
+               if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||\r
+                  in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||\r
+                  in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {\r
+\r
+                       if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&\r
+                           in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET) {\r
+                               HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip some management methods. Nothing done !\n"));\r
+                               return IB_MAD_RESULT_SUCCESS;\r
+                       }\r
+               } \r
+               else {\r
+                       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip IB_MGMT_CLASS_PERF_MGMT et al. Nothing done !\n"));\r
+                       return IB_MAD_RESULT_SUCCESS;\r
+               }       \r
+       }\r
+\r
+       // send MAD\r
+       err = mthca_MAD_IFC(to_mdev(ibdev),\r
+                           mad_flags & IB_MAD_IGNORE_MKEY,\r
+                           mad_flags & IB_MAD_IGNORE_BKEY,\r
+                           port_num, in_wc, in_grh, in_mad, out_mad,\r
+                           &status);\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC failed\n"));\r
+               return IB_MAD_RESULT_FAILURE;\r
+       }\r
+       if (status == MTHCA_CMD_STAT_BAD_PKT)\r
+               return IB_MAD_RESULT_SUCCESS;\r
+       if (status) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC returned status %02x\n", status));\r
+               return IB_MAD_RESULT_FAILURE;\r
+       }\r
+\r
+       if (!out_mad->mad_hdr.status)\r
+               smp_snoop(ibdev, port_num, in_mad);\r
+\r
+       HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD,("out: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x, Status %x\n",\r
+               (u32)out_mad->mad_hdr.mgmt_class, (u32)out_mad->mad_hdr.method, \r
+               (u32)out_mad->mad_hdr.attr_id, out_mad->mad_hdr.attr_mod, \r
+               (u32)out_mad->mad_hdr.class_specific, out_mad->mad_hdr.tid,\r
+               (u32)out_mad->mad_hdr.status ));\r
+\r
+       if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) {\r
+               /* no response for trap repress */\r
+               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;\r
+       }\r
+\r
+       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;\r
+}\r
+\r
+static void send_handler(struct ib_mad_agent *agent,\r
+                        struct ib_mad_send_wc *mad_send_wc)\r
+{\r
+       struct mthca_trap_mad *tmad =\r
+               (void *) (ULONG_PTR) mad_send_wc->wr_id;\r
+\r
+       free_dma_mem_map(agent->device->mdev, &tmad->sg, PCI_DMA_BIDIRECTIONAL );\r
+       kfree(tmad);\r
+}\r
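
One idiom worth noting in the forward_trap()/send_handler() pair above: the per-send context travels through the 64-bit wr_id field as an integer cookie and is recovered in the completion handler. A standalone sketch of that round trip (names are illustrative; the real code stores a DMA-mapped struct mthca_trap_mad):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct trap_mad_ctx {		/* stand-in for struct mthca_trap_mad */
	char buf[256];		/* stand-in for the DMA-mapped MAD    */
};

/* Post path: pointer -> 64-bit cookie, mirrors wr.wr_id = (u64)(ULONG_PTR)tmad. */
static uint64_t ctx_to_wr_id(struct trap_mad_ctx *c)
{
	return (uint64_t)(uintptr_t)c;
}

/* Completion path: cookie -> pointer, then release, mirrors send_handler(). */
static void on_send_completion(uint64_t wr_id)
{
	struct trap_mad_ctx *c = (struct trap_mad_ctx *)(uintptr_t)wr_id;
	free(c);
}

int main(void)
{
	struct trap_mad_ctx *c = calloc(1, sizeof *c);
	if (!c)
		return 1;
	uint64_t wr_id = ctx_to_wr_id(c);
	printf("wr_id = 0x%llx\n", (unsigned long long)wr_id);
	on_send_completion(wr_id);
	return 0;
}

Casting through uintptr_t (ULONG_PTR in the Windows code) keeps the pointer/integer conversion well-defined on both 32- and 64-bit builds.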
index 6e952b6..6bb6daf 100644 (file)
@@ -244,7 +244,7 @@ __post_create_av (
                        }\r
                        ah->key  = page->mr.lkey;\r
                }\r
-               *ph_uvp_av = (ib_av_handle_t VOID_PTR64)ah;\r
+               *ph_uvp_av = (ib_av_handle_t)ah;\r
        }\r
        else {\r
                mthca_free_av(ah);\r
@@ -290,7 +290,7 @@ __post_query_av (
        {\r
                cl_memcpy (p_addr_vector, &ah->av_attr, sizeof (ib_av_attr_t));\r
                if (ph_pd)\r
-                       *ph_pd = (ib_pd_handle_t VOID_PTR64)ah->h_uvp_pd;\r
+                       *ph_pd = (ib_pd_handle_t)ah->h_uvp_pd;\r
        }\r
        \r
        UVP_EXIT(UVP_DBG_AV);\r
index 31c6814..0145247 100644 (file)
@@ -111,7 +111,7 @@ __post_open_ca (
                /* return results */\r
                new_ca->ibv_ctx = ibvcontext;\r
                new_ca->p_hca_attr = NULL;\r
-               *ph_uvp_ca = (ib_ca_handle_t VOID_PTR64)new_ca;\r
+               *ph_uvp_ca = (ib_ca_handle_t)new_ca;\r
        }\r
 \r
 err_memory:    \r
index ddf67cb..f695ca9 100644 (file)
@@ -129,7 +129,7 @@ __post_create_cq (
                        goto err_create_cq;\r
                }\r
 \r
-               *ph_uvp_cq = (ib_cq_handle_t VOID_PTR64)ibv_cq;\r
+               *ph_uvp_cq = (ib_cq_handle_t)ibv_cq;\r
        }\r
        goto end;\r
 \r
index 5a448b3..a30b34b 100644 (file)
@@ -114,7 +114,7 @@ __post_allocate_pd (
                /* return results */\r
                p_new_pd->ibv_pd = ibv_pd;\r
                p_new_pd->p_hobul = p_hobul;\r
-               *ph_uvp_pd = (ib_pd_handle_t VOID_PTR64)p_new_pd;\r
+               *ph_uvp_pd = (ib_pd_handle_t)p_new_pd;\r
        }\r
        goto end;\r
        \r
index a0ecde5..fc5a2a3 100644 (file)
@@ -180,7 +180,7 @@ __post_create_qp (
                        goto err_create_cq;\r
                }\r
 \r
-               *ph_uvp_qp = (ib_qp_handle_t VOID_PTR64)ibv_qp;\r
+               *ph_uvp_qp = (ib_qp_handle_t)ibv_qp;\r
        }\r
        goto end;\r
        \r
index e3c9782..5f22b95 100644 (file)
@@ -207,7 +207,7 @@ __post_create_srq (
                if (mthca_is_memfree(ibv_pd->context))\r
                        mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn);\r
                \r
-               *ph_uvp_srq = (ib_srq_handle_t VOID_PTR64)srq;\r
+               *ph_uvp_srq = (ib_srq_handle_t)srq;\r
        }\r
        else\r
                __free_srq(srq);\r
index f656dfb..0fa13ac 100644 (file)
 #define TYPEDEF_PTR64\r
 #endif\r
 \r
-#ifndef VOID_PTR64\r
-#define VOID_PTR64\r
-#endif\r
-\r
 #ifndef STRUCT_PTR64\r
 #define STRUCT_PTR64\r
 #endif\r
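
The ib_types.h hunk above deletes the now-unused VOID_PTR64 fallback definition while leaving TYPEDEF_PTR64 and STRUCT_PTR64 in place. Annotations of this shape are conventionally wired to a Microsoft pointer-width qualifier so that 32-bit user-mode images can exchange pointer-bearing structures with a 64-bit kernel. An illustrative definition -- hypothetical, not the actual header text, and the guard macro here is made up:

/* Illustrative sketch only -- not the real ib_types.h definitions. */
#if !defined(_WIN64) && defined(BUILD_32BIT_AGAINST_64BIT_KERNEL)	/* hypothetical guard */
#define VOID_PTR64	__ptr64	/* MSVC: force a 64-bit pointer representation */
#else
#define VOID_PTR64		/* native-width build: annotation compiles away */
#endif

typedef void * VOID_PTR64 comp_ctx_ptr_t;	/* field is 64 bits wide in every build */

With every cast site in this patch dropping the annotation, the empty fallback was dead weight, hence its removal.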
index 0563bca..c8f3431 100644 (file)
@@ -1,6 +1,7 @@
 /*\r
  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
  * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.\r
+ * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
  *\r
  * This software is available to you under the OpenIB.org BSD license\r
  * below:\r
@@ -2542,7 +2543,7 @@ __ipoib_ats_reg_cb(
        CL_ASSERT( p_reg_svc_rec );\r
        CL_ASSERT( p_reg_svc_rec->svc_context );\r
 \r
-       p_reg = (ats_reg_t* VOID_PTR64)p_reg_svc_rec->svc_context;\r
+       p_reg = (ats_reg_t*)p_reg_svc_rec->svc_context;\r
        port_num = p_reg->p_adapter->guids.port_num;\r
 \r
        cl_obj_lock( &p_reg->p_adapter->obj );\r