[MTHCA] 1. preliminary support for livefish devices (not released yet);
author: leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 11 May 2006 08:41:40 +0000 (08:41 +0000)
committer: leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 11 May 2006 08:41:40 +0000 (08:41 +0000)
2. (36436) fixed kernel memory overconsumption on creating/destroying AVs;
3. (36433) bugfix: incorrect error code on create_cq with bad number of cqes;
4. (36427) bugfix: incorrect error code on create_qp with bad number of WRs;
5. fixed PREFAST errors;
6. fixed a bug in the cleanup after an unsuccessful driver start up;
7. fixed a bug in using debug print macros;

git-svn-id: svn://openib.tc.cornell.edu/gen1/trunk@340 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

31 files changed:
hw/mthca/kernel/hca_data.h
hw/mthca/kernel/hca_driver.h
hw/mthca/kernel/hca_memory.c
hw/mthca/kernel/hca_pci.c
hw/mthca/kernel/hca_pnp.c
hw/mthca/kernel/hca_verbs.c
hw/mthca/kernel/ib_verbs.h
hw/mthca/kernel/mt_device.c
hw/mthca/kernel/mt_packer.c
hw/mthca/kernel/mt_pci.h
hw/mthca/kernel/mt_reset_tavor.c
hw/mthca/kernel/mt_uverbsmem.c
hw/mthca/kernel/mthca_cmd.c
hw/mthca/kernel/mthca_dev.h
hw/mthca/kernel/mthca_eq.c
hw/mthca/kernel/mthca_main.c
hw/mthca/kernel/mthca_memfree.c
hw/mthca/kernel/mthca_mr.c
hw/mthca/kernel/mthca_profile.c
hw/mthca/kernel/mthca_provider.c
hw/mthca/kernel/mthca_qp.c
hw/mthca/kernel/mthca_srq.c
hw/mthca/mx_abi.h
hw/mthca/user/mlnx_ual_av.c
hw/mthca/user/mlnx_ual_ca.c
hw/mthca/user/mlnx_ual_qp.c
hw/mthca/user/mlnx_uvp.c
hw/mthca/user/mlnx_uvp.h
hw/mthca/user/mlnx_uvp_ah.c
hw/mthca/user/mlnx_uvp_qp.c
hw/mthca/user/mlnx_uvp_verbs.c

index b721ba6..8dda3cc 100644 (file)
@@ -230,6 +230,7 @@ extern mlnx_hobul_t *mlnx_hobul_array[];
 void\r
 setup_ci_interface(\r
        IN              const   ib_net64_t                                      ca_guid,\r
+       IN              const   int                                                     is_livefish,\r
                OUT                     ci_interface_t                          *p_interface );\r
 \r
 void\r
@@ -316,6 +317,10 @@ void
 mlnx_memory_if(\r
        IN      OUT                     ci_interface_t                          *p_interface );\r
 \r
+void\r
+mlnx_memory_if_livefish(\r
+       IN      OUT                     ci_interface_t                          *p_interface );\r
+\r
 void\r
 mlnx_ecc_if(\r
        IN      OUT                     ci_interface_t                          *p_interface );\r
index e673412..153c36c 100644 (file)
@@ -156,6 +156,8 @@ typedef struct _hca_dev_ext
 }      hca_dev_ext_t;\r
 \r
 #define EXT_FROM_HOB(hob_p)                    (container_of(hob_p,  hca_dev_ext_t, hca.hob))\r
+#define HCA_FROM_HOB(hob_p)                    (container_of(hob_p,  mlnx_hca_t, hob))\r
+#define MDEV_FROM_HOB(hob_p)           (HCA_FROM_HOB(hob_p)->mdev)\r
 #define IBDEV_FROM_HOB(hob_p)          (&EXT_FROM_HOB(hob_p)->hca.mdev->ib_dev)\r
 #define HOBUL_FROM_HOB(hob_p)          (&EXT_FROM_HOB(hob_p)->hca.hobul)\r
 #define HOB_FROM_IBDEV(dev_p)          (mlnx_hob_t *)&dev_p->mdev->ext->hca.hob\r
index ca276a6..1775cba 100644 (file)
@@ -189,7 +189,7 @@ mlnx_register_pmr (
        //NB: p_pmr_create->hca_page_size is not used, i.e. supposed it is always the same\r
        \r
        // register pmr \r
-       if (p_pmr_create->length == (uint64_t)-1LL)\r
+       if (p_pmr_create->length == (uint64_t)-1i64)\r
        {\r
                mr_p = ibv_get_dma_mr( ib_pd_p,\r
                        map_qp_ibal_acl(p_pmr_create->access_ctrl) );\r
@@ -406,3 +406,11 @@ mlnx_memory_if(
        p_interface->destroy_mw = mlnx_destroy_mw;\r
 }\r
 \r
+void\r
+mlnx_memory_if_livefish(\r
+       IN      OUT                     ci_interface_t                          *p_interface )\r
+{\r
+       p_interface->register_pmr = mlnx_register_pmr;\r
+       p_interface->deregister_mr = mlnx_deregister_mr;\r
+}\r
+\r
index 9efa74f..5e12d4f 100644 (file)
@@ -653,6 +653,9 @@ hca_enable_pci(
 void hca_disable_pci(PBUS_INTERFACE_STANDARD   phcaBusIfc)\r
 {\r
        // no need to disable the card, so just release the PCI bus i/f\r
-       phcaBusIfc->InterfaceDereference( phcaBusIfc->Context );\r
+       if (phcaBusIfc) {\r
+               phcaBusIfc->InterfaceDereference( phcaBusIfc->Context );\r
+               phcaBusIfc = NULL;\r
+       }\r
 }\r
 \r
index bb1b387..0590f3d 100644 (file)
@@ -25,7 +25,6 @@
 \r
 extern const char *mthca_version;\r
 \r
-\r
 static NTSTATUS\r
 hca_start(\r
        IN                              DEVICE_OBJECT* const            p_dev_obj,\r
@@ -408,7 +407,9 @@ __alloc_hca_ifc(
                return NULL;\r
        }\r
 \r
-       setup_ci_interface( p_ext->hca.guid, pIfc );\r
+       setup_ci_interface( p_ext->hca.guid,\r
+               !!mthca_is_livefish(p_ext->hca.mdev),\r
+               pIfc );\r
 \r
        pIfc->p_hca_dev = p_ext->cl_ext.p_pdo;\r
        pIfc->vend_id = (uint32_t)p_ext->hcaConfig.VendorID;\r
@@ -687,6 +688,14 @@ __UnmapHcaMemoryResources(
 }\r
 \r
 \r
+static int mthca_get_livefish_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id)\r
+{\r
+       *node_guid = cl_hton64((uint64_t)(ULONG_PTR)mdev);\r
+       mdev->ib_dev.node_guid = *node_guid;\r
+       *hw_id = 0;\r
+       return 0;\r
+}\r
+\r
 static NTSTATUS\r
 hca_start(\r
        IN                              DEVICE_OBJECT* const            p_dev_obj,\r
@@ -778,7 +787,11 @@ hca_start(
 \r
        /*leo: get node GUID */\r
        {\r
-               int err =       mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );\r
+               int err;\r
+               if (mthca_is_livefish(p_ext->hca.mdev)) \r
+                       err = mthca_get_livefish_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );\r
+               else\r
+                       err = mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );\r
                if (err) {\r\r
                        //TODO: no cleanup on error\r
                        HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, \r
index 81d5a6b..1a75067 100644 (file)
@@ -82,6 +82,9 @@ mlnx_open_ca (
 \r
        ib_dev = &p_hca->mdev->ib_dev;\r
 \r
+       if (mthca_is_livefish(p_hca->mdev)) \r
+               goto done;\r
+\r
        HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
                ("context 0x%p\n", ca_context));\r
        status = mlnx_hobs_set_cb(&p_hca->hob,\r
@@ -104,6 +107,7 @@ mlnx_open_ca (
        //TODO: do we need something for kernel users ?\r
 \r
        // Return pointer to HOB object\r
+done:  \r
        if (ph_ca) *ph_ca = &p_hca->hob;\r
        status =  IB_SUCCESS;\r
 \r
@@ -345,7 +349,7 @@ mlnx_modify_ca (
                HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mthca_modify_port failed (%d) \n",err));\r
                goto err_modify_port;\r
        }\r
-       \r
+\r
        status =        IB_SUCCESS;\r
 \r
 err_modify_port:\r
@@ -357,13 +361,15 @@ ib_api_status_t
 mlnx_close_ca (\r
        IN                              ib_ca_handle_t                          h_ca)\r
 {\r
+       mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
        HCA_ENTER(HCA_DBG_SHIM);\r
 \r
-       // release HOB resources\r
-       mlnx_hobs_remove(h_ca);\r
+       if (mthca_is_livefish(MDEV_FROM_HOB( hob_p ))) \r
+               goto done;\r
 \r
-       //TODO: release HOBUL resources\r
+       mlnx_hobs_remove(h_ca);\r
 \r
+done:\r
        HCA_EXIT(HCA_DBG_SHIM);\r
        \r
        return IB_SUCCESS;\r
@@ -469,7 +475,10 @@ mlnx_um_close(
        struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca;\r
        UNREFERENCED_PARAMETER(h_ca);\r
 \r
+       if (mthca_is_livefish(to_mdev(p_ucontext->device)))\r
+               goto done;\r
        unmap_crspace_for_all(p_ucontext);\r
+done:  \r
        if( !p_ucontext->pd )\r
                cl_free( h_um_ca );\r
        else\r
@@ -613,7 +622,7 @@ mlnx_create_av (
        if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
 \r
        status = IB_SUCCESS;\r
-       \r
+\r
 err_alloc_av:  \r
 err_inval_params:\r
        if (p_umv_buf && p_umv_buf->command) \r
@@ -765,85 +774,84 @@ _create_qp (
                OUT                     ib_qp_handle_t                          *ph_qp,\r
        IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
 {\r
-               int err;\r
-               ib_api_status_t         status;\r
-               struct ib_qp * ib_qp_p;\r
-               struct mthca_qp *qp_p;\r
-               struct ib_qp_init_attr qp_init_attr;\r
-               struct ib_ucontext *p_context = NULL;\r
-               struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
-               struct ib_device *ib_dev = ib_pd_p->device;\r
-               mlnx_hob_t       *hob_p = HOB_FROM_IBDEV(ib_dev);\r
-               \r
-               HCA_ENTER(HCA_DBG_QP);\r
-\r
-       \r
-               if( p_umv_buf && p_umv_buf->command ) {\r
-                       // sanity checks \r
-                       if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||\r
-                               p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||\r
-                               !p_umv_buf->p_inout_buf) {\r
-                               status = IB_INVALID_PARAMETER;\r
-                               goto err_inval_params;\r
-                       }\r
-                       p_context = ib_pd_p->ucontext;\r
-               }\r
-               else \r
-                       p_context = NULL;\r
-\r
-               // prepare the parameters\r
-               RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
-               qp_init_attr.qp_type = p_create_attr->qp_type;\r
-               qp_init_attr.event_handler = qp_event_handler;\r
-               qp_init_attr.qp_context = hob_p;\r
-               qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
-               qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
-               qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
-               qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
-               qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
-               qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;\r
-               qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;\r
-               qp_init_attr.port_num = port_num;\r
-\r
-\r
-               // create qp            \r
-               ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );\r
-               if (IS_ERR(ib_qp_p)) {\r
-                       err = PTR_ERR(ib_qp_p);\r
-                       HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,\r
-                               ("ibv_create_qp failed (%d)\n", err));\r
-                       status = errno_to_iberr(err);\r
-                       goto err_create_qp;\r
-               }\r
-       \r
-               // fill the object\r
-               qp_p = (struct mthca_qp *)ib_qp_p;\r
-               qp_p->qp_context = (void*)qp_context;\r
-               qp_p->qp_init_attr = qp_init_attr;\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_qp * ib_qp_p;\r
+       struct mthca_qp *qp_p;\r
+       struct ib_qp_init_attr qp_init_attr;\r
+       struct ib_ucontext *p_context = NULL;\r
+       struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+       struct ib_device *ib_dev = ib_pd_p->device;\r
+       mlnx_hob_t       *hob_p = HOB_FROM_IBDEV(ib_dev);\r
        \r
-               // Query QP to obtain requested attributes\r
-               if (p_qp_attr) {\r
-                       status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
-                       if (status != IB_SUCCESS)\r
-                                       goto err_query_qp;\r
+       HCA_ENTER(HCA_DBG_QP);\r
+\r
+       if( p_umv_buf && p_umv_buf->command ) {\r
+               // sanity checks \r
+               if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||\r
+                       p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||\r
+                       !p_umv_buf->p_inout_buf) {\r
+                       status = IB_INVALID_PARAMETER;\r
+                       goto err_inval_params;\r
                }\r
-               \r
-               // return the results\r
-               if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;\r
-       \r
-               status = IB_SUCCESS;\r
-               goto end;\r
+               p_context = ib_pd_p->ucontext;\r
+       }\r
+       else \r
+               p_context = NULL;\r
+\r
+       // prepare the parameters\r
+       RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
+       qp_init_attr.qp_type = p_create_attr->qp_type;\r
+       qp_init_attr.event_handler = qp_event_handler;\r
+       qp_init_attr.qp_context = hob_p;\r
+       qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
+       qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
+       qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
+       qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
+       qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
+       qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;\r
+       qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;\r
+       qp_init_attr.port_num = port_num;\r
+\r
+\r
+       // create qp            \r
+       ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );\r
+       if (IS_ERR(ib_qp_p)) {\r
+               err = PTR_ERR(ib_qp_p);\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,\r
+                       ("ibv_create_qp failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_create_qp;\r
+       }\r
+\r
+       // fill the object\r
+       qp_p = (struct mthca_qp *)ib_qp_p;\r
+       qp_p->qp_context = (void*)qp_context;\r
+       qp_p->qp_init_attr = qp_init_attr;\r
+\r
+       // Query QP to obtain requested attributes\r
+       if (p_qp_attr) {\r
+               status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
+               if (status != IB_SUCCESS)\r
+                               goto err_query_qp;\r
+       }\r
        \r
-       err_query_qp:\r
-               ibv_destroy_qp( ib_qp_p );\r
-       err_create_qp:\r
-       err_inval_params:\r
-       end:\r
-               if (p_umv_buf && p_umv_buf->command) \r
-                       p_umv_buf->status = status;\r
-               HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,\r
-                       ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
-               return status;\r
+       // return the results\r
+       if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;\r
+\r
+       status = IB_SUCCESS;\r
+       goto end;\r
+\r
+err_query_qp:\r
+       ibv_destroy_qp( ib_qp_p );\r
+err_create_qp:\r
+err_inval_params:\r
+end:\r
+       if (p_umv_buf && p_umv_buf->command) \r
+               p_umv_buf->status = status;\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,\r
+               ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+       return status;\r
 }\r
 \r
 ib_api_status_t\r
@@ -1082,7 +1090,7 @@ mlnx_create_cq (
        mlnx_hob_t                      *hob_p;\r
        struct ib_device *ib_dev;\r
        struct ib_ucontext *p_context;\r
-       \r
+\r
        HCA_ENTER(HCA_DBG_CQ);\r
 \r
        if( p_umv_buf ) {\r
@@ -1105,6 +1113,12 @@ mlnx_create_cq (
                ib_dev = IBDEV_FROM_HOB( hob_p );\r
        }\r
 \r
+       /* sanity check */\r
+       if (*p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {\r
+               status = IB_INVALID_CQ_SIZE;\r
+               goto err_cqe;\r
+       }\r
+\r
        // allocate cq  \r
        ib_cq_p = ibv_create_cq(ib_dev, \r
                cq_comp_handler, cq_event_handler,\r
@@ -1130,6 +1144,7 @@ mlnx_create_cq (
        \r
 err_create_cq:\r
 err_inval_params:\r
+err_cqe:\r
        if (p_umv_buf && p_umv_buf->command) \r
                p_umv_buf->status = status;\r
        HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,\r
@@ -1203,6 +1218,7 @@ err_destroy_cq:
 void\r
 setup_ci_interface(\r
        IN              const   ib_net64_t                                      ca_guid,\r
+       IN              const   int                                                     is_livefish,\r
        IN      OUT                     ci_interface_t                          *p_interface )\r
 {\r
        cl_memclr(p_interface, sizeof(*p_interface));\r
@@ -1221,38 +1237,43 @@ setup_ci_interface(
        /* The real interface. */\r
        p_interface->open_ca = mlnx_open_ca;\r
        p_interface->query_ca = mlnx_query_ca;\r
-       p_interface->modify_ca = mlnx_modify_ca; \r
        p_interface->close_ca = mlnx_close_ca;\r
        p_interface->um_open_ca = mlnx_um_open;\r
        p_interface->um_close_ca = mlnx_um_close;\r
 \r
        p_interface->allocate_pd = mlnx_allocate_pd;\r
        p_interface->deallocate_pd = mlnx_deallocate_pd;\r
-\r
-       p_interface->create_av = mlnx_create_av;\r
-       p_interface->query_av = mlnx_query_av;\r
-       p_interface->modify_av = mlnx_modify_av;\r
-       p_interface->destroy_av = mlnx_destroy_av;\r
-\r
-       p_interface->create_qp = mlnx_create_qp;\r
-       p_interface->create_spl_qp = mlnx_create_spl_qp;\r
-       p_interface->modify_qp = mlnx_modify_qp;\r
-       p_interface->query_qp = mlnx_query_qp;\r
-       p_interface->destroy_qp = mlnx_destroy_qp;\r
-\r
-       p_interface->create_cq = mlnx_create_cq;\r
-       p_interface->resize_cq = mlnx_resize_cq;\r
-       p_interface->query_cq = mlnx_query_cq;\r
-       p_interface->destroy_cq = mlnx_destroy_cq;\r
-\r
-       p_interface->local_mad = mlnx_local_mad;\r
-       \r
        p_interface->vendor_call = fw_access_ctrl;\r
 \r
-       mlnx_memory_if(p_interface);\r
-       mlnx_direct_if(p_interface);\r
-       mlnx_mcast_if(p_interface);\r
+       if (is_livefish) {\r
+               mlnx_memory_if_livefish(p_interface);\r
+       }\r
+       else {  \r
+               p_interface->modify_ca = mlnx_modify_ca; \r
+               \r
+               p_interface->create_av = mlnx_create_av;\r
+               p_interface->query_av = mlnx_query_av;\r
+               p_interface->modify_av = mlnx_modify_av;\r
+               p_interface->destroy_av = mlnx_destroy_av;\r
+\r
+               p_interface->create_qp = mlnx_create_qp;\r
+               p_interface->create_spl_qp = mlnx_create_spl_qp;\r
+               p_interface->modify_qp = mlnx_modify_qp;\r
+               p_interface->query_qp = mlnx_query_qp;\r
+               p_interface->destroy_qp = mlnx_destroy_qp;\r
+\r
+               p_interface->create_cq = mlnx_create_cq;\r
+               p_interface->resize_cq = mlnx_resize_cq;\r
+               p_interface->query_cq = mlnx_query_cq;\r
+               p_interface->destroy_cq = mlnx_destroy_cq;\r
+\r
+               p_interface->local_mad = mlnx_local_mad;\r
+               \r
 \r
+               mlnx_memory_if(p_interface);\r
+               mlnx_direct_if(p_interface);\r
+               mlnx_mcast_if(p_interface);\r
+       }\r
 \r
        return;\r
 }\r
index 0ba30cc..ef4df02 100644 (file)
@@ -84,7 +84,6 @@ struct ib_device_attr {
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
-       u8                      board_id[16];
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
index 6e520bc..5091b3c 100644 (file)
@@ -140,14 +140,16 @@ static int alloc_name(char *name)
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
-               snprintf(buf, sizeof buf, name, i);
+               snprintf(buf, sizeof(buf)-1, name, i);
+               buf[sizeof(buf)-1] = '\0';
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }
 
        i = find_first_zero_bit((const unsigned long *)inuse, PAGE_SIZE * 8);
        free_page(inuse);
-       snprintf(buf, sizeof buf, name, i);
+       snprintf(buf, sizeof(buf)-1, name, i);
+       buf[sizeof(buf)-1] = '\0';
 
        if (__ib_device_get_by_name(buf))
                return -ENFILE;
index afba3a6..c11fb1b 100644 (file)
@@ -87,7 +87,7 @@ void ib_pack(const struct ib_field        *desc,
                        else
                                val = 0;
 
-                       mask = cl_hton32(((1ull << desc[i].size_bits) - 1) << shift);
+                       mask = cl_hton32(((1Ui64 << desc[i].size_bits) - 1) << shift);
                        addr = (__be32 *) buf + desc[i].offset_words;
                        *addr = (*addr & ~mask) | (cl_hton32(val) & mask);
                } else if (desc[i].size_bits <= 64) {
@@ -104,7 +104,7 @@ void ib_pack(const struct ib_field        *desc,
                        else
                                val = 0;
 
-                       mask = CPU_2_BE64((~0ull >> (64 - desc[i].size_bits)) << shift);
+                       mask = CPU_2_BE64((~0Ui64 >> (64 - desc[i].size_bits)) << shift);
                        addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
                        *addr = (*addr & ~mask) | (cl_hton64(val) & mask);
                } else {
@@ -167,7 +167,7 @@ void ib_unpack(const struct ib_field        *desc,
                        __be32 *addr;
 
                        shift = 32 - desc[i].offset_bits - desc[i].size_bits;
-                       mask = ((1ull << desc[i].size_bits) - 1) << shift;
+                       mask = ((1Ui64 << desc[i].size_bits) - 1) << shift;
                        addr = (__be32 *) buf + desc[i].offset_words;
                        val = (cl_ntoh32(*addr) & mask) >> shift;
                        value_write(desc[i].struct_offset_bytes,
@@ -181,7 +181,7 @@ void ib_unpack(const struct ib_field        *desc,
                        __be64 *addr;
 
                        shift = 64 - desc[i].offset_bits - desc[i].size_bits;
-                       mask = (~0ull >> (64 - desc[i].size_bits)) << shift;
+                       mask = (~0Ui64 >> (64 - desc[i].size_bits)) << shift;
                        addr = (__be64 *) buf + desc[i].offset_words;
                        val = (cl_ntoh64(*addr) & mask) >> shift;
                        value_write(desc[i].struct_offset_bytes,
index a19ceaa..3f389ca 100644 (file)
 #endif
 
 /* live fishes */
+#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR_BD
 #define PCI_DEVICE_ID_MELLANOX_TAVOR_BD                0x5a45
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_BD
 #define PCI_DEVICE_ID_MELLANOX_ARBEL_BD                0x6279
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD    0x5e8d
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_BD                0x6275
+#endif
 
 // ===========================================
 // TYPES
index a8367df..23dbea5 100644 (file)
@@ -272,7 +272,7 @@ MdGetDevLocation(
 
        /* Get the device number  */
        l_Status = IoGetDeviceProperty(pi_pPdo,
-               DevicePropertyLocationInformation, sizeof(l_Buffer), &l_Buffer, &l_ResultLength);
+               DevicePropertyLocationInformation, sizeof(l_Buffer), l_Buffer, &l_ResultLength);
 
        /* Verify if the function was successful */
        if ( !NT_SUCCESS(l_Status) || !l_ResultLength ) {
@@ -360,11 +360,20 @@ static BOOLEAN PciFindPdoByPdoAndLocation(
        // suppose that there is no more than N_PCI_DEVICES, belonging to PCI.SYS
        #define N_PCI_DEVICES   256
        // suppose that the PDO objects, once created, never get moved
-       PDEVICE_OBJECT pdo[N_PCI_DEVICES];
+       PDEVICE_OBJECT *pdo;
        int i, n_pdos = 0;
+       KIRQL irql;
+
+       
+       pdo = (PDEVICE_OBJECT *)ExAllocatePoolWithTag(
+               NonPagedPool,
+               N_PCI_DEVICES * sizeof(PDEVICE_OBJECT),
+               MT_TAG_KERNEL );
+       if (!pdo)
+               return FALSE;
        
        // suppose, that PDOs are added only at PASSIVE_LEVEL
-       KIRQL irql = KeRaiseIrqlToDpcLevel();
+       irql = KeRaiseIrqlToDpcLevel();
                
        // get to the PCI.SYS driver
        l_pDrv = pi_pPdo->DriverObject;
@@ -395,11 +404,9 @@ static BOOLEAN PciFindPdoByPdoAndLocation(
                }
        }
 
-       // check whether we found the PDO
-       if (!l_pPdo)
-               return FALSE;
        *po_pPdo = l_pPdo;
-       return TRUE;    
+       ExFreePool(pdo);
+       return (BOOLEAN)!!*po_pPdo;     
 }
 
 /*----------------------------------------------------------------*/
index 3fbc005..6dce471 100644 (file)
@@ -78,6 +78,10 @@ int ibv_umem_get(struct ib_device *dev, struct ib_umem *mem,
 
        /* build sg list */
        npages = (unsigned long)(NEXT_PAGE_ALIGN(size + mem->offset) >> PAGE_SHIFT);
+       if (!npages) {
+               ret = -EINVAL;
+               goto err_inval;
+       }
        cur_base = (u64)(UINT_PTR)addr & PAGE_MASK;
        while (npages) {
                /* allocate a max large chunk (it's <= PAGE_SIZE) */
@@ -129,7 +133,7 @@ int ibv_umem_get(struct ib_device *dev, struct ib_umem *mem,
        
 out:
        ibv_umem_release(dev, mem);
-err_kmalloc:   
+err_kmalloc: err_inval:
 exit:
        return ret;
 }
index 41476e4..698339e 100644 (file)
@@ -620,7 +620,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
                lg = ffs(i) - 1;
                if (lg < 12) {
                        HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Got FW area not aligned to 4K (%I64x/%lx).\n",
-                                  (unsigned long long) mthca_icm_addr(&iter),
+                                  (u64) mthca_icm_addr(&iter),
                                   mthca_icm_size(&iter)));
                        err = -EINVAL;
                        goto out;
@@ -628,7 +628,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
                for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
                        if (virt != -1) {
                                pages[nent * 2] = cl_hton64(virt);
-                               virt += 1ULL << lg;
+                               virt += 1Ui64 << lg;
                        }
                        pages[nent * 2 + 1] = CPU_2_BE64((mthca_icm_addr(&iter) +
                                                           (i << lg)) | (lg - 12));
@@ -658,7 +658,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
                break;
        case CMD_MAP_ICM:
                HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped %d chunks/%d KB at %I64x for ICM.\n",
-                         tc, ts, (unsigned long long) virt - (ts << 10)));
+                         tc, ts, (u64) virt - (ts << 10)));
                break;
        }
 
@@ -719,9 +719,9 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
         * FW subSIZE_Tor version is at more signifant bits than minor
         * version, so swap here.
         */
-       dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) |
-               ((dev->fw_ver & 0xffff0000ull) >> 16) |
-               ((dev->fw_ver & 0x0000ffffull) << 16);
+       dev->fw_ver = (dev->fw_ver & 0xffff00000000Ui64) |
+               ((dev->fw_ver & 0xffff0000Ui64) >> 16) |
+               ((dev->fw_ver & 0x0000ffffUi64) << 16);
 
        MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
        dev->cmd.max_cmds = 1 << lg;
@@ -729,9 +729,9 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
        MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
 
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW version %012I64x, max commands %d\n",
-                 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds));
+                 (u64) dev->fw_ver, dev->cmd.max_cmds));
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Catastrophic error buffer at 0x%I64x, size 0x%x\n",
-               (unsigned long long) dev->catas_err.addr, dev->catas_err.size));
+               (u64) dev->catas_err.addr, dev->catas_err.size));
 
 
        if (mthca_is_memfree(dev)) {
@@ -750,17 +750,17 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
                                (PAGE_SHIFT - 12);
 
                HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Clear int @ %I64x, EQ arm @ %I64x, EQ set CI @ %I64x\n",
-                         (unsigned long long) dev->fw.arbel.clr_int_base,
-                         (unsigned long long) dev->fw.arbel.eq_arm_base,
-                         (unsigned long long) dev->fw.arbel.eq_set_ci_base));
+                         (u64) dev->fw.arbel.clr_int_base,
+                         (u64) dev->fw.arbel.eq_arm_base,
+                         (u64) dev->fw.arbel.eq_set_ci_base));
        } else {
                MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
                MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);
 
                HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW size %d KB (start %I64x, end %I64x)\n",
                          (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
-                         (unsigned long long) dev->fw.tavor.fw_start,
-                         (unsigned long long) dev->fw.tavor.fw_end));
+                         (u64) dev->fw.tavor.fw_start,
+                         (u64) dev->fw.tavor.fw_end));
        }
 
 out:
@@ -813,8 +813,8 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
 
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
                  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
-                 (unsigned long long) dev->ddr_start,
-                 (unsigned long long) dev->ddr_end));
+                 (u64) dev->ddr_start,
+                 (u64) dev->ddr_end));
 
 out:
        mthca_free_mailbox(dev, mailbox);
@@ -869,8 +869,8 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
 
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
                  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
-                 (unsigned long long) dev->ddr_start,
-                 (unsigned long long) dev->ddr_end));
+                 (u64) dev->ddr_start,
+                 (u64) dev->ddr_end));
 
 out:
        mthca_free_mailbox(dev, mailbox);
@@ -1087,7 +1087,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
                }
 
                HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max ICM size %I64d MB\n",
-                         (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20));
+                         (u64) dev_lim->hca.arbel.max_icm_sz >> 20));
        } 
        else {
                MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
@@ -1430,7 +1430,7 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
 
        if (!err)
                HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped page at %I64x to %I64x for ICM.\n",
-                         (unsigned long long) dma_addr, (unsigned long long) virt));
+                         (u64) dma_addr, (u64) virt));
 
        return err;
 }
@@ -1438,7 +1438,7 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
 int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
 {
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Unmapping %d pages at %I64x from ICM.\n",
-                 page_count, (unsigned long long) virt));
+                 page_count, (u64) virt));
 
        return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
 }
@@ -1504,7 +1504,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
 {
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("%s mask %016I64x for eqn %d\n",
                  unmap ? "Clearing" : "Setting",
-                 (unsigned long long) event_mask, eq_num));
+                 (u64) event_mask, eq_num));
        return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
                         0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
 }
index febb6cf..519d7fe 100644 (file)
@@ -61,7 +61,8 @@ enum {
        MTHCA_FLAG_NO_LAM     = 1 << 5,
        MTHCA_FLAG_FMR        = 1 << 6,
        MTHCA_FLAG_MEMFREE    = 1 << 7,
-       MTHCA_FLAG_PCIE       = 1 << 8
+       MTHCA_FLAG_PCIE           = 1 << 8,
+       MTHCA_FLAG_LIVEFISH       = 1 << 9
 };
 
 enum {
@@ -555,18 +556,6 @@ static inline int mthca_is_memfree(struct mthca_dev *dev)
        return dev->mthca_flags & MTHCA_FLAG_MEMFREE;
 }
 
-void mthca_get_av_params(      struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits );
-
-void mthca_set_av_params(      struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr );
-
-int ib_uverbs_init(void);
-void ib_uverbs_cleanup(void);
-int mthca_ah_grh_present(struct mthca_ah *ah);
-
-
-
-
-
 VOID
 WriteEventLogEntry(
        PVOID   pi_pIoObject,
@@ -588,4 +577,19 @@ WriteEventLogEntryStr(
        ...
        );
 
+
+static inline int mthca_is_livefish(struct mthca_dev *mdev)
+{
+       return mdev->mthca_flags & MTHCA_FLAG_LIVEFISH;
+}
+
+void mthca_get_av_params(      struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits );
+
+void mthca_set_av_params(      struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr );
+
+int ib_uverbs_init(void);
+void ib_uverbs_cleanup(void);
+int mthca_ah_grh_present(struct mthca_ah *ah);
+
+
 #endif /* MTHCA_DEV_H */
index 10f7920..ec9ccdf 100644 (file)
@@ -126,23 +126,23 @@ enum {
        MTHCA_EVENT_TYPE_SRQ_LIMIT                                      = 0x14  
 };
 
-#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
-                               (1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
-                               (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
-                               (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
-                               (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
-                               (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
-                               (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
-                               (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
-                               (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
-                               (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
-                               (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
-                               (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
-#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
-                                       (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)              | \
-                                       (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
-
-#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)
+#define MTHCA_ASYNC_EVENT_MASK ((1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG)           | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_COMM_EST)           | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
+                               (1Ui64 << MTHCA_EVENT_TYPE_ECC_DETECT))
+#define MTHCA_SRQ_EVENT_MASK   ((1Ui64 << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
+                                       (1Ui64 << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)             | \
+                                       (1Ui64 << MTHCA_EVENT_TYPE_SRQ_LIMIT))
+
+#define MTHCA_CMD_EVENT_MASK    (1Ui64 << MTHCA_EVENT_TYPE_CMD)
 
 #define MTHCA_EQ_DB_INC_CI     (1 << 24)
 #define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
index 92e1ef5..d2ecd02 100644 (file)
@@ -68,8 +68,9 @@ static struct mthca_profile default_profile = {
 enum __hca_type {
        TAVOR,                  /* MT23108                        */
        ARBEL_COMPAT,           /* MT25208 in Tavor compat mode   */
-       ARBEL_NATIVE,           /* MT25208 with extended features */
-       SINAI                   /* MT25204 */
+       ARBEL_NATIVE,           /* MT25218 with extended features */
+       SINAI,                  /* MT25204 */
+       LIVEFISH                /* device in flash (firmware-burning) recovery mode */
 };
 
 #define MTHCA_FW_VER(major, minor, subminor) \
@@ -81,10 +82,11 @@ static struct {
        int is_memfree;
        int is_pcie;
 } mthca_hca_table[] = {
-       { MTHCA_FW_VER(3, 2, 0), MTHCA_FW_VER(3, 3, 3), 0, 0 },
-       { MTHCA_FW_VER(4, 6, 0), MTHCA_FW_VER(4, 7, 0), 0, 1 },
-       { MTHCA_FW_VER(5, 0, 0), MTHCA_FW_VER(5, 1, 300), 1, 1 },
-       { MTHCA_FW_VER(1, 0, 0), MTHCA_FW_VER(1, 0, 1), 1, 1 }
+       { MTHCA_FW_VER(3, 2, 0), MTHCA_FW_VER(3, 3, 3), 0, 0 }, /* TAVOR */
+       { MTHCA_FW_VER(4, 6, 0), MTHCA_FW_VER(4, 7, 0), 0, 1 }, /* ARBEL_COMPAT */
+       { MTHCA_FW_VER(5, 0, 0), MTHCA_FW_VER(5, 1, 300), 1, 1 },       /* ARBEL_NATIVE */
+       { MTHCA_FW_VER(1, 0, 0), MTHCA_FW_VER(1, 0, 1), 1, 1 }, /* SINAI */
+       { MTHCA_FW_VER(0, 0, 0), MTHCA_FW_VER(0, 0, 0), 0, 0 }          /* LIVEFISH */
 };
 
 
@@ -106,6 +108,15 @@ static struct pci_device_id {
        HCA(TOPSPIN,  ARBEL,        ARBEL_NATIVE),
        HCA(TOPSPIN,  SINAI_OLD,    SINAI),
        HCA(TOPSPIN,  SINAI,        SINAI),
+       // livefish device IDs: HCAs exposed in flash-recovery (firmware-burning) mode
+       HCA(MELLANOX, TAVOR_BD, LIVEFISH),
+       HCA(MELLANOX, ARBEL_BD,         LIVEFISH),
+       HCA(MELLANOX, SINAI_OLD_BD,     LIVEFISH),
+       HCA(MELLANOX, SINAI_BD,         LIVEFISH),
+       HCA(TOPSPIN, TAVOR_BD,          LIVEFISH),
+       HCA(TOPSPIN, ARBEL_BD,          LIVEFISH),
+       HCA(TOPSPIN, SINAI_OLD_BD,      LIVEFISH),
+       HCA(TOPSPIN, SINAI_BD,          LIVEFISH),
 };
 #define MTHCA_PCI_TABLE_SIZE (sizeof(mthca_pci_table)/sizeof(struct pci_device_id))
 
@@ -392,8 +403,8 @@ static int  mthca_init_icm(struct mthca_dev *mdev,
        }
 
        HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW , ("%I64d KB of HCA context requires %I64d KB aux memory.\n",
-                 (unsigned long long) icm_size >> 10,
-                 (unsigned long long) aux_pages << 2));
+                 (u64) icm_size >> 10,
+                 (u64) aux_pages << 2));
 
        mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, (int)aux_pages,
                                                 GFP_HIGHUSER | __GFP_NOWARN);
@@ -737,7 +748,7 @@ static int  mthca_setup_hca(struct mthca_dev *mdev)
                goto err_uar_table_free;
        }
 
-       mdev->kar = ioremap(mdev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE,&mdev->kar_size);
+       mdev->kar = ioremap((io_addr_t)mdev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE,&mdev->kar_size);
        if (!mdev->kar) {
                HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map kernel access region, "
                          "aborting.\n"));
@@ -929,7 +940,8 @@ NTSTATUS mthca_init_one(hca_dev_ext_t *ext)
        /* allocate mdev structure */
        mdev = kmalloc(sizeof *mdev, GFP_KERNEL);
        if (!mdev) {
-               HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Device struct alloc failed, "
+               // can't use HCA_PRINT_EV here -- presumably it needs mdev, whose allocation just failed; verify
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Device struct alloc failed, "
                        "aborting.\n"));
                status = STATUS_INSUFFICIENT_RESOURCES;
                goto end;
@@ -938,13 +950,19 @@ NTSTATUS mthca_init_one(hca_dev_ext_t *ext)
        /* set some fields */
        RtlZeroMemory(mdev, sizeof *mdev);
        mdev->ext = ext;                /* pointer to DEVICE OBJECT extension */
+       mdev->hca_type = p_id->driver_data;
+       mdev->ib_dev.mdev = mdev;
+       if (p_id->driver_data == LIVEFISH)
+               mdev->mthca_flags |= MTHCA_FLAG_LIVEFISH;
+       if (mthca_is_livefish(mdev))
+               goto done;
        if (ext->hca_hidden)
                mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
        if (mthca_hca_table[p_id->driver_data].is_memfree)
                mdev->mthca_flags |= MTHCA_FLAG_MEMFREE;
        if (mthca_hca_table[p_id->driver_data].is_pcie)
                mdev->mthca_flags |= MTHCA_FLAG_PCIE;
-
+       
 //TODO: after we have a FW, capable of reset, 
 // write a routine, that only presses the button
 
@@ -994,6 +1012,7 @@ NTSTATUS mthca_init_one(hca_dev_ext_t *ext)
                goto err_cleanup;
        }
 
+       done:
        ext->hca.mdev = mdev;
        mdev->state = MTHCA_DEV_INITIALIZED;
        return 0;
@@ -1033,6 +1052,8 @@ void mthca_remove_one(hca_dev_ext_t *ext)
        int p;
 
        if (mdev) {
+               if (mthca_is_livefish(mdev))
+                       goto done;
                mthca_unregister_device(mdev);
 
                for (p = 1; p <= mdev->limits.num_ports; ++p)
@@ -1053,7 +1074,7 @@ void mthca_remove_one(hca_dev_ext_t *ext)
                mthca_cleanup_uar_table(mdev);
                mthca_close_hca(mdev);
                mthca_cmd_cleanup(mdev);
-
+done:
                kfree(mdev);
                ext->hca.mdev = NULL;
        }
index 1711f4e..48a1c3a 100644 (file)
@@ -349,7 +349,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
 
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,
                ("Allocated/max chunks %d:%d, reserved/max objects %#x:%#x, one/total size %#x:%#x at %I64x \n",
-                 i, num_icm, reserved, nobj, obj_size, nobj * obj_size, (unsigned long long) virt));
+                 i, num_icm, reserved, nobj, obj_size, nobj * obj_size, (u64) virt));
 
        return table;
 
@@ -381,7 +381,7 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW, 
                ( "Released chunks %d, objects %#x, one/total size %#x:%#x at %I64x \n",
                  table->num_icm, table->num_obj, table->obj_size, 
-                 table->num_obj * table->obj_size, (unsigned long long) table->virt));
+                 table->num_obj * table->obj_size, (u64) table->virt));
        kfree(table);
 }
 
index a6a7e50..52e07d9 100644 (file)
@@ -481,7 +481,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
                           mthca_mpt_access_t access, struct mthca_mr *mr)
 {
        mr->mtt = NULL;
-       return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
+       return mthca_mr_alloc(dev, pd, 12, 0, ~0Ui64, access, mr);
 }
 
 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
@@ -727,7 +727,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
        }
 
        mpt_entry.lkey   = cl_hton32(key);
-       mpt_entry.length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_shift));
+       mpt_entry.length = CPU_2_BE64(list_len * (1Ui64 << fmr->attr.page_shift));
        mpt_entry.start  = cl_hton64(iova);
 
        __raw_writel((u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
@@ -772,7 +772,7 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 
        fmr->mem.arbel.mpt->key    = cl_hton32(key);
        fmr->mem.arbel.mpt->lkey   = cl_hton32(key);
-       fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_shift));
+       fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1Ui64 << fmr->attr.page_shift));
        fmr->mem.arbel.mpt->start  = cl_hton64(iova);
 
        wmb();
index e936b15..05b8898 100644 (file)
@@ -157,8 +157,8 @@ u64 mthca_make_profile(struct mthca_dev *dev,
                if (total_size > mem_avail) {
                        HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Profile requires 0x%I64x bytes; "
                                  "won't in 0x%I64x bytes of context memory.\n",
-                                 (unsigned long long) total_size,
-                                 (unsigned long long) mem_avail));
+                                 (u64) total_size,
+                                 (u64) mem_avail));
                        kfree(profile);
                        return (u64)-ENOMEM;
                }
@@ -167,8 +167,8 @@ u64 mthca_make_profile(struct mthca_dev *dev,
                        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("profile[%2d]--%2d/%2d @ 0x%16I64x "
                                  "(size 0x%8I64x)\n",
                                  i, profile[i].type, profile[i].log_num,
-                                 (unsigned long long) profile[i].start,
-                                 (unsigned long long) profile[i].size));
+                                 (u64) profile[i].start,
+                                 (u64) profile[i].size));
        }
 
        if (mthca_is_memfree(dev)){
index ce3d98e..52b65cd 100644 (file)
 
        u8 status;
 
+       RtlZeroMemory(props, sizeof *props);
+
+       if (mthca_is_livefish(mdev)) {
+               props->max_pd = 1;
+               props->vendor_id = mdev->ext->hcaConfig.VendorID;
+               props->vendor_part_id = mdev->ext->hcaConfig.DeviceID;
+               return 0;
+       }
+
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
@@ -89,7 +98,6 @@
                goto out;
        }
 
-       RtlZeroMemory(props, sizeof *props);
        props->fw_ver              = mdev->fw_ver;
        props->device_cap_flags    = mdev->device_cap_flags;
        props->vendor_id           = cl_ntoh32(*(__be32 *) (out_mad->data + 36)) &
        props->vendor_part_id      = cl_ntoh16(*(__be16 *) (out_mad->data + 30));
        props->hw_ver              = cl_ntoh32(*(__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
-       props->max_mr_size         = ~0ull;
+       props->max_mr_size         = ~0Ui64;
        props->page_size_cap       = mdev->limits.page_size_cap;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = mdev->limits.max_wqes;
@@ -334,6 +342,9 @@ struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                err = -ENOMEM;
                goto err_nomem;
        }
+
+       if (mthca_is_livefish(to_mdev(ibdev)))
+               goto done;
        
        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) 
@@ -344,7 +355,7 @@ struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
        */
 
        /* map UAR to kernel */
-       context->kva = ioremap(context->uar.pfn << PAGE_SHIFT, PAGE_SIZE,&context->uar_size);
+       context->kva = ioremap((io_addr_t)context->uar.pfn << PAGE_SHIFT, PAGE_SIZE,&context->uar_size);
        if (!context->kva) {
                HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW ,("Couldn't map kernel access region, aborting.\n") );
                err = -ENOMEM;
@@ -377,6 +388,7 @@ struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                goto err_init_user;
        }
 
+done:
        err = ib_copy_to_umv_buf(p_umv_buf, &uresp, sizeof uresp);
        if (err) 
                goto err_copy_to_umv_buf;
@@ -407,12 +419,15 @@ err_nomem:
 {
         struct mthca_ucontext                                   *mucontext = to_mucontext(context);
 
+       if (mthca_is_livefish(to_mdev(context->device)))
+               goto done;
        mthca_cleanup_user_db_tab(to_mdev(context->device), &mucontext->uar,
                                  mucontext->db_tab);
        MmUnmapLockedPages( mucontext->ibucontext.user_uar, mucontext->mdl );
        IoFreeMdl(mucontext->mdl);
        iounmap(mucontext->kva, PAGE_SIZE);
        mthca_uar_free(to_mdev(context->device), &mucontext->uar);
+done:  
        kfree(mucontext);
        
        return 0;
@@ -440,11 +455,15 @@ struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                goto err_mem;
        }
 
+       if (mthca_is_livefish(to_mdev(ibdev))) 
+               goto done;
+
        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                goto err_pd_alloc;
        }
 
+done:
        if (p_umv_buf && p_umv_buf->command) {
                resp.pd_handle = (u64)(UINT_PTR)pd;
                resp.pdn = pd->pd_num;
@@ -467,9 +486,13 @@ err_param:
 
 int mthca_dealloc_pd(struct ib_pd *pd)
 {
+       if (mthca_is_livefish(to_mdev(pd->device))) 
+               goto done;
+
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
-       kfree(pd);
 
+done:  
+       kfree(pd);
        return 0;
 }
 
@@ -909,17 +932,17 @@ struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
        /* Find largest page shift we can use to cover buffers */
        for (shift = PAGE_SHIFT; shift < 31; ++shift)
                if (num_phys_buf > 1) {
-                       if ((1ULL << shift) & mask)
+                       if ((1Ui64 << shift) & mask)
                                break;
                } else {
-                       if (1ULL << shift >=
+                       if (1Ui64 << shift >=
                            buffer_list[0].size +
-                           (buffer_list[0].addr & ((1ULL << shift) - 1)))
+                           (buffer_list[0].addr & ((1Ui64 << shift) - 1)))
                                break;
                }
 
-       buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
-       buffer_list[0].addr &= ~0ull << shift;
+       buffer_list[0].size += buffer_list[0].addr & ((1Ui64 << shift) - 1);
+       buffer_list[0].addr &= ~0Ui64 << shift;
 
        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
@@ -928,7 +951,7 @@ struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
 
        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
-               npages += (int)((buffer_list[i].size + (1ULL << shift) - 1) >> shift);
+               npages += (int)((buffer_list[i].size + (1Ui64 << shift) - 1) >> shift);
 
        if (!npages)
                return &mr->ibmr;
@@ -942,14 +965,14 @@ struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
-                    j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
+                    j < (buffer_list[i].size + (1Ui64 << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
 
        HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Registering memory at %I64x (iova %I64x) "
                  "in PD %x; shift %d, npages %d.\n",
-                 (unsigned long long) buffer_list[0].addr,
-                 (unsigned long long) *iova_start,
+                 (u64) buffer_list[0].addr,
+                 (u64) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages));
 
index 78f67e3..5ca1009 100644 (file)
@@ -1215,10 +1215,6 @@ int mthca_alloc_qp(struct mthca_dev *dev,
        int err;
        SPIN_LOCK_PREP(lh);
 
-       err = mthca_set_qp_size(dev, cap, pd, qp);
-       if (err)
-               return err;
-
        switch (type) {
        case IB_QPT_RELIABLE_CONN: qp->transport = RC; break;
        case IB_QPT_UNRELIABLE_CONN: qp->transport = UC; break;
@@ -1226,6 +1222,10 @@ int mthca_alloc_qp(struct mthca_dev *dev,
        default: return -EINVAL;
        }
 
+       err = mthca_set_qp_size(dev, cap, pd, qp);
+       if (err)
+               return err;
+
        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;
index 0dcb056..c731fce 100644 (file)
@@ -109,7 +109,7 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
 
        RtlZeroMemory(context, sizeof *context);
 
-       context->wqe_base_ds = CPU_2_BE64(1ULL  << (srq->wqe_shift - 4));
+       context->wqe_base_ds = CPU_2_BE64(1Ui64  << (srq->wqe_shift - 4));
        context->state_pd    = cl_hton32(pd->pd_num);
        context->lkey        = cl_hton32(srq->mr.ibmr.lkey);
 
index e64ac6d..6d93165 100644 (file)
@@ -154,18 +154,13 @@ struct ibv_modify_qp_resp {
        uint8_t reserved[3];
 };
 
-struct __ibv_ah {
-       uint64_t user_handle;
-       int use_mr;
-};
-
 struct ibv_create_ah {
-       struct __ibv_ah;
+       uint64_t user_handle;
        struct ibv_reg_mr mr;   
 };
 
 struct ibv_create_ah_resp {
-       struct __ibv_ah;
+       uint64_t user_handle;
        uint64_t start;
        struct ibv_reg_mr_resp mr;
 };
index cd60fb8..393c7bc 100644 (file)
@@ -144,7 +144,6 @@ mlnx_pre_create_av (
        struct ibv_ah_attr attr;\r
        struct ibv_create_ah *p_create_av;\r
        ib_api_status_t status = IB_SUCCESS;\r
-       int AV_created = TRUE;\r
        size_t size = max( sizeof(struct ibv_create_ah), sizeof(struct ibv_create_ah_resp) );\r
        mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
        mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
@@ -180,9 +179,6 @@ mlnx_pre_create_av (
 \r
        // try to create AV\r
        err = mthca_alloc_av(to_mpd(p_pd->ibv_pd), &attr, ah, NULL);\r
-       if (err == -EAGAIN) \r
-               AV_created = FALSE;\r
-       else\r
        if (err) {\r
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_alloc_av failed (%d)\n", err));\r
                status = errno_to_iberr(err);\r
@@ -205,7 +201,7 @@ mlnx_pre_create_av (
        p_umv_buf->command = TRUE;\r
        p_create_av = (struct ibv_create_ah *)p_umv_buf->p_inout_buf;\r
        p_create_av->user_handle = (uint64_t)(ULONG_PTR)ah;\r
-       if (!AV_created) {\r
+       if (ah->in_kernel) {\r
                struct mthca_ah_page *page = ah->page;\r
                p_create_av->mr.start = (uint64_t)(ULONG_PTR)page->buf;\r
                p_create_av->mr.length = g_page_size;\r
@@ -213,7 +209,6 @@ mlnx_pre_create_av (
                p_create_av->mr.pd_handle        = p_pd->ibv_pd->handle;\r
                p_create_av->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn;\r
                p_create_av->mr.access_flags = 0;       //local read\r
-               p_create_av->use_mr = TRUE;\r
                status = IB_SUCCESS;\r
        }\r
        else\r
@@ -256,7 +251,7 @@ mlnx_post_create_av (
 \r
                if (!mthca_is_memfree(p_pd->ibv_pd->context)) {\r
                        page = ah->page;\r
-                       if (p_resp->use_mr) {\r
+                       if (ah->in_kernel) {\r
                                // fill mr parameters\r
                                page->mr.handle = p_resp->mr.mr_handle;\r
                                page->mr.lkey = p_resp->mr.lkey;\r
@@ -367,9 +362,15 @@ ib_api_status_t
 mlnx_pre_destroy_av (\r
     IN         const ib_av_handle_t            h_uvp_av)\r
 {\r
+       ib_api_status_t status ;\r
+       struct mthca_ah *mthca_ah = (struct mthca_ah *)h_uvp_av;\r
        UVP_ENTER(UVP_DBG_AV);\r
+       if (mthca_ah->in_kernel)\r
+               status = IB_SUCCESS;\r
+       else\r
+               status = IB_VERBS_PROCESSING_DONE;\r
        UVP_EXIT(UVP_DBG_AV);\r
-       return IB_SUCCESS;\r
+       return status;\r
 }\r
 \r
 void\r
index 64ca42a..83dd92c 100644 (file)
@@ -105,16 +105,17 @@ mlnx_post_open_ca (
 {\r
        ib_api_status_t  status = ioctl_status;\r
        mlnx_ual_hobul_t *new_ca;\r
-       struct ibv_get_context_resp *resp_p;\r
+       struct ibv_get_context_resp *p_resp;\r
        struct ibv_context * ibvcontext;\r
        int err;\r
 \r
        UVP_ENTER(UVP_DBG_SHIM);\r
 \r
+       p_resp = (struct ibv_get_context_resp *)p_umv_buf->p_inout_buf;\r
+\r
        if (IB_SUCCESS == status) {\r
                /* allocate ibv context */\r
-               resp_p = (struct ibv_get_context_resp *)p_umv_buf->p_inout_buf;\r
-               ibvcontext = mthca_alloc_context(resp_p);\r
+               ibvcontext = mthca_alloc_context(p_resp);\r
                if (IS_ERR(ibvcontext)) {\r
                        err = PTR_ERR(ibvcontext);\r
                        UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,("mthca_alloc_context failed (%d)\n", err));\r
@@ -137,7 +138,8 @@ mlnx_post_open_ca (
 \r
 err_memory:    \r
 err_alloc_context:\r
-       cl_free( resp_p );\r
+       if (p_resp)\r
+               cl_free( p_resp );\r
        UVP_EXIT(UVP_DBG_SHIM);\r
        return status;\r
 }\r
index d708f64..d4597e8 100644 (file)
@@ -80,9 +80,19 @@ ib_api_status_t
        size_t size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );\r
        mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;\r
        mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul;\r
+       ib_ca_attr_t *p_hca_attr = p_hobul->p_hca_attr;\r
 \r
        UVP_ENTER(UVP_DBG_QP);\r
 \r
+       /* sanity checks */\r
+       if(p_create_attr->sq_depth > p_hca_attr->max_wrs ||p_create_attr->rq_depth > p_hca_attr->max_wrs )\r
+               status = IB_INVALID_MAX_WRS;\r
+       else \r
+       if(p_create_attr->sq_sge> p_hca_attr->max_sges ||p_create_attr->rq_sge> p_hca_attr->max_sges )\r
+               status = IB_INVALID_MAX_SGE;\r
+       if (status)\r
+               goto err_params;\r
+\r
        CL_ASSERT(p_umv_buf);\r
 \r
        if( !p_umv_buf->p_inout_buf )\r
@@ -99,9 +109,9 @@ ib_api_status_t
        p_umv_buf->command = TRUE;\r
 \r
        /* convert attributes */\r
-       attr.send_cq                                            = p_create_attr->h_sq_cq->ibv_cq;\r
-       attr.recv_cq                                                    = p_create_attr->h_rq_cq->ibv_cq;\r
-       attr.srq                                                                = NULL; /* absent in IBAL */\r
+       attr.send_cq                            = p_create_attr->h_sq_cq->ibv_cq;\r
+       attr.recv_cq                            = p_create_attr->h_rq_cq->ibv_cq;\r
+       attr.srq                                        = NULL; /* absent in IBAL */\r
        attr.cap.max_send_wr            = p_create_attr->sq_depth;\r
        attr.cap.max_recv_wr            = p_create_attr->rq_depth;\r
        attr.cap.max_send_sge           = p_create_attr->sq_sge;\r
@@ -116,17 +126,11 @@ ib_api_status_t
        if (IS_ERR(ibv_qp)) {\r
                err = PTR_ERR(ibv_qp);\r
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_create_qp_pre failed (%d)\n", err));\r
-               //fix return values\r
-               if(err == -EINVAL && (attr.cap.max_send_wr     > 65536 ||attr.cap.max_recv_wr     > 65536 ))\r
-                       status = IB_INVALID_MAX_WRS;\r
-               else if(err == -EINVAL && (attr.cap.max_send_sge> 64 ||attr.cap.max_recv_sge> 64 ))\r
-                       status = IB_INVALID_MAX_SGE;\r
-               else if(err == -ENOMEM && (attr.cap.max_send_sge == 0 ||attr.cap.max_recv_sge == 0|| \r
+               if(err == -ENOMEM && (attr.cap.max_send_sge == 0 ||attr.cap.max_recv_sge == 0|| \r
                        attr.cap.max_send_wr == 0 || attr.cap.max_recv_wr == 0))\r
                        status = IB_INVALID_SETTING;\r
                else\r
                        status = errno_to_iberr(err);\r
-\r
                goto err_alloc_qp;\r
        }\r
 \r
@@ -135,6 +139,7 @@ ib_api_status_t
 err_alloc_qp:\r
        cl_free(p_umv_buf->p_inout_buf);\r
 err_memory:\r
+err_params:\r
 end:\r
                UVP_EXIT(UVP_DBG_QP);\r
                return status;\r
@@ -251,13 +256,14 @@ mlnx_post_modify_qp (
     {\r
                memset( &attr, 0, sizeof(attr));\r
                attr.qp_state = p_resp->qp_state;\r
-               if (p_qp_info->ibv_qp)\r
+               if (p_qp_info->ibv_qp) {\r
                        err = p_qp_info->h_uvp_pd->p_hobul->ibv_ctx->ops.modify_qp(\r
                                h_uvp_qp->ibv_qp, &attr, p_resp->attr_mask);\r
-               if (err) {\r
-                       UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_modify_qp failed (%d)\n", err));\r
-                       status = errno_to_iberr(err);\r
-                       goto err_modify_qp;\r
+                       if (err) {\r
+                               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_modify_qp failed (%d)\n", err));\r
+                               status = errno_to_iberr(err);\r
+                               goto err_modify_qp;\r
+                       }\r
                }\r
                UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_SHIM ,\r
                        ("Committed to modify QP to state %d\n", p_resp->qp_state));\r
index fd383ee..9038f5e 100644 (file)
@@ -71,6 +71,24 @@ size_t g_page_size = 0;
 #define PCI_VENDOR_ID_TOPSPIN                  0x1867
 #endif
 
+/* livefish device IDs: HCAs exposed in flash-recovery (firmware-burning) mode */
+#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR_BD
+#define PCI_DEVICE_ID_MELLANOX_TAVOR_BD                0x5a45
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_BD
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_BD                0x6279
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD    0x5e8d
+#endif
+
+#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_BD
+#define PCI_DEVICE_ID_MELLANOX_SINAI_BD                0x6275
+#endif
+
+
 #define HCA(v, d, t) \
        { PCI_VENDOR_ID_##v,    PCI_DEVICE_ID_MELLANOX_##d, MTHCA_##t }
 
@@ -89,6 +107,15 @@ static struct pci_device_id {
        HCA( TOPSPIN,           ARBEL,                                  ARBEL),
        HCA( TOPSPIN,           SINAI_OLD,                      ARBEL),
        HCA( TOPSPIN,           SINAI,                                  ARBEL),
+       // live fishes
+       HCA(MELLANOX, TAVOR_BD, LIVEFISH),
+       HCA(MELLANOX, ARBEL_BD,         LIVEFISH),
+       HCA(MELLANOX, SINAI_OLD_BD, LIVEFISH),
+       HCA(MELLANOX, SINAI_BD,         LIVEFISH),
+       HCA(TOPSPIN, TAVOR_BD,          LIVEFISH),
+       HCA(TOPSPIN, ARBEL_BD,          LIVEFISH),
+       HCA(TOPSPIN, SINAI_OLD_BD,      LIVEFISH),
+       HCA(TOPSPIN, SINAI_BD,          LIVEFISH),
 };
 
 static struct ibv_context_ops mthca_ctx_ops = {
index 47a1a87..b9ed87c 100644 (file)
@@ -45,7 +45,8 @@
 
 enum mthca_hca_type {
        MTHCA_TAVOR,
-       MTHCA_ARBEL
+       MTHCA_ARBEL,
+       MTHCA_LIVEFISH
 };
 
 enum {
@@ -200,6 +201,7 @@ struct mthca_ah {
        ib_pd_handle_t  h_uvp_pd;
        struct mthca_ah_page *page;
        uint32_t              key;
+       int     in_kernel;
 };
 
 struct mthca_ah_page {
index 74201fd..f68fc3b 100644 (file)
@@ -68,7 +68,6 @@ static struct mthca_ah_page *__add_page(
 int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
                   struct mthca_ah *ah, struct ibv_create_ah_resp *resp)
 {
-       int added_page = FALSE;
        if (mthca_is_memfree(pd->ibv_pd.context)) {
                ah->av = cl_malloc(sizeof *ah->av);
                if (!ah->av)
@@ -94,7 +93,7 @@ int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
                        ReleaseMutex( pd->ah_mutex );
                        return -ENOMEM;
                }
-               added_page = TRUE;
+               ah->in_kernel = TRUE;
 
        found:
                ++page->use_cnt;
@@ -135,11 +134,7 @@ int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
                /* Arbel workaround -- low byte of GID must be 2 */
                ah->av->dgid[3] = cl_hton32(2);
        }
-
-       if (added_page)
-               return -EAGAIN;
-       else
-               return 0;
+       return 0;
 }
 
 void mthca_free_av(struct mthca_ah *ah)
@@ -156,30 +151,13 @@ void mthca_free_av(struct mthca_ah *ah)
                page = ah->page;
                i = ((uint8_t *)ah->av - (uint8_t *)page->buf) / sizeof *ah->av;
                page->free[i / (8 * sizeof (int))] |= 1 << (i % (8 * sizeof (int)));
-
-               if (!--page->use_cnt) {
-                       if (page->prev)
-                               page->prev->next = page->next;
-                       else
-                               pd->ah_list = page->next;
-                       if (page->next)
-                               page->next->prev = page->prev;
-
-#ifdef NOT_USE_VIRTUAL_ALLOC   
-                       cl_free(page->buf);
-#else
-                       VirtualFree( page->buf, 0, MEM_RELEASE);
-#endif
-                       
-                       cl_free(page);
-               }
-
+               --page->use_cnt;
                ReleaseMutex( pd->ah_mutex );
        }
 }
 
 //NB: temporary, for support of modify_qp
-void mthca_set_av_params(      struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr )
+void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr )
 {
        struct mthca_av *av      = ah_p->av;
        mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)ah_p->h_uvp_pd;
index 9ebcbd6..e61e9ed 100644 (file)
@@ -572,7 +572,7 @@ int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
                ((struct mthca_next_seg *) wqe)->flags =
                        ((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
                         cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) |
-                       ((wr->send_opt & IB_SEND_OPT_SIGNALED) ?
+                       ((wr->send_opt & IB_SEND_OPT_SOLICITED) ?
                         cl_hton32(MTHCA_NEXT_SOLICIT) : 0)   |
                        cl_hton32(1);
                if (opcode == MTHCA_OPCODE_SEND_IMM||
index 0321110..557d1be 100644 (file)
@@ -73,8 +73,21 @@ err_malloc:
 int mthca_free_pd(struct ibv_pd *ibv_pd)
 {
        struct mthca_pd *pd = to_mpd(ibv_pd);
-       if (!mthca_is_memfree(ibv_pd->context)) 
+       if (!mthca_is_memfree(ibv_pd->context)) {
+               struct mthca_ah_page *page, *next_page;
+               WaitForSingleObject( pd->ah_mutex, INFINITE );
+               for (page = pd->ah_list; page; page = next_page) {
+                       next_page = page->next;
+                       #ifdef NOT_USE_VIRTUAL_ALLOC    
+                               cl_free(page->buf);
+                       #else
+                               VirtualFree( page->buf, 0, MEM_RELEASE);
+                       #endif
+                       cl_free(page);
+               }
+               ReleaseMutex( pd->ah_mutex );
                CloseHandle(pd->ah_mutex);
+       }
        cl_free(pd);
        return 0;
 }
@@ -248,7 +261,7 @@ struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
 {
        struct mthca_qp       *qp;
        struct ibv_context *context = pd->context;
-       int                    ret;
+       int                    ret = -ENOMEM;
 
        UVP_ENTER(UVP_DBG_QP);
        /* Sanity check QP size before proceeding */
@@ -264,7 +277,6 @@ struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
 
        qp = cl_malloc(sizeof *qp);
        if (!qp) {
-               ret = -ENOMEM;
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_malloc  failed (%d)\n",ret));
                goto err_nomem;
        }       
@@ -273,7 +285,6 @@ struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
        qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);
 
        if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
-               ret = -ENOMEM;
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf  failed (%d)\n",ret));
                goto err_nomem;
        }