[MTHCA, IBAL et al] added SRQ support
authorleonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Mon, 27 Nov 2006 20:03:51 +0000 (20:03 +0000)
committerleonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Mon, 27 Nov 2006 20:03:51 +0000 (20:03 +0000)
git-svn-id: svn://openib.tc.cornell.edu/gen1/trunk@548 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

72 files changed:
core/al/al_ci_ca_shared.c
core/al/al_common.h
core/al/al_debug.h
core/al/al_dev.h
core/al/al_pd.c
core/al/al_proxy_ioctl.h
core/al/al_qp.c
core/al/al_qp.h
core/al/al_srq.c [new file with mode: 0644]
core/al/al_srq.h [new file with mode: 0644]
core/al/al_verbs.h
core/al/ib_statustext.c
core/al/kernel/SOURCES
core/al/kernel/al_mgr.c
core/al/kernel/al_proxy_verbs.c
core/al/user/SOURCES
core/al/user/ual_ci_ca.h
core/al/user/ual_mgr.c
core/al/user/ual_qp.c
core/al/user/ual_srq.c [new file with mode: 0644]
core/bus/kernel/bus_pnp.c
hw/mthca/kernel/hca_data.c
hw/mthca/kernel/hca_data.h
hw/mthca/kernel/hca_debug.h
hw/mthca/kernel/hca_direct.c
hw/mthca/kernel/hca_driver.h
hw/mthca/kernel/hca_mcast.c
hw/mthca/kernel/hca_verbs.c
hw/mthca/kernel/ib_verbs.h
hw/mthca/kernel/mt_l2w.c
hw/mthca/kernel/mt_l2w.h
hw/mthca/kernel/mt_verbs.c
hw/mthca/kernel/mthca_cmd.c
hw/mthca/kernel/mthca_cmd.h
hw/mthca/kernel/mthca_cq.c
hw/mthca/kernel/mthca_dev.h
hw/mthca/kernel/mthca_eq.c
hw/mthca/kernel/mthca_main.c
hw/mthca/kernel/mthca_provider.c
hw/mthca/kernel/mthca_provider.h
hw/mthca/kernel/mthca_qp.c
hw/mthca/kernel/mthca_srq.c
hw/mthca/kernel/mthca_user.h [deleted file]
hw/mthca/mx_abi.h
hw/mthca/user/SOURCES
hw/mthca/user/mlnx_ual_av.c
hw/mthca/user/mlnx_ual_main.c
hw/mthca/user/mlnx_ual_main.h
hw/mthca/user/mlnx_ual_osbypass.c
hw/mthca/user/mlnx_ual_qp.c
hw/mthca/user/mlnx_ual_srq.c [new file with mode: 0644]
hw/mthca/user/mlnx_uvp.h
hw/mthca/user/mlnx_uvp_abi.h [deleted file]
hw/mthca/user/mlnx_uvp_debug.h
hw/mthca/user/mlnx_uvp_srq.c
hw/mthca/user/mlnx_uvp_verbs.c
hw/mthca/user/mlnx_uvp_verbs.h
inc/iba/ib_al.h
inc/iba/ib_al_ioctl.h
inc/iba/ib_at_ioctl.h
inc/iba/ib_ci.h
inc/iba/ib_types.h
inc/kernel/iba/ib_al_ifc.h
inc/user/iba/ib_uvp.h
tests/alts/createanddestroyqp.c
tests/wsd/user/test2/ibwrap.c
tests/wsd/user/test3/ibwrap.c
tools/vstat/user/vstat_main.c
ulp/opensm/user/include/iba/ib_types.h
ulp/opensm/user/include/iba/ib_types_extended.h
ulp/srp/kernel/srp_connection.c
ulp/wsd/user/ibsp_iblow.c

index 354761e..41d7c0d 100644 (file)
@@ -45,6 +45,7 @@
 #include "al_mgr.h"\r
 #include "al_pnp.h"\r
 #include "al_qp.h"\r
+#include "al_srq.h"\r
 #include "ib_common.h"\r
 \r
 \r
@@ -284,9 +285,15 @@ ci_ca_process_event_cb(
        case IB_AE_SQ_DRAINED:\r
        case IB_AE_WQ_REQ_ERROR:\r
        case IB_AE_WQ_ACCESS_ERROR:\r
+       case IB_AE_SRQ_QP_LAST_WQE_REACHED:\r
                qp_async_event_cb( &p_event_item->event_rec );\r
                break;\r
 \r
+       case IB_AE_SRQ_LIMIT_REACHED:\r
+       case IB_AE_SRQ_CATAS_ERROR:\r
+               srq_async_event_cb( &p_event_item->event_rec );\r
+               break;\r
+\r
        case IB_AE_CQ_ERROR:\r
                cq_async_event_cb( &p_event_item->event_rec );\r
                break;\r
index 02f0442..d8cdcec 100644 (file)
@@ -121,7 +121,7 @@ typedef void
  * Different types of AL object's.  Note that the upper byte signifies\r
  * a subtype.\r
  */\r
-#define AL_OBJ_TYPE_UNKNOWN                    0\r
+#define AL_OBJ_TYPE_UNKNOWN            0\r
 #define AL_OBJ_TYPE_H_AL                       1\r
 #define AL_OBJ_TYPE_H_QP                       2\r
 #define AL_OBJ_TYPE_H_AV                       3\r
@@ -133,14 +133,14 @@ typedef void
 #define AL_OBJ_TYPE_H_CONN                     9\r
 #define AL_OBJ_TYPE_H_LISTEN           10\r
 #define AL_OBJ_TYPE_H_IOC                      11\r
-#define AL_OBJ_TYPE_H_SVC_ENTRY                12\r
+#define AL_OBJ_TYPE_H_SVC_ENTRY        12\r
 #define AL_OBJ_TYPE_H_PNP                      13\r
 #define AL_OBJ_TYPE_H_SA_REQ           14\r
-#define AL_OBJ_TYPE_H_MCAST                    15\r
+#define AL_OBJ_TYPE_H_MCAST            15\r
 #define AL_OBJ_TYPE_H_ATTACH           16\r
 #define AL_OBJ_TYPE_H_MAD                      17\r
-#define AL_OBJ_TYPE_H_MAD_POOL         18\r
-#define AL_OBJ_TYPE_H_POOL_KEY         19\r
+#define AL_OBJ_TYPE_H_MAD_POOL 18\r
+#define AL_OBJ_TYPE_H_POOL_KEY 19\r
 #define AL_OBJ_TYPE_H_MAD_SVC          20\r
 #define AL_OBJ_TYPE_CI_CA                      21\r
 #define AL_OBJ_TYPE_CM                         22\r
@@ -151,18 +151,19 @@ typedef void
 #define AL_OBJ_TYPE_MAD_POOL           27\r
 #define AL_OBJ_TYPE_MAD_DISP           28\r
 #define AL_OBJ_TYPE_AL_MGR                     29\r
-#define AL_OBJ_TYPE_PNP_MGR                    30\r
-#define AL_OBJ_TYPE_IOC_PNP_MGR                31\r
-#define AL_OBJ_TYPE_IOC_PNP_SVC                32\r
+#define AL_OBJ_TYPE_PNP_MGR            30\r
+#define AL_OBJ_TYPE_IOC_PNP_MGR        31\r
+#define AL_OBJ_TYPE_IOC_PNP_SVC        32\r
 #define AL_OBJ_TYPE_QUERY_SVC          33\r
 #define AL_OBJ_TYPE_MCAST_SVC          34\r
-#define AL_OBJ_TYPE_SA_REQ_SVC         35\r
-#define AL_OBJ_TYPE_RES_MGR                    36\r
+#define AL_OBJ_TYPE_SA_REQ_SVC 35\r
+#define AL_OBJ_TYPE_RES_MGR            36\r
 #define AL_OBJ_TYPE_H_CA_ATTR          37\r
-#define AL_OBJ_TYPE_H_PNP_EVENT                38\r
+#define AL_OBJ_TYPE_H_PNP_EVENT        38\r
 #define AL_OBJ_TYPE_H_SA_REG           39\r
 #define AL_OBJ_TYPE_H_FMR                      40\r
-#define AL_OBJ_TYPE_INVALID            41      /* Must be last type. */\r
+#define AL_OBJ_TYPE_H_SRQ                      41\r
+#define AL_OBJ_TYPE_INVALID            42      /* Must be last type. */\r
 \r
 /* Kernel object for a user-mode app. */\r
 #define AL_OBJ_SUBTYPE_UM_EXPORT       0x80000000\r
@@ -233,7 +234,6 @@ typedef struct _al_obj
         */\r
        boolean_t                                       hdl_valid;\r
 #endif\r
-\r
 }      al_obj_t;\r
 \r
 \r
index 96d002f..6d1ed6d 100644 (file)
@@ -74,7 +74,7 @@ extern uint32_t                       g_al_dbg_flags;
        WPP_DEFINE_BIT( AL_DBG_AV)\\r
        WPP_DEFINE_BIT( AL_DBG_CQ)\\r
        WPP_DEFINE_BIT( AL_DBG_QP)\\r
-       WPP_DEFINE_BIT( AL_DBG_RES3) \\r
+       WPP_DEFINE_BIT( AL_DBG_SRQ)\\r
        WPP_DEFINE_BIT( AL_DBG_MW)\\r
        WPP_DEFINE_BIT( AL_DBG_RES4) \\r
        WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\\r
@@ -109,7 +109,7 @@ extern uint32_t                     g_al_dbg_flags;
        WPP_DEFINE_BIT( AL_DBG_AV)\\r
        WPP_DEFINE_BIT( AL_DBG_CQ)\\r
        WPP_DEFINE_BIT( AL_DBG_QP)\\r
-       WPP_DEFINE_BIT( AL_DBG_RES3) \\r
+       WPP_DEFINE_BIT( AL_DBG_SRQ)\\r
        WPP_DEFINE_BIT( AL_DBG_MW)\\r
        WPP_DEFINE_BIT( AL_DBG_RES4) \\r
        WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\\r
@@ -168,6 +168,7 @@ extern uint32_t                     g_al_dbg_flags;
 #define AL_DBG_AV              (1 << 17)\r
 #define AL_DBG_CQ              (1 << 18)\r
 #define AL_DBG_QP              (1 << 19)\r
+#define AL_DBG_SRQ             (1 << 20)\r
 #define AL_DBG_MW              (1 << 21)\r
 #define AL_DBG_PROXY_CB        (1 << 23)\r
 #define AL_DBG_UAL             (1 << 24)\r
index 4c69fcc..c201f32 100644 (file)
@@ -55,7 +55,7 @@
 #define AL_DEVICE_NAME L"\\Device\\ibal"\r
 #define        ALDEV_KEY               (0x3B)  /* Matches FILE_DEVICE_INFINIBAND from wdm.h */\r
 \r
-#define AL_IOCTL_VERSION                       (2)\r
+#define AL_IOCTL_VERSION                       (3)\r
 \r
 #ifdef CL_KERNEL\r
 \r
@@ -192,6 +192,10 @@ typedef enum _al_verbs_ops
        ual_query_av_ioctl_cmd,\r
        ual_modify_av_ioctl_cmd,\r
        ual_destroy_av_ioctl_cmd,\r
+       ual_create_srq_ioctl_cmd,\r
+       ual_query_srq_ioctl_cmd,\r
+       ual_modify_srq_ioctl_cmd,\r
+       ual_destroy_srq_ioctl_cmd,\r
        ual_create_qp_ioctl_cmd,\r
        ual_query_qp_ioctl_cmd,\r
        ual_modify_qp_ioctl_cmd,\r
@@ -211,6 +215,7 @@ typedef enum _al_verbs_ops
        ual_destroy_mw_ioctl_cmd,\r
        ual_post_send_ioctl_cmd,\r
        ual_post_recv_ioctl_cmd,\r
+       ual_post_srq_recv_ioctl_cmd,\r
        ual_peek_cq_ioctl_cmd,\r
        ual_poll_cq_ioctl_cmd,\r
        ual_rearm_cq_ioctl_cmd,\r
@@ -386,6 +391,10 @@ typedef enum _al_dev_ops
 #define UAL_QUERY_AV           IOCTL_CODE(ALDEV_KEY, ual_query_av_ioctl_cmd)\r
 #define UAL_MODIFY_AV          IOCTL_CODE(ALDEV_KEY, ual_modify_av_ioctl_cmd)\r
 #define UAL_DESTROY_AV         IOCTL_CODE(ALDEV_KEY, ual_destroy_av_ioctl_cmd)\r
+#define UAL_CREATE_SRQ         IOCTL_CODE(ALDEV_KEY, ual_create_srq_ioctl_cmd)\r
+#define UAL_QUERY_SRQ          IOCTL_CODE(ALDEV_KEY, ual_query_srq_ioctl_cmd)\r
+#define UAL_MODIFY_SRQ         IOCTL_CODE(ALDEV_KEY, ual_modify_srq_ioctl_cmd)\r
+#define UAL_DESTROY_SRQ        IOCTL_CODE(ALDEV_KEY, ual_destroy_srq_ioctl_cmd)\r
 #define UAL_CREATE_QP          IOCTL_CODE(ALDEV_KEY, ual_create_qp_ioctl_cmd)\r
 #define UAL_QUERY_QP           IOCTL_CODE(ALDEV_KEY, ual_query_qp_ioctl_cmd)\r
 #define UAL_MODIFY_QP          IOCTL_CODE(ALDEV_KEY, ual_modify_qp_ioctl_cmd)\r
@@ -405,6 +414,7 @@ typedef enum _al_dev_ops
 #define UAL_DESTROY_MW         IOCTL_CODE(ALDEV_KEY, ual_destroy_mw_ioctl_cmd)\r
 #define UAL_POST_SEND          IOCTL_CODE(ALDEV_KEY, ual_post_send_ioctl_cmd)\r
 #define UAL_POST_RECV          IOCTL_CODE(ALDEV_KEY, ual_post_recv_ioctl_cmd)\r
+#define UAL_POST_SRQ_RECV      IOCTL_CODE(ALDEV_KEY, ual_post_srq_recv_ioctl_cmd)\r
 #define UAL_PEEK_CQ                    IOCTL_CODE(ALDEV_KEY, ual_peek_cq_ioctl_cmd)\r
 #define UAL_POLL_CQ                    IOCTL_CODE(ALDEV_KEY, ual_poll_cq_ioctl_cmd)\r
 #define UAL_REARM_CQ           IOCTL_CODE(ALDEV_KEY, ual_rearm_cq_ioctl_cmd)\r
index 16c426c..390f648 100644 (file)
@@ -48,6 +48,7 @@
 #include "al_mw.h"\r
 #include "al_pd.h"\r
 #include "al_qp.h"\r
+#include "al_srq.h"\r
 #include "al_verbs.h"\r
 \r
 #include "ib_common.h"\r
@@ -259,6 +260,60 @@ free_pd(
        cl_free( h_pd );\r
 }\r
 \r
+ib_api_status_t\r
+ib_create_srq(\r
+       IN              const   ib_pd_handle_t                  h_pd,\r
+       IN              const   ib_srq_attr_t* const            p_srq_attr,\r
+       IN              const   void* const                                     srq_context,\r
+       IN              const   ib_pfn_event_cb_t                       pfn_srq_event_cb OPTIONAL,\r
+               OUT                     ib_srq_handle_t* const          ph_srq )\r
+{\r
+       ib_api_status_t                 status;\r
+\r
+       AL_ENTER( AL_DBG_SRQ );\r
+\r
+       if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )\r
+       {\r
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );\r
+               return IB_INVALID_PD_HANDLE;\r
+       }\r
+\r
+       if( !p_srq_attr || !ph_srq)\r
+       {\r
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+               return IB_INVALID_PARAMETER;\r
+       }\r
+\r
+       if( !p_srq_attr->max_wr)\r
+       {\r
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );\r
+               return IB_INVALID_MAX_WRS;\r
+       }\r
+\r
+       if (h_pd->obj.p_ci_ca && h_pd->obj.p_ci_ca->p_pnp_attr)\r
+       {\r
+               if (p_srq_attr->max_wr > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_wrs)\r
+               {\r
+                       AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );\r
+                       return IB_INVALID_MAX_WRS;\r
+               }\r
+               if (p_srq_attr->max_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_sges)\r
+               {\r
+                       AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") );\r
+                       return IB_INVALID_MAX_SGE;\r
+               }\r
+       }\r
+       \r
+       status = create_srq(\r
+               h_pd, p_srq_attr, srq_context, pfn_srq_event_cb, ph_srq, NULL );\r
+\r
+       /* Release the reference taken in init_al_obj (init_base_srq). */\r
+       if( status == IB_SUCCESS )\r
+               deref_al_obj( &(*ph_srq)->obj );\r
+\r
+       AL_EXIT( AL_DBG_SRQ );\r
+       return status;\r
+}\r
 \r
 \r
 ib_api_status_t\r
index 4c9af8c..3453aae 100644 (file)
@@ -59,6 +59,7 @@ typedef enum _misc_cb_rec_type
 {\r
        CA_ERROR_REC,\r
        QP_ERROR_REC,\r
+       SRQ_ERROR_REC,\r
        CQ_ERROR_REC,\r
        MCAST_REC,\r
        MAD_SEND_REC,\r
index 9ff407c..69aa89b 100644 (file)
@@ -294,6 +294,13 @@ create_qp(
                return IB_INVALID_PARAMETER;\r
        }\r
 \r
+       if (p_qp_create->h_srq && \r
+               AL_OBJ_INVALID_HANDLE( p_qp_create->h_srq, AL_OBJ_TYPE_H_SRQ ) )\r
+       {\r
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );\r
+               return IB_INVALID_SRQ_HANDLE;\r
+       }\r
+       \r
        /* Allocate a QP. */\r
        status = alloc_qp( p_qp_create->qp_type, &h_qp );\r
        if( status != IB_SUCCESS )\r
@@ -721,6 +728,11 @@ init_raw_qp(
        cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );\r
        cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );\r
 \r
+       h_qp->h_srq = p_qp_create->h_srq;\r
+       h_qp->srq_rel.p_child_obj = (cl_obj_t*)h_qp;\r
+       if (h_qp->h_srq)\r
+               srq_attach_qp( h_qp->h_srq, &h_qp->srq_rel );\r
+\r
        h_qp->num = qp_attr.num;\r
 \r
        return IB_SUCCESS;\r
@@ -1118,6 +1130,8 @@ destroying_qp(
                /* Multicast membership gets cleaned up by object hierarchy. */\r
                cq_detach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );\r
                cq_detach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );\r
+               if (h_qp->h_srq)\r
+                       srq_detach_qp( h_qp->h_srq, &h_qp->srq_rel );\r
        }\r
 }\r
 \r
@@ -1210,6 +1224,8 @@ cleanup_qp(
                        deref_al_obj( &h_qp->h_recv_cq->obj );\r
                if( h_qp->h_send_cq )\r
                        deref_al_obj( &h_qp->h_send_cq->obj );\r
+               if( h_qp->h_srq )\r
+                       deref_al_obj( &h_qp->h_srq->obj );\r
        }\r
 }\r
 \r
@@ -1272,6 +1288,7 @@ query_qp(
        p_qp_attr->h_rq_cq = h_qp->h_recv_cq;\r
        p_qp_attr->h_sq_cq = h_qp->h_send_cq;\r
        p_qp_attr->qp_type = h_qp->type;\r
+       p_qp_attr->h_srq = h_qp->h_srq;\r
 \r
        AL_EXIT( AL_DBG_QP );\r
        return IB_SUCCESS;\r
index a8c3c58..8b18d58 100644 (file)
@@ -137,6 +137,9 @@ typedef struct _ib_qp
        cl_obj_rel_t                            recv_cq_rel;\r
        cl_obj_rel_t                            send_cq_rel;\r
 \r
+       ib_srq_handle_t                 h_srq;\r
+       cl_obj_rel_t                            srq_rel;\r
+\r
        ib_pfn_event_cb_t                       pfn_event_cb;\r
 \r
        ib_pfn_modify_qp_t                      pfn_modify_qp;\r
diff --git a/core/al/al_srq.c b/core/al/al_srq.c
new file mode 100644 (file)
index 0000000..94cbcfe
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: al_qp.c 1611 2006-08-20 14:48:55Z leonid $
+ */
+
+#include <complib/cl_async_proc.h>
+#include <complib/cl_memory.h>
+#include <complib/cl_timer.h>
+
+#include "al.h"
+#include "al_ca.h"
+#include "al_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "al_srq.tmh"
+#endif
+#include "al_mgr.h"
+#include "al_mr.h"
+#include "al_pd.h"
+#include "al_srq.h"
+#include "al_verbs.h"
+
+#include "ib_common.h"
+
+/*
+ * Function prototypes.
+ */
+void
+destroying_srq(
+       IN                              struct _al_obj                          *p_obj );
+
+void
+cleanup_srq(
+       IN                              al_obj_t                                        *p_obj );
+
+void
+free_srq(
+       IN                              al_obj_t                                        *p_obj );
+
+ib_api_status_t
+ib_destroy_srq(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL )
+{
+       AL_ENTER( AL_DBG_SRQ );
+
+       if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+               return IB_INVALID_SRQ_HANDLE;
+       }
+
+       /* Don't destroy while there are bound QPs. */
+       cl_spinlock_acquire( &h_srq->obj.lock );
+       if (!cl_is_qlist_empty( &h_srq->qp_list ))
+       {
+               cl_spinlock_release( &h_srq->obj.lock );
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_RESOURCE_BUSY\n") );
+               return IB_RESOURCE_BUSY;
+       }
+       cl_spinlock_release( &h_srq->obj.lock );
+
+       ref_al_obj( &h_srq->obj );
+       h_srq->obj.pfn_destroy( &h_srq->obj, pfn_destroy_cb );
+
+       AL_EXIT( AL_DBG_SRQ );
+       return IB_SUCCESS;
+}
+
+
+void
+destroying_srq(
+       IN                              struct _al_obj                          *p_obj )
+{
+       ib_srq_handle_t         h_srq;
+       cl_list_item_t          *p_item;
+       cl_obj_rel_t            *p_rel;
+       ib_qp_handle_t          h_qp;
+
+       CL_ASSERT( p_obj );
+       h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj );
+
+       /* Initiate destruction of all bound QPs. */
+       cl_spinlock_acquire( &h_srq->obj.lock );
+       for( p_item = cl_qlist_remove_tail( &h_srq->qp_list );
+               p_item != cl_qlist_end( &h_srq->qp_list );
+               p_item = cl_qlist_remove_tail( &h_srq->qp_list ) )
+       {
+               p_rel = PARENT_STRUCT( p_item, cl_obj_rel_t, pool_item.list_item );
+               p_rel->p_parent_obj = NULL;
+               h_qp = (ib_qp_handle_t)p_rel->p_child_obj;
+               if( h_qp )
+               {
+                       /* Take a reference to prevent the QP from being destroyed. */
+                       ref_al_obj( &h_qp->obj );
+                       cl_spinlock_release( &h_srq->obj.lock );
+                       h_qp->obj.pfn_destroy( &h_qp->obj, NULL );
+                       cl_spinlock_acquire( &h_srq->obj.lock );
+               }
+       }
+       cl_spinlock_release( &h_srq->obj.lock );
+}
+
+void
+cleanup_srq(
+       IN                              struct _al_obj                          *p_obj )
+{
+       ib_srq_handle_t                 h_srq;
+       ib_api_status_t                 status;
+
+       CL_ASSERT( p_obj );
+       h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj );
+
+       /* Deallocate the CI srq. */
+       if( verbs_check_srq( h_srq ) )
+       {
+               status = verbs_destroy_srq( h_srq );
+               CL_ASSERT( status == IB_SUCCESS );
+       }
+}
+
+
+/*
+ * Release all resources associated with the shared receive queue.
+ */
+void
+free_srq(
+       IN                              al_obj_t                                        *p_obj )
+{
+       ib_srq_handle_t                 h_srq;
+
+       CL_ASSERT( p_obj );
+       h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj );
+
+       destroy_al_obj( &h_srq->obj );
+       cl_free( h_srq );
+}
+
+
+void
+srq_attach_qp(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN                              cl_obj_rel_t* const                     p_qp_rel )
+{
+       p_qp_rel->p_parent_obj = (cl_obj_t*)h_srq;
+       ref_al_obj( &h_srq->obj );
+       cl_spinlock_acquire( &h_srq->obj.lock );
+       cl_qlist_insert_tail( &h_srq->qp_list, &p_qp_rel->pool_item.list_item );
+       cl_spinlock_release( &h_srq->obj.lock );
+}
+
+
+void
+srq_detach_qp(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN                              cl_obj_rel_t* const                     p_qp_rel )
+{
+       if( p_qp_rel->p_parent_obj )
+       {
+               CL_ASSERT( p_qp_rel->p_parent_obj == (cl_obj_t*)h_srq );
+               p_qp_rel->p_parent_obj = NULL;
+               cl_spinlock_acquire( &h_srq->obj.lock );
+               cl_qlist_remove_item( &h_srq->qp_list, &p_qp_rel->pool_item.list_item );
+               cl_spinlock_release( &h_srq->obj.lock );
+       }
+}
+
+
+ib_api_status_t
+ib_modify_srq(
+       IN              const   ib_srq_handle_t                 h_srq,
+       IN              const   ib_srq_attr_t* const            p_srq_attr,
+       IN              const   ib_srq_attr_mask_t                      srq_attr_mask )
+{
+       return modify_srq( h_srq, p_srq_attr, srq_attr_mask, NULL );
+}
+
+
+ib_api_status_t
+modify_srq(
+       IN              const   ib_srq_handle_t                 h_srq,
+       IN              const   ib_srq_attr_t* const            p_srq_attr,
+       IN              const   ib_srq_attr_mask_t                      srq_attr_mask,
+       IN      OUT                     ci_umv_buf_t* const                     p_umv_buf )
+{
+       ib_api_status_t                 status;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+               return IB_INVALID_SRQ_HANDLE;
+       }
+
+       if( !p_srq_attr )
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+               return IB_INVALID_PARAMETER;
+       }
+
+       if( !( srq_attr_mask & (IB_SRQ_MAX_WR |IB_SRQ_LIMIT)) ||
+               ( srq_attr_mask & ~(IB_SRQ_MAX_WR |IB_SRQ_LIMIT)))
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") );
+               return IB_INVALID_SETTING;
+       }
+
+       if((srq_attr_mask & IB_SRQ_LIMIT)  && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr )
+       {
+               if (p_srq_attr->srq_limit > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs)
+               {
+                       AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") );
+                       return IB_INVALID_SETTING;
+               }
+       }
+
+       if((srq_attr_mask & IB_SRQ_MAX_WR) &&  !p_srq_attr->max_wr)
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") );
+               return IB_INVALID_SETTING;
+       }
+
+       if ((srq_attr_mask & IB_SRQ_MAX_WR) && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr)
+       {
+               if (p_srq_attr->max_wr > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs)
+               {
+                       AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") );
+                       return IB_INVALID_MAX_WRS;
+               }
+       }
+
+       status = verbs_modify_srq( h_srq, p_srq_attr, srq_attr_mask );
+
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
+
+
+ib_api_status_t
+ib_query_srq(
+       IN              const   ib_srq_handle_t                         h_srq,
+               OUT             ib_srq_attr_t* const                    p_srq_attr )
+{
+       return query_srq( h_srq, p_srq_attr, NULL );
+}
+
+
+
+ib_api_status_t
+query_srq(
+       IN              const   ib_srq_handle_t                         h_srq,
+               OUT             ib_srq_attr_t* const                    p_srq_attr,
+       IN      OUT             ci_umv_buf_t* const             p_umv_buf )
+{
+       ib_api_status_t                 status;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+               return IB_INVALID_SRQ_HANDLE;
+       }
+       if( !p_srq_attr )
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+               return IB_INVALID_PARAMETER;
+       }
+
+       status = verbs_query_srq( h_srq, p_srq_attr );
+
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
+
+/*
+ * Initializes the SRQ information structure.
+ */
+ib_api_status_t
+create_srq(
+       IN              const   ib_pd_handle_t                          h_pd,
+       IN              const   ib_srq_attr_t* const                    p_srq_attr,
+       IN              const   void* const                                     srq_context,
+       IN              const   ib_pfn_event_cb_t                               pfn_srq_event_cb,
+               OUT                     ib_srq_handle_t* const                  ph_srq,
+       IN      OUT                     ci_umv_buf_t* const                     p_umv_buf )
+{
+       ib_srq_handle_t                 h_srq;
+       ib_api_status_t                 status;
+       al_obj_type_t                   obj_type = AL_OBJ_TYPE_H_SRQ;
+
+       h_srq = cl_zalloc( sizeof( ib_srq_t ) );
+       if( !h_srq )
+       {
+               return IB_INSUFFICIENT_MEMORY;
+       }
+       
+       if( p_umv_buf )
+               obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT;
+
+       /* Construct the SRQ. */
+       construct_al_obj( &h_srq->obj, obj_type );
+
+       cl_qlist_init( &h_srq->qp_list );
+       h_srq->pfn_event_cb = pfn_srq_event_cb;
+
+       /* Initialize the SRQ. */
+       status = init_al_obj( &h_srq->obj, srq_context, TRUE,
+               destroying_srq, cleanup_srq, free_srq );
+       if( status != IB_SUCCESS )
+       {
+               free_srq( &h_srq->obj );
+               return status;
+       }
+       status = attach_al_obj( &h_pd->obj, &h_srq->obj );
+       if( status != IB_SUCCESS )
+       {
+               h_srq->obj.pfn_destroy( &h_srq->obj, NULL );
+               return status;
+       }
+
+       status = verbs_create_srq( h_pd, h_srq, p_srq_attr, p_umv_buf );
+       if( status != IB_SUCCESS )
+       {
+               h_srq->obj.pfn_destroy( &h_srq->obj, NULL );
+               return status;
+       }
+
+       *ph_srq = h_srq;
+
+       /*
+        * Note that we don't release the reference taken in init_al_obj here.
+        * For kernel clients, it is released in ib_create_srq.  For user-mode
+        * clients it is released by the proxy after the handle is extracted.
+        */
+       return IB_SUCCESS;
+}
+
+
+/*
+ * Process an asynchronous event on the SRQ.  Notify the user of the event.
+ */
+void
+srq_async_event_cb(
+       IN                              ib_async_event_rec_t* const     p_event_rec )
+{
+       ib_srq_handle_t                 h_srq;
+
+       CL_ASSERT( p_event_rec );
+       h_srq = (ib_srq_handle_t)p_event_rec->context;
+
+#if defined(CL_KERNEL)
+       switch( p_event_rec->code )
+       {
+       case IB_AE_SRQ_LIMIT_REACHED:
+               AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ, 
+                       ("IB_AE_SRQ_LIMIT_REACHED for srq %p \n", h_srq) );
+               //TODO: handle this error.
+               break;
+       case IB_AE_SRQ_CATAS_ERROR:
+               AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ, 
+                       ("IB_AE_SRQ_CATAS_ERROR for srq %p \n", h_srq) );
+               //TODO: handle this error.
+               break;
+       default:
+               break;
+       }
+#endif
+
+       p_event_rec->context = (void*)h_srq->obj.context;
+       p_event_rec->handle.h_srq = h_srq;
+
+       if( h_srq->pfn_event_cb )
+               h_srq->pfn_event_cb( p_event_rec );
+}
+
+ib_api_status_t
+ib_post_srq_recv(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN                              ib_recv_wr_t* const                     p_recv_wr,
+               OUT                     ib_recv_wr_t                            **pp_recv_failure OPTIONAL )
+{
+       ib_api_status_t                 status;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) )
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
+               return IB_INVALID_SRQ_HANDLE;
+       }
+       if( !p_recv_wr || ( p_recv_wr->p_next && !pp_recv_failure ) )
+       {
+               AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+               return IB_INVALID_PARAMETER;
+       }
+
+       status =
+               h_srq->pfn_post_srq_recv( h_srq->h_recv_srq, p_recv_wr, pp_recv_failure );
+
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
+
+
diff --git a/core/al/al_srq.h b/core/al/al_srq.h
new file mode 100644 (file)
index 0000000..28ad818
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: al_srq.h 1611 2006-08-20 14:48:55Z leonid $
+ */
+
+#if !defined(__AL_SRQ_H__)
+#define __AL_SRQ_H__
+
+#include <iba/ib_al.h>
+#include <iba/ib_ci.h>
+#include <complib/cl_qlist.h>
+#include <complib/cl_vector.h>
+
+#include "al_ca.h"
+#include "al_common.h"
+
+
+typedef ib_api_status_t
+(*ib_pfn_post_srq_recv_t)(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN                              ib_recv_wr_t* const                     p_recv_wr,
+               OUT                     ib_recv_wr_t                            **pp_recv_failure OPTIONAL );
+
+
+/*
+ * Shared queue pair information required by the access layer.  This structure
+ * is referenced by a user's SRQ handle.
+ */
+typedef struct _ib_srq
+{
+       al_obj_t                                        obj;                    /* Must be first. */
+
+       ib_srq_handle_t                 h_ci_srq;       /* kernel SRQ handle */
+       ib_pfn_post_srq_recv_t          pfn_post_srq_recv;      /* post_srq_recv call */
+       ib_srq_handle_t                 h_recv_srq;     /* srq handle for the post_srq_recv call */
+       ib_pfn_event_cb_t                       pfn_event_cb;   /* user async event handler */
+       cl_qlist_t                                      qp_list;        /* List of QPs bound to this SRQ. */
+
+}      ib_srq_t;
+
+ib_api_status_t
+create_srq(
+       IN              const   ib_pd_handle_t                          h_pd,
+       IN              const   ib_srq_attr_t* const                    p_srq_attr,
+       IN              const   void* const                                     srq_context,
+       IN              const   ib_pfn_event_cb_t                               pfn_srq_event_cb,
+               OUT                     ib_srq_handle_t* const                  ph_srq,
+       IN      OUT                     ci_umv_buf_t* const                     p_umv_buf );
+
+
+ib_api_status_t
+query_srq(
+       IN              const   ib_srq_handle_t                         h_srq,
+               OUT                     ib_srq_attr_t* const                    p_srq_attr,
+       IN      OUT                     ci_umv_buf_t* const                     p_umv_buf );
+
+
+ib_api_status_t
+modify_srq(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN              const   ib_srq_attr_t* const                    p_srq_attr,
+       IN              const   ib_srq_attr_mask_t                              srq_attr_mask,
+       IN      OUT                     ci_umv_buf_t* const                     p_umv_buf );
+
+
+void
+srq_async_event_cb(
+       IN                              ib_async_event_rec_t* const     p_event_rec );
+
+void
+srq_attach_qp(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN                              cl_obj_rel_t* const                     p_qp_rel );
+
+void
+srq_detach_qp(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN                              cl_obj_rel_t* const                     p_qp_rel );
+
+#endif /* __AL_SRQ_H__ */
+
index f0b67fb..fe49937 100644 (file)
@@ -37,6 +37,7 @@
 #include "al_cq.h"\r
 #include "al_pd.h"\r
 #include "al_qp.h"\r
+#include "al_srq.h"\r
 \r
 #ifndef CL_KERNEL\r
 #include "ual_mad.h"\r
@@ -230,14 +231,50 @@ deallocate_pd_alias(
 #define verbs_deallocate_pd(h_pd) \\r
        h_pd->obj.p_ci_ca->verbs.deallocate_pd( h_pd->h_ci_pd )\r
 \r
+static inline ib_api_status_t\r
+verbs_create_srq(\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN                              ib_srq_handle_t                         h_srq,\r
+       IN              const   ib_srq_attr_t* const                    p_srq_attr,\r
+       IN      OUT                     ci_umv_buf_t* const                     p_umv_buf )\r
+{\r
+       ib_api_status_t         status;\r
+\r
+       status = h_srq->obj.p_ci_ca->verbs.create_srq(\r
+               h_pd->h_ci_pd, h_srq, p_srq_attr,\r
+               &h_srq->h_ci_srq, p_umv_buf );\r
+\r
+       h_srq->h_recv_srq = h_srq->h_ci_srq;\r
+       h_srq->pfn_post_srq_recv = h_srq->obj.p_ci_ca->verbs.post_srq_recv;\r
+       return status;\r
+}\r
+\r
+#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq)\r
+\r
+#define verbs_destroy_srq(h_srq) \\r
+       h_srq->obj.p_ci_ca->verbs.destroy_srq( h_srq->h_ci_srq )\r
+\r
+#define verbs_query_srq(h_srq, p_srq_attr) \\r
+       h_srq->obj.p_ci_ca->verbs.query_srq( h_srq->h_ci_srq,\\r
+               p_srq_attr, p_umv_buf )\r
+\r
+#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \\r
+       h_srq->obj.p_ci_ca->verbs.modify_srq( h_srq->h_ci_srq,\\r
+               p_srq_attr, srq_attr_mask, p_umv_buf )\r
+\r
+#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \\r
+       h_srq->obj.p_ci_ca->verbs.post_srq_recv( h_srq->h_ci_srq,\\r
+               p_recv_wr, pp_recv_failure )\r
+\r
 #define convert_qp_handle( qp_create ) {\\r
        CL_ASSERT( qp_create.h_rq_cq ); \\r
        qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq; \\r
        CL_ASSERT( qp_create.h_sq_cq ); \\r
        qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq; \\r
+       if (qp_create.h_srq) \\r
+               qp_create.h_srq = qp_create.h_srq->h_ci_srq;    \\r
 }\r
 \r
-\r
 static inline ib_api_status_t\r
 verbs_get_spl_qp(\r
        IN                              ib_pd_handle_t                          h_pd,\r
@@ -283,7 +320,6 @@ verbs_create_qp(
        return status;\r
 }\r
 \r
-\r
 #define verbs_check_qp(h_qp)   ((h_qp)->h_ci_qp)\r
 #define verbs_destroy_qp(h_qp) \\r
        h_qp->obj.p_ci_ca->verbs.destroy_qp( h_qp->h_ci_qp, h_qp->timewait )\r
@@ -507,6 +543,27 @@ allocate_pd_alias(
 #define verbs_deallocate_pd(h_pd) \\r
        ual_deallocate_pd(h_pd)\r
 \r
+#define verbs_create_srq(h_pd, h_srq, p_srq_attr, p_umv_buf) \\r
+       ual_create_srq (h_pd, h_srq, p_srq_attr); \\r
+       UNUSED_PARAM( p_umv_buf )\r
+\r
+#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq || (h_srq)->obj.hdl)\r
+\r
+#define verbs_destroy_srq(h_srq) \\r
+       ual_destroy_srq(h_srq)\r
+\r
+#define verbs_query_srq(h_srq, p_srq_attr) \\r
+       ual_query_srq(h_srq, p_srq_attr); \\r
+       UNUSED_PARAM( p_umv_buf );\r
+\r
+#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \\r
+       ual_modify_srq(h_srq, p_srq_attr, srq_attr_mask); \\r
+       UNUSED_PARAM( p_umv_buf );\r
+\r
+#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \\r
+       ual_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure)\r
+\r
+\r
 /* For user-mode, handle conversion is done in ual files */\r
 \r
 #define convert_qp_handle( qp_create )\r
index 6aad3bb..5740dff 100644 (file)
@@ -70,6 +70,7 @@ static const char* const __ib_error_str[] =
        "IB_INVALID_MAX_WRS",\r
        "IB_INVALID_MAX_SGE",\r
        "IB_INVALID_CQ_SIZE",\r
+       "IB_INVALID_SRQ_SIZE",\r
        "IB_INVALID_SERVICE_TYPE",\r
        "IB_INVALID_GID",\r
        "IB_INVALID_LID",\r
@@ -78,6 +79,7 @@ static const char* const __ib_error_str[] =
        "IB_INVALID_AV_HANDLE",\r
        "IB_INVALID_CQ_HANDLE",\r
        "IB_INVALID_QP_HANDLE",\r
+       "IB_INVALID_SRQ_HANDLE",\r
        "IB_INVALID_PD_HANDLE",\r
        "IB_INVALID_MR_HANDLE",\r
        "IB_INVALID_FMR_HANDLE",\r
@@ -87,7 +89,7 @@ static const char* const __ib_error_str[] =
        "IB_INVALID_AL_HANDLE",\r
        "IB_INVALID_HANDLE",\r
        "IB_ERROR",\r
-       "IB_REMOTE_ERROR",                                              /* Infiniband Access Layer */\r
+       "IB_REMOTE_ERROR",\r
        "IB_VERBS_PROCESSING_DONE",\r
        "IB_INVALID_WR_TYPE",\r
        "IB_QP_IN_TIMEWAIT",\r
@@ -228,3 +230,31 @@ ib_get_wr_type_str(
        return( __ib_wr_type_str[wr_type] );\r
 }\r
 \r
+static const char* const __ib_qp_type_str[] =\r
+{\r
+       "IB_QPT_RELIABLE_CONN",\r
+       "IB_QPT_UNRELIABLE_CONN",\r
+       "IB_QPT_UNKNOWN",\r
+       "IB_QPT_UNRELIABLE_DGRM",\r
+       "IB_QPT_QP0",\r
+       "IB_QPT_QP1",\r
+       "IB_QPT_RAW_IPV6",\r
+       "IB_QPT_RAW_ETHER",\r
+       "IB_QPT_MAD",\r
+       "IB_QPT_QP0_ALIAS",\r
+       "IB_QPT_QP1_ALIAS",\r
+       "IB_QPT_UNKNOWN"\r
+
+};\r
+\r
+\r
+const char* \r
+ib_get_qp_type_str(\r
+       IN                              uint8_t                                         qp_type )\r
+{\r
+       if( qp_type > IB_QPT_UNKNOWN )\r
+               qp_type = IB_QPT_UNKNOWN;\r
+       return( __ib_qp_type_str[qp_type] );\r
+}\r
+\r
+\r
index 1ca593f..3ff56ac 100644 (file)
@@ -49,6 +49,7 @@ SOURCES= ibal.rc                      \
        ..\al_query.c                   \\r
        ..\al_reg_svc.c                 \\r
        ..\al_res_mgr.c                 \\r
+       ..\al_srq.c                             \\r
        ..\al_sub.c                             \\r
        ..\ib_common.c                  \\r
        ..\ib_statustext.c\r
index 5203c9a..6babc91 100644 (file)
@@ -197,6 +197,10 @@ create_al_mgr()
        }\r
 \r
        /* Initialize the AL device management agent. */\r
+\r
+/*\r
+       Disable support of DM agent.\r
+\r
        status = create_dm_agent( &gp_al_mgr->obj );\r
        if( status != IB_SUCCESS )\r
        {\r
@@ -205,7 +209,7 @@ create_al_mgr()
                        ("create_dm_agent failed, status = 0x%x.\n", status) );\r
                return status;\r
        }\r
-\r
+*/\r
        status = create_ioc_pnp( &gp_al_mgr->obj );\r
        if( status != IB_SUCCESS )\r
        {\r
@@ -530,7 +534,8 @@ al_hdl_ref(
        if( type == AL_OBJ_TYPE_UNKNOWN &&\r
                p_h->type != AL_OBJ_TYPE_H_PD && p_h->type != AL_OBJ_TYPE_H_CQ &&\r
                p_h->type != AL_OBJ_TYPE_H_AV && p_h->type != AL_OBJ_TYPE_H_QP &&\r
-               p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW )\r
+               p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW &&\r
+               p_h->type != AL_OBJ_TYPE_H_SRQ )\r
        {\r
                cl_spinlock_release( &h_al->obj.lock );\r
                return NULL;\r
index 2a5e0cd..5bf7db8 100644 (file)
@@ -49,6 +49,7 @@
 #include "al_ca.h"\r
 #include "al_pd.h"\r
 #include "al_qp.h"\r
+#include "al_srq.h"\r
 #include "al_cq.h"\r
 #include "al_mr.h"\r
 #include "al_mw.h"\r
@@ -961,6 +962,309 @@ proxy_dealloc_pd(
 }\r
 \r
 \r
+/*\r
+ * Proxy's SRQ error handler\r
+ */\r
+static void\r
+proxy_srq_err_cb(\r
+       IN ib_async_event_rec_t *p_err_rec )\r
+{\r
+       ib_srq_handle_t h_srq = p_err_rec->handle.h_srq;\r
+       al_dev_open_context_t   *p_context = h_srq->obj.h_al->p_context;\r
+       misc_cb_ioctl_info_t    cb_info;\r
+\r
+       AL_ENTER( AL_DBG_QP );\r
+\r
+       /*\r
+        * If we're already closing the device - do not queue a callback, since\r
+        * we're cleaning up the callback lists.\r
+        */\r
+       if( !proxy_context_ref( p_context ) )\r
+       {\r
+               proxy_context_deref( p_context );\r
+               return;\r
+       }\r
+\r
+       /* Set up context and callback record type appropriate for UAL */\r
+       cb_info.rec_type = SRQ_ERROR_REC;\r
+       /* Return the Proxy's SRQ handle and the user's context */\r
+       cb_info.ioctl_rec.event_rec = *p_err_rec;\r
+       cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t)h_srq->obj.hdl;\r
+\r
+       /* The proxy handle must be valid now. */\r
+       if( !h_srq->obj.hdl_valid )\r
+               h_srq->obj.hdl_valid = TRUE;\r
+\r
+       proxy_queue_cb_buf(\r
+               UAL_GET_MISC_CB_INFO, p_context, &cb_info, &h_srq->obj );\r
+\r
+       proxy_context_deref( p_context );\r
+\r
+       AL_EXIT( AL_DBG_QP );\r
+}\r
+\r
+/*\r
+ * Process the ioctl UAL_CREATE_SRQ\r
+ *\r
+ * Returns the srq_list_obj as the handle to UAL\r
+ */\r
+static cl_status_t\r
+proxy_create_srq(\r
+       IN              void                                    *p_open_context,\r
+       IN              cl_ioctl_handle_t               h_ioctl,\r
+               OUT     size_t                                  *p_ret_bytes )\r
+{\r
+       ual_create_srq_ioctl_t  *p_ioctl =\r
+               (ual_create_srq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
+       al_dev_open_context_t   *p_context =\r
+               (al_dev_open_context_t *)p_open_context;\r
+       ib_pd_handle_t                  h_pd;\r
+       ib_srq_handle_t                 h_srq;\r
+       ci_umv_buf_t                    *p_umv_buf = NULL;\r
+       ib_api_status_t                 status;\r
+       ib_pfn_event_cb_t               pfn_ev;\r
+\r
+       AL_ENTER( AL_DBG_SRQ );\r
+\r
+       /* Validate input buffers. */\r
+       if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
+               cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
+               cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
+       {\r
+               AL_EXIT( AL_DBG_SRQ );\r
+               return CL_INVALID_PARAMETER;\r
+       }\r
+\r
+       /* Validate handles. */\r
+       h_pd = (ib_pd_handle_t)\r
+               al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
+       if( !h_pd)\r
+       {\r
+               status = IB_INVALID_PD_HANDLE;\r
+               goto proxy_create_srq_err1;\r
+       }\r
+\r
+       status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
+       if( status != IB_SUCCESS )\r
+               goto proxy_create_srq_err1;\r
+\r
+       if( p_ioctl->in.ev_notify )\r
+               pfn_ev = proxy_srq_err_cb;\r
+       else\r
+               pfn_ev = NULL;\r
+\r
+       status = create_srq( h_pd, &p_ioctl->in.srq_attr, p_ioctl->in.context,\r
+               pfn_ev, &h_srq, p_umv_buf );\r
+       if( status != IB_SUCCESS )\r
+               goto proxy_create_srq_err1;\r
+\r
+       status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
+       if( status == IB_SUCCESS )\r
+       {\r
+               p_ioctl->out.h_srq = h_srq->obj.hdl;\r
+               h_srq->obj.hdl_valid = TRUE;\r
+               /* Release the reference taken in create_srq (by init_al_obj) */\r
+               deref_al_obj( &h_srq->obj );\r
+       }\r
+       else\r
+       {\r
+proxy_create_srq_err1:\r
+               p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
+               p_ioctl->out.h_srq = AL_INVALID_HANDLE;\r
+       }\r
+       free_umvbuf( p_umv_buf );\r
+\r
+       p_ioctl->out.status = status;\r
+       *p_ret_bytes = sizeof(p_ioctl->out);\r
+\r
+       if( h_pd )\r
+               deref_al_obj( &h_pd->obj );\r
+\r
+       AL_EXIT( AL_DBG_SRQ );\r
+       return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process the ioctl UAL_QUERY_SRQ:\r
+ */\r
+static\r
+cl_status_t\r
+proxy_query_srq(\r
+       IN              void                                    *p_open_context,\r
+       IN              cl_ioctl_handle_t               h_ioctl,\r
+               OUT     size_t                                  *p_ret_bytes )\r
+{\r
+       ual_query_srq_ioctl_t   *p_ioctl =\r
+               (ual_query_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
+       al_dev_open_context_t   *p_context =\r
+               (al_dev_open_context_t *)p_open_context;\r
+       ib_srq_handle_t                 h_srq;\r
+       ci_umv_buf_t                    *p_umv_buf = NULL;\r
+       ib_api_status_t                 status;\r
+\r
+       AL_ENTER( AL_DBG_SRQ );\r
+\r
+       /* Validate input buffers. */\r
+       if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
+               cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
+               cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
+       {\r
+               AL_EXIT( AL_DBG_SRQ );\r
+               return CL_INVALID_PARAMETER;\r
+       }\r
+\r
+       /* Validate SRQ handle */\r
+       h_srq = (ib_srq_handle_t)\r
+               al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );\r
+       if( !h_srq )\r
+       {\r
+               status = IB_INVALID_SRQ_HANDLE;\r
+               goto proxy_query_srq_err;\r
+       }\r
+\r
+       status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
+       if( status != IB_SUCCESS )\r
+               goto proxy_query_srq_err;\r
+\r
+       status = query_srq( h_srq, &p_ioctl->out.srq_attr, p_umv_buf );\r
+       if( status != IB_SUCCESS )\r
+               goto proxy_query_srq_err;\r
+\r
+       status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
+       if( status != IB_SUCCESS )\r
+       {\r
+proxy_query_srq_err:\r
+               p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
+               cl_memclr( &p_ioctl->out.srq_attr, sizeof(ib_srq_attr_t) );\r
+       }\r
+       free_umvbuf( p_umv_buf );\r
+\r
+       if( h_srq )\r
+               deref_al_obj( &h_srq->obj );\r
+\r
+       p_ioctl->out.status = status;\r
+       *p_ret_bytes = sizeof(p_ioctl->out);\r
+\r
+       AL_EXIT( AL_DBG_SRQ );\r
+       return CL_SUCCESS;\r
+}\r
+\r
+\r
+\r
+/*\r
+ * Process the ioctl UAL_MODIFY_SRQ:\r
+ */\r
+static\r
+cl_status_t\r
+proxy_modify_srq(\r
+       IN              void                                    *p_open_context,\r
+       IN              cl_ioctl_handle_t               h_ioctl,\r
+               OUT     size_t                                  *p_ret_bytes )\r
+{\r
+       ual_modify_srq_ioctl_t  *p_ioctl =\r
+               (ual_modify_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
+       al_dev_open_context_t   *p_context =\r
+               (al_dev_open_context_t *)p_open_context;\r
+       ib_srq_handle_t                 h_srq;\r
+       ci_umv_buf_t                    *p_umv_buf = NULL;\r
+       ib_api_status_t                 status;\r
+\r
+       AL_ENTER( AL_DBG_SRQ );\r
+\r
+       /* Validate input buffers. */\r
+       if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
+               cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
+               cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
+       {\r
+               AL_EXIT( AL_DBG_SRQ );\r
+               return CL_INVALID_PARAMETER;\r
+       }\r
+\r
+       /* Validate SRQ handle */\r
+       h_srq = (ib_srq_handle_t)\r
+               al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );\r
+       if( !h_srq )\r
+       {\r
+               status = IB_INVALID_SRQ_HANDLE;\r
+               goto proxy_modify_srq_err;\r
+       }\r
+\r
+       status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
+       if( status != IB_SUCCESS )\r
+               goto proxy_modify_srq_err;\r
+\r
+       status = modify_srq( h_srq, &p_ioctl->in.srq_attr, p_ioctl->in.srq_attr_mask, p_umv_buf );\r
+\r
+       if( status != IB_SUCCESS )\r
+               goto proxy_modify_srq_err;\r
+       \r
+       status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
+       if( status != IB_SUCCESS )\r
+       {\r
+proxy_modify_srq_err:\r
+               p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
+       }\r
+       free_umvbuf( p_umv_buf );\r
+\r
+       if( h_srq )\r
+               deref_al_obj( &h_srq->obj );\r
+\r
+       p_ioctl->out.status = status;\r
+       *p_ret_bytes = sizeof(p_ioctl->out);\r
+\r
+       AL_EXIT( AL_DBG_SRQ );\r
+       return CL_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * Process the ioctl UAL_DESTROY_SRQ\r
+ */\r
+static cl_status_t\r
+proxy_destroy_srq(\r
+       IN              void                                    *p_open_context,\r
+       IN              cl_ioctl_handle_t               h_ioctl,\r
+               OUT     size_t                                  *p_ret_bytes )\r
+{\r
+       ual_destroy_srq_ioctl_t *p_ioctl =\r
+               (ual_destroy_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
+       al_dev_open_context_t   *p_context =\r
+               (al_dev_open_context_t *)p_open_context;\r
+       ib_srq_handle_t                 h_srq;\r
+\r
+       AL_ENTER( AL_DBG_SRQ );\r
+\r
+       /* Validate input buffers. */\r
+       if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
+               cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
+               cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
+       {\r
+               AL_EXIT( AL_DBG_SRQ );\r
+               return CL_INVALID_PARAMETER;\r
+       }\r
+\r
+       /* Set the return bytes in all cases */\r
+       *p_ret_bytes = sizeof(p_ioctl->out);\r
+\r
+       /* Validate SRQ handle */\r
+       h_srq = (ib_srq_handle_t)\r
+               al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );\r
+       if( !h_srq )\r
+       {\r
+               AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );\r
+               p_ioctl->out.status = IB_INVALID_SRQ_HANDLE;\r
+       }\r
+       else\r
+       {\r
+               h_srq->obj.pfn_destroy( &h_srq->obj, ib_sync_destroy );\r
+               p_ioctl->out.status = IB_SUCCESS;\r
+       }\r
+\r
+       AL_EXIT( AL_DBG_SRQ );\r
+       return CL_SUCCESS;\r
+}\r
+\r
 \r
 /*\r
  * Proxy's QP error handler\r
@@ -1004,6 +1308,7 @@ proxy_qp_err_cb(
 }\r
 \r
 \r
+\r
 /*\r
  * Process the ioctl UAL_CREATE_QP\r
  *\r
@@ -1021,6 +1326,7 @@ proxy_create_qp(
                (al_dev_open_context_t *)p_open_context;\r
        ib_pd_handle_t                  h_pd;\r
        ib_qp_handle_t                  h_qp;\r
+       ib_srq_handle_t                 h_srq = NULL;\r
        ib_cq_handle_t                  h_sq_cq, h_rq_cq;\r
        ci_umv_buf_t                    *p_umv_buf = NULL;\r
        ib_api_status_t                 status;\r
@@ -1044,6 +1350,15 @@ proxy_create_qp(
                (uint64_t)p_ioctl->in.qp_create.h_sq_cq, AL_OBJ_TYPE_H_CQ );\r
        h_rq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,\r
                (uint64_t)p_ioctl->in.qp_create.h_rq_cq, AL_OBJ_TYPE_H_CQ );\r
+       if (p_ioctl->in.qp_create.h_srq) {\r
+               h_srq = (ib_srq_handle_t)al_hdl_ref( p_context->h_al,\r
+                       (uint64_t)p_ioctl->in.qp_create.h_srq, AL_OBJ_TYPE_H_SRQ );\r
+               if( !h_srq)\r
+               {\r
+                       status = IB_INVALID_SRQ_HANDLE;\r
+                       goto proxy_create_qp_err1;\r
+               }\r
+       }\r
        if( !h_pd)\r
        {\r
                status = IB_INVALID_PD_HANDLE;\r
@@ -1064,6 +1379,8 @@ proxy_create_qp(
        p_ioctl->in.qp_create.h_sq_cq = h_sq_cq;\r
        /* Substitute rq_cq handle with AL's cq handle */\r
        p_ioctl->in.qp_create.h_rq_cq = h_rq_cq;\r
+       /* Substitute srq handle with AL's srq handle */\r
+       p_ioctl->in.qp_create.h_srq = h_srq;\r
 \r
        status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
        if( status != IB_SUCCESS )\r
@@ -1116,6 +1433,8 @@ proxy_create_qp_err1:
                deref_al_obj( &h_rq_cq->obj );\r
        if( h_sq_cq )\r
                deref_al_obj( &h_sq_cq->obj );\r
+       if( h_srq )\r
+               deref_al_obj( &h_srq->obj );\r
 \r
        AL_EXIT( AL_DBG_QP );\r
        return CL_SUCCESS;\r
@@ -1198,6 +1517,15 @@ proxy_query_qp(
                {\r
                        p_ioctl->out.attr.h_rq_cq = NULL;\r
                }\r
+               if( p_ioctl->out.attr.h_srq )\r
+               {\r
+                       p_ioctl->out.attr.h_srq =\r
+                               (ib_srq_handle_t)p_ioctl->out.attr.h_srq->obj.hdl;\r
+               }\r
+               else\r
+               {\r
+                       p_ioctl->out.attr.h_srq = NULL;\r
+               }\r
        }\r
        else\r
        {\r
@@ -2247,6 +2575,126 @@ proxy_post_recv_done:
 }\r
 \r
 \r
+/*\r
+ * Process the ioctl UAL_POST_SRQ_RECV\r
+ */\r
+static\r
+cl_status_t\r
+proxy_post_srq_recv(\r
+       IN              void                                    *p_open_context,\r
+       IN              cl_ioctl_handle_t               h_ioctl,\r
+               OUT     size_t                                  *p_ret_bytes )\r
+{\r
+       ual_post_srq_recv_ioctl_t       *p_ioctl =\r
+               (ual_post_srq_recv_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
+       al_dev_open_context_t   *p_context =\r
+               (al_dev_open_context_t *)p_open_context;\r
+       ib_srq_handle_t                 h_srq;\r
+       ib_recv_wr_t                    *p_wr;\r
+       ib_recv_wr_t                    *p_recv_failure;\r
+       uintn_t                                 i;\r
+       ib_local_ds_t                   *p_ds;\r
+       uintn_t                                 num_ds = 0;\r
+       ib_api_status_t                 status;\r
+       size_t                                  in_buf_sz;\r
+\r
+       AL_ENTER( AL_DBG_QP );\r
+\r
+       /* Validate input buffers. */\r
+       if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
+               cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
+               cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
+       {\r
+               AL_EXIT( AL_DBG_QP );\r
+               return CL_INVALID_PARAMETER;\r
+       }\r
+\r
+       /*\r
+        * Additional input buffer validation based on actual settings.\r
+        * Note that this validates that work requests are actually\r
+        * being passed in.\r
+        */\r
+       in_buf_sz = sizeof(p_ioctl->in);\r
+       in_buf_sz += sizeof(ib_recv_wr_t) * (p_ioctl->in.num_wr - 1);\r
+       in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds;\r
+       if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
+       {\r
+               AL_EXIT( AL_DBG_QP );\r
+               return CL_INVALID_PARAMETER;\r
+       }\r
+\r
+       /* Setup p_send_failure to head of list. */\r
+       p_recv_failure = p_wr = p_ioctl->in.recv_wr;\r
+\r
+       /* Validate SRQ handle */\r
+       h_srq = (ib_srq_handle_t)\r
+               al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );\r
+       if( !h_srq )\r
+       {\r
+               status = IB_INVALID_SRQ_HANDLE;\r
+               goto proxy_post_recv_done;\r
+       }\r
+\r
+       /* Setup the base data segment pointer. */\r
+       p_ds = (ib_local_ds_t*)&p_ioctl->in.recv_wr[p_ioctl->in.num_wr];\r
+\r
+       /* Setup the user's work requests and data segments and translate. */\r
+       for( i = 0; i < p_ioctl->in.num_wr; i++ )\r
+       {\r
+               /* Setup the data segments, if any. */\r
+               if( p_wr[i].num_ds )\r
+               {\r
+                       num_ds += p_wr[i].num_ds;\r
+                       if( num_ds > p_ioctl->in.num_ds )\r
+                       {\r
+                               /*\r
+                               * The work request submitted exceed the number of data\r
+                               * segments specified in the IOCTL.\r
+                               */\r
+                               status = IB_INVALID_PARAMETER;\r
+                               goto proxy_post_recv_done;\r
+                       }\r
+                       p_wr[i].ds_array = p_ds;\r
+                       p_ds += p_wr[i].num_ds;\r
+               }\r
+               else\r
+               {\r
+                       p_wr[i].ds_array = NULL;\r
+               }\r
+\r
+               p_wr[i].p_next = &p_wr[i + 1];\r
+       }\r
+\r
+       /* Mark the end of list. */\r
+       p_wr[i-1].p_next = NULL;\r
+\r
+       status = ib_post_srq_recv( h_srq, p_wr, &p_recv_failure );\r
+\r
+       if( status == IB_SUCCESS )\r
+       {\r
+               p_ioctl->out.failed_cnt = 0;\r
+       }\r
+       else\r
+       {\r
+proxy_post_recv_done:\r
+               /* First set up as if all failed. */\r
+               p_ioctl->out.failed_cnt = p_ioctl->in.num_wr;\r
+               /* Now subtract successful ones. */\r
+               p_ioctl->out.failed_cnt -= (uint32_t)(\r
+                       (((uintn_t)p_recv_failure) - ((uintn_t)p_wr))\r
+                       / sizeof(ib_recv_wr_t));\r
+       }\r
+\r
+       if( h_srq )\r
+               deref_al_obj( &h_srq->obj );\r
+\r
+       p_ioctl->out.status = status;\r
+       *p_ret_bytes = sizeof(p_ioctl->out);\r
+\r
+       AL_EXIT( AL_DBG_QP );\r
+       return CL_SUCCESS;\r
+}\r
+\r
 \r
 /*\r
  * Process the ioctl UAL_PEEK_CQ\r
@@ -3383,6 +3831,21 @@ verbs_ioctl(
        case UAL_MODIFY_AV:\r
                cl_status = proxy_modify_av( p_context, h_ioctl, p_ret_bytes );\r
                break;\r
+       case UAL_CREATE_SRQ:\r
+               cl_status = proxy_create_srq( p_context, h_ioctl, p_ret_bytes );\r
+               break;\r
+       case UAL_QUERY_SRQ:\r
+               cl_status = proxy_query_srq( p_context, h_ioctl, p_ret_bytes );\r
+               break;\r
+       case UAL_MODIFY_SRQ:\r
+               cl_status = proxy_modify_srq( p_context, h_ioctl, p_ret_bytes );\r
+               break;\r
+       case UAL_DESTROY_SRQ:\r
+               cl_status = proxy_destroy_srq( p_context, h_ioctl, p_ret_bytes );\r
+               break;\r
+       case UAL_POST_SRQ_RECV:\r
+               cl_status = proxy_post_srq_recv( p_context, h_ioctl, p_ret_bytes );\r
+               break;\r
        case UAL_CREATE_QP:\r
                cl_status = proxy_create_qp( p_context, h_ioctl, p_ret_bytes );\r
                break;\r
index 4dbe0db..bd1cfbf 100644 (file)
@@ -43,6 +43,7 @@ SOURCES=\
        ual_query.c                             \\r
        ual_reg_svc.c                   \\r
        ual_sa_req.c                    \\r
+       ual_srq.c                               \\r
        ual_sub.c                               \\r
        ..\al.c                                 \\r
        ..\al_av.c                              \\r
@@ -63,6 +64,7 @@ SOURCES=\
        ..\al_query.c                   \\r
        ..\al_reg_svc.c                 \\r
        ..\al_res_mgr.c                 \\r
+       ..\al_srq.c                             \\r
        ..\al_sub.c                             \\r
        ..\ib_common.c                  \\r
        ..\ib_statustext.c\r
index ea70219..87dc7e4 100644 (file)
@@ -112,6 +112,27 @@ ual_query_av(
                OUT                     ib_av_attr_t* const                     p_av_attr,\r
                OUT                     ib_pd_handle_t* const           ph_pd );\r
 \r
+ib_api_status_t\r
+ual_create_srq(\r
+       IN              const   ib_pd_handle_t                          h_pd,\r
+       IN      OUT                     ib_srq_handle_t                         h_srq,\r
+       IN              const   ib_srq_attr_t* const                    p_srq_attr);\r
+\r
+ib_api_status_t\r
+ual_modify_srq(\r
+       IN                              ib_srq_handle_t                         h_srq,\r
+       IN              const   ib_srq_attr_t*  const                   p_srq_attr,\r
+       IN              const   ib_srq_attr_mask_t                      srq_attr_mask );\r
+\r
+ib_api_status_t\r
+ual_query_srq(\r
+       IN                              ib_srq_handle_t                         h_srq,\r
+               OUT                     ib_srq_attr_t*                          p_srq_attr );\r
+\r
+ib_api_status_t\r
+ual_destroy_srq(\r
+       IN                              ib_srq_handle_t                         h_srq );\r
+\r
 ib_api_status_t\r
 ual_create_qp(\r
        IN              const   ib_pd_handle_t                          h_pd,\r
@@ -225,6 +246,12 @@ ual_post_recv(
        IN                              ib_recv_wr_t* const                     p_recv_wr,\r
                OUT                     ib_recv_wr_t                            **pp_recv_failure );\r
 \r
+ib_api_status_t\r
+ual_post_srq_recv(\r
+       IN              const   ib_srq_handle_t                         h_srq,\r
+       IN                              ib_recv_wr_t* const                     p_recv_wr,\r
+               OUT                     ib_recv_wr_t                            **pp_recv_failure );\r
+\r
 ib_api_status_t\r
 ual_peek_cq(\r
        IN              const   ib_cq_handle_t                          h_cq,\r
index 532e177..2abf6a2 100644 (file)
@@ -802,6 +802,7 @@ __process_misc_cb(
        {\r
        case CA_ERROR_REC:\r
        case QP_ERROR_REC:\r
+       case SRQ_ERROR_REC:\r
        case CQ_ERROR_REC:\r
        {\r
                /* Initiate user-mode asynchronous event processing. */\r
@@ -952,6 +953,7 @@ __process_misc_cb(
                uintn_t                                                 bytes_ret;\r
                cl_status_t                                             cl_status;\r
                ib_ca_attr_t                                    *p_old_ca_attr;\r
+               ib_api_status_t                                 status;\r
 \r
                pnp_event = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_event;\r
                ca_guid = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.ca_guid;\r
@@ -1004,8 +1006,13 @@ __process_misc_cb(
                        ref_al_obj( &p_ci_ca->obj );\r
                        cl_spinlock_release( &gp_al_mgr->obj.lock );\r
 \r
-                       ci_ca_update_attr( p_ci_ca, &p_old_ca_attr );\r
-                       if( p_old_ca_attr )\r
+                       status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr );\r
+                       if( status != IB_SUCCESS) {\r
+                               AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,\r
+                                       ("update CA attributes returned %#x.\n", status) );\r
+                               break;\r
+                       }\r
+                       if ( p_old_ca_attr )\r
                                cl_free( p_old_ca_attr );\r
 \r
                        /*\r
index c8a409f..0ff6486 100644 (file)
@@ -37,6 +37,7 @@
 #include "al_cq.h"\r
 #include "al_pd.h"\r
 #include "al_qp.h"\r
+#include "al_srq.h"\r
 #include "ual_mad.h"\r
 #include "ual_support.h"\r
 \r
@@ -294,6 +295,8 @@ ual_create_qp(
                qp_create = *p_qp_create;\r
                qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq;\r
                qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq;\r
+               if (qp_create.h_srq)\r
+                       qp_create.h_srq = qp_create.h_srq->h_ci_srq;\r
                status = uvp_intf.pre_create_qp( h_pd->h_ci_pd,\r
                        &qp_create, &qp_ioctl.in.umv_buf );\r
                if( status != IB_SUCCESS )\r
@@ -312,6 +315,9 @@ ual_create_qp(
                (ib_cq_handle_t)p_qp_create->h_rq_cq->obj.hdl;\r
        qp_ioctl.in.qp_create.h_sq_cq =\r
                (ib_cq_handle_t)p_qp_create->h_sq_cq->obj.hdl;\r
+       if (p_qp_create->h_srq)\r
+               qp_ioctl.in.qp_create.h_srq =\r
+                       (ib_srq_handle_t)p_qp_create->h_srq->obj.hdl;\r
        qp_ioctl.in.context = h_qp;\r
        qp_ioctl.in.ev_notify = (h_qp->pfn_event_cb != NULL) ? TRUE : FALSE;\r
 \r
@@ -329,6 +335,12 @@ ual_create_qp(
        else\r
        {\r
                status = qp_ioctl.out.status;\r
+               \r
+               if( status == IB_SUCCESS )\r
+               {\r
+                       h_qp->obj.hdl = qp_ioctl.out.h_qp;\r
+                       *p_qp_attr = qp_ioctl.out.attr;\r
+               }\r
        }\r
 \r
        /* Post uvp call */\r
@@ -367,11 +379,6 @@ ual_create_qp(
                h_qp->pfn_post_send = ual_post_send;\r
        }\r
 \r
-       if( status == IB_SUCCESS )\r
-       {\r
-               h_qp->obj.hdl = qp_ioctl.out.h_qp;\r
-               *p_qp_attr = qp_ioctl.out.attr;\r
-       }\r
 \r
        AL_EXIT( AL_DBG_QP );\r
        return status;\r
@@ -554,6 +561,8 @@ ual_query_qp(
                p_attr->h_rq_cq = h_qp->h_recv_cq->h_ci_cq;\r
        if( h_qp->h_send_cq )\r
                p_attr->h_sq_cq = h_qp->h_send_cq->h_ci_cq;\r
+       if( h_qp->h_srq )\r
+               p_attr->h_srq = h_qp->h_srq->h_ci_srq;\r
 \r
        /* Post uvp call */\r
        if( h_qp->h_ci_qp && uvp_intf.post_query_qp )\r
diff --git a/core/al/user/ual_srq.c b/core/al/user/ual_srq.c
new file mode 100644 (file)
index 0000000..a575b7e
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ual_qp.c 1611 2006-08-20 14:48:55Z sleybo $
+ */
+
+
+#include "al.h"
+#include "al_av.h"
+#include "al_ci_ca.h"
+#include "al_cq.h"
+#include "al_pd.h"
+#include "al_srq.h"
+#include "ual_mad.h"
+#include "ual_support.h"
+
+
+#include "al_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "ual_srq.tmh"
+#endif
+
+
+ib_api_status_t
+ual_post_srq_recv(
+       IN              const   ib_srq_handle_t                         h_srq,
+       IN                              ib_recv_wr_t*   const           p_recv_wr,
+               OUT                     ib_recv_wr_t                            **pp_recv_failure OPTIONAL )
+{
+       uintn_t                                 failed_index;
+       uintn_t                                 bytes_ret;
+       uint32_t                                num_wr          = 0;
+       uint32_t                                num_ds          = 0;
+       ib_recv_wr_t*                   p_wr;
+       ib_local_ds_t*                  p_ds;
+       ual_post_srq_recv_ioctl_t       *p_srq_ioctl;
+       size_t                                  ioctl_buf_sz;
+       cl_status_t                             cl_status;
+       ib_api_status_t                 status;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       /*
+        * Since the work request is a link list and we need to pass this
+        * to the kernel as a array of work requests.  So first walk through
+        * the list and find out how much memory we need to allocate.
+        */
+       for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next )
+       {
+               num_wr++;
+
+               /* Check for overflow */
+               if( !num_wr )
+                       break;
+               if( num_ds > num_ds + p_wr->num_ds )
+               {
+                       num_wr = 0;
+                       break;
+               }
+
+               num_ds += p_wr->num_ds;
+       }
+       if( !num_wr )
+       {
+               AL_EXIT( AL_DBG_SRQ );
+               return IB_INVALID_PARAMETER;
+       }
+
+       ioctl_buf_sz = sizeof(ual_post_recv_ioctl_t);
+       ioctl_buf_sz += sizeof(ib_recv_wr_t) * (num_wr - 1);
+       ioctl_buf_sz += sizeof(ib_local_ds_t) * num_ds;
+
+       p_srq_ioctl = (ual_post_srq_recv_ioctl_t*)cl_zalloc( ioctl_buf_sz );
+       if( !p_srq_ioctl )
+       {
+               AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+                       ("Failed to allocate IOCTL buffer.\n") );
+               return IB_INSUFFICIENT_MEMORY;
+       }
+       p_ds = (ib_local_ds_t*)&p_srq_ioctl->in.recv_wr[num_wr];
+
+       /* Now populate the ioctl buffer and send down the ioctl */
+       p_srq_ioctl->in.h_srq = h_srq->obj.hdl;
+       p_srq_ioctl->in.num_wr = num_wr;
+       p_srq_ioctl->in.num_ds = num_ds;
+       num_wr = 0;
+       for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next )
+       {
+               p_srq_ioctl->in.recv_wr[num_wr++] = *p_wr;
+               cl_memcpy(
+                       p_ds, p_wr->ds_array, sizeof(ib_local_ds_t) * p_wr->num_ds );
+               p_ds += p_wr->num_ds;
+       }
+
+       cl_status = do_al_dev_ioctl( UAL_POST_SRQ_RECV,
+               &p_srq_ioctl->in, ioctl_buf_sz,
+               &p_srq_ioctl->out, sizeof(p_srq_ioctl->out),
+               &bytes_ret );
+
+       if( cl_status != CL_SUCCESS || bytes_ret != sizeof(p_srq_ioctl->out) )
+       {
+               if( pp_recv_failure )
+                       *pp_recv_failure = p_recv_wr;
+
+               AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+                       ("UAL_POST_SRQ_RECV IOCTL returned %s.\n",
+                       CL_STATUS_MSG(cl_status)) );
+               status = IB_ERROR;
+       }
+       else
+       {
+               status = p_srq_ioctl->out.status;
+
+               if( status != IB_SUCCESS && pp_recv_failure )
+               {
+                       /* Get the failed index */
+                       failed_index = num_wr - p_srq_ioctl->out.failed_cnt;
+                       p_wr = p_recv_wr;
+                       while( failed_index-- )
+                               p_wr = p_wr->p_next;
+
+                       *pp_recv_failure = p_wr;
+               }
+       }
+
+       cl_free( p_srq_ioctl );
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
+
+
+ib_api_status_t
+ual_create_srq(
+       IN              const   ib_pd_handle_t                          h_pd,
+       IN      OUT                     ib_srq_handle_t                         h_srq,
+       IN              const   ib_srq_attr_t* const                    p_srq_attr)
+{
+       /* The first argument is probably not needed */
+       ual_create_srq_ioctl_t  srq_ioctl;
+       uintn_t                                 bytes_ret;
+       cl_status_t                             cl_status;
+       ib_api_status_t                 status;
+       uvp_interface_t                 uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+       ib_srq_attr_t                           srq_attr;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       /* Clear the srq_ioctl */
+       cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+       /* Pre call to the UVP library */
+       if( h_pd->h_ci_pd && uvp_intf.pre_create_srq )
+       {
+               /* The post call MUST exist as it sets the UVP srq handle. */
+               CL_ASSERT( uvp_intf.post_create_srq );
+               /* Convert the handles to UVP handles */
+               srq_attr = *p_srq_attr;
+               status = uvp_intf.pre_create_srq( h_pd->h_ci_pd,
+                       &srq_attr, &srq_ioctl.in.umv_buf );
+               if( status != IB_SUCCESS )
+               {
+                       AL_EXIT( AL_DBG_SRQ );
+                       return status;
+               }
+       }
+       /*
+        * Convert the handles to KAL handles once again starting
+        * from the input srq attribute
+        */
+       srq_ioctl.in.h_pd = h_pd->obj.hdl;
+       srq_ioctl.in.srq_attr = *p_srq_attr;
+       srq_ioctl.in.context = h_srq;
+       srq_ioctl.in.ev_notify = (h_srq->pfn_event_cb != NULL) ? TRUE : FALSE;
+
+       cl_status = do_al_dev_ioctl( UAL_CREATE_SRQ,
+               &srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+               &bytes_ret );
+
+       if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+       {
+               AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+                       ("UAL_CREATE_SRQ IOCTL returned %s.\n",
+                       CL_STATUS_MSG(cl_status)) );
+               status = IB_ERROR;
+       }
+       else
+       {
+               status = srq_ioctl.out.status;
+       }
+
+       /* Post uvp call */
+       if( h_pd->h_ci_pd && uvp_intf.post_create_srq )
+       {
+               uvp_intf.post_create_srq( h_pd->h_ci_pd,
+                       status, &h_srq->h_ci_srq, &srq_ioctl.out.umv_buf );
+
+               if( uvp_intf.post_recv )
+               {
+                       h_srq->h_recv_srq = h_srq->h_ci_srq;
+                       h_srq->pfn_post_srq_recv = uvp_intf.post_srq_recv;
+               }
+               else
+               {
+                       h_srq->h_recv_srq = h_srq;
+                       h_srq->pfn_post_srq_recv = ual_post_srq_recv;
+               }
+       }
+       else
+       {
+               h_srq->h_recv_srq = h_srq;
+               h_srq->pfn_post_srq_recv = ual_post_srq_recv;
+       }
+
+       if( status == IB_SUCCESS )
+       {
+               h_srq->obj.hdl = srq_ioctl.out.h_srq;
+       }
+
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
+
+ib_api_status_t
+ual_modify_srq(
+       IN                                      ib_srq_handle_t                 h_srq,
+       IN              const           ib_srq_attr_t*          const   p_srq_attr,
+       IN              const           ib_srq_attr_mask_t                      srq_attr_mask)
+{
+       ual_modify_srq_ioctl_t          srq_ioctl;
+       uintn_t                                 bytes_ret;
+       cl_status_t                             cl_status;
+       ib_api_status_t                 status;
+       uvp_interface_t                 uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       /* Clear the srq_ioctl */
+       cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+       /* Call the uvp pre call if the vendor library provided a valid srq handle */
+       if( h_srq->h_ci_srq && uvp_intf.pre_modify_srq )
+       {
+               /* Pre call to the UVP library */
+               status = uvp_intf.pre_modify_srq( h_srq->h_ci_srq,
+                       p_srq_attr, srq_attr_mask, &srq_ioctl.in.umv_buf );
+               if( status != IB_SUCCESS )
+               {
+                       AL_EXIT( AL_DBG_SRQ );
+                       return status;
+               }
+       }
+
+       srq_ioctl.in.h_srq = h_srq->obj.hdl;
+       srq_ioctl.in.srq_attr = *p_srq_attr;
+       srq_ioctl.in.srq_attr_mask = srq_attr_mask;
+
+       cl_status = do_al_dev_ioctl( UAL_MODIFY_SRQ,
+               &srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+               &bytes_ret );
+
+       if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+       {
+               AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+                       ("UAL_MODIFY_SRQ IOCTL returned %s.\n",
+                       CL_STATUS_MSG(cl_status)) );
+               status = IB_ERROR;
+       }
+       else
+       {
+               status = srq_ioctl.out.status;
+       }
+
+       /* Post uvp call */
+       if( h_srq->h_ci_srq && uvp_intf.post_modify_srq )
+       {
+               uvp_intf.post_modify_srq( h_srq->h_ci_srq, status,
+                       &srq_ioctl.out.umv_buf );
+       }
+
+       //if( status == IB_SUCCESS )
+       //{
+       //      *p_srq_attr = srq_ioctl.out.srq_attr;
+       //}
+
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
+
+ib_api_status_t
+ual_query_srq(
+       IN                      ib_srq_handle_t                         h_srq,
+               OUT             ib_srq_attr_t*                          p_srq_attr )
+{
+       ual_query_srq_ioctl_t           srq_ioctl;
+       uintn_t                                 bytes_ret;
+       cl_status_t                             cl_status;
+       ib_api_status_t                 status;
+       uvp_interface_t                 uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+       ib_srq_attr_t*                          p_attr;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       /* Clear the srq_ioctl */
+       cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+       /* Call the uvp pre call if the vendor library provided a valid ca handle */
+       if( h_srq->h_ci_srq && uvp_intf.pre_query_srq )
+       {
+               /* Pre call to the UVP library */
+               status = uvp_intf.pre_query_srq( h_srq->h_ci_srq, &srq_ioctl.in.umv_buf );
+               if( status != IB_SUCCESS )
+               {
+                       AL_EXIT( AL_DBG_SRQ );
+                       return status;
+               }
+       }
+
+       srq_ioctl.in.h_srq = h_srq->obj.hdl;
+
+       cl_status = do_al_dev_ioctl( UAL_QUERY_SRQ,
+               &srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+               &bytes_ret );
+
+       if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+       {
+               AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+                       ("UAL_QUERY_SRQ IOCTL returned %s.\n",
+                       CL_STATUS_MSG(cl_status)) );
+               status = IB_ERROR;
+       }
+       else
+       {
+               status = srq_ioctl.out.status;
+       }
+
+       p_attr = &srq_ioctl.out.srq_attr;
+
+       /* Post uvp call */
+       if( h_srq->h_ci_srq && uvp_intf.post_query_srq )
+       {
+               uvp_intf.post_query_srq( h_srq->h_ci_srq, status,
+                       p_attr, &srq_ioctl.out.umv_buf );
+       }
+
+       if( IB_SUCCESS == status )
+       {
+               /* UVP handles in srq_attr will be converted to UAL's handles
+                * by the common code
+                */
+               *p_srq_attr = *p_attr;
+       }
+
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
+
+ib_api_status_t
+ual_destroy_srq(
+       IN                      ib_srq_handle_t                         h_srq )
+{
+       ual_destroy_srq_ioctl_t         srq_ioctl;
+       uintn_t                                 bytes_ret;
+       cl_status_t                             cl_status;
+       ib_api_status_t                 status;
+       uvp_interface_t                 uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+       AL_ENTER( AL_DBG_SRQ );
+
+       /* Call the uvp pre call if the vendor library provided a valid srq handle */
+       if( h_srq->h_ci_srq && uvp_intf.pre_destroy_srq )
+       {
+               status = uvp_intf.pre_destroy_srq( h_srq->h_ci_srq );
+               if (status != IB_SUCCESS)
+               {
+                       AL_EXIT( AL_DBG_SRQ );
+                       return status;
+               }
+       }
+
+       cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+       srq_ioctl.in.h_srq = h_srq->obj.hdl;
+       cl_status = do_al_dev_ioctl( UAL_DESTROY_SRQ,
+               &srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+               &bytes_ret );
+
+       if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+       {
+               AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+                       ("UAL_DESTROY_SRQ IOCTL returned %s.\n",
+                       CL_STATUS_MSG(cl_status)) );
+               status = IB_ERROR;
+       }
+       else
+       {
+               status = srq_ioctl.out.status;
+       }
+
+       /* Call vendor's post_destroy_srq */
+       if( h_srq->h_ci_srq && uvp_intf.post_destroy_srq )
+               uvp_intf.post_destroy_srq( h_srq->h_ci_srq, status );
+
+       AL_EXIT( AL_DBG_SRQ );
+       return status;
+}
+
index 9eea76c..ee23556 100644 (file)
@@ -679,6 +679,11 @@ al_set_ifc(
        p_ifc->map_phys_mlnx_fmr = mlnx_map_phys_fmr;\r
        p_ifc->unmap_mlnx_fmr = mlnx_unmap_fmr;\r
        p_ifc->destroy_mlnx_fmr = mlnx_destroy_fmr;\r
+       p_ifc->create_srq = ib_create_srq;\r
+       p_ifc->modify_srq = ib_modify_srq;\r
+       p_ifc->query_srq = ib_query_srq;\r
+       p_ifc->destroy_srq = ib_destroy_srq;\r
+       p_ifc->post_srq_recv = ib_post_srq_recv;\r
 \r
        BUS_EXIT( BUS_DBG_PNP );\r
 }\r
index 3b8014b..5e639a3 100644 (file)
@@ -270,7 +270,10 @@ mlnx_conv_hca_cap(
        ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;\r
        ca_attr_p->max_fmr   = hca_info_p->max_fmr;\r
        ca_attr_p->max_map_per_fmr   = hca_info_p->max_map_per_fmr;\r
-       \r
+       ca_attr_p->max_srq = hca_info_p->max_srq;\r
+       ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr;\r
+       ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge;\r
+\r
        ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;\r
        ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;\r
        ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;\r
@@ -279,6 +282,7 @@ mlnx_conv_hca_cap(
        ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;\r
        ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;\r
        ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;\r
+       ca_attr_p->modify_srq_depth      = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE;\r
        ca_attr_p->hw_agents            = FALSE; // in the context of IBAL then agent is implemented on the host\r
 \r
        ca_attr_p->num_page_sizes = 1;\r
@@ -356,6 +360,27 @@ void ca_event_handler(struct ib_event *ev, void *context)
        }\r
 }\r
 \r
/*
 * Async-event dispatcher for SRQ events (e.g. SRQ limit reached /
 * catastrophic error) raised by the low-level mthca driver.
 * Translates the driver's ib_event into an IBAL ib_event_rec_t and
 * forwards it to the HCA object's registered async callback.
 * 'context' is the mlnx_hob_t supplied at SRQ creation.
 */
void srq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;
	struct mthca_srq *srq_p;

	// prepare parameters
	event_rec.type = ev->event;
	event_rec.vendor_specific = ev->vendor_specific;
	/* The user's SRQ context was stashed in the mthca SRQ at creation. */
	srq_p = (struct mthca_srq *)ev->element.srq;
	event_rec.context = srq_p->srq_context;

	// call the user callback
	if (hob_p)
		(hob_p->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}
+\r
+\r
 void qp_event_handler(struct ib_event *ev, void *context)\r
 {\r
        mlnx_hob_t *hob_p = (mlnx_hob_t *)context;\r
index b3ce1da..3f18144 100644 (file)
@@ -353,6 +353,8 @@ void cq_comp_handler(struct ib_cq *cq, void *context);
 \r
 void ca_event_handler(struct ib_event *ev, void *context);\r
 \r
+void srq_event_handler(struct ib_event *ev, void *context);\r
+\r
 void qp_event_handler(struct ib_event *ev, void *context);\r
 \r
 void cq_event_handler(struct ib_event *ev, void *context);\r
index 7ecfed4..5fe3181 100644 (file)
@@ -73,14 +73,15 @@ static void _build_str( const char *        format, ... )
 #define WPP_CONTROL_GUIDS \\r
        WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(8BF1F640,63FE,4743,B9EF,FA38C695BFDE),  \\r
        WPP_DEFINE_BIT( HCA_DBG_DEV) \\r
-       WPP_DEFINE_BIT( HCA_DBG_INIT) \\r
        WPP_DEFINE_BIT( HCA_DBG_PNP) \\r
+       WPP_DEFINE_BIT( HCA_DBG_INIT) \\r
        WPP_DEFINE_BIT( HCA_DBG_MAD) \\r
        WPP_DEFINE_BIT( HCA_DBG_PO) \\r
        WPP_DEFINE_BIT( HCA_DBG_CQ) \\r
        WPP_DEFINE_BIT( HCA_DBG_QP) \\r
        WPP_DEFINE_BIT( HCA_DBG_MEMORY) \\r
        WPP_DEFINE_BIT( HCA_DBG_AV) \\r
+       WPP_DEFINE_BIT( HCA_DBG_SRQ) \\r
        WPP_DEFINE_BIT( HCA_DBG_LOW) \\r
        WPP_DEFINE_BIT( HCA_DBG_SHIM))\r
 \r
@@ -97,10 +98,10 @@ static void _build_str( const char *        format, ... )
 // HCA_ENTER(FLAG);\r
 // HCA_EXIT(FLAG);\r
 // USEPREFIX(HCA_PRINT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :");\r
-// USEPREFIX(HCA_PRINT_EXIT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :");\r
-// USESUFFIX(HCA_PRINT_EXIT, "[MTHCA] :%!FUNC!():]");\r
 // USESUFFIX(HCA_ENTER, " [MTHCA] :%!FUNC!()[");\r
 // USESUFFIX(HCA_EXIT, " [MTHCA] :%!FUNC!()]");\r
+// USEPREFIX(HCA_PRINT_EXIT, "%!STDPREFIX! [MTHCA] :%!FUNC!() :");\r
+// USESUFFIX(HCA_PRINT_EXIT, "[MTHCA] :%!FUNC!():]");\r
 // end_wpp\r
 \r
 \r
@@ -121,16 +122,17 @@ static void _build_str( const char *      format, ... )
 \r
 \r
 #define HCA_DBG_DEV    (1 << 0)\r
-#define HCA_DBG_INIT   (1<<1)\r
-#define HCA_DBG_PNP    (1 << 2)\r
+#define HCA_DBG_PNP    (1<<1)\r
+#define HCA_DBG_INIT   (1 << 2)\r
 #define HCA_DBG_MAD    (1 << 3)\r
 #define HCA_DBG_PO     (1 << 4)\r
 #define HCA_DBG_QP     (1 << 5)\r
 #define HCA_DBG_CQ     (1 << 6)\r
 #define HCA_DBG_MEMORY (1 << 7)\r
 #define HCA_DBG_AV     (1<<8)\r
-#define HCA_DBG_LOW    (1 << 9)\r
-#define HCA_DBG_SHIM   (1 << 10)\r
+#define HCA_DBG_SRQ    (1 << 9)\r
+#define HCA_DBG_LOW    (1 << 10)\r
+#define HCA_DBG_SHIM   (1 << 11)\r
 \r
 \r
 #if DBG\r
index 5153419..6e1eb47 100644 (file)
@@ -52,6 +52,8 @@
 /*\r
 * Work Request Processing Verbs.\r
 */\r
+\r
+\r
 ib_api_status_t\r
 mlnx_post_send (\r
        IN      const   ib_qp_handle_t                                  h_qp,\r
@@ -65,9 +67,6 @@ mlnx_post_send (
 \r
        HCA_ENTER(HCA_DBG_QP);\r
        \r
-       // sanity checks\r
-\r
-       // create CQ\r
        err = ib_dev->post_send(ib_qp_p, p_send_wr, pp_failed );\r
        if (err) {\r
                HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,\r
@@ -102,9 +101,6 @@ mlnx_post_recv (
        \r
        HCA_ENTER(HCA_DBG_QP);\r
 \r
-       // sanity checks\r
-       \r
-       // create CQ\r
        err = ib_dev->post_recv(ib_qp_p, p_recv_wr, pp_failed );\r
        if (err) {\r
                HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP,\r
@@ -125,6 +121,39 @@ err_post_recv:
                                                                                                                                                        \r
 }\r
 \r
+ib_api_status_t \r
+mlnx_post_srq_recv (\r
+       IN              const   ib_srq_handle_t                         h_srq,\r
+       IN                              ib_recv_wr_t                            *p_recv_wr,\r
+               OUT                     ib_recv_wr_t                            **pp_failed OPTIONAL )\r
+{\r
+       int err;\r
+       ib_api_status_t         status;\r
+       struct ib_srq *ib_srq_p = (struct ib_srq *)h_srq;\r
+       struct ib_device *ib_dev = ib_srq_p->device;\r
+       \r
+       HCA_ENTER(HCA_DBG_QP);\r
+\r
+       err = ib_dev->post_srq_recv(ib_srq_p, p_recv_wr, pp_failed );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP,\r
+                       ("post_srq_recv failed (%d)\n", err));\r
+               if (err == -ENOMEM)\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+               else\r
+                       status = errno_to_iberr(err);\r
+               goto err_post_recv;\r
+       }\r
+\r
+       status = IB_SUCCESS;\r
+               \r
+err_post_recv: \r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,\r
+               ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+       return status;\r
+                                                                                                                                                       \r
+}\r
+\r
 /*\r
 * Completion Processing and Completion Notification Request Verbs.\r
 */\r
@@ -249,6 +278,7 @@ mlnx_direct_if(
 {\r
        p_interface->post_send = mlnx_post_send;\r
        p_interface->post_recv = mlnx_post_recv;\r
+       p_interface->post_srq_recv = mlnx_post_srq_recv;\r
 \r
        p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify;\r
        p_interface->peek_cq =  NULL; /* mlnx_peek_cq: Not implemented */\r
index 02b0f70..cb395c8 100644 (file)
@@ -233,6 +233,7 @@ static inline errno_to_iberr(int err)
                MAP_ERR( ENODEV, IB_UNSUPPORTED );\r
                MAP_ERR( EINVAL, IB_INVALID_PARAMETER );\r
                MAP_ERR( ENOSYS, IB_UNSUPPORTED );\r
+               MAP_ERR( ERANGE, IB_INVALID_SETTING );\r
                default:\r
                        //HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
                        //      "Unmapped errno (%d)\n", err);\r
index f309577..5c73110 100644 (file)
@@ -70,6 +70,12 @@ mlnx_attach_mcast (
                status = IB_UNSUPPORTED;\r
                goto err_user_unsupported;\r
        }\r
+\r
+       if( !cl_is_blockable() ) {\r
+                       status = IB_UNSUPPORTED;\r
+                       goto err_unsupported;\r
+       }\r
+\r
        if (!p_mcast_gid || !ph_mcast) {\r
                status = IB_INVALID_PARAMETER;\r
                goto err_invalid_param;\r
@@ -102,8 +108,8 @@ mlnx_attach_mcast (
        RtlCopyMemory(mcast_p->mcast_gid.raw, p_mcast_gid->raw, sizeof *p_mcast_gid);\r
        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", \r
                mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,\r
-               *(uint64_t*)&mcast_p->mcast_gid.raw[0],\r
-               *(uint64_t*)&mcast_p->mcast_gid.raw[8] ));\r
+               cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[0]),\r
+               cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[8] )));\r
        \r
        // return the result\r
        if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p;\r
@@ -115,6 +121,7 @@ err_attach:
        kfree(mcast_p);\r
 err_no_mem:    \r
 err_invalid_param:\r
+err_unsupported:       \r
 err_user_unsupported:\r
 end:           \r
        HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,\r
@@ -134,33 +141,41 @@ mlnx_detach_mcast (
        // sanity check\r
        if (!mcast_p || !mcast_p->ib_qp_p)\r
        {\r
-               HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,\r
+               HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,\r
                        ("completes with ERROR status IB_INVALID_PARAMETER\n"));\r
-               return IB_INVALID_PARAMETER;\r
+               status =  IB_INVALID_PARAMETER;\r
+               goto err_invalid_param;\r
        }\r
-\r
        ib_dev = mcast_p->ib_qp_p->device;\r
 \r
+       if( !cl_is_blockable() ) {\r
+                       status = IB_UNSUPPORTED;\r
+                       goto err_unsupported;\r
+       }\r
+\r
+\r
        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", \r
                mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,\r
                *(uint64_t*)&mcast_p->mcast_gid.raw[0],\r
                *(uint64_t*)&mcast_p->mcast_gid.raw[8] ));\r
        \r
        // detach\r
-  err = ibv_detach_mcast( mcast_p->ib_qp_p, \r
-      (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid );\r
-  if (err) {\r
-    HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("ibv_detach_mcast failed (%d)\n", err));\r
-    status = errno_to_iberr(err);\r
-    goto err_detach_mcast;\r
-  }\r
+       err = ibv_detach_mcast( mcast_p->ib_qp_p, \r
+               (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid );\r
+       if (err) {\r
+               HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM  ,("ibv_detach_mcast failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+               goto err_detach_mcast;\r
+       }\r
 \r
        status = IB_SUCCESS;\r
 \r
 err_detach_mcast:\r
        kfree(mcast_p);\r
-       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,\r
+err_unsupported:       \r
+       HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,\r
                ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+err_invalid_param:\r
        return status;\r
 }\r
 \r
index 7955157..d7f0737 100644 (file)
@@ -793,6 +793,153 @@ err_destroy_ah:
        return status;\r
 }\r
 \r
+/*\r
+*      Shared Queue Pair Management Verbs\r
+*/\r
+\r
+\r
/*
 * mlnx_create_srq
 *
 * Creates a shared receive queue on the mthca device.  For user-mode
 * callers (p_umv_buf->command set) it validates the UVP exchange buffer
 * and passes the caller's ucontext down; kernel callers get p_context
 * == NULL.  The SRQ's async events are routed to srq_event_handler with
 * the HOB as handler context, while the caller's srq_context is stored
 * in the mthca SRQ object for event demultiplexing.
 */
ib_api_status_t
mlnx_create_srq (
	IN		const	ib_pd_handle_t			h_pd,
	IN		const	void					*srq_context,
	IN		const	ib_srq_attr_t * const		p_srq_attr,
		OUT			ib_srq_handle_t			*ph_srq,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_srq *ib_srq_p;
	struct mthca_srq *srq_p;
	struct ib_srq_init_attr srq_init_attr;
	struct ib_ucontext *p_context = NULL;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	struct ib_device *ib_dev = ib_pd_p->device;
	mlnx_hob_t	 *hob_p = HOB_FROM_IBDEV(ib_dev);

	HCA_ENTER(HCA_DBG_SRQ);

	if( p_umv_buf  && p_umv_buf->command) {

		/* User-mode call: the UVP exchange buffer must be large enough
		 * for the ABI structures in both directions. */
		if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
		p_context = ib_pd_p->ucontext;
	}

	// prepare the parameters
	RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));
	srq_init_attr.event_handler = srq_event_handler;
	srq_init_attr.srq_context = hob_p;
	srq_init_attr.attr = *p_srq_attr;

	// allocate srq 
	ib_srq_p = ibv_create_srq(ib_pd_p, &srq_init_attr, p_context, p_umv_buf );
	if (IS_ERR(ib_srq_p)) {
		err = PTR_ERR(ib_srq_p);
		HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_srq;
	}

	/* Stash the caller's context so srq_event_handler can report it. */
	srq_p = (struct mthca_srq *)ib_srq_p;
	srq_p->srq_context = (void*)srq_context;
	
	// return the result
	if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p;

	status = IB_SUCCESS;
	
err_create_srq:
err_inval_params:
	/* For user-mode callers, mirror the status into the UVP buffer. */
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
+\r
+\r
+ib_api_status_t\r
+mlnx_modify_srq (\r
+               IN              const   ib_srq_handle_t                         h_srq,\r
+               IN              const   ib_srq_attr_t* const                    p_srq_attr,\r
+               IN              const   ib_srq_attr_mask_t                      srq_attr_mask,\r
+               IN      OUT             ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
+       struct ib_device *ib_dev = ib_srq->device;\r
+       UNUSED_PARAM(p_umv_buf);\r
+\r
+       HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+       err = ibv_modify_srq(ib_srq, (void*)p_srq_attr, srq_attr_mask);\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
+                       ("ibv_modify_srq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+       }\r
+\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SRQ,\r
+               ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_query_srq (\r
+       IN              const   ib_srq_handle_t                         h_srq,\r
+               OUT                     ib_srq_attr_t* const                    p_srq_attr,\r
+       IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
+       struct ib_device *ib_dev = ib_srq->device;\r
+       UNUSED_PARAM(p_umv_buf);\r
+\r
+       HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+       err = ibv_query_srq(ib_srq, p_srq_attr);\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
+                       ("ibv_query_srq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+       }\r
+\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SRQ,\r
+               ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+       return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_destroy_srq (\r
+       IN      const   ib_srq_handle_t         h_srq )\r
+{\r
+       int err;\r
+       ib_api_status_t         status = IB_SUCCESS;\r
+       struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
+       struct ib_device *ib_dev = ib_srq->device;\r
+\r
+       HCA_ENTER(HCA_DBG_SRQ);\r
+\r
+       err = ibv_destroy_srq(ib_srq);\r
+       if (err) {\r
+               HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
+                       ("ibv_destroy_srq failed (%d)\n", err));\r
+               status = errno_to_iberr(err);\r
+       }\r
+\r
+       HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SRQ,\r
+               ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
+       return status;\r
+}\r
+\r
 /*\r
 *      Queue Pair Management Verbs\r
 */\r
@@ -830,8 +977,6 @@ _create_qp (
                }\r
                p_context = ib_pd_p->ucontext;\r
        }\r
-       else \r
-               p_context = NULL;\r
 \r
        // prepare the parameters\r
        RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));\r
@@ -840,6 +985,7 @@ _create_qp (
        qp_init_attr.qp_context = hob_p;\r
        qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;\r
        qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;\r
+       qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;\r
        qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;\r
        qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;\r
        qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;\r
@@ -1148,7 +1294,7 @@ mlnx_create_cq (
        }\r
 \r
        /* sanity check */\r
-       if (*p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {\r
+       if (!*p_size || *p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {\r
                status = IB_INVALID_CQ_SIZE;\r
                goto err_cqe;\r
        }\r
@@ -1290,6 +1436,11 @@ setup_ci_interface(
                p_interface->modify_av = mlnx_modify_av;\r
                p_interface->destroy_av = mlnx_destroy_av;\r
 \r
+               p_interface->create_srq = mlnx_create_srq;\r
+               p_interface->modify_srq = mlnx_modify_srq;\r
+               p_interface->query_srq = mlnx_query_srq;\r
+               p_interface->destroy_srq = mlnx_destroy_srq;\r
+\r
                p_interface->create_qp = mlnx_create_qp;\r
                p_interface->create_spl_qp = mlnx_create_spl_qp;\r
                p_interface->modify_qp = mlnx_modify_qp;\r
index f8fe892..192c24a 100644 (file)
@@ -236,12 +236,12 @@ enum ib_event_type {
        IB_EVENT_DEVICE_FATAL                                           = IB_AE_LOCAL_FATAL,
        IB_EVENT_PORT_ACTIVE                                            = IB_AE_PORT_ACTIVE,
        IB_EVENT_PORT_ERR                                                               = IB_AE_PORT_DOWN,
+       IB_EVENT_SRQ_LIMIT_REACHED                              = IB_AE_SRQ_LIMIT_REACHED,
+       IB_EVENT_SRQ_CATAS_ERROR                                        = IB_AE_SRQ_CATAS_ERROR,
+       IB_EVENT_SRQ_QP_LAST_WQE_REACHED                = IB_AE_SRQ_QP_LAST_WQE_REACHED,
        IB_EVENT_LID_CHANGE                                                     = IB_AE_UNKNOWN + 1,
        IB_EVENT_PKEY_CHANGE,
-       IB_EVENT_SM_CHANGE,
-       IB_EVENT_SRQ_ERR,
-       IB_EVENT_SRQ_LIMIT_REACHED,
-       IB_EVENT_QP_LAST_WQE_REACHED
+       IB_EVENT_SM_CHANGE
 };
 
 struct ib_event {
@@ -335,21 +335,10 @@ enum ib_cq_notify {
        IB_CQ_NEXT_COMP
 };
 
-enum ib_srq_attr_mask {
-       IB_SRQ_MAX_WR   = 1 << 0,
-       IB_SRQ_LIMIT    = 1 << 1,
-};
-
-struct ib_srq_attr {
-       u32     max_wr;
-       u32     max_sge;
-       u32     srq_limit;
-};
-
 struct ib_srq_init_attr {
-       void                  (*event_handler)(struct ib_event *, void *);
-       void                   *srq_context;
-       struct ib_srq_attr      attr;
+       void                                    (*event_handler)(struct ib_event *, void *);
+       void                                    *srq_context;
+       ib_srq_attr_t                   attr;
 };
 
 struct ib_qp_cap {
@@ -566,23 +555,11 @@ struct ib_umem_chunk {
 };
 #pragma warning( default : 4200 )
 
-struct ib_udata {
-       void *inbuf;
-       void *outbuf;
-       size_t       inlen;
-       size_t       outlen;
-};
-
 #define IB_UMEM_MAX_PAGE_CHUNK                                         \
        ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
         ((char *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
          (char *) &((struct ib_umem_chunk *) 0)->page_list[0]))
 
-struct ib_umem_object {
-       struct ib_uobject       uobject;
-       struct ib_umem          umem;
-};
-
 struct ib_pd {
        struct list_head        list;           /* for chaining AV MRs (for user mode only) */
        struct ib_device       *device;
@@ -613,10 +590,11 @@ struct ib_cq {
 struct ib_srq {
        struct ib_device       *device;
        struct ib_pd           *pd;
-       struct ib_uobject      *uobject;
+       struct ib_ucontext      *ucontext;
+       struct ib_mr *ib_mr;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
-       atomic_t                usecnt;
+       atomic_t                usecnt; /* count number of work queues */
 };
 
 struct ib_qp {
@@ -733,10 +711,10 @@ struct ib_device {
                                                 struct ib_srq_init_attr *srq_init_attr,
                                                 ci_umv_buf_t* const    p_umv_buf);
        int                        (*modify_srq)(struct ib_srq *srq,
-                                                struct ib_srq_attr *srq_attr,
-                                                enum ib_srq_attr_mask srq_attr_mask);
+                                                ib_srq_attr_t *srq_attr,
+                                                ib_srq_attr_mask_t srq_attr_mask);
        int                        (*query_srq)(struct ib_srq *srq,
-                                               struct ib_srq_attr *srq_attr);
+                                               ib_srq_attr_t *srq_attr);
        int                        (*destroy_srq)(struct ib_srq *srq);
        int                        (*post_srq_recv)(struct ib_srq *srq,
                                                    struct _ib_recv_wr *recv_wr,
@@ -951,6 +929,8 @@ int ibv_destroy_ah(struct ib_ah *ah);
  * @srq_init_attr: A list of initial attributes required to create the
  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to 
  *   the actual capabilities of the created SRQ.
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
  *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
  * requested size of the SRQ, and set to the actual values allocated
@@ -958,7 +938,9 @@ int ibv_destroy_ah(struct ib_ah *ah);
  * will always be at least as large as the requested values.
  */
 struct ib_srq *ibv_create_srq(struct ib_pd *pd,
-                            struct ib_srq_init_attr *srq_init_attr);
+       struct ib_srq_init_attr *srq_init_attr,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
 
 /**
  * ibv_modify_srq - Modifies the attributes for the specified SRQ.
@@ -973,8 +955,8 @@ struct ib_srq *ibv_create_srq(struct ib_pd *pd,
  * the number of receives queued drops below the limit.
  */
 int ibv_modify_srq(struct ib_srq *srq,
-                 struct ib_srq_attr *srq_attr,
-                 enum ib_srq_attr_mask srq_attr_mask);
+       ib_srq_attr_t *srq_attr,
+       ib_srq_attr_mask_t srq_attr_mask);
 
 /**
  * ibv_query_srq - Returns the attribute list and current values for the
@@ -983,7 +965,7 @@ int ibv_modify_srq(struct ib_srq *srq,
  * @srq_attr: The attributes of the specified SRQ.
  */
 int ibv_query_srq(struct ib_srq *srq,
-                struct ib_srq_attr *srq_attr);
+       ib_srq_attr_t *srq_attr);
 
 /**
  * ibv_destroy_srq - Destroys the specified SRQ.
@@ -999,8 +981,8 @@ int ibv_destroy_srq(struct ib_srq *srq);
  *   the work request that failed to be posted on the QP.
  */
 static inline int ibv_post_srq_recv(struct ib_srq *srq,
-                                  struct _ib_recv_wr *recv_wr,
-                                  struct _ib_recv_wr **bad_recv_wr)
+       struct _ib_recv_wr *recv_wr,
+       struct _ib_recv_wr **bad_recv_wr)
 {
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
 }
@@ -1015,9 +997,9 @@ static inline int ibv_post_srq_recv(struct ib_srq *srq,
  * @context: user process context (for application calls only)
  * @p_umv_buf: parameters structure (for application calls only)
  */
-       struct ib_qp *ibv_create_qp(struct ib_pd *pd,
-               struct ib_qp_init_attr *qp_init_attr,
-               struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+struct ib_qp *ibv_create_qp(struct ib_pd *pd,
+       struct ib_qp_init_attr *qp_init_attr,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
 
 /**
  * ibv_modify_qp - Modifies the attributes for the specified QP and then
index 4392879..f1b2f02 100644 (file)
-#include <mt_l2w.h>\r
-#include <hca_data.h>\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "mt_l2w.tmh"\r
-#endif\r
-\r
-pci_pool_t *\r
-pci_pool_create (const char *name, struct mthca_dev *mdev,\r
-        size_t size, size_t align, size_t allocation)\r
-{\r
-       pci_pool_t *pool;\r
-       UNREFERENCED_PARAMETER(align);\r
-       UNREFERENCED_PARAMETER(allocation);\r
-\r
-       MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);\r
-       \r
-       // allocation parameter is not handled yet\r
-       ASSERT(allocation == 0);\r
-\r
-       // allocate object\r
-       pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );\r
-       if (pool == NULL) \r
-               return NULL;\r
-\r
-       //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory,\r
-       // while default alloc function  - ExAllocatePoolWithTag -doesn't.\r
-       // But for now it is used for elements of size <= PAGE_SIZE\r
-       // Anyway - a sanity check:\r
-       ASSERT(size <= PAGE_SIZE);\r
-       if (size > PAGE_SIZE)\r
-               return NULL;\r
-\r
-       //TODO: not too effective: one can read its own alloc/free functions\r
-       ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );\r
-       \r
-       // fill the object\r
-       pool->mdev = mdev;\r
-       pool->size = size;\r
-       strncpy( pool->name, name, sizeof pool->name );\r
-\r
-       return pool;            \r
-}\r
-\r
-// from lib/string.c\r
-/**\r
-* strlcpy - Copy a %NUL terminated string into a sized buffer\r
-* @dest: Where to copy the string to\r
-* @src: Where to copy the string from\r
-* @size: size of destination buffer\r
-*\r
-* Compatible with *BSD: the result is always a valid\r
-* NUL-terminated string that fits in the buffer (unless,\r
-* of course, the buffer size is zero). It does not pad\r
-* out the result like strncpy() does.\r
-*/\r
-SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)\r
-{\r
-        SIZE_T ret = strlen(src);\r
-\r
-        if (size) {\r
-                SIZE_T len = (ret >= size) ? size-1 : ret;\r
-                memcpy(dest, src, len);\r
-                dest[len] = '\0';\r
-        }\r
-        return ret;\r
-}\r
-\r
-\r
-int __bitmap_full(const unsigned long *bitmap, int bits)\r
-{\r
-       int k, lim = bits/BITS_PER_LONG;\r
-       for (k = 0; k < lim; ++k)\r
-               if (~bitmap[k])\r
-               return 0;\r
-\r
-       if (bits % BITS_PER_LONG)\r
-               if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))\r
-               return 0;\r
-\r
-       return 1;\r
-}\r
-\r
-int __bitmap_empty(const unsigned long *bitmap, int bits)\r
-{\r
-       int k, lim = bits/BITS_PER_LONG;\r
-       for (k = 0; k < lim; ++k)\r
-               if (bitmap[k])\r
-                       return 0;\r
-\r
-       if (bits % BITS_PER_LONG)\r
-               if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))\r
-                       return 0;\r
-\r
-       return 1;\r
-}\r
-\r
-int request_irq(\r
-       IN      CM_PARTIAL_RESOURCE_DESCRIPTOR  *int_info,      /* interrupt resources */\r
-       IN              KSPIN_LOCK      *isr_lock,              /* spin lock for ISR */                 \r
-       IN              PKSERVICE_ROUTINE isr,          /* ISR */\r
-       IN              void *isr_ctx,                                          /* ISR context */\r
-       OUT     PKINTERRUPT *int_obj                    /* interrupt object */\r
-       )\r
-{\r
-       NTSTATUS                status;\r
-\r
-       status = IoConnectInterrupt(\r
-               int_obj,                                                                                                                /* InterruptObject */\r
-               isr,                                                                                                                            /* ISR */ \r
-               isr_ctx,                                                                                                                /* ISR context */\r
-               isr_lock,                                                                                                       /* spinlock */\r
-               int_info->u.Interrupt.Vector,                                   /* interrupt vector */\r
-               (KIRQL)int_info->u.Interrupt.Level,             /* IRQL */\r
-               (KIRQL)int_info->u.Interrupt.Level,             /* Synchronize IRQL */\r
-               (BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? \r
-               Latched : LevelSensitive),                                                      /* interrupt type: LATCHED or LEVEL */\r
-               (BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared),         /* vector shared or not */\r
-               g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity,        /* interrupt affinity */\r
-               FALSE                                                                                                                   /* whether to save Float registers */\r
-               );\r
-\r
-       if (!NT_SUCCESS(status)) {\r
-        HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("IoConnectInterrupt  failed status %d (did you change the processor_affinity ? )\n",status));\r
-               return -EFAULT;         /* failed to connect interrupt */\r
-    } \r
-       else\r
-               return 0;\r
-}\r
-\r
+#include <mt_l2w.h>
+#include <hca_data.h>
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_l2w.tmh"
+#endif
+
+/*
+ * Creates a pci_pool (Linux pci_pool_create() look-alike) backed by an
+ * NPAGED lookaside list.  align/allocation are accepted for interface
+ * compatibility but not handled yet.  Returns NULL on failure.
+ */
+pci_pool_t *
+pci_pool_create (const char *name, struct mthca_dev *mdev,
+        size_t size, size_t align, size_t allocation)
+{
+       pci_pool_t *pool;
+       UNREFERENCED_PARAMETER(align);
+       UNREFERENCED_PARAMETER(allocation);
+
+       MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+       
+       // allocation parameter is not handled yet
+       ASSERT(allocation == 0);
+
+       //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory,
+       // while default alloc function  - ExAllocatePoolWithTag -doesn't.
+       // But for now it is used for elements of size <= PAGE_SIZE
+       // Anyway - a sanity check:
+       // (validated BEFORE allocating the pool object, so nothing leaks on failure)
+       ASSERT(size <= PAGE_SIZE);
+       if (size > PAGE_SIZE)
+               return NULL;
+
+       // allocate object
+       pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );
+       if (pool == NULL) 
+               return NULL;
+
+       //TODO: not too effective: one can read its own alloc/free functions
+       ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );
+       
+       // fill the object
+       pool->mdev = mdev;
+       pool->size = size;
+       strncpy( pool->name, name, sizeof pool->name );
+       // strncpy does not guarantee termination when name is too long
+       pool->name[sizeof pool->name - 1] = '\0';
+
+       return pool;            
+}
+
+// from lib/string.c
+/**
+* strlcpy - Copy a %NUL terminated string into a sized buffer
+* @dest: Where to copy the string to
+* @src: Where to copy the string from
+* @size: size of destination buffer
+*
+* Compatible with *BSD: the result is always a valid
+* NUL-terminated string that fits in the buffer (unless,
+* of course, the buffer size is zero). It does not pad
+* out the result like strncpy() does.
+*/
+SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)
+{
+        /* full length of the source, returned so callers can detect truncation */
+        SIZE_T src_len = strlen(src);
+
+        if (size != 0) {
+                /* copy as much as fits, always leaving room for the terminator */
+                SIZE_T copy_len = (src_len >= size) ? (size - 1) : src_len;
+                memcpy(dest, src, copy_len);
+                dest[copy_len] = '\0';
+        }
+        return src_len;
+}
+
+
+/* Returns 1 iff every one of the first 'bits' bits of 'bitmap' is set. */
+int __bitmap_full(const unsigned long *bitmap, int bits)
+{
+       int k;
+       int lim = bits / BITS_PER_LONG;
+
+       /* any clear bit inside a fully-used word means "not full" */
+       for (k = 0; k < lim; ++k) {
+               if (~bitmap[k])
+                       return 0;
+       }
+
+       /* check the trailing partial word, masked down to the valid bits */
+       if (bits % BITS_PER_LONG) {
+               if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+                       return 0;
+       }
+
+       return 1;
+}
+
+/* Returns 1 iff every one of the first 'bits' bits of 'bitmap' is clear. */
+int __bitmap_empty(const unsigned long *bitmap, int bits)
+{
+       int k;
+       int lim = bits / BITS_PER_LONG;
+
+       /* any set bit inside a fully-used word means "not empty" */
+       for (k = 0; k < lim; ++k) {
+               if (bitmap[k])
+                       return 0;
+       }
+
+       /* check the trailing partial word, masked down to the valid bits */
+       if (bits % BITS_PER_LONG) {
+               if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+                       return 0;
+       }
+
+       return 1;
+}
+
+/*
+ * Connects an ISR to the device interrupt described by int_info
+ * (a Linux request_irq() look-alike built on IoConnectInterrupt).
+ * On success *int_obj receives the interrupt object needed later
+ * for IoDisconnectInterrupt; returns 0 on success, -EFAULT on failure.
+ * NOTE(review): g_processor_affinity, when non-zero, overrides the
+ * affinity reported in the resource descriptor.
+ */
+int request_irq(
+	IN	CM_PARTIAL_RESOURCE_DESCRIPTOR	*int_info,	/* interrupt resources */
+	IN		KSPIN_LOCK	*isr_lock,		/* spin lock for ISR */			
+	IN		PKSERVICE_ROUTINE isr,		/* ISR */
+	IN		void *isr_ctx,						/* ISR context */
+	OUT	PKINTERRUPT *int_obj			/* interrupt object */
+	)
+{
+	NTSTATUS		status;
+
+	status = IoConnectInterrupt(
+		int_obj,										/* InterruptObject */
+		isr,												/* ISR */ 
+		isr_ctx,										/* ISR context */
+		isr_lock,									/* spinlock */
+		int_info->u.Interrupt.Vector,			/* interrupt vector */
+		(KIRQL)int_info->u.Interrupt.Level,		/* IRQL */
+		(KIRQL)int_info->u.Interrupt.Level,		/* Synchronize IRQL */
+		(BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? 
+		Latched : LevelSensitive),							/* interrupt type: LATCHED or LEVEL */
+		(BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared),		/* vector shared or not */
+		g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity,	/* interrupt affinity */
+		FALSE													/* whether to save Float registers */
+		);
+
+	if (!NT_SUCCESS(status)) {
+        HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("IoConnectInterrupt  failed status %d (did you change the processor_affinity ? )\n",status));
+		return -EFAULT;		/* failed to connect interrupt */
+    } 
+	else
+		return 0;
+}
+
index fbe1162..faf3405 100644 (file)
@@ -74,7 +74,7 @@ typedef void (*MT_EMPTY_FUNC)();
 #define CPU_2_BE64_PREP                
 #define CPU_2_BE64(x)                  cl_hton64(x)
 #else
-#define CPU_2_BE64_PREP        unsigned __int64 __tmp__;       
+#define CPU_2_BE64_PREP        unsigned __int64 __tmp__        
 #define CPU_2_BE64(x)                  ( __tmp__ = x, cl_hton64(__tmp__) )
 #endif
 
index 8ae746f..3257b81 100644 (file)
@@ -157,6 +157,7 @@ struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
        struct ib_mr *ib_mr = NULL;
        u64 start = 0;
        u64 user_handle = 0;
+       struct ibv_create_ah_resp *create_ah_resp = 0;
 
        // for user call we need also allocate MR
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
@@ -185,6 +186,12 @@ struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 
        ah = pd->device->create_ah(pd, ah_attr);
 
+       /* fill obligatory fields */
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;
+               create_ah_resp->user_handle = user_handle;
+       }
+
        if (IS_ERR(ah)) {
                err = PTR_ERR(ah);
                HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV ,("create_ah failed (%d)\n", err));
@@ -203,7 +210,6 @@ struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
                struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;
                create_ah_resp->start = start;
-               create_ah_resp->user_handle = user_handle;
                create_ah_resp->mr.lkey = ib_mr->lkey;
                create_ah_resp->mr.rkey = ib_mr->rkey;
                create_ah_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
@@ -304,59 +310,119 @@ int ibv_destroy_ah(struct ib_ah *ah)
 /* Shared receive queues */
 
 struct ib_srq *ibv_create_srq(struct ib_pd *pd,
-                            struct ib_srq_init_attr *srq_init_attr)
+       struct ib_srq_init_attr *srq_init_attr,
+       struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
 {
-       struct ib_srq *srq;
+       int err;
+       struct ib_srq *ib_srq;
+       struct ib_mr *ib_mr = NULL;
+       u64 user_handle = 0;
+       struct ibv_create_srq_resp *create_srq_resp = 0;
 
-       if (!pd->device->create_srq)
-               return ERR_PTR(-ENOSYS);
+       // for user call we need also allocate MR
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct ibv_create_srq *create_srp = (struct ibv_create_srq *)(void*)p_umv_buf->p_inout_buf;
+               
+               // create region
+               ib_mr = ibv_reg_mr( 
+                       (struct ib_pd *)(ULONG_PTR)create_srp->mr.pd_handle, 
+                       create_srp->mr.access_flags, 
+                       (void*)(ULONG_PTR)create_srp->mr.start,
+                       create_srp->mr.length, create_srp->mr.hca_va, TRUE );
+               if (IS_ERR(ib_mr)) {
+                       err = PTR_ERR(ib_mr);
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_reg_mr failed (%d)\n", err));
+                       goto err_alloc_mr;
+               }
+               create_srp->lkey = ib_mr->lkey;
+               user_handle = create_srp->user_handle;
+       }
 
-       srq = pd->device->create_srq(pd, srq_init_attr, NULL);
+       ib_srq = pd->device->create_srq(pd, srq_init_attr, p_umv_buf);
 
-       if (!IS_ERR(srq)) {
-               srq->device        = pd->device;
-               srq->pd            = pd;
-               srq->uobject       = NULL;
-               srq->event_handler = srq_init_attr->event_handler;
-               srq->srq_context   = srq_init_attr->srq_context;
-               atomic_inc(&pd->usecnt);
-               atomic_set(&srq->usecnt, 0);
+       /* fill obligatory fields */
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               create_srq_resp = (struct ibv_create_srq_resp *)(void*)p_umv_buf->p_inout_buf;
+               create_srq_resp->user_handle = user_handle;
+       }
+
+       if (IS_ERR(ib_srq)) {
+               err = PTR_ERR(ib_srq);
+               HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err));
+               goto err_create_srq;
+       }
+
+       // fill results
+       ib_srq->device                          = pd->device;
+       ib_srq->pd                              = pd;
+       ib_srq->ucontext                        = context;
+       ib_srq->event_handler           = srq_init_attr->event_handler;
+       ib_srq->srq_context             = srq_init_attr->srq_context;
+       atomic_inc(&pd->usecnt);
+       atomic_set(&ib_srq->usecnt, 0);
+       if (context)
+               atomic_inc(&context->usecnt);
+               HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+
+       HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ ,
+               ("uctx %p, qhndl %p, qnum %#x \n", 
+               pd->ucontext, ib_srq, ((struct mthca_srq*)ib_srq)->srqn ) );
+
+       // fill results for user
+       if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
+               struct mthca_srq *srq = (struct mthca_srq *)ib_srq;
+               ib_srq->ib_mr = ib_mr;
+               create_srq_resp->mr.lkey = ib_mr->lkey;
+               create_srq_resp->mr.rkey = ib_mr->rkey;
+               create_srq_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
+               create_srq_resp->srq_handle = (__u64)(ULONG_PTR)srq;
+               create_srq_resp->max_wr = (mthca_is_memfree(to_mdev(pd->device))) ? srq->max - 1 : srq->max;
+               create_srq_resp->max_sge = srq->max_gs;
+               create_srq_resp->srqn= srq->srqn;
+               p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
                HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", 
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }
 
-       return srq;
+       return ib_srq;
+       
+err_create_srq:
+       if (ib_mr)
+               ibv_dereg_mr(ib_mr);
+err_alloc_mr:
+       if( p_umv_buf && p_umv_buf->command ) 
+               p_umv_buf->status = IB_ERROR;
+       HCA_EXIT(HCA_DBG_QP);
+       return ERR_PTR(err);
 }
 
 int ibv_modify_srq(struct ib_srq *srq,
-                 struct ib_srq_attr *srq_attr,
-                 enum ib_srq_attr_mask srq_attr_mask)
+       ib_srq_attr_t *srq_attr,
+       ib_srq_attr_mask_t srq_attr_mask)
 {
        return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
 }
 
 int ibv_query_srq(struct ib_srq *srq,
-                struct ib_srq_attr *srq_attr)
+       ib_srq_attr_t *srq_attr)
 {
-       return srq->device->query_srq ?
-               srq->device->query_srq(srq, srq_attr) : -ENOSYS;
+       return srq->device->query_srq(srq, srq_attr);
 }
 
 int ibv_destroy_srq(struct ib_srq *srq)
 {
-       struct ib_pd *pd;
        int ret;
-
-       if (atomic_read(&srq->usecnt))
-               return -EBUSY;
-
-       pd = srq->pd;
+       struct ib_pd *pd = srq->pd;
+       struct ib_ucontext      *ucontext = pd->ucontext;
+       struct ib_mr * ib_mr = srq->ib_mr;
 
        ret = srq->device->destroy_srq(srq);
        if (!ret) {
                atomic_dec(&pd->usecnt);
-               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
-                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
+               HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", 
+                       ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
+               release_user_cq_qp_resources(ucontext, ib_mr);
        }
 
        return ret;
index ae2fad9..7ecba1b 100644 (file)
@@ -1571,6 +1571,13 @@ int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                             CMD_TIME_CLASS_A, status);
 }
 
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+                   struct mthca_mailbox *mailbox, u8 *status)
+{
+       return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
+                            CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+}
+
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
 {
        return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
index 3658434..fdeef83 100644 (file)
@@ -302,6 +302,8 @@ int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int srq_num, u8 *status);
 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int srq_num, u8 *status);
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+                   struct mthca_mailbox *mailbox, u8 *status);
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
                    int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
index 719e21a..8e1801f 100644 (file)
@@ -912,13 +912,8 @@ void mthca_free_cq(struct mthca_dev *dev,
        spin_unlock_irq(&lh);
 
        /* wait for all RUNNING DPCs on that EQ to complete */
-       {
-               ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
-               // wait for DPCs, using this EQ, to complete
-               spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_COMP].lock);
-               //TODO: do we need that ? 
-               spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_ASYNC].lock );
-       }
+       ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+       KeFlushQueuedDpcs();
 
        atomic_dec(&cq->refcount);
        wait_event(&cq->wait, !atomic_read(&cq->refcount));
index 4698c3b..c464ced 100644 (file)
@@ -137,8 +137,9 @@ struct mthca_limits {
        int      max_qp_init_rdma;
        int      reserved_qps;
        int      num_srqs;
-       int      reserved_srqs;
        int      max_srq_wqes;
+       int      max_srq_sge;
+       int      reserved_srqs;
        int      num_eecs;
        int      reserved_eecs;
        int      num_cqs;
@@ -487,12 +488,12 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
            struct mthca_srq *srq);
 
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
-                   struct ib_srq_attr *attr, struct mthca_srq *srq);
+       ib_srq_attr_t *attr, struct mthca_srq *srq);
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
-int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-       enum ib_srq_attr_mask attr_mask);
+int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr,
+       ib_srq_attr_mask_t attr_mask);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
-                    enum ib_event_type event_type);
+                    enum ib_event_type event_type, u8 vendor_code);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
 int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr,
                              struct _ib_recv_wr **bad_wr);
@@ -594,5 +595,7 @@ int ib_uverbs_init(void);
 void ib_uverbs_cleanup(void);
 int mthca_ah_grh_present(struct mthca_ah *ah);
 
+int mthca_max_srq_sge(struct mthca_dev *dev);
+
 
 #endif /* MTHCA_DEV_H */
index 7c33e3d..a4eb6ee 100644 (file)
@@ -179,6 +179,11 @@ struct mthca_eqe {
                } qp;\r
                struct {                        \r
                        __be32 srqn;            \r
+                       u32    reserved1;\r
+                       u32    reserved2;\r
+                       u8     reserved3[1];\r
+                       u8     vendor_code;\r
+                       u8     reserved4[2];\r
                }       srq;\r
                struct {\r
                        __be32 cqn;\r
@@ -351,12 +356,17 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 \r
                case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:\r
                        mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,\r
-                                      IB_EVENT_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);\r
+                                      IB_EVENT_SRQ_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);\r
+                       break;\r
+\r
+               case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:\r
+                       mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,\r
+                               IB_EVENT_SRQ_LIMIT_REACHED, eqe->event.qp.vendor_code);\r
                        break;\r
 \r
                case MTHCA_EVENT_TYPE_SRQ_LIMIT:\r
                        mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,\r
-                                               IB_EVENT_SRQ_LIMIT_REACHED);\r
+                               IB_EVENT_SRQ_LIMIT_REACHED, eqe->event.qp.vendor_code);\r
                        break;\r
 \r
                case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:\r
@@ -406,7 +416,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                        break;\r
 \r
                case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:\r
-               case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:\r
                case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:\r
                case MTHCA_EVENT_TYPE_ECC_DETECT:\r
                default:\r
@@ -437,7 +446,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                }\r
         loops++;\r
         if (cl_get_time_stamp() - start > g_max_DPC_time_us ) {\r
-            HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Handeling of EQ stopped, and a new DPC is entered after %d loops\n", loops));\r
+            HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Handling of EQ stopped, and a new DPC is entered after %d loops\n", loops));\r
             KeInsertQueueDpc(&dev->eq_table.eq[eq->eq_num].dpc, NULL, NULL);\r
             break;\r
         }       \r
index 1a34c14..fe8829c 100644 (file)
@@ -209,6 +209,7 @@ static int  mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
        mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
        mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
        mdev->limits.max_desc_sz      = dev_lim->max_desc_sz;
+       mdev->limits.max_srq_sge        = mthca_max_srq_sge(mdev);
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE so the HCA HW can tell the difference between an
index a919c2f..81f811e 100644 (file)
 #include "mthca_cmd.h"
 #include "mthca_memfree.h"
 
- static void init_query_mad(struct ib_smp *mad)
- {
+static void init_query_mad(struct ib_smp *mad)
+{
         mad->base_version      = 1;
         mad->mgmt_class                = IB_MGMT_CLASS_SUBN_LID_ROUTED;
         mad->class_version = 1;
         mad->method                            = IB_MGMT_METHOD_GET;
- }
+}
 
- int mthca_query_device(struct ib_device *ibdev,
+int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
 {
        struct ib_smp *in_mad  = NULL;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr          = mdev->limits.max_srq_wqes;
-       props->max_srq_sge         = mdev->limits.max_sg;
+       if (mthca_is_memfree(mdev))
+               --props->max_srq_wr;
+       props->max_srq_sge         = mdev->limits.max_srq_sge;
        props->local_ca_ack_delay  = (u8)mdev->limits.local_ca_ack_delay;
        props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? 
                                        IB_ATOMIC_LOCAL : IB_ATOMIC_NONE;
@@ -220,7 +222,7 @@ out:
        return err;
 }
 
-int mthca_query_pkey_chunk(struct ib_device *ibdev,
+static int mthca_query_pkey_chunk(struct ib_device *ibdev,
                            u8 port, u16 index, u16 pkey[32])
 {
        struct ib_smp *in_mad  = NULL;
@@ -260,7 +262,7 @@ int mthca_query_pkey_chunk(struct ib_device *ibdev,
        return err;
 }
 
-int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,
+static int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid gid[8])
 {
        struct ib_smp *in_mad  = NULL;
@@ -493,7 +495,7 @@ done:
        return 0;
 }
 
-struct ib_ah *mthca_ah_create(struct ib_pd *pd,
+static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
 {
        int err;
@@ -512,7 +514,7 @@ struct ib_ah *mthca_ah_create(struct ib_pd *pd,
        return &ah->ibah;
 }
 
-int mthca_ah_destroy(struct ib_ah *ah)
+static int mthca_ah_destroy(struct ib_ah *ah)
 {
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);
@@ -520,17 +522,16 @@ int mthca_ah_destroy(struct ib_ah *ah)
        return 0;
 }
 
-struct ib_srq *mthca_create_srq(struct ib_pd *pd,
-                                      struct ib_srq_init_attr *init_attr,
-                                      ci_umv_buf_t* const                      p_umv_buf)
+static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
+                               struct ib_srq_init_attr *init_attr,
+                               ci_umv_buf_t* const p_umv_buf)
 {
-#ifdef WIN_TO_BE_CHANGED
-       struct mthca_create_srq ucmd;
+       struct ibv_create_srq ucmd = { 0 };
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;
 
-       srq = kmalloc(sizeof *srq, GFP_KERNEL);
+       srq = kzalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);
 
@@ -553,11 +554,11 @@ struct ib_srq *mthca_create_srq(struct ib_pd *pd,
        }
 
        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
-                             &init_attr->attr, srq);
+               &init_attr->attr, srq);
 
        if (err && pd->ucontext)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
-                                   context->db_tab, ucmd.db_index);
+                       context->db_tab, ucmd.db_index);
 
        if (err)
                goto err_free;
@@ -574,23 +575,17 @@ err_free:
        kfree(srq);
 
        return ERR_PTR(err);
-#else
-       UNREFERENCED_PARAMETER(p_umv_buf);
-       UNREFERENCED_PARAMETER(init_attr);
-       UNREFERENCED_PARAMETER(pd);
-       return NULL;
-#endif
 }
 
-int mthca_destroy_srq(struct ib_srq *srq)
+static int mthca_destroy_srq(struct ib_srq *srq)
 {
        struct mthca_ucontext *context;
 
-       if (srq->uobject) {
-               context = to_mucontext(srq->uobject->context);
+       if (srq->ucontext) {
+               context = to_mucontext(srq->ucontext);
 
                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
-                                   context->db_tab, to_msrq(srq)->db_index);
+                       context->db_tab, to_msrq(srq)->db_index);
        }
 
        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
@@ -599,7 +594,7 @@ int mthca_destroy_srq(struct ib_srq *srq)
        return 0;
 }
 
-struct ib_qp *mthca_create_qp(struct ib_pd *pd,
+static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                      ci_umv_buf_t* const                       p_umv_buf)
 {
@@ -718,7 +713,7 @@ err_mem: err_inval: err_unsupported:
        return ERR_PTR(err);
 }
 
-int mthca_destroy_qp(struct ib_qp *qp)
+static int mthca_destroy_qp(struct ib_qp *qp)
 {
        if (qp->ucontext) {
                mthca_unmap_user_db(to_mdev(qp->device),
@@ -735,7 +730,7 @@ int mthca_destroy_qp(struct ib_qp *qp)
        return 0;
 }
 
-struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     struct ib_ucontext *context,
                                     ci_umv_buf_t* const                        p_umv_buf)
 {
@@ -829,7 +824,7 @@ err_unmap_set:
        return ERR_PTR(err);
 }
 
-int mthca_destroy_cq(struct ib_cq *cq)
+static int mthca_destroy_cq(struct ib_cq *cq)
 {
        if (cq->ucontext) {
                mthca_unmap_user_db(to_mdev(cq->device),
@@ -888,7 +883,7 @@ struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc)
        return &mr->ibmr;
 }
 
-struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
+static struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int                 num_phys_buf,
                                       mthca_qp_access_t                 acc,
@@ -987,7 +982,7 @@ struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
        return &mr->ibmr;
 }
 
-struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, 
+static struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, 
        void* __ptr64   vaddr, uint64_t length, uint64_t hca_va,
        mthca_qp_access_t acc, boolean_t um_call)
 {
@@ -1130,7 +1125,7 @@ int mthca_dereg_mr(struct ib_mr *mr)
        return 0;
 }
 
-struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
+static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
                                      struct ib_fmr_attr *fmr_attr)
 {
        struct mthca_fmr *fmr;
@@ -1152,7 +1147,7 @@ struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
        return &fmr->ibmr;
 }
 
-int mthca_dealloc_fmr(struct ib_fmr *fmr)
+static int mthca_dealloc_fmr(struct ib_fmr *fmr)
 {
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;
@@ -1165,7 +1160,7 @@ int mthca_dealloc_fmr(struct ib_fmr *fmr)
        return 0;
 }
 
-int mthca_unmap_fmr(struct list_head *fmr_list)
+static int mthca_unmap_fmr(struct list_head *fmr_list)
 {
        struct ib_fmr *fmr;
        int err;
@@ -1258,6 +1253,7 @@ int mthca_register_device(struct mthca_dev *dev)
        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq           = mthca_create_srq;
                dev->ib_dev.modify_srq           = mthca_modify_srq;
+               dev->ib_dev.query_srq            = mthca_query_srq;
                dev->ib_dev.destroy_srq          = mthca_destroy_srq;
 
                if (mthca_is_memfree(dev))
index b321a7e..d24e0e4 100644 (file)
-/*\r
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.\r
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.\r
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.\r
- *\r
- * This software is available to you under a choice of one of two\r
- * licenses.  You may choose to be licensed under the terms of the GNU\r
- * General Public License (GPL) Version 2, available from the file\r
- * COPYING in the main directory of this source tree, or the\r
- * OpenIB.org BSD license below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-#ifndef MTHCA_PROVIDER_H\r
-#define MTHCA_PROVIDER_H\r
-\r
-#include <ib_verbs.h>\r
-#include <ib_pack.h>\r
-#include <iba/ib_ci.h>\r
-\r
-typedef uint32_t mthca_mpt_access_t;\r
-#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)\r
-#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)\r
-#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)\r
-#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)\r
-#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)\r
-\r
-union mthca_buf {\r
-       struct scatterlist direct;\r
-       struct scatterlist *page_list;\r
-};\r
-\r
-struct mthca_uar {\r
-       PFN_NUMBER pfn;\r
-       int           index;\r
-};\r
-\r
-struct mthca_user_db_table;\r
-\r
-struct mthca_ucontext {\r
-       struct ib_ucontext          ibucontext;\r
-       struct mthca_uar            uar;\r
-       struct mthca_user_db_table *db_tab;\r
-       // for user UAR \r
-       PMDL    mdl;\r
-       PVOID   kva;\r
-       SIZE_T uar_size;        \r
-};\r
-\r
-struct mthca_mtt;\r
-\r
-struct mthca_mr {\r
-       //NB: the start of this structure is to be equal to mlnx_mro_t !\r
-       //NB: the structure was not inserted here for not to mix driver and provider structures\r
-       struct ib_mr      ibmr;\r
-       struct mthca_mtt *mtt;\r
-       int                     iobuf_used;\r
-       mt_iobuf_t      iobuf;\r
-       void *secure_handle;\r
-};\r
-\r
-struct mthca_fmr {\r
-       struct ib_fmr      ibmr;\r
-       struct ib_fmr_attr attr;\r
-       struct mthca_mtt  *mtt;\r
-       int                maps;\r
-       union {\r
-               struct {\r
-                       struct mthca_mpt_entry __iomem *mpt;\r
-                       u64 __iomem *mtts;\r
-               } tavor;\r
-               struct {\r
-                       struct mthca_mpt_entry *mpt;\r
-                       __be64 *mtts;\r
-               } arbel;\r
-       } mem;\r
-};\r
-\r
-struct mthca_pd {\r
-       struct ib_pd    ibpd;\r
-       u32             pd_num;\r
-       atomic_t        sqp_count;\r
-       struct mthca_mr ntmr;\r
-       int             privileged;\r
-};\r
-\r
-struct mthca_eq {\r
-       struct mthca_dev      *dev;\r
-       int                    eqn;\r
-       int                    eq_num;\r
-       u32                    eqn_mask;\r
-       u32                    cons_index;\r
-       u16                    msi_x_vector;\r
-       u16                    msi_x_entry;\r
-       int                    have_irq;\r
-       int                    nent;\r
-       struct scatterlist *page_list;\r
-       struct mthca_mr        mr;\r
-       KDPC                            dpc;                    /* DPC for MSI-X interrupts */\r
-       spinlock_t  lock;                       /* spinlock for simult DPCs */\r
-};\r
-\r
-struct mthca_av;\r
-\r
-enum mthca_ah_type {\r
-       MTHCA_AH_ON_HCA,\r
-       MTHCA_AH_PCI_POOL,\r
-       MTHCA_AH_KMALLOC\r
-};\r
-\r
-struct mthca_ah {\r
-       struct ib_ah       ibah;\r
-       enum mthca_ah_type type;\r
-       u32                key;\r
-       struct mthca_av   *av;\r
-       dma_addr_t         avdma;\r
-};\r
-\r
-/*\r
- * Quick description of our CQ/QP locking scheme:\r
- *\r
- * We have one global lock that protects dev->cq/qp_table.  Each\r
- * struct mthca_cq/qp also has its own lock.  An individual qp lock\r
- * may be taken inside of an individual cq lock.  Both cqs attached to\r
- * a qp may be locked, with the send cq locked first.  No other\r
- * nesting should be done.\r
- *\r
- * Each struct mthca_cq/qp also has an atomic_t ref count.  The\r
- * pointer from the cq/qp_table to the struct counts as one reference.\r
- * This reference also is good for access through the consumer API, so\r
- * modifying the CQ/QP etc doesn't need to take another reference.\r
- * Access because of a completion being polled does need a reference.\r
- *\r
- * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the\r
- * destroy function to sleep on.\r
- *\r
- * This means that access from the consumer API requires nothing but\r
- * taking the struct's lock.\r
- *\r
- * Access because of a completion event should go as follows:\r
- * - lock cq/qp_table and look up struct\r
- * - increment ref count in struct\r
- * - drop cq/qp_table lock\r
- * - lock struct, do your thing, and unlock struct\r
- * - decrement ref count; if zero, wake up waiters\r
- *\r
- * To destroy a CQ/QP, we can do the following:\r
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock\r
- * - decrement ref count\r
- * - wait_event until ref count is zero\r
- *\r
- * It is the consumer's responsibilty to make sure that no QP\r
- * operations (WQE posting or state modification) are pending when the\r
- * QP is destroyed.  Also, the consumer must make sure that calls to\r
- * qp_modify are serialized.\r
- *\r
- * Possible optimizations (wait for profile data to see if/where we\r
- * have locks bouncing between CPUs):\r
- * - split cq/qp table lock into n separate (cache-aligned) locks,\r
- *   indexed (say) by the page in the table\r
- * - split QP struct lock into three (one for common info, one for the\r
- *   send queue and one for the receive queue)\r
- */\r
-//TODO: check correctness of the above requirement: "It is the consumer's responsibilty to make sure that no QP\r
-// operations (WQE posting or state modification) are pending when the QP is destroyed"\r
-\r
-struct mthca_cq {\r
-       struct ib_cq           ibcq;\r
-       void                                            *cq_context;    // leo: for IBAL shim\r
-       spinlock_t             lock;\r
-       atomic_t               refcount;\r
-       int                    cqn;\r
-       u32                    cons_index;\r
-       int                    is_direct;\r
-       int                    is_kernel;\r
-\r
-       /* Next fields are Arbel only */\r
-       int                    set_ci_db_index;\r
-       __be32                *set_ci_db;\r
-       int                    arm_db_index;\r
-       __be32                *arm_db;\r
-       int                    arm_sn;\r
-       int                    u_arm_db_index;\r
-       int                *p_u_arm_sn;\r
-\r
-       union mthca_buf        queue;\r
-       struct mthca_mr        mr;\r
-       wait_queue_head_t      wait;\r
-       KMUTEX                      mutex;\r
-};\r
-\r
-struct mthca_srq {\r
-       struct ib_srq           ibsrq;\r
-       spinlock_t              lock;\r
-       atomic_t                refcount;\r
-       int                     srqn;\r
-       int                     max;\r
-       int                     max_gs;\r
-       int                     wqe_shift;\r
-       int                     first_free;\r
-       int                     last_free;\r
-       u16                     counter;  /* Arbel only */\r
-       int                     db_index; /* Arbel only */\r
-       __be32                 *db;       /* Arbel only */\r
-       void                   *last;\r
-\r
-       int                     is_direct;\r
-       u64                    *wrid;\r
-       union mthca_buf         queue;\r
-       struct mthca_mr         mr;\r
-\r
-       wait_queue_head_t       wait;\r
-       KMUTEX                      mutex;\r
-};\r
-\r
-struct mthca_wq {\r
-       spinlock_t lock;\r
-       int        max;\r
-       unsigned   next_ind;\r
-       unsigned   last_comp;\r
-       unsigned   head;\r
-       unsigned   tail;\r
-       void      *last;\r
-       int        max_gs;\r
-       int        wqe_shift;\r
-\r
-       int        db_index;    /* Arbel only */\r
-       __be32    *db;\r
-};\r
-\r
-struct mthca_qp {\r
-       struct ib_qp           ibqp;\r
-       void                                            *qp_context;    // leo: for IBAL shim\r
-       //TODO: added just because absense of ibv_query_qp\r
-       // thereafter it may be worth to be replaced by struct ib_qp_attr qp_attr;\r
-       struct ib_qp_init_attr qp_init_attr;    // leo: for query_qp\r
-       atomic_t               refcount;\r
-       u32                    qpn;\r
-       int                    is_direct;\r
-       u8                     transport;\r
-       u8                     state;\r
-       u8                     atomic_rd_en;\r
-       u8                     resp_depth;\r
-\r
-       struct mthca_mr        mr;\r
-\r
-       struct mthca_wq        rq;\r
-       struct mthca_wq        sq;\r
-       enum ib_sig_type       sq_policy;\r
-       int                    send_wqe_offset;\r
-       int                    max_inline_data;\r
-\r
-       u64                   *wrid;\r
-       union mthca_buf        queue;\r
-\r
-       wait_queue_head_t      wait;\r
-       KMUTEX                      mutex;\r
-};\r
-\r
-struct mthca_sqp {\r
-       struct mthca_qp qp;\r
-       int             port;\r
-       int             pkey_index;\r
-       u32             qkey;\r
-       u32             send_psn;\r
-       struct ib_ud_header ud_header;\r
-       struct scatterlist sg;\r
-};\r
-\r
-static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)\r
-{\r
-       return container_of(ibucontext, struct mthca_ucontext, ibucontext);\r
-}\r
-\r
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)\r
-{\r
-       return container_of(ibmr, struct mthca_fmr, ibmr);\r
-}\r
-\r
-static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)\r
-{\r
-       return container_of(ibmr, struct mthca_mr, ibmr);\r
-}\r
-\r
-static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)\r
-{\r
-       return container_of(ibpd, struct mthca_pd, ibpd);\r
-}\r
-\r
-static inline struct mthca_ah *to_mah(struct ib_ah *ibah)\r
-{\r
-       return container_of(ibah, struct mthca_ah, ibah);\r
-}\r
-\r
-static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)\r
-{\r
-       return container_of(ibcq, struct mthca_cq, ibcq);\r
-}\r
-\r
-static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)\r
-{\r
-       return container_of(ibsrq, struct mthca_srq, ibsrq);\r
-}\r
-\r
-static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)\r
-{\r
-       return container_of(ibqp, struct mthca_qp, ibqp);\r
-}\r
-\r
-static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)\r
-{\r
-       return container_of(qp, struct mthca_sqp, qp);\r
-}\r
-\r
-static inline uint8_t start_port(struct ib_device *device)\r
-{\r
-       return device->node_type == IB_NODE_SWITCH ? 0 : 1;\r
-}\r
-\r
-static inline uint8_t end_port(struct ib_device *device)\r
-{\r
-       return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;\r
-}\r
-\r
-static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)\r
-{\r
-       RtlCopyMemory(dest, p_umv_buf->p_inout_buf,  len);\r
-       return 0;\r
-}\r
-\r
-static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)\r
-{\r
-       if (p_umv_buf->output_size < len) {\r
-               p_umv_buf->status = IB_INSUFFICIENT_MEMORY;\r
-               p_umv_buf->output_size = 0;\r
-               return -EFAULT;\r
-       }\r
-       RtlCopyMemory(p_umv_buf->p_inout_buf,  src, len);\r
-       p_umv_buf->status = IB_SUCCESS;\r
-       p_umv_buf->output_size = (uint32_t)len;\r
-       return 0;\r
-}\r
-\r
-\r
-\r
-// API\r
-int mthca_query_device(struct ib_device *ibdev,\r
-                                        struct ib_device_attr *props);\r
-\r
-int mthca_query_port(struct ib_device *ibdev,\r
-                           u8 port, struct ib_port_attr *props);\r
-\r
-int mthca_modify_port(struct ib_device *ibdev,\r
-                            u8 port, int port_modify_mask,\r
-                            struct ib_port_modify *props);\r
-\r
-int mthca_query_pkey_chunk(struct ib_device *ibdev,\r
-                           u8 port, u16 index, u16 pkey[32]);\r
-\r
-int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port,\r
-                          int index, union ib_gid gid[8]);\r
-\r
-struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,\r
-                                               ci_umv_buf_t* const                     p_umv_buf);\r
-\r
-int mthca_dealloc_ucontext(struct ib_ucontext *context);\r
-\r
-struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,\r
-                                   struct ib_ucontext *context,\r
-                                   ci_umv_buf_t* const                 p_umv_buf);\r
-\r
-int mthca_dealloc_pd(struct ib_pd *pd);\r
-\r
-struct ib_ah *mthca_ah_create(struct ib_pd *pd,\r
-                                    struct ib_ah_attr *ah_attr);\r
-\r
-int mthca_ah_destroy(struct ib_ah *ah);\r
-\r
-struct ib_srq *mthca_create_srq(struct ib_pd *pd,\r
-                                      struct ib_srq_init_attr *init_attr,\r
-                                      ci_umv_buf_t* const                      p_umv_buf);\r
-\r
-int mthca_destroy_srq(struct ib_srq *srq);\r
-\r
-struct ib_qp *mthca_create_qp(struct ib_pd *pd,\r
-                                    struct ib_qp_init_attr *init_attr,\r
-                                    ci_umv_buf_t* const                        p_umv_buf);\r
-\r
-int mthca_destroy_qp(struct ib_qp *qp);\r
-\r
-struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,\r
-                                    struct ib_ucontext *context,\r
-                                    ci_umv_buf_t* const                        p_umv_buf);\r
-\r
-int mthca_destroy_cq(struct ib_cq *cq);\r
-\r
-struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);\r
-\r
-struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,\r
-                                      struct ib_phys_buf *buffer_list,\r
-                                      int                 num_phys_buf,\r
-                                      mthca_qp_access_t                 acc,\r
-                                      u64                *iova_start);\r
-\r
-struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, \r
-        void* __ptr64  vaddr, uint64_t length, uint64_t hca_va,\r
-        mthca_qp_access_t acc, boolean_t um_call);\r
-\r
-int mthca_dereg_mr(struct ib_mr *mr);\r
-\r
-struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,\r
-                                     struct ib_fmr_attr *fmr_attr);\r
-\r
-int mthca_dealloc_fmr(struct ib_fmr *fmr);\r
-\r
-int mthca_unmap_fmr(struct list_head *fmr_list);\r
-\r
-int mthca_poll_cq_list(\r
-       IN              struct ib_cq *ibcq, \r
-       IN      OUT                     ib_wc_t** const                         pp_free_wclist,\r
-               OUT                     ib_wc_t** const                         pp_done_wclist );\r
-\r
-\r
-#endif /* MTHCA_PROVIDER_H */\r
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef MTHCA_PROVIDER_H
+#define MTHCA_PROVIDER_H
+
+#include <ib_verbs.h>
+#include <ib_pack.h>
+#include <iba/ib_ci.h>
+
+typedef uint32_t mthca_mpt_access_t;
+#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
+#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
+#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
+#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
+#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
+
+union mthca_buf {
+       struct scatterlist direct;
+       struct scatterlist *page_list;
+};
+
+struct mthca_uar {
+       PFN_NUMBER pfn;
+       int           index;
+};
+
+struct mthca_user_db_table;
+
+struct mthca_ucontext {
+       struct ib_ucontext          ibucontext;
+       struct mthca_uar            uar;
+       struct mthca_user_db_table *db_tab;
+       // for user UAR 
+       PMDL    mdl;
+       PVOID   kva;
+       SIZE_T uar_size;        
+};
+
+struct mthca_mtt;
+
+struct mthca_mr {
+       //NB: the start of this structure is to be equal to mlnx_mro_t !
+       //NB: the structure was not inserted here for not to mix driver and provider structures
+       struct ib_mr      ibmr;
+       struct mthca_mtt *mtt;
+       int                     iobuf_used;
+       mt_iobuf_t      iobuf;
+       void *secure_handle;
+};
+
+struct mthca_fmr {
+       struct ib_fmr      ibmr;
+       struct ib_fmr_attr attr;
+       struct mthca_mtt  *mtt;
+       int                maps;
+       union {
+               struct {
+                       struct mthca_mpt_entry __iomem *mpt;
+                       u64 __iomem *mtts;
+               } tavor;
+               struct {
+                       struct mthca_mpt_entry *mpt;
+                       __be64 *mtts;
+               } arbel;
+       } mem;
+};
+
+struct mthca_pd {
+       struct ib_pd    ibpd;
+       u32             pd_num;
+       atomic_t        sqp_count;
+       struct mthca_mr ntmr;
+       int             privileged;
+};
+
+struct mthca_eq {
+       struct mthca_dev      *dev;
+       int                    eqn;
+       int                    eq_num;
+       u32                    eqn_mask;
+       u32                    cons_index;
+       u16                    msi_x_vector;
+       u16                    msi_x_entry;
+       int                    have_irq;
+       int                    nent;
+       struct scatterlist *page_list;
+       struct mthca_mr        mr;
+       KDPC                            dpc;                    /* DPC for MSI-X interrupts */
+       spinlock_t  lock;                       /* spinlock for simult DPCs */
+};
+
+struct mthca_av;
+
+enum mthca_ah_type {
+       MTHCA_AH_ON_HCA,
+       MTHCA_AH_PCI_POOL,
+       MTHCA_AH_KMALLOC
+};
+
+struct mthca_ah {
+       struct ib_ah       ibah;
+       enum mthca_ah_type type;
+       u32                key;
+       struct mthca_av   *av;
+       dma_addr_t         avdma;
+};
+
+/*
+ * Quick description of our CQ/QP locking scheme:
+ *
+ * We have one global lock that protects dev->cq/qp_table.  Each
+ * struct mthca_cq/qp also has its own lock.  An individual qp lock
+ * may be taken inside of an individual cq lock.  Both cqs attached to
+ * a qp may be locked, with the send cq locked first.  No other
+ * nesting should be done.
+ *
+ * Each struct mthca_cq/qp also has an atomic_t ref count.  The
+ * pointer from the cq/qp_table to the struct counts as one reference.
+ * This reference also is good for access through the consumer API, so
+ * modifying the CQ/QP etc doesn't need to take another reference.
+ * Access because of a completion being polled does need a reference.
+ *
+ * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
+ * destroy function to sleep on.
+ *
+ * This means that access from the consumer API requires nothing but
+ * taking the struct's lock.
+ *
+ * Access because of a completion event should go as follows:
+ * - lock cq/qp_table and look up struct
+ * - increment ref count in struct
+ * - drop cq/qp_table lock
+ * - lock struct, do your thing, and unlock struct
+ * - decrement ref count; if zero, wake up waiters
+ *
+ * To destroy a CQ/QP, we can do the following:
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
+ * - decrement ref count
+ * - wait_event until ref count is zero
+ *
+ * It is the consumer's responsibility to make sure that no QP
+ * operations (WQE posting or state modification) are pending when the
+ * QP is destroyed.  Also, the consumer must make sure that calls to
+ * qp_modify are serialized.
+ *
+ * Possible optimizations (wait for profile data to see if/where we
+ * have locks bouncing between CPUs):
+ * - split cq/qp table lock into n separate (cache-aligned) locks,
+ *   indexed (say) by the page in the table
+ * - split QP struct lock into three (one for common info, one for the
+ *   send queue and one for the receive queue)
+ */
+//TODO: check correctness of the above requirement: "It is the consumer's responsibility to make sure that no QP
+// operations (WQE posting or state modification) are pending when the QP is destroyed"
+
+struct mthca_cq {
+       struct ib_cq           ibcq;
+       void                                            *cq_context;    // leo: for IBAL shim
+       spinlock_t             lock;
+       atomic_t               refcount;
+       int                    cqn;
+       u32                    cons_index;
+       int                    is_direct;
+       int                    is_kernel;
+
+       /* Next fields are Arbel only */
+       int                    set_ci_db_index;
+       __be32                *set_ci_db;
+       int                    arm_db_index;
+       __be32                *arm_db;
+       int                    arm_sn;
+       int                    u_arm_db_index;
+       int                *p_u_arm_sn;
+
+       union mthca_buf        queue;
+       struct mthca_mr        mr;
+       wait_queue_head_t      wait;
+       KMUTEX                      mutex;
+};
+
+struct mthca_srq {
+       struct ib_srq           ibsrq;
+       spinlock_t              lock;
+       atomic_t                refcount;
+       int                     srqn;
+       int                     max;
+       int                     max_gs;
+       int                     wqe_shift;
+       int                     first_free;
+       int                     last_free;
+       u16                     counter;  /* Arbel only */
+       int                     db_index; /* Arbel only */
+       __be32                 *db;       /* Arbel only */
+       void                   *last;
+
+       int                     is_direct;
+       u64                    *wrid;
+       union mthca_buf         queue;
+       struct mthca_mr         mr;
+
+       wait_queue_head_t       wait;
+       KMUTEX                  mutex;
+       void                            *srq_context;   
+};
+
+struct mthca_wq {
+       spinlock_t lock;
+       int        max;
+       unsigned   next_ind;
+       unsigned   last_comp;
+       unsigned   head;
+       unsigned   tail;
+       void      *last;
+       int        max_gs;
+       int        wqe_shift;
+
+       int        db_index;    /* Arbel only */
+       __be32    *db;
+};
+
+struct mthca_qp {
+       struct ib_qp           ibqp;
+       void                                            *qp_context;    // leo: for IBAL shim
+       //TODO: added just because of the absence of ibv_query_qp
+       // later it may be worth replacing with struct ib_qp_attr qp_attr;
+       struct ib_qp_init_attr qp_init_attr;    // leo: for query_qp
+       atomic_t               refcount;
+       u32                    qpn;
+       int                    is_direct;
+       u8                     transport;
+       u8                     state;
+       u8                     atomic_rd_en;
+       u8                     resp_depth;
+
+       struct mthca_mr        mr;
+
+       struct mthca_wq        rq;
+       struct mthca_wq        sq;
+       enum ib_sig_type       sq_policy;
+       int                    send_wqe_offset;
+       int                    max_inline_data;
+
+       u64                   *wrid;
+       union mthca_buf        queue;
+
+       wait_queue_head_t      wait;
+       KMUTEX                      mutex;
+};
+
+struct mthca_sqp {
+       struct mthca_qp qp;
+       int             port;
+       int             pkey_index;
+       u32             qkey;
+       u32             send_psn;
+       struct ib_ud_header ud_header;
+       struct scatterlist sg;
+};
+
+static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+       return container_of(ibucontext, struct mthca_ucontext, ibucontext);
+}
+
+static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
+{
+       return container_of(ibmr, struct mthca_fmr, ibmr);
+}
+
+static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
+{
+       return container_of(ibmr, struct mthca_mr, ibmr);
+}
+
+static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
+{
+       return container_of(ibpd, struct mthca_pd, ibpd);
+}
+
+static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
+{
+       return container_of(ibah, struct mthca_ah, ibah);
+}
+
+static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
+{
+       return container_of(ibcq, struct mthca_cq, ibcq);
+}
+
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
+{
+       return container_of(ibsrq, struct mthca_srq, ibsrq);
+}
+
+static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
+{
+       return container_of(ibqp, struct mthca_qp, ibqp);
+}
+
+static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
+{
+       return container_of(qp, struct mthca_sqp, qp);
+}
+
+static inline uint8_t start_port(struct ib_device *device)
+{
+       return device->node_type == IB_NODE_SWITCH ? 0 : 1;
+}
+
+static inline uint8_t end_port(struct ib_device *device)
+{
+       return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
+}
+
+static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len)
+{
+       RtlCopyMemory(dest, p_umv_buf->p_inout_buf,  len);
+       return 0;
+}
+
+static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len)
+{
+       if (p_umv_buf->output_size < len) {
+               p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
+               p_umv_buf->output_size = 0;
+               return -EFAULT;
+       }
+       RtlCopyMemory(p_umv_buf->p_inout_buf,  src, len);
+       p_umv_buf->status = IB_SUCCESS;
+       p_umv_buf->output_size = (uint32_t)len;
+       return 0;
+}
+
+
+
+// API
+int mthca_query_device(struct ib_device *ibdev,
+                             struct ib_device_attr *props);
+
+int mthca_query_port(struct ib_device *ibdev,
+                           u8 port, struct ib_port_attr *props);
+
+int mthca_modify_port(struct ib_device *ibdev,
+                            u8 port, int port_modify_mask,
+                            struct ib_port_modify *props);
+
+struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
+                                   struct ib_ucontext *context,
+                                   ci_umv_buf_t* const                 p_umv_buf);
+
+int mthca_dealloc_pd(struct ib_pd *pd);
+
+int mthca_dereg_mr(struct ib_mr *mr);
+
+int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr);
+
+struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
+                                               ci_umv_buf_t* const                     p_umv_buf);
+
+int mthca_dealloc_ucontext(struct ib_ucontext *context);
+
+struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc);
+
+int mthca_poll_cq_list(
+       IN              struct ib_cq *ibcq, 
+       IN      OUT                     ib_wc_t** const                         pp_free_wclist,
+               OUT                     ib_wc_t** const                         pp_done_wclist );
+
+
+#endif /* MTHCA_PROVIDER_H */
index 276c587..f263049 100644 (file)
@@ -807,11 +807,18 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
        err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
                              qp->qpn, 0, mailbox, sqd_event, &status);
-       if (err)
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP returned error (qp-num = 0x%x) returned status %02x "
+                       "cur_state  = %d  new_state = %d attr_mask = %d req_param = %d opt_param = %d\n",
+                       ibqp->qp_num, status, cur_state, new_state, 
+                       attr_mask, req_param, opt_param));        
                goto out_mailbox;
+       }
        if (status) {
-               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("modify QP %d returned status %02x.\n",
-                       state_table[cur_state][new_state].trans, status));
+               HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP bad status(qp-num = 0x%x) returned status %02x "
+                       "cur_state  = %d  new_state = %d attr_mask = %d req_param = %d opt_param = %d\n",
+                       ibqp->qp_num, status, cur_state, new_state, 
+                       attr_mask, req_param, opt_param));
                err = -EINVAL;
                goto out_mailbox;
        }
@@ -1114,7 +1121,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
        atomic_set(&qp->refcount, 1);
        init_waitqueue_head(&qp->wait);
        KeInitializeMutex(&qp->mutex, 0);
-       
+
        qp->state        = IBQPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
@@ -1371,8 +1378,9 @@ void mthca_free_qp(struct mthca_dev *dev,
        atomic_dec(&qp->refcount);
        wait_event(&qp->wait, !atomic_read(&qp->refcount));
 
-       if (qp->state != IBQPS_RESET)
+       if (qp->state != IBQPS_RESET) {
                mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
+       }
 
        /*
         * If this is a userspace QP, the buffers, MR, CQs and so on
index 46f1bd0..2a8dfdc 100644 (file)
@@ -50,6 +50,7 @@
 #pragma alloc_text (PAGE, mthca_cleanup_srq_table)
 #endif
 
+
 enum {
        MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
 };
@@ -59,7 +60,8 @@ struct mthca_tavor_srq_context {
        __be32 state_pd;
        __be32 lkey;
        __be32 uar;
-       __be32 wqe_cnt;
+       __be16 limit_watermark;
+       __be16 wqe_cnt;
        u32    reserved[2];
 };
 
@@ -129,7 +131,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
 
        RtlZeroMemory(context, sizeof *context);
 
-       logsize = long_log2(srq->max) + srq->wqe_shift;
+       logsize = long_log2(srq->max);
        context->state_logsize_srqn = cl_hton32(logsize << 24 | srq->srqn);
        context->lkey = cl_hton32(srq->mr.ibmr.lkey);
        context->db_index = cl_hton32(srq->db_index);
@@ -194,7 +196,7 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 }
 
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
-                   struct ib_srq_attr *attr, struct mthca_srq *srq)
+       ib_srq_attr_t *attr, struct mthca_srq *srq)
 {
        struct mthca_mailbox *mailbox;
        u8 status;
@@ -204,7 +206,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
        /* Sanity check SRQ size before proceeding */
        if ((int)attr->max_wr  > dev->limits.max_srq_wqes ||
-               (int)attr->max_sge > dev->limits.max_sg)
+               (int)attr->max_sge > dev->limits.max_srq_sge)
                return -EINVAL;
 
        srq->max      = attr->max_wr;
@@ -217,6 +219,10 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
        ds = max(64UL,
                 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                    srq->max_gs * sizeof (struct mthca_data_seg)));
+
+       if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
+               return -EINVAL;
+
        srq->wqe_shift = long_log2(ds);
 
        srq->srqn = mthca_alloc(&dev->srq_table.alloc);
@@ -261,11 +267,11 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
 
        if (err) {
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("SW2HW_SRQ failed (%d)\n", err));
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "SW2HW_SRQ failed (%d)\n", err));
                goto err_out_free_buf;
        }
        if (status) {
-               HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_SRQ returned status 0x%02x\n",
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "SW2HW_SRQ returned status 0x%02x\n",
                           status));
                err = -EINVAL;
                goto err_out_free_buf;
@@ -285,17 +291,17 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
        srq->first_free = 0;
        srq->last_free  = srq->max - 1;
 
-       attr->max_wr    = srq->max;
+       attr->max_wr    = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
        attr->max_sge   = srq->max_gs;
 
        return 0;
 
 err_out_free_srq:
        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
-       if (err){
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ failed (%d)\n", err));
-       }else if (status){
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ returned status 0x%02x\n", status));
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "HW2SW_SRQ failed (%d)\n", err));
+       } else if (status) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "HW2SW_SRQ returned status 0x%02x\n", status));
        }
 
 err_out_free_buf:
@@ -327,26 +333,26 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 
        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("No memory for mailbox to free SRQ.\n"));
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "No memory for mailbox to free SRQ.\n"));
                return;
        }
 
        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
-       if (err){
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ failed (%d)\n", err));
-       }else if (status){
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("HW2SW_SRQ returned status 0x%02x\n", status));
+       if (err) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "HW2SW_SRQ failed (%d)\n", err));
+       } else if (status) {
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "HW2SW_SRQ returned status 0x%02x\n", status));
        }
 
        spin_lock_irq(&dev->srq_table.lock, &lh);
        mthca_array_clear(&dev->srq_table.srq,
                          srq->srqn & (dev->limits.num_srqs - 1));
+       atomic_dec(&srq->refcount);
        spin_unlock_irq(&lh);
 
-       atomic_dec(&srq->refcount);
        wait_event(&srq->wait, !atomic_read(&srq->refcount));
 
-       if (!srq->ibsrq.uobject) {
+       if (!srq->ibsrq.ucontext) {
                mthca_free_srq_buf(dev, srq);
                if (mthca_is_memfree(dev))
                        mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
@@ -357,9 +363,9 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
        mthca_free_mailbox(dev, mailbox);
 }
 
-int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-                    enum ib_srq_attr_mask attr_mask)
-{      
+int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr,
+               ib_srq_attr_mask_t attr_mask)
+{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        int ret;
@@ -367,11 +373,12 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 
        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
-               return -EINVAL;
+               return -ENOSYS;
 
        if (attr_mask & IB_SRQ_LIMIT) {
-               if (attr->srq_limit > (u32)srq->max)
-                       return -EINVAL;
+               u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
+               if (attr->srq_limit > max_wr)
+                       return -ERANGE;
 
                down(&srq->mutex);
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
@@ -386,8 +393,43 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
        return 0;
 }
 
+int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr)
+{
+       struct mthca_dev *dev = to_mdev(ibsrq->device);
+       struct mthca_srq *srq = to_msrq(ibsrq);
+       struct mthca_mailbox *mailbox;
+       struct mthca_arbel_srq_context *arbel_ctx;
+       struct mthca_tavor_srq_context *tavor_ctx;
+       u8 status;
+       int err;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+       if (err)
+               goto out;
+
+       if (mthca_is_memfree(dev)) {
+               arbel_ctx = mailbox->buf;
+               srq_attr->srq_limit = cl_ntoh16(arbel_ctx->limit_watermark);
+       } else {
+               tavor_ctx = mailbox->buf;
+               srq_attr->srq_limit = cl_ntoh16(tavor_ctx->limit_watermark);
+       }
+
+       srq_attr->max_wr  = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
+       srq_attr->max_sge = srq->max_gs;
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+
+       return err;
+}
+
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
-                    enum ib_event_type event_type)
+                    enum ib_event_type event_type, u8 vendor_code)
 {
        struct mthca_srq *srq;
        struct ib_event event;
@@ -400,7 +442,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
        spin_unlock(&lh);
 
        if (!srq) {
-               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_LOW  ,("Async event for bogus SRQ %08x\n", srqn));
+               HCA_PRINT(TRACE_LEVEL_WARNING  ,HCA_DBG_SRQ  ,( "Async event for bogus SRQ %08x\n", srqn));
                return;
        }
 
@@ -409,8 +451,13 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 
        event.device      = &dev->ib_dev;
        event.event       = event_type;
-       event.element.srq  = &srq->ibsrq;
-       srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+       event.element.srq = &srq->ibsrq;
+       event.vendor_specific = vendor_code;
+       HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_SRQ,
+               ("SRQ %06x Async event  event_type 0x%x vendor_code 0x%x\n",
+               srqn,event_type,vendor_code));
+       if (srq->ibsrq.event_handler)
+               srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
 
 out:
        if (atomic_dec_and_test(&srq->refcount))
@@ -440,13 +487,12 @@ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
        spin_unlock(&lh);
 }
 
-//TODO: is this code correct at all ?
 int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                              struct _ib_recv_wr **bad_wr)
 {
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
-       __be32 doorbell[2];     
+       __be32 doorbell[2];
        int err = 0;
        int first_ind;
        int ind;
@@ -455,52 +501,34 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
        int i;
        u8 *wqe;
        u8 *prev_wqe;
+       CPU_2_BE64_PREP;
        SPIN_LOCK_PREP(lh);
 
        spin_lock_irqsave(&srq->lock, &lh);
 
        first_ind = srq->first_free;
 
-       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
-               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
-                       nreq = 0;
-
-                       doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
-                       doorbell[1] = cl_hton32(srq->srqn << 8);
-
-                       /*
-                        * Make sure that descriptors are written
-                        * before doorbell is rung.
-                        */
-                       wmb();
-
-                       mthca_write64(doorbell,
-                                     dev->kar + MTHCA_RECEIVE_DOORBELL,
-                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
-                       first_ind = srq->first_free;
-               }
-
+       for (nreq = 0; wr; wr = wr->p_next) {
                ind = srq->first_free;
 
                if (ind < 0) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SRQ  ,( "SRQ %06x full\n", srq->srqn));
                        err = -ENOMEM;
                        *bad_wr = wr;
-                       goto out;
+                       break;
                }
 
-               wqe       = get_wqe(srq, ind);
-               next_ind  = *wqe_to_link(wqe);
+               wqe = get_wqe(srq, ind);
+               next_ind = *wqe_to_link(wqe);
 
                if (next_ind < 0) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP  ,("SRQ %06x full\n", srq->srqn));
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SRQ  ,( "SRQ %06x full\n", srq->srqn));
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
 
-               prev_wqe  = srq->last;
+               prev_wqe = srq->last;
                srq->last = wqe;
 
                ((struct mthca_next_seg *) wqe)->nda_op = 0;
@@ -513,7 +541,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                        err = -EINVAL;
                        *bad_wr = wr;
                        srq->last = prev_wqe;
-                       goto out;
+                       break;
                }
 
                for (i = 0; i < (int)wr->num_ds; ++i) {
@@ -522,7 +550,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cl_hton32(wr->ds_array[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
-                               cl_hton64(wr->ds_array[i].vaddr);
+                               CPU_2_BE64(wr->ds_array[i].vaddr);
                        wqe += sizeof (struct mthca_data_seg);
                }
 
@@ -540,9 +568,28 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
 
                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
+
+               ++nreq;
+               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+                       doorbell[1] = cl_hton32(srq->srqn << 8);
+
+                       /*
+                        * Make sure that descriptors are written
+                        * before doorbell is rung.
+                        */
+                       wmb();
+
+                       mthca_write64(doorbell,
+                                     dev->kar + MTHCA_RECEIVE_DOORBELL,
+                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+                       first_ind = srq->first_free;
+               }
        }
 
-out:
        if (likely(nreq)) {
                doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
                doorbell[1] = cl_hton32((srq->srqn << 8) | nreq);
@@ -562,7 +609,6 @@ out:
        return err;
 }
 
-//TODO: is this code correct at all ?
 int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                              struct _ib_recv_wr **bad_wr)
 {
@@ -573,6 +619,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
        int nreq;
        int i;
        u8 *wqe;
+       CPU_2_BE64_PREP;
        SPIN_LOCK_PREP(lh);
 
        spin_lock_irqsave(&srq->lock, &lh);
@@ -581,23 +628,23 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                ind = srq->first_free;
 
                if (ind < 0) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_QP ,("SRQ %06x full\n", srq->srqn));
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SRQ  ,( "SRQ %06x full\n", srq->srqn));
                        err = -ENOMEM;
                        *bad_wr = wr;
-                       goto out;
+                       break;
                }
 
                wqe       = get_wqe(srq, ind);
                next_ind  = *wqe_to_link(wqe);
 
                if (next_ind < 0) {
-                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("SRQ %06x full\n", srq->srqn));
+                       HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SRQ  ,( "SRQ %06x full\n", srq->srqn));
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
 
-               ((struct mthca_next_seg *) wqe)->nda_op = 
+               ((struct mthca_next_seg *) wqe)->nda_op =
                        cl_hton32((next_ind << srq->wqe_shift) | 1);
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */
@@ -607,7 +654,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                if (unlikely((int)wr->num_ds > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
-                       goto out;
+                       break;
                }
 
                for (i = 0; i < (int)wr->num_ds; ++i) {
@@ -616,7 +663,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cl_hton32(wr->ds_array[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
-                               cl_hton64(wr->ds_array[i].vaddr);
+                               CPU_2_BE64(wr->ds_array[i].vaddr);
                        wqe += sizeof (struct mthca_data_seg);
                }
 
@@ -630,9 +677,8 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr,
                srq->first_free = next_ind;
        }
 
-out:   
        if (likely(nreq)) {
-               srq->counter = srq->counter + (u16)nreq;
+               srq->counter = (u16)(srq->counter  + nreq);
 
                /*
                 * Make sure that descriptors are written before
@@ -646,6 +692,31 @@ out:
        return err;
 }
 
+int mthca_max_srq_sge(struct mthca_dev *dev)
+{
+       if (mthca_is_memfree(dev))
+               return dev->limits.max_sg;
+
+       /*
+        * SRQ allocations are based on powers of 2 for Tavor,
+        * (although they only need to be multiples of 16 bytes).
+        *
+        * Therefore, we need to base the max number of sg entries on
+        * the largest power of 2 descriptor size that is <= to the
+        * actual max WQE descriptor size, rather than return the
+        * max_sg value given by the firmware (which is based on WQE
+        * sizes as multiples of 16, not powers of 2).
+        *
+        * If SRQ implementation is changed for Tavor to be based on
+        * multiples of 16, the calculation below can be deleted and
+        * the FW max_sg value returned.
+        */
+       return min( (uint32_t)dev->limits.max_sg,
+                    ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
+                     sizeof (struct mthca_next_seg)) /
+                    sizeof (struct mthca_data_seg));
+}
+
 int mthca_init_srq_table(struct mthca_dev *dev)
 {
        int err;
@@ -678,4 +749,3 @@ void mthca_cleanup_srq_table(struct mthca_dev *dev)
        mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
        mthca_alloc_cleanup(&dev->srq_table.alloc);
 }
-
diff --git a/hw/mthca/kernel/mthca_user.h b/hw/mthca/kernel/mthca_user.h
deleted file mode 100644 (file)
index e603f47..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef MTHCA_USER_H
-#define MTHCA_USER_H
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in u64
- * instead.
- */
-
-struct mthca_alloc_ucontext_resp {
-       uint64_t uar_addr;
-       u64 pd_handle;
-       u32 pdn;
-       u32 qp_tab_size;
-       u32 uarc_size;
-       u32 vend_id;
-       u16 dev_id;
-};
-
-struct mthca_create_srq {
-       u32 lkey;
-       u32 db_index;
-       u64 db_page;
-};
-
-struct mthca_create_srq_resp {
-       u32 srqn;
-       u32 reserved;
-};
-
-#endif /* MTHCA_USER_H */
index 9473db6..c746737 100644 (file)
@@ -104,6 +104,24 @@ struct ibv_create_cq_resp {
        uint32_t cqn;
 };
 
+struct ibv_create_srq {
+       uint64_t user_handle;
+       struct ibv_reg_mr mr;
+       uint32_t lkey;  /* used only in kernel */
+       uint32_t db_index;
+       uint64_t db_page;
+};
+
+struct ibv_create_srq_resp {
+       struct ibv_reg_mr_resp mr;
+       uint64_t srq_handle;
+       uint64_t user_handle;
+       uint32_t max_wr;
+       uint32_t max_sge;
+       uint32_t srqn;
+       uint32_t reserved;
+};
+
 struct ibv_create_qp {
        uint64_t sq_db_page;
        uint64_t rq_db_page;
index 1ab5197..92f6c65 100644 (file)
@@ -31,7 +31,8 @@ SOURCES= \
        mlnx_ual_mrw.c \\r
        mlnx_ual_osbypass.c \\r
        mlnx_ual_pd.c \\r
-       mlnx_ual_qp.c   \\r
+       mlnx_ual_qp.c    \\r
+       mlnx_ual_srq.c \\r
                                \\r
        mlnx_uvp_debug.c \\r
        mlnx_uvp.c \\r
index dc90991..8bc46a5 100644 (file)
@@ -263,6 +263,10 @@ mlnx_post_create_av (
                }\r
                *ph_uvp_av = (ib_av_handle_t)ah;\r
        }\r
+       else {\r
+               mthca_free_av(ah);\r
+               cl_free(ah);\r
+       }\r
        goto end;\r
        \r
 end:   \r
index 3dedca7..03d9057 100644 (file)
@@ -156,6 +156,11 @@ uvp_get_interface (
      */\r
     mlnx_get_pd_interface (p_uvp);\r
 \r
+    /*\r
+     * SRQ Management Verbs\r
+     */\r
+    mlnx_get_srq_interface (p_uvp);\r
+\r
     /*\r
      * QP Management Verbs\r
      */\r
index b638266..bbc5fdc 100644 (file)
@@ -271,6 +271,59 @@ mlnx_post_destroy_cq (
     IN         const ib_cq_handle_t            h_uvp_cq,\r
     IN         ib_api_status_t                 ioctl_status);\r
 \r
+/************* SRQ Management *************************/\r
+void  \r
+mlnx_get_srq_interface (\r
+    IN OUT     uvp_interface_t                         *p_uvp );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_create_srq (\r
+    IN         const   ib_pd_handle_t          h_uvp_pd,// Fix me: if needed\r
+    IN         const   ib_srq_attr_t           *p_srq_attr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_create_srq (\r
+       IN              const   ib_pd_handle_t                          h_uvp_pd,\r
+       IN                              ib_api_status_t                         ioctl_status,\r
+               OUT                     ib_srq_handle_t                         *ph_uvp_srq,\r
+       IN                              ci_umv_buf_t                            *p_umv_buf );\r
+\r
+ib_api_status_t  \r
+mlnx_pre_modify_srq (\r
+    IN         const ib_srq_handle_t           h_uvp_srq,\r
+    IN         const ib_srq_attr_mask_t        srq_attr_attr,  // Fixme\r
+    IN         const ib_srq_attr_t                     *p_srq_attr,    // Fixme\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_modify_srq (\r
+       IN              const ib_srq_handle_t           h_uvp_srq,\r
+       IN              ib_api_status_t                         ioctl_status,\r
+       IN OUT  ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_query_srq (\r
+    IN         ib_srq_handle_t                         h_uvp_srq,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+void  \r
+mlnx_post_query_srq (\r
+    IN         ib_srq_handle_t                         h_uvp_srq,\r
+    IN         ib_api_status_t                         ioctl_status,\r
+    IN         ib_srq_attr_t                           *p_query_attr,\r
+    IN OUT     ci_umv_buf_t                            *p_umv_buf);\r
+\r
+ib_api_status_t  \r
+mlnx_pre_destroy_srq (\r
+    IN         const ib_srq_handle_t           h_uvp_srq);\r
+\r
+void  \r
+mlnx_post_destroy_srq (\r
+    IN         const ib_srq_handle_t           h_uvp_srq,\r
+    IN         ib_api_status_t                 ioctl_status );\r
+\r
+\r
 /************* QP Management *************************/\r
 void  \r
 mlnx_get_qp_interface (\r
@@ -486,6 +539,12 @@ mlnx_post_recv (
        IN                              ib_recv_wr_t*   const           p_recv_wr,\r
                OUT                     ib_recv_wr_t**                          pp_recv_failure );\r
 \r
+ib_api_status_t   \r
+mlnx_post_srq_recv (\r
+       IN              const   void* __ptr64                           h_srq,\r
+       IN                              ib_recv_wr_t*   const           p_recv_wr,\r
+               OUT                     ib_recv_wr_t**                          pp_recv_failure );\r
+\r
 ib_api_status_t  \r
 mlnx_bind_mw (\r
        IN              const   ib_mw_handle_t                          h_uvp_mw,\r
index bae962e..08fba46 100644 (file)
@@ -53,6 +53,7 @@ mlnx_get_osbypass_interface (
      */\r
     p_uvp->post_send = mlnx_post_send;\r
     p_uvp->post_recv = mlnx_post_recv;\r
+    p_uvp->post_srq_recv = mlnx_post_srq_recv;\r
 \r
     /*\r
      * Completion Processing and \r
@@ -107,6 +108,42 @@ mlnx_post_send (
 }\r
 \r
 \r
+ib_api_status_t\r
+mlnx_post_srq_recv (\r
+       IN              const   void*           __ptr64                 h_srq,\r
+       IN                              ib_recv_wr_t*   const           p_recv_wr,\r
+               OUT                     ib_recv_wr_t**                          pp_recv_failure )\r
+{\r
+       int err;\r
+       ib_api_status_t status = IB_SUCCESS;\r
+       struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_srq);\r
+\r
+    UVP_ENTER(UVP_DBG_QP);\r
+\r
+    CL_ASSERT (srq);\r
+\r
+       CL_ASSERT( p_recv_wr );\r
+\r
+       err = srq->ibv_srq.context->ops.post_srq_recv(&srq->ibv_srq, p_recv_wr, pp_recv_failure );\r
+       if (err) {\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_recv failed (%d)\n", err));\r
+               if (err == -ENOMEM)\r
+                       status = IB_INSUFFICIENT_RESOURCES;\r
+               else if (err == -EINVAL) \r
+                       status = IB_INVALID_WR_TYPE;\r
+               else if (err == -ERANGE)\r
+                       status = IB_INVALID_MAX_SGE;\r
+               else if (err == -EBUSY)\r
+                       status = IB_INVALID_QP_STATE;\r
+               else\r
+                       status = errno_to_iberr(err);\r
+       }\r
+\r
+    UVP_EXIT(UVP_DBG_QP);\r
+    return status;\r
+}\r
+\r
+\r
 ib_api_status_t\r
 mlnx_post_recv (\r
        IN              const   void*           __ptr64                 h_qp,\r
index 39aa925..4bf3fb5 100644 (file)
@@ -111,7 +111,7 @@ ib_api_status_t
        /* convert attributes */\r
        attr.send_cq                            = p_create_attr->h_sq_cq->ibv_cq;\r
        attr.recv_cq                            = p_create_attr->h_rq_cq->ibv_cq;\r
-       attr.srq                                        = NULL; /* absent in IBAL */\r
+       attr.srq                                        = (struct ibv_srq*)p_create_attr->h_srq;\r
        attr.cap.max_send_wr            = p_create_attr->sq_depth;\r
        attr.cap.max_recv_wr            = p_create_attr->rq_depth;\r
        attr.cap.max_send_sge           = p_create_attr->sq_sge;\r
@@ -347,7 +347,7 @@ mlnx_post_destroy_qp (
        if (ioctl_status == IB_SUCCESS) \r
                cl_free (p_qp_info);\r
        else\r
-               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp failed (%d)\n", ioctl_status));\r
+               UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp_post failed (%d)\n", ioctl_status));\r
 \r
        UVP_EXIT(UVP_DBG_SHIM);\r
        return;\r
diff --git a/hw/mthca/user/mlnx_ual_srq.c b/hw/mthca/user/mlnx_ual_srq.c
new file mode 100644 (file)
index 0000000..196da79
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mlnx_ual_srq.c 1611 2006-08-20 14:48:55Z leonid $
+ */
+
+#include "mt_l2w.h"
+#include "mlnx_ual_main.h"
+#include "mlnx_uvp.h"
+#include "mx_abi.h"
+
+#if defined(EVENT_TRACING)
+#include "mlnx_ual_srq.tmh"
+#endif
+
+
+extern uint32_t        mlnx_dbg_lvl;
+
+void
+mlnx_get_srq_interface (
+       IN OUT  uvp_interface_t         *p_uvp )
+{
+       UVP_ENTER(UVP_DBG_DEV);
+
+       CL_ASSERT(p_uvp);
+
+       /*
+        * Shared Receive Queue Management Verbs
+        */
+       p_uvp->pre_create_srq  = mlnx_pre_create_srq;
+       p_uvp->post_create_srq = mlnx_post_create_srq;
+
+       p_uvp->pre_query_srq  = NULL; /* mlnx_pre_query_srq; */
+       p_uvp->post_query_srq = NULL; /*mlnx_post_query_srq;*/
+
+       p_uvp->pre_modify_srq  = NULL; /* mlnx_modify_srq;*/
+       p_uvp->post_modify_srq = NULL; /*mlnx_post_modify_srq;*/
+
+       p_uvp->pre_destroy_srq  = NULL; /* mlnx_pre_destroy_srq; */
+       p_uvp->post_destroy_srq = mlnx_post_destroy_srq;
+
+       UVP_EXIT(UVP_DBG_DEV);
+}
+
+static void __free_srq(struct mthca_srq *srq)
+{
+       /* srq may be NULL, when ioctl returned with some kind of error, e.g. IB_INVALID_PARAM */
+       if (!srq)
+               return;
+       
+       if (mthca_is_memfree(srq->ibv_srq.context)) {
+               mthca_free_db(to_mctx(srq->ibv_srq.context)->db_tab, MTHCA_DB_TYPE_SRQ,
+               srq->db_index);
+       }
+
+       if (srq->buf) {
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+               cl_free(srq->buf);
+#else
+               VirtualFree( srq->buf, 0, MEM_RELEASE);
+#endif
+       }
+
+       if (srq->wrid) 
+               cl_free(srq->wrid);
+
+       cl_spinlock_destroy(&srq->lock);
+       cl_free (srq);
+}
+
+ib_api_status_t  
+mlnx_pre_create_srq (
+    IN         const   ib_pd_handle_t          h_uvp_pd,// Fix me: if needed
+    IN         const   ib_srq_attr_t           *p_srq_attr,
+    IN OUT     ci_umv_buf_t                            *p_umv_buf)
+{
+       struct mthca_srq *srq;
+       ib_api_status_t status = IB_SUCCESS;
+       size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );
+       mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;
+       struct ibv_pd *ibv_pd = p_pd->ibv_pd;
+       struct ibv_create_srq *p_create_srq;
+       int err;
+
+       UVP_ENTER(UVP_DBG_SRQ);
+
+       CL_ASSERT(p_umv_buf);
+
+       /* Sanity check SRQ size before proceeding */
+       if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)
+       {
+               status = IB_INVALID_PARAMETER;
+               goto err_params;
+       }
+
+       if( !p_umv_buf->p_inout_buf )
+       {
+               p_umv_buf->p_inout_buf = cl_zalloc( size );
+               if( !p_umv_buf->p_inout_buf )
+               {
+                       status = IB_INSUFFICIENT_MEMORY;
+                       goto err_memory;
+               }
+       }
+       p_umv_buf->input_size = sizeof(struct ibv_create_srq);
+       p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
+       p_umv_buf->command = TRUE;
+
+       /* allocate srq */
+       srq = cl_zalloc(sizeof *srq);
+       if (!srq)
+       {
+               status = IB_INSUFFICIENT_MEMORY;
+               goto err_alloc_srq;
+       }
+
+       /* init fields */
+       cl_spinlock_construct(&srq->lock);
+       if (cl_spinlock_init(&srq->lock))
+               goto err_lock;
+
+       srq->ibv_srq.pd = ibv_pd;
+       srq->ibv_srq.context                    = ibv_pd->context;
+       srq->max     = align_queue_size(ibv_pd->context, p_srq_attr->max_wr, 1);
+       srq->max_gs  = p_srq_attr->max_sge;
+       srq->counter = 0;
+
+       if (mthca_alloc_srq_buf(ibv_pd, (void*)p_srq_attr, srq))
+       {
+               status = IB_INSUFFICIENT_MEMORY;
+               goto err_alloc_buf;
+       }
+
+       // fill the parameters for ioctl
+       p_create_srq = (struct ibv_create_srq *)p_umv_buf->p_inout_buf;
+       p_create_srq->user_handle = (uint64_t)(ULONG_PTR)srq;
+       p_create_srq->mr.start = (uint64_t)(ULONG_PTR)srq->buf;
+       p_create_srq->mr.length = srq->buf_size;
+       p_create_srq->mr.hca_va = 0;
+       p_create_srq->mr.pd_handle       = p_pd->ibv_pd->handle;
+       p_create_srq->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn;
+       p_create_srq->mr.access_flags = 0;      //local read
+
+       if (mthca_is_memfree(ibv_pd->context)) {
+               srq->db_index = mthca_alloc_db(to_mctx(ibv_pd->context)->db_tab,
+                       MTHCA_DB_TYPE_SRQ, &srq->db);
+               if (srq->db_index < 0)
+                       goto err_alloc_db;
+
+               p_create_srq->db_page  = db_align(srq->db);
+               p_create_srq->db_index = srq->db_index;
+       }
+
+       status = IB_SUCCESS;
+       goto end;
+
+err_alloc_db:
+#ifdef NOT_USE_VIRTUAL_ALLOC   
+               cl_free(srq->buf);
+#else
+               VirtualFree( srq->buf, 0, MEM_RELEASE);
+#endif
+       cl_free(srq->wrid);
+err_alloc_buf:
+       cl_spinlock_destroy(&srq->lock);
+err_lock:
+       cl_free(srq);
+err_alloc_srq:
+       cl_free(p_umv_buf->p_inout_buf);
+err_memory:
+err_params:
+end:
+       UVP_EXIT(UVP_DBG_SRQ);
+       return status;
+}
+
+
+void
+mlnx_post_create_srq (
+       IN              const   ib_pd_handle_t                          h_uvp_pd,
+       IN                              ib_api_status_t                         ioctl_status,
+               OUT                     ib_srq_handle_t                         *ph_uvp_srq,
+       IN                              ci_umv_buf_t                            *p_umv_buf )
+{
+       int err;
+       struct mthca_srq *srq;
+       struct ibv_create_srq_resp *p_resp;
+       mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd;
+       struct ibv_pd *ibv_pd = p_pd->ibv_pd;
+       ib_api_status_t status = IB_SUCCESS;
+
+       UVP_ENTER(UVP_DBG_SRQ);
+
+       CL_ASSERT(p_umv_buf);
+       p_resp = (struct ibv_create_srq_resp *)p_umv_buf->p_inout_buf;
+       srq = (struct mthca_srq *)(ULONG_PTR)p_resp->user_handle;
+
+       if (IB_SUCCESS == ioctl_status) {
+
+               /* complete filling SRQ object */
+               srq->ibv_srq.handle                     = p_resp->srq_handle;
+               srq->srqn                                       = p_resp->srqn;
+               srq->max                                        = p_resp->max_wr;
+               srq->max_gs                                     = p_resp->max_sge;
+               srq->mr.handle = p_resp->mr.mr_handle;
+               srq->mr.lkey = p_resp->mr.lkey;
+               srq->mr.rkey = p_resp->mr.rkey;
+               srq->mr.pd = ibv_pd;
+               srq->mr.context = ibv_pd->context;
+
+               if (mthca_is_memfree(ibv_pd->context))
+                       mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn);
+               
+               *ph_uvp_srq = (ib_srq_handle_t)srq;
+       }
+       else
+               __free_srq(srq);
+
+       if (p_resp)
+               cl_free( p_resp );
+       UVP_EXIT(UVP_DBG_SRQ);
+       return;
+}
+
+void
+mlnx_post_destroy_srq (
+       IN              const ib_srq_handle_t           h_uvp_srq,
+       IN              ib_api_status_t                 ioctl_status)
+{
+       int err;
+       struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_uvp_srq);
+
+       UVP_ENTER(UVP_DBG_CQ);
+
+       CL_ASSERT(srq);
+
+       if (IB_SUCCESS == ioctl_status) 
+               __free_srq(srq);
+
+       UVP_EXIT(UVP_DBG_CQ);
+}
+
+
index 2512ed0..6470a39 100644 (file)
@@ -137,7 +137,7 @@ struct mthca_srq {
        void              *buf;
        void              *last;
        cl_spinlock_t lock;
-       struct ibv_mr     *mr;
+       struct ibv_mr     mr;
        uint64_t          *wrid;
        uint32_t           srqn;
        int                max;
diff --git a/hw/mthca/user/mlnx_uvp_abi.h b/hw/mthca/user/mlnx_uvp_abi.h
deleted file mode 100644 (file)
index 155dae1..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id$
- */
-
-#ifndef MTHCA_UVP_ABI_H
-#define MTHCA_ABI_H
-
-#include "mlnx_uvp_kern_abi.h"
-
-struct mthca_alloc_ucontext_resp {
-       struct ibv_get_context_resp     ibv_resp;
-};
-
-struct mthca_create_srq {
-       uint32_t                                lkey;
-       uint32_t                                db_index;
-       uint64_t                                db_page;
-       struct ibv_create_srq           ibv_cmd;
-};
-
-struct mthca_create_srq_resp {
-       struct ibv_create_srq_resp      ibv_resp;
-       uint32_t                                srqn;
-       uint32_t                                reserved;
-};
-
-struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p);
-void mthca_free_context(struct ibv_context *ibctx);
-
-
-#endif /* MTHCA_ABI_H */
index d7c0a4b..2a9cbc5 100644 (file)
@@ -54,6 +54,7 @@ extern uint32_t               g_mlnx_dbg_flags;
        WPP_DEFINE_BIT( UVP_DBG_CQ) \\r
        WPP_DEFINE_BIT( UVP_DBG_QP) \\r
        WPP_DEFINE_BIT( UVP_DBG_MEMORY) \\r
+       WPP_DEFINE_BIT( UVP_DBG_SRQ) \\r
        WPP_DEFINE_BIT( UVP_DBG_AV) \\r
        WPP_DEFINE_BIT( UVP_DBG_SEND) \\r
        WPP_DEFINE_BIT( UVP_DBG_RECV) \\r
@@ -93,11 +94,12 @@ extern uint32_t             g_mlnx_dbg_flags;
 #define UVP_DBG_QP     (1 << 4)\r
 #define UVP_DBG_CQ     (1 << 5)\r
 #define UVP_DBG_MEMORY (1 << 6)\r
-#define UVP_DBG_AV     (1 << 7)\r
-#define UVP_DBG_SEND   (1 << 8)\r
-#define UVP_DBG_RECV   (1 << 9)\r
-#define UVP_DBG_LOW    (1 << 10)\r
-#define UVP_DBG_SHIM   (1 << 11)\r
+#define UVP_DBG_SRQ    (1 << 7)\r
+#define UVP_DBG_AV     (1 << 8)\r
+#define UVP_DBG_SEND   (1 << 9)\r
+#define UVP_DBG_RECV   (1 << 10)\r
+#define UVP_DBG_LOW    (1 << 11)\r
+#define UVP_DBG_SHIM   (1 << 12)\r
 \r
 \r
 VOID\r
index dd2fdc0..bb81e0c 100644 (file)
@@ -95,24 +95,7 @@ int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
 
        first_ind = srq->first_free;
 
-       for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
-               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
-                       nreq = 0;
-
-                       doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
-                       doorbell[1] = cl_hton32(srq->srqn << 8);
-
-                       /*
-                        * Make sure that descriptors are written
-                        * before doorbell is rung.
-                        */
-                       wmb();
-
-                       mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);
-
-                       first_ind = srq->first_free;
-               }
-
+       for (nreq = 0; wr; wr = wr->p_next) {
                ind = srq->first_free;
 
                if (ind < 0) {
@@ -172,6 +155,23 @@ int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
 
                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
+
+               if (++nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB) {
+                       nreq = 0;
+               
+                       doorbell[0] = cl_hton32(first_ind << srq->wqe_shift);
+                       doorbell[1] = cl_hton32(srq->srqn << 8);
+               
+                       /*
+                        * Make sure that descriptors are written
+                        * before doorbell is rung.
+                        */
+                       wmb();
+               
+                       mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);
+               
+                       first_ind = srq->first_free;
+               }
        }
 
        if (nreq) {
@@ -294,12 +294,12 @@ int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
        srq->buf_size = srq->max << srq->wqe_shift;
 
        if (posix_memalign(&srq->buf, g_page_size,
-                          align(srq->buf_size, g_page_size))) {
+                       align(srq->buf_size, g_page_size))) {
                cl_free(srq->wrid);
                return -1;
        }
 
-       memset(srq->buf, 0, srq->buf_size);
+       cl_memclr(srq->buf, srq->buf_size);
 
        /*
         * Now initialize the SRQ buffer so that all of the WQEs are
index 783ff72..528222f 100644 (file)
@@ -244,7 +244,7 @@ int mthca_destroy_cq(struct ibv_cq *cq)
        return 0;
 }
 
-static int align_queue_size(struct ibv_context *context, int size, int spare)
+int align_queue_size(struct ibv_context *context, int size, int spare)
 {
        int ret;
 
@@ -333,7 +333,7 @@ struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
        }
 
        // fill the rest qp fields
-       qp->ibv_qp      .pd = pd;
+       qp->ibv_qp.pd = pd;
        qp->ibv_qp.send_cq = attr->send_cq;
        qp->ibv_qp.recv_cq = attr->recv_cq;
        qp->ibv_qp.srq = attr->srq;
@@ -526,3 +526,4 @@ int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
        return -ENOSYS;
 #endif
 }
+
index 40ccf3b..19ecb83 100644 (file)
@@ -405,16 +405,9 @@ typedef enum MTHCA_QP_ACCESS_FLAGS {
 
 
 struct ibv_srq {
-       struct ibv_context     *context;
-       void                   *srq_context;
        struct ibv_pd          *pd; 
        uint64_t                handle;
-       HANDLE          mutex;
-
-#ifdef WIN_TO_BE_CHANGED       
-       pthread_cond_t          cond;
-       uint32_t                events_completed;
-#endif
+       struct ibv_context     *context;
 };
 
 struct ibv_qp {
@@ -489,6 +482,8 @@ struct ibv_context {
        void                      *abi_compat;
 };
 
+int align_queue_size(struct ibv_context *context, int size, int spare);
+
 END_C_DECLS
 
 #endif /* INFINIBAND_VERBS_H */
index 75132f1..a63ecb6 100644 (file)
@@ -43,7 +43,7 @@ extern "C"
 {\r
 #endif /* __cplusplus */\r
 \r
-/****h* IB Access Layer API/Overview\r
+/****h* IB Access Layer API/Access Layer\r
 * NAME\r
 *      InfiniBand Access Layer\r
 * COPYRIGHT\r
@@ -469,6 +469,7 @@ typedef struct _ib_async_event_rec
                ib_ca_handle_t                                                  h_ca;\r
                ib_cq_handle_t                                                  h_cq;\r
                ib_qp_handle_t                                                  h_qp;\r
+               ib_srq_handle_t                                                 h_srq;\r
 \r
        } handle;\r
 \r
@@ -1097,6 +1098,293 @@ ib_destroy_av(
 *****/\r
 \r
 \r
+/****f* Access Layer/ib_create_srq\r
+* NAME\r
+*      ib_create_srq\r
+*\r
+* DESCRIPTION\r
+*      Creates a shared receive queue and returns its handle to the user.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_create_srq(\r
+       IN              const   ib_pd_handle_t                  h_pd,\r
+       IN              const   ib_srq_attr_t* const            p_srq_attr,\r
+       IN              const   void* const                             srq_context,\r
+       IN              const   ib_pfn_event_cb_t                       pfn_srq_event_cb OPTIONAL,\r
+               OUT             ib_srq_handle_t* const          ph_srq );\r
+/*\r
+* PARAMETERS\r
+*      h_pd\r
+*              [in] This is a handle to a protection domain associated with the shared\r
+*              receive queue.\r
+*\r
+*      p_srq_attr\r
+*              [in] Attributes necessary to allocate and initialize a shared receive queue.\r
+*\r
+*      srq_context\r
+*              [in] A user-specified context information associated with the shared\r
+*              receive queue.\r
+*\r
+*      pfn_srq_event_cb\r
+*              [in] User-specified error callback routine invoked after an\r
+*              asynchronous event has occurred on the shared receive queue.\r
+*\r
+*      ph_srq\r
+*              [out] Upon successful completion of this call, this references a\r
+*              handle to the newly created shared receive queue.\r
+*\r
+* RETURN VALUES\r
+*      IB_SUCCESS\r
+*              The receive queue was successfully created.\r
+*\r
+*      IB_INVALID_PD_HANDLE\r
+*              The protection domain to associate with the shared receive queue was invalid.\r
+*\r
+*      IB_INVALID_PARAMETER\r
+*              A reference to the shared receive queue attributes or handle was not provided.\r
+*\r
+*      IB_INSUFFICIENT_MEMORY\r
+*              There was insufficient memory to create the shared receive queue.\r
+*\r
+*      IB_INSUFFICIENT_RESOURCES\r
+*              There were insufficient resources currently available on the channel\r
+*              adapter to create the shared receive queue.\r
+*\r
+*      IB_INVALID_SETTING\r
+*              The specified shared receive queue creation attributes are invalid.\r
+*\r
+*      IB_INVALID_MAX_WRS\r
+*              The requested maximum send or receive work request depth could not be\r
+*              supported.\r
+*\r
+*      IB_INVALID_MAX_SGE\r
+*              The requested maximum number of scatter-gather entries for the send or\r
+*              receive queue could not be supported.\r
+*\r
+* NOTES\r
+*      This routine allocates a shared receive queue with the specified attributes.  If\r
+*      the shared receive queue cannot be allocated, an error is returned.  When creating\r
+*      the shared receive queue, users associate a context with the shared receive queue.  This\r
+*      context is returned to the user through the asynchronous event callback\r
+*      if an event occurs.\r
+*\r
+*      This routine is used to create receive queues, which work with QPs of type:\r
+*\r
+*      IB_QPT_RELIABLE_CONN\r
+*      IB_QPT_UNRELIABLE_CONN\r
+*      IB_QPT_UNRELIABLE_DGRM\r
+*\r
+* SEE ALSO\r
+*      ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+*      ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_query_srq\r
+* NAME\r
+*      ib_query_srq\r
+*\r
+* DESCRIPTION\r
+*      Query the current attributes of the shared receive queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_query_srq(\r
+       IN              const   ib_srq_handle_t                         h_srq,\r
+               OUT                     ib_srq_attr_t* const                    p_srq_attr );\r
+/*\r
+* PARAMETERS\r
+*      h_srq\r
+*              [in] A handle to an existing shared receive queue.\r
+*\r
+*      p_srq_attr\r
+*              [out] Upon successful completion of this call, the structure\r
+*              referenced by this parameter contains the attributes of the specified\r
+*              shared receive queue.\r
+*\r
+* RETURN VALUES\r
+*      IB_SUCCESS\r
+*              The shared receive queue attributes were returned successfully.\r
+*\r
+*      IB_INVALID_SRQ_HANDLE\r
+*              The shared receive queue handle was invalid.\r
+*\r
+*      IB_INVALID_PARAMETER\r
+*              A reference to the shared receive queue attributes structure was not provided.\r
+*\r
+* NOTES\r
+*      This routine returns information about the specified shared receive queue.\r
+*\r
+* SEE ALSO\r
+*      ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+*      ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_modify_srq\r
+* NAME\r
+*      ib_modify_srq\r
+*\r
+* DESCRIPTION\r
+*      Modifies the attributes of an existing shared receive queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_modify_srq(\r
+       IN              const   ib_srq_handle_t                 h_srq,\r
+       IN              const   ib_srq_attr_t* const            p_srq_attr,\r
+       IN              const   ib_srq_attr_mask_t                      srq_attr_mask );\r
+/*\r
+* PARAMETERS\r
+*      h_srq\r
+*              [in] A handle to an existing shared receive queue.\r
+*\r
+*      p_srq_attr\r
+*              [in] Attributes necessary to allocate and initialize a shared receive queue.\r
+*\r
+*      srq_attr_mask\r
+*              [in] Flags, indicating which fields in the previous structure are valid.\r
+*\r
+* RETURN VALUES\r
+*      IB_SUCCESS\r
+*              The shared receive queue was successfully modified.\r
+*\r
+*      IB_INVALID_SRQ_HANDLE\r
+*              The shared receive queue handle was invalid.\r
+*\r
+*      IB_INVALID_PARAMETER\r
+*              A reference to the shared receive queue attributes was not provided.\r
+*\r
+*      IB_INVALID_SETTING\r
+*              The specified shared receive queue attributes were invalid.\r
+*\r
+*      IB_UNSUPPORTED\r
+*              The required action is not supported yet.\r
+*\r
+*      IB_INSUFFICIENT_RESOURCES\r
+*              There were insufficient resources currently available on the channel\r
+*              adapter to register the modify the shared receive queue.\r
+*\r
+* NOTES\r
+*      This routine modifies the attributes of an existing shared receive queue and\r
+*      transitions it to a new state.  The new state and attributes are\r
+*      specified through the p_srq_attr and srq_attr_mask parameters.  Upon successful completion,\r
+*      the shared receive queue is in the requested state.\r
+*\r
+* SEE ALSO\r
+*      ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+*      ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_destroy_srq\r
+* NAME\r
+*      ib_destroy_srq\r
+*\r
+* DESCRIPTION\r
+*      Release a shared receive queue.  Once destroyed, no further access to this\r
+*      shared receive queue is possible.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_destroy_srq(\r
+       IN              const   ib_srq_handle_t                         h_srq,\r
+       IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );\r
+/*\r
+* PARAMETERS\r
+*      h_srq\r
+*              [in] A handle to an existing shared receive queue.\r
+*\r
+*      pfn_destroy_cb\r
+*              [in] A user-specified callback that is invoked after the shared receive queue\r
+*              has been successfully destroyed.\r
+*\r
+* RETURN VALUES\r
+*      IB_SUCCESS\r
+*              The destroy request was registered.\r
+*\r
+*      IB_INVALID_SRQ_HANDLE\r
+*              The shared receive queue handle was invalid.\r
+*\r
+*      IB_RESOURCE_BUSY\r
+*              There are QPs, bound to the shared receive queue\r
+*\r
+* NOTES\r
+*      This call destroys an existing shared receive queue.  Since callbacks may be\r
+*      outstanding against the shared receive queue at the time the destroy operation is\r
+*      invoked, then this call operates asynchronously.  The user will be notified\r
+*      through a callback once the destroy operation completes, indicating that\r
+*      no additional callbacks will be invoked for the specified shared receive queue.\r
+*\r
+* SEE ALSO\r
+*      ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,\r
+*      ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_post_srq_recv\r
+* NAME\r
+*      ib_post_srq_recv\r
+*\r
+* DESCRIPTION\r
+*      This routine posts a work request to a shared receive queue.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+ib_post_srq_recv(\r
+       IN              const   ib_srq_handle_t                         h_srq,\r
+       IN                              ib_recv_wr_t* const                     p_recv_wr,\r
+               OUT                     ib_recv_wr_t                            **pp_recv_failure OPTIONAL );\r
+/*\r
+* PARAMETERS\r
+*      h_srq\r
+*              [in] The shared receive queue to which this work request is being submitted.\r
+*\r
+*      p_recv_wr\r
+*              [in] A reference to the head of the work request list.\r
+*\r
+*      pp_recv_failure\r
+*              [out] If the post receive operation failed, this references the work\r
+*              request in the p_recv_wr list where the first failure occurred.\r
+*              This parameter may be NULL if only a single work request is being\r
+*              posted to the QP.\r
+*\r
+* RETURN VALUES\r
+*      IB_SUCCESS\r
+*              All work requests were successfully posted.\r
+*\r
+*      IB_INVALID_QP_HANDLE\r
+*              The shared receive queue handle was invalid.\r
+*\r
+*      IB_INVALID_PARAMETER\r
+*              A reference to the receive work request list was not provided.\r
+*\r
+*      IB_INSUFFICIENT_RESOURCES\r
+*              The number of posted work requests exceed the current depth available\r
+*              on the receive queue.\r
+*\r
+*      IB_INVALID_WR_TYPE\r
+*              The work request type was invalid.\r
+*\r
+*      IB_INVALID_QP_STATE\r
+*              The current shared receive queue state does not allow posting receives.\r
+*\r
+* NOTES\r
+*      This routine posts a work request to the shared receive queue.\r
+*      The type of work to perform is defined by the p_recv_wr parameter.  This\r
+*      call is used to post data buffers to receive incoming message sends.\r
+*\r
+* SEE ALSO\r
+*      ib_recv_wr_t\r
+*****/\r
+\r
+\r
 /****f* Access Layer/ib_create_qp\r
 * NAME\r
 *      ib_create_qp\r
@@ -1155,6 +1443,10 @@ ib_create_qp(
 *              The send or receive completion queue to associate with the queue pair\r
 *              was invalid.\r
 *\r
+*      IB_INVALID_SRQ_HANDLE\r
+*              The shared receive queue to be associated with the queue pair\r
+*              was invalid.\r
+*\r
 *      IB_INVALID_SETTING\r
 *              The specified queue pair creation attributes are invalid.\r
 *\r
@@ -1170,20 +1462,27 @@ ib_create_qp(
 *              receive queue could not be supported.\r
 *\r
 * NOTES\r
-*      This routine allocates a queue pair with the specified attributes.  If\r
+*      1. This routine allocates a queue pair with the specified attributes.  If\r
 *      the queue pair cannot be allocated, an error is returned.  When creating\r
 *      the queue pair, users associate a context with the queue pair.  This\r
 *      context is returned to the user through the asynchronous event callback\r
 *      if an event occurs.\r
 *\r
-*      This routine is used to create queue pairs of type:\r
-*\r
-*      IB_QPT_RELIABLE_CONN\r
-*      IB_QPT_UNRELIABLE_CONN\r
-*      IB_QPT_UNRELIABLE_DGRM\r
-*      IB_QPT_MAD\r
-*\r
-*      Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair\r
+*      2. For QPs that are associated with an SRQ, the Consumer should take\r
+*      the QP through the Error State before invoking a Destroy QP or a Modify\r
+*      QP to the Reset State. The Consumer may invoke the Destroy QP without\r
+*      first performing a Modify QP to the Error State and waiting for the Affiliated \r
+*      Asynchronous Last WQE Reached Event. However, if the Consumer\r
+*      does not wait for the Affiliated Asynchronous Last WQE Reached Event,\r
+*      then WQE and Data Segment leakage may occur.\r
+*\r
+*      3. This routine is used to create queue pairs of type:\r
+*              IB_QPT_RELIABLE_CONN\r
+*              IB_QPT_UNRELIABLE_CONN\r
+*              IB_QPT_UNRELIABLE_DGRM\r
+*              IB_QPT_MAD\r
+*\r
+*      4. Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair\r
 *      is of type IB_QPT_UNRELIABLE_DGRM or IB_QPT_MAD before sending or\r
 *      receiving data.  IB_QPT_RELIABLE_CONN, IB_QPT_UNRELIABLE_CONN type\r
 *      queue pairs should be used by the connection establishment process\r
@@ -8485,8 +8784,10 @@ typedef struct _ib_pnp_rec
 \r
        void* __ptr64                           pnp_context;\r
        void* __ptr64                           context;\r
-\r
+       //NOTE:\r
+       //guid and ca_guid are used together as the key to a flexi map; keep these fields adjacent\r
        ib_net64_t                                      guid;\r
+       ib_net64_t                                      ca_guid;\r
 \r
 }      ib_pnp_rec_t;\r
 /*\r
@@ -8520,6 +8821,9 @@ typedef struct _ib_pnp_rec
 *              The GUID of the adapter, port, IOU, or IOC for which\r
 *              the PnP event occurred.\r
 *\r
+*      ca_guid\r
+*              The GUID of the HCA.\r
+*\r
 * NOTES\r
 *      This structure is returned to the user to notify them of: the addition\r
 *      of a channel adapter, the removal of a channel adapter, a port up or down\r
@@ -8656,6 +8960,7 @@ typedef struct _ib_pnp_port_rec
 typedef struct _ib_pnp_iou_rec\r
 {\r
        ib_pnp_rec_t                            pnp_rec;\r
+       net64_t                                         guid;\r
        net64_t                                         ca_guid;\r
        net64_t                                         chassis_guid;\r
        uint8_t                                         slot;\r
index e6b5cc9..c006a62 100644 (file)
@@ -673,6 +673,200 @@ typedef union _ual_destroy_av_ioctl
 *              Status of the operation.\r
 *****/\r
 \r
+/****s* User-mode Access Layer/ual_create_srq_ioctl_t\r
+* NAME\r
+*      ual_create_srq_ioctl_t\r
+*\r
+* DESCRIPTION\r
+*      IOCTL structure containing the input and output parameters for\r
+*      ib_create_srq\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef union _ual_create_srq_ioctl\r
+{\r
+       struct _ual_create_srq_ioctl_in\r
+       {\r
+               ci_umv_buf_t                            umv_buf;\r
+               uint64_t                                        h_pd;\r
+               ib_srq_attr_t                           srq_attr;\r
+               void* __ptr64                           context;\r
+       &