[MTHCA] 1. bugfix: add multi-threading support for MODIFY QP, MODIFY SRQ and, partly...
authorleonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Mon, 14 Aug 2006 18:14:45 +0000 (18:14 +0000)
committerleonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Mon, 14 Aug 2006 18:14:45 +0000 (18:14 +0000)
2. add sanity check for path MTU value in modify_qp;
3. add sanity check for existence of AV while building UD header.

git-svn-id: svn://openib.tc.cornell.edu/gen1/trunk@450 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

hw/mthca/kernel/mthca_cq.c
hw/mthca/kernel/mthca_provider.h
hw/mthca/kernel/mthca_qp.c
hw/mthca/kernel/mthca_srq.c

index dac4020..3d9febe 100644 (file)
@@ -795,6 +795,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
        spin_lock_init(&cq->lock);
        atomic_set(&cq->refcount, 1);
        init_waitqueue_head(&cq->wait);
+       KeInitializeMutex(&cq->mutex, 0);
 
        RtlZeroMemory(cq_context, sizeof *cq_context);
        cq_context->flags           = cl_hton32(MTHCA_CQ_STATUS_OK      |
index 574968f..51cf73f 100644 (file)
@@ -208,6 +208,7 @@ struct mthca_cq {
        union mthca_buf        queue;
        struct mthca_mr        mr;
        wait_queue_head_t      wait;
+       KMUTEX                      mutex;
 };
 
 struct mthca_srq {
@@ -231,6 +232,7 @@ struct mthca_srq {
        struct mthca_mr         mr;
 
        wait_queue_head_t       wait;
+       KMUTEX                      mutex;
 };
 
 struct mthca_wq {
@@ -274,6 +276,7 @@ struct mthca_qp {
        union mthca_buf        queue;
 
        wait_queue_head_t      wait;
+       KMUTEX                      mutex;
 };
 
 struct mthca_sqp {
index 16a0976..e16ff15 100644 (file)
@@ -514,16 +514,18 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        u32 req_param, opt_param;
        u32 sqd_event = 0;
        u8 status;
-       int err;
+       int err = -EINVAL;
        SPIN_LOCK_PREP(lhs);
        SPIN_LOCK_PREP(lhr);
 
+       down( &qp->mutex );
+
        if (attr_mask & IB_QP_CUR_STATE) {
                if (attr->cur_qp_state != IBQPS_RTR &&
-                   attr->cur_qp_state != IBQPS_RTS &&
-                   attr->cur_qp_state != IBQPS_SQD &&
-                   attr->cur_qp_state != IBQPS_SQE)
-                       return -EINVAL;
+                       attr->cur_qp_state != IBQPS_RTS &&
+                       attr->cur_qp_state != IBQPS_SQD &&
+                       attr->cur_qp_state != IBQPS_SQE)
+                       goto out;
                else
                        cur_state = attr->cur_qp_state;
        } else {
@@ -535,8 +537,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        }
 
        if (attr_mask & IB_QP_STATE) {
-               if (attr->qp_state < 0 || attr->qp_state > IBQPS_ERR)
-                       return -EINVAL;
+               if (attr->qp_state < 0 || attr->qp_state > IBQPS_ERR)
+                       goto out;
                new_state = attr->qp_state;
        } else
                new_state = cur_state;
@@ -544,7 +546,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Illegal QP transition "
                          "%d->%d\n", cur_state, new_state));
-               return -EINVAL;
+               goto out;
        }
 
        req_param = state_table[cur_state][new_state].req_param[qp->transport];
@@ -556,7 +558,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                          cur_state, new_state,
                          req_param & ~attr_mask));
                //NB: IBAL doesn't use all the fields, so we can miss some mandatory flags
-               return -EINVAL;
+               goto out;
        }
 
        if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
@@ -567,39 +569,41 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                          attr_mask & ~(req_param | opt_param |
                                                 IB_QP_STATE)));
                //NB: The old code sometimes uses optional flags that are not so in this code
-               return -EINVAL;
+               goto out;
        }
 
        if ((attr_mask & IB_QP_PKEY_INDEX) && 
-            attr->pkey_index >= dev->limits.pkey_table_len) {
+               attr->pkey_index >= dev->limits.pkey_table_len) {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("PKey index (%u) too large. max is %d\n",
                          attr->pkey_index,dev->limits.pkey_table_len-1)); 
-               return -EINVAL;
+               goto out;
        }
 
        if ((attr_mask & IB_QP_PORT) &&
-           (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+               (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Port number (%u) is invalid\n", attr->port_num));
-               return -EINVAL;
+               goto out;
        }
 
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-           attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+               attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma));
-               return -EINVAL;
+               goto out;
        }
 
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift));
-               return -EINVAL;
+               goto out;
        }
 
        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto out;
+       }
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        RtlZeroMemory(qp_param, sizeof *qp_param);
@@ -628,8 +632,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
-       else if (attr_mask & IB_QP_PATH_MTU)
+       else if (attr_mask & IB_QP_PATH_MTU) {
+               if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
+                       HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+                               ("path MTU (%u) is invalid\n", attr->path_mtu));
+                       goto out_mailbox;
+               }
                qp_context->mtu_msgmax = (u8)((attr->path_mtu << 5) | 31);
+       }
 
        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
@@ -794,21 +804,20 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
        err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
                              qp->qpn, 0, mailbox, sqd_event, &status);
+       if (err)
+               goto out_mailbox;
        if (status) {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("modify QP %d returned status %02x.\n",
-                          state_table[cur_state][new_state].trans, status));
+                       state_table[cur_state][new_state].trans, status));
                err = -EINVAL;
+               goto out_mailbox;
        }
 
-       if (!err) {
-               qp->state = new_state;
-               if (attr_mask & IB_QP_ACCESS_FLAGS)
-                       qp->atomic_rd_en = (u8)attr->qp_access_flags;
-               if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-                       qp->resp_depth = attr->max_dest_rd_atomic;
-       }
-
-       mthca_free_mailbox(dev, mailbox);
+       qp->state = new_state;
+       if (attr_mask & IB_QP_ACCESS_FLAGS)
+               qp->atomic_rd_en = (u8)attr->qp_access_flags;
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               qp->resp_depth = attr->max_dest_rd_atomic;
 
        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);
@@ -819,13 +828,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IBQPS_RTR &&
-                   new_state == IBQPS_RTR)
+                       new_state == IBQPS_RTR)
                        init_port(dev, to_msqp(qp)->port);
 
                if (cur_state != IBQPS_RESET &&
-                   cur_state != IBQPS_ERR &&
-                   (new_state == IBQPS_RESET ||
-                    new_state == IBQPS_ERR))
+                       cur_state != IBQPS_ERR &&
+                       (new_state == IBQPS_RESET ||
+                       new_state == IBQPS_ERR))
                        mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
        }
 
@@ -833,7 +842,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
-       if (!err && new_state == IB_QPS_RESET && !qp->ibqp.ucontext) {
+       if (new_state == IB_QPS_RESET && !qp->ibqp.ucontext) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
@@ -851,6 +860,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                }
        }
 
+out_mailbox:
+       mthca_free_mailbox(dev, mailbox);
+
+out:
+       up( &qp->mutex );
        return err;
 }
 
@@ -1096,6 +1110,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 
        atomic_set(&qp->refcount, 1);
        init_waitqueue_head(&qp->wait);
+       KeInitializeMutex(&qp->mutex, 0);
+       
        qp->state        = IBQPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
@@ -1412,6 +1428,12 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
        u16 pkey;
        CPU_2_BE64_PREP;
 
+       if (!wr->dgrm.ud.h_av) {
+               HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_AV, 
+                       ("absent AV in send wr %p\n", wr));
+               return -EINVAL;
+       }
+               
        ib_ud_header_init(256, /* assume a MAD */
                mthca_ah_grh_present(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)),
                &sqp->ud_header);
index c731fce..407e0ca 100644 (file)
@@ -251,6 +251,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
        spin_lock_init(&srq->lock);
        atomic_set(&srq->refcount, 1);
        init_waitqueue_head(&srq->wait);
+       KeInitializeMutex(&srq->mutex, 0);
 
        if (mthca_is_memfree(dev))
                mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
@@ -369,7 +370,13 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                return -EINVAL;
 
        if (attr_mask & IB_SRQ_LIMIT) {
+               if (attr->srq_limit > (u32)srq->max)
+                       return -EINVAL;
+
+               down(&srq->mutex);
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+               up(&srq->mutex);
+
                if (ret)
                        return ret;
                if (status)