( virt_to_bus ( arbel_qp->recv.wqe ) >> 6 ) );
MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
arbel_qp->recv.doorbell_idx );
- MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
DBGC ( arbel, "Arbel %p RST2INIT_QPEE failed: %s\n",
arbel, strerror ( rc ) );
*
* @v ibdev Infiniband device
* @v qp Queue pair
- * @v mod_list Modification list
* @ret rc Return status code
*/
static int arbel_modify_qp ( struct ib_device *ibdev,
- struct ib_queue_pair *qp,
- unsigned long mod_list ) {
+ struct ib_queue_pair *qp ) {
struct arbel *arbel = ib_get_drvdata ( ibdev );
struct arbelprm_qp_ee_state_transitions qpctx;
- unsigned long optparammask = 0;
int rc;
- /* Construct optparammask */
- if ( mod_list & IB_MODIFY_QKEY )
- optparammask |= ARBEL_QPEE_OPT_PARAM_QKEY;
-
/* Issue RTS2RTS_QP */
memset ( &qpctx, 0, sizeof ( qpctx ) );
- MLX_FILL_1 ( &qpctx, 0, opt_param_mask, optparammask );
+ MLX_FILL_1 ( &qpctx, 0, opt_param_mask, ARBEL_QPEE_OPT_PARAM_QKEY );
MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
if ( ( rc = arbel_cmd_rts2rts_qp ( arbel, qp->qpn, &qpctx ) ) != 0 ){
DBGC ( arbel, "Arbel %p RTS2RTS_QP failed: %s\n",
MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
( virt_to_phys ( &hermon_qp->recv.doorbell ) >> 2 ) );
- MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
( hermon_qp->mtt.mtt_base_addr >> 3 ) );
if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
*
* @v ibdev Infiniband device
* @v qp Queue pair
- * @v mod_list Modification list
* @ret rc Return status code
*/
static int hermon_modify_qp ( struct ib_device *ibdev,
- struct ib_queue_pair *qp,
- unsigned long mod_list ) {
+ struct ib_queue_pair *qp ) {
struct hermon *hermon = ib_get_drvdata ( ibdev );
struct hermonprm_qp_ee_state_transitions qpctx;
- unsigned long optparammask = 0;
int rc;
- /* Construct optparammask */
- if ( mod_list & IB_MODIFY_QKEY )
- optparammask |= HERMON_QP_OPT_PARAM_QKEY;
-
/* Issue RTS2RTS_QP */
memset ( &qpctx, 0, sizeof ( qpctx ) );
- MLX_FILL_1 ( &qpctx, 0, opt_param_mask, optparammask );
+ MLX_FILL_1 ( &qpctx, 0, opt_param_mask, HERMON_QP_OPT_PARAM_QKEY );
MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
if ( ( rc = hermon_cmd_rts2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
DBGC ( hermon, "Hermon %p RTS2RTS_QP failed: %s\n",
return 0;
}
+/**
+ * Set partition key table
+ *
+ * @v ibdev Infiniband device
+ * @v mad Set partition key table MAD
+ * @ret rc Return status code
+ */
+static int linda_set_pkey_table ( struct ib_device *ibdev __unused,
+ union ib_mad *mad __unused ) {
+ /* Nothing to do */
+ return 0;
+}
+
/***************************************************************************
*
* Context allocation
*
* @v ibdev Infiniband device
* @v qp Queue pair
- * @v mod_list Modification list
* @ret rc Return status code
*/
static int linda_modify_qp ( struct ib_device *ibdev,
- struct ib_queue_pair *qp,
- unsigned long mod_list __unused ) {
+ struct ib_queue_pair *qp ) {
struct linda *linda = ib_get_drvdata ( ibdev );
/* Nothing to do; the hardware doesn't have a notion of queue
.mcast_attach = linda_mcast_attach,
.mcast_detach = linda_mcast_detach,
.set_port_info = linda_set_port_info,
+ .set_pkey_table = linda_set_pkey_table,
};
/***************************************************************************
/* Allocate queue pair */
ipoib->qp = ib_create_qp ( ibdev, IB_QPT_UD,
IPOIB_NUM_SEND_WQES, ipoib->cq,
- IPOIB_NUM_RECV_WQES, ipoib->cq, 0 );
+ IPOIB_NUM_RECV_WQES, ipoib->cq );
if ( ! ipoib->qp ) {
DBGC ( ipoib, "IPoIB %p could not allocate queue pair\n",
ipoib );
struct ib_completion_queue;
struct ib_gma;
+/** Infiniband transmission rates */
+enum ib_rate {
+ IB_RATE_2_5 = 2,
+ IB_RATE_10 = 3,
+ IB_RATE_30 = 4,
+ IB_RATE_5 = 5,
+ IB_RATE_20 = 6,
+ IB_RATE_40 = 7,
+ IB_RATE_60 = 8,
+ IB_RATE_80 = 9,
+ IB_RATE_120 = 10,
+};
+
+/** An Infiniband Address Vector */
+struct ib_address_vector {
+ /** Queue Pair Number */
+ unsigned long qpn;
+ /** Queue key
+ *
+ * Not specified for received packets.
+ */
+ unsigned long qkey;
+ /** Local ID */
+ unsigned int lid;
+ /** Rate
+ *
+ * Not specified for received packets.
+ */
+ enum ib_rate rate;
+ /** Service level */
+ unsigned int sl;
+ /** GID is present */
+ unsigned int gid_present;
+ /** GID, if present */
+ struct ib_gid gid;
+};
+
/** An Infiniband Work Queue */
struct ib_work_queue {
/** Containing queue pair */
struct ib_completion_queue *cq;
/** List of work queues on this completion queue */
struct list_head list;
+ /** Packet sequence number */
+ uint32_t psn;
/** Number of work queue entries */
unsigned int num_wqes;
/** Number of occupied work queue entries */
IB_QPT_SMA,
IB_QPT_GMA,
IB_QPT_UD,
+ IB_QPT_RC,
};
/** An Infiniband Queue Pair */
struct ib_work_queue recv;
/** List of multicast GIDs */
struct list_head mgids;
+ /** Address vector
+ *
+ * This is used by ib_post_send() as the default address
+ * vector if none is supplied.
+ */
+ struct ib_address_vector av;
/** Driver private data */
void *drv_priv;
/** Queue owner private data */
void *owner_priv;
};
-/** Infiniband queue pair modification flags */
-enum ib_queue_pair_mods {
- IB_MODIFY_QKEY = 0x0001,
-};
-
-/** An Infiniband Address Vector */
-struct ib_address_vector {
- /** Queue Pair Number */
- unsigned long qpn;
- /** Queue key
- *
- * Not specified for received packets.
- */
- unsigned long qkey;
- /** Local ID */
- unsigned int lid;
- /** Rate
- *
- * Not specified for received packets.
- */
- unsigned int rate;
- /** Service level */
- unsigned int sl;
- /** GID is present */
- unsigned int gid_present;
- /** GID, if present */
- struct ib_gid gid;
-};
-
-/** Infiniband transmission rates */
-enum ib_rate {
- IB_RATE_2_5 = 2,
- IB_RATE_10 = 3,
- IB_RATE_30 = 4,
- IB_RATE_5 = 5,
- IB_RATE_20 = 6,
- IB_RATE_40 = 7,
- IB_RATE_60 = 8,
- IB_RATE_80 = 9,
- IB_RATE_120 = 10,
-};
-
/** Infiniband completion queue operations */
struct ib_completion_queue_operations {
/**
*
* @v ibdev Infiniband device
* @v qp Queue pair
- * @v mod_list Modification list
* @ret rc Return status code
*/
int ( * modify_qp ) ( struct ib_device *ibdev,
- struct ib_queue_pair *qp,
- unsigned long mod_list );
+ struct ib_queue_pair *qp );
/** Destroy queue pair
*
* @v ibdev Infiniband device
* an embedded SMA.
*/
int ( * set_port_info ) ( struct ib_device *ibdev, union ib_mad *mad );
+ /** Set partition key table
+ *
+ * @v ibdev Infiniband device
+ * @v mad Set partition key table MAD
+ * @ret rc Return status code
+ *
+ * This method is required only by adapters that do not have
+ * an embedded SMA.
+ */
+ int ( * set_pkey_table ) ( struct ib_device *ibdev,
+ union ib_mad *mad );
};
/** An Infiniband device */
/** Partition key */
uint16_t pkey;
- /** Outbound packet sequence number */
- uint32_t psn;
+ /** RDMA key
+ *
+ * This is a single key allowing unrestricted access to
+ * memory.
+ */
+ uint32_t rdma_key;
/** Subnet management agent */
struct ib_gma *sma;
extern struct ib_queue_pair *
ib_create_qp ( struct ib_device *ibdev, enum ib_queue_pair_type type,
unsigned int num_send_wqes, struct ib_completion_queue *send_cq,
- unsigned int num_recv_wqes, struct ib_completion_queue *recv_cq,
- unsigned long qkey );
-extern int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp,
- unsigned long mod_list, unsigned long qkey );
+ unsigned int num_recv_wqes,
+ struct ib_completion_queue *recv_cq );
+extern int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp );
extern void ib_destroy_qp ( struct ib_device *ibdev,
struct ib_queue_pair *qp );
extern struct ib_queue_pair * ib_find_qp_qpn ( struct ib_device *ibdev,
extern int ib_get_hca_info ( struct ib_device *ibdev,
struct ib_gid_half *hca_guid );
extern int ib_set_port_info ( struct ib_device *ibdev, union ib_mad *mad );
+extern int ib_set_pkey_table ( struct ib_device *ibdev, union ib_mad *mad );
extern struct ib_device * alloc_ibdev ( size_t priv_size );
extern int register_ibdev ( struct ib_device *ibdev );
extern void unregister_ibdev ( struct ib_device *ibdev );
* @v send_cq Send completion queue
* @v num_recv_wqes Number of receive work queue entries
* @v recv_cq Receive completion queue
- * @v qkey Queue key
* @ret qp Queue pair
+ *
+ * The queue pair will be left in the INIT state; you must call
+ * ib_modify_qp() before it is ready to use for sending and receiving.
*/
struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
enum ib_queue_pair_type type,
unsigned int num_send_wqes,
struct ib_completion_queue *send_cq,
unsigned int num_recv_wqes,
- struct ib_completion_queue *recv_cq,
- unsigned long qkey ) {
+ struct ib_completion_queue *recv_cq ) {
struct ib_queue_pair *qp;
size_t total_size;
int rc;
qp->ibdev = ibdev;
list_add ( &qp->list, &ibdev->qps );
qp->type = type;
- qp->qkey = qkey;
qp->send.qp = qp;
qp->send.is_send = 1;
qp->send.cq = send_cq;
list_add ( &qp->send.list, &send_cq->work_queues );
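+ /* Choose a random initial send packet sequence number */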
+ qp->send.psn = ( random() & 0xffffffUL );
qp->send.num_wqes = num_send_wqes;
qp->send.iobufs = ( ( ( void * ) qp ) + sizeof ( *qp ) );
qp->recv.qp = qp;
qp->recv.cq = recv_cq;
list_add ( &qp->recv.list, &recv_cq->work_queues );
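+ /* Choose a random initial receive packet sequence number */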
+ qp->recv.psn = ( random() & 0xffffffUL );
qp->recv.num_wqes = num_recv_wqes;
qp->recv.iobufs = ( ( ( void * ) qp ) + sizeof ( *qp ) +
( num_send_wqes * sizeof ( qp->send.iobufs[0] ) ));
*
* @v ibdev Infiniband device
* @v qp Queue pair
- * @v mod_list Modification list
- * @v qkey New queue key, if applicable
* @ret rc Return status code
*/
-int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp,
- unsigned long mod_list, unsigned long qkey ) {
+int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp ) {
int rc;
DBGC ( ibdev, "IBDEV %p modifying QPN %#lx\n", ibdev, qp->qpn );
- if ( mod_list & IB_MODIFY_QKEY )
- qp->qkey = qkey;
-
- if ( ( rc = ibdev->op->modify_qp ( ibdev, qp, mod_list ) ) != 0 ) {
+ if ( ( rc = ibdev->op->modify_qp ( ibdev, qp ) ) != 0 ) {
DBGC ( ibdev, "IBDEV %p could not modify QPN %#lx: %s\n",
ibdev, qp->qpn, strerror ( rc ) );
return rc;
int ib_post_send ( struct ib_device *ibdev, struct ib_queue_pair *qp,
struct ib_address_vector *av,
struct io_buffer *iobuf ) {
+ struct ib_address_vector av_copy;
int rc;
/* Check queue fill level */
return -ENOBUFS;
}
+ /* Use default address vector if none specified */
+ if ( ! av )
+ av = &qp->av;
+
+ /* Make modifiable copy of address vector */
+ memcpy ( &av_copy, av, sizeof ( av_copy ) );
+ av = &av_copy;
+
/* Fill in optional parameters in address vector */
if ( ! av->qkey )
av->qkey = qp->qkey;
return 0;
};
+/**
+ * Set partition key table
+ *
+ * @v ibdev Infiniband device
+ * @v mad Set partition key table MAD
+ * @ret rc Return status code
+ */
+int ib_set_pkey_table ( struct ib_device *ibdev, union ib_mad *mad ) {
+ int rc;
+
+ /* Adapters with embedded SMAs do not need to support this method */
+ if ( ! ibdev->op->set_pkey_table ) {
+ DBGC ( ibdev, "IBDEV %p does not support setting partition "
+ "key table\n", ibdev );
+ return -ENOTSUP;
+ }
+
+ if ( ( rc = ibdev->op->set_pkey_table ( ibdev, mad ) ) != 0 ) {
+ DBGC ( ibdev, "IBDEV %p could not set partition key table: "
+ "%s\n", ibdev, strerror ( rc ) );
+ return rc;
+ }
+
+ return 0;
+};
+
/***************************************************************************
*
* Event queues
union ib_mad *mad ) {
struct ib_device *ibdev = gma->ibdev;
struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
+ int rc;
ibdev->pkey = ntohs ( pkey_table->pkey[0] );
+ DBGC ( gma, "GMA %p set pkey %04x\n", gma, ibdev->pkey );
+
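+ /* Pass the updated partition key table to the driver */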
+ if ( ( rc = ib_set_pkey_table ( ibdev, mad ) ) != 0 ) {
+ DBGC ( gma, "GMA %p could not set pkey table: %s\n",
+ gma, strerror ( rc ) );
+ mad->hdr.status =
+ htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
+ }
return ib_sma_get_pkey_table ( gma, mad );
}
struct ib_gma * ib_create_gma ( struct ib_device *ibdev,
enum ib_queue_pair_type type ) {
struct ib_gma *gma;
- unsigned long qkey;
+ int rc;
/* Allocate and initialise fields */
gma = zalloc ( sizeof ( *gma ) );
}
/* Create queue pair */
- qkey = ( ( type == IB_QPT_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
gma->qp = ib_create_qp ( ibdev, type, IB_GMA_NUM_SEND_WQES, gma->cq,
- IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
+ IB_GMA_NUM_RECV_WQES, gma->cq );
if ( ! gma->qp ) {
DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
goto err_create_qp;
}
ib_qp_set_ownerdata ( gma->qp, gma );
-
DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );
+ /* Set queue key */
+ gma->qp->qkey = ( ( type == IB_QPT_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
+ if ( ( rc = ib_modify_qp ( ibdev, gma->qp ) ) != 0 ) {
+ DBGC ( gma, "GMA %p could not set queue key: %s\n",
+ gma, strerror ( rc ) );
+ goto err_modify_qp;
+ }
+
/* Fill receive ring */
ib_refill_recv ( ibdev, gma->qp );
return gma;
+ err_modify_qp:
ib_destroy_qp ( ibdev, gma->qp );
err_create_qp:
ib_destroy_cq ( ibdev, gma->cq );
ntohl ( gid->u.dwords[3] ), qkey );
/* Set queue key */
- if ( ( rc = ib_modify_qp ( ibdev, qp, IB_MODIFY_QKEY, qkey ) ) != 0 ) {
+ qp->qkey = qkey;
+ if ( ( rc = ib_modify_qp ( ibdev, qp ) ) != 0 ) {
DBGC ( gma, "GMA %p QPN %lx could not modify qkey: %s\n",
gma, qp->qpn, strerror ( rc ) );
return NULL;
bth->se__m__padcnt__tver = ( pad_len << 4 );
bth->pkey = htons ( ibdev->pkey );
bth->dest_qp = htonl ( av->qpn );
- bth->ack__psn = htonl ( ( ibdev->psn++ ) & 0xffffffUL );
+ bth->ack__psn = htonl ( ( qp->send.psn++ ) & 0xffffffUL );
/* Construct DETH */
deth->qkey = htonl ( av->qkey );