#define ARBEL_HCR_INIT2RTR_QPEE 0x001a
#define ARBEL_HCR_RTR2RTS_QPEE 0x001b
#define ARBEL_HCR_2RST_QPEE 0x0021
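+/* Multicast group (MGM) table commands */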
+#define ARBEL_HCR_READ_MGM 0x0025
+#define ARBEL_HCR_WRITE_MGM 0x0026
+#define ARBEL_HCR_MGID_HASH 0x0027
/* Service types */
#define ARBEL_ST_UD 0x03
#define ARBEL_INVALID_LKEY 0x00000100UL
+/*
+ * Datatypes that seem to be missing from the autogenerated documentation
+ *
+ */
+struct arbelprm_mgm_hash_st {
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ pseudo_bit_t hash[0x00010];
+ pseudo_bit_t reserved1[0x00010];
+};
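+/* Wrapped by MLX_DECLARE_STRUCT ( arbelprm_mgm_hash ) below; the
+ * 16-bit hash value is extracted with MLX_GET ( &hash, hash ) as in
+ * arbel_mcast_attach().
+ */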
+
/*
* Wrapper structures for hardware datatypes
*
struct MLX_DECLARE_STRUCT ( arbelprm_cq_arm_db_record );
struct MLX_DECLARE_STRUCT ( arbelprm_cq_ci_db_record );
struct MLX_DECLARE_STRUCT ( arbelprm_hca_command_register );
+struct MLX_DECLARE_STRUCT ( arbelprm_mgm_entry );
+struct MLX_DECLARE_STRUCT ( arbelprm_mgm_hash );
struct MLX_DECLARE_STRUCT ( arbelprm_qp_db_record );
struct MLX_DECLARE_STRUCT ( arbelprm_qp_ee_state_transitions );
struct MLX_DECLARE_STRUCT ( arbelprm_query_dev_lim );
#define ARBEL_HCR_OUT_LEN( _command ) ( ( (_command) >> 21 ) & 0x7fc )
/** Build HCR command from component parts */
-#define ARBEL_HCR_CMD( _opcode, _in_mbox, _in_len, _out_mbox, _out_len ) \
+#define ARBEL_HCR_INOUT_CMD( _opcode, _in_mbox, _in_len, \
+ _out_mbox, _out_len ) \
( (_opcode) | \
( (_in_mbox) ? ARBEL_HCR_IN_MBOX : 0 ) | \
( ( (_in_len) / 4 ) << 14 ) | \
( (_out_mbox) ? ARBEL_HCR_OUT_MBOX : 0 ) | \
( ( (_out_len) / 4 ) << 23 ) )
#define ARBEL_HCR_IN_CMD( _opcode, _in_mbox, _in_len ) \
- ARBEL_HCR_CMD ( _opcode, _in_mbox, _in_len, 0, 0 )
+ ARBEL_HCR_INOUT_CMD ( _opcode, _in_mbox, _in_len, 0, 0 )
#define ARBEL_HCR_OUT_CMD( _opcode, _out_mbox, _out_len ) \
- ARBEL_HCR_CMD ( _opcode, 0, 0, _out_mbox, _out_len )
+ ARBEL_HCR_INOUT_CMD ( _opcode, 0, 0, _out_mbox, _out_len )
#define ARBEL_HCR_VOID_CMD( _opcode ) \
- ARBEL_HCR_CMD ( _opcode, 0, 0, 0, 0 )
+ ARBEL_HCR_INOUT_CMD ( _opcode, 0, 0, 0, 0 )
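+/* For example, arbel_cmd_mgid_hash() builds its command word as
+ * ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH, 1, 16, 0, 8 ): the opcode,
+ * the ARBEL_HCR_IN_MBOX flag for the 16-byte GID input, no
+ * ARBEL_HCR_OUT_MBOX flag for the 8-byte hash output, and the
+ * input/output lengths in dwords.
+ */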
/*
* Doorbell record allocation
0x03, NULL, qpn, NULL );
}
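+/* Command wrappers for the new multicast group commands: the MGM table
+ * index is passed as the input modifier, while the MGM entry, GID and
+ * hash travel as the commands' input or output parameters.
+ */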
+static inline int
+arbel_cmd_read_mgm ( struct arbel *arbel, unsigned int index,
+ struct arbelprm_mgm_entry *mgm ) {
+ return arbel_cmd ( arbel,
+ ARBEL_HCR_OUT_CMD ( ARBEL_HCR_READ_MGM,
+ 1, sizeof ( *mgm ) ),
+ 0, NULL, index, mgm );
+}
+
+static inline int
+arbel_cmd_write_mgm ( struct arbel *arbel, unsigned int index,
+ const struct arbelprm_mgm_entry *mgm ) {
+ return arbel_cmd ( arbel,
+ ARBEL_HCR_IN_CMD ( ARBEL_HCR_WRITE_MGM,
+ 1, sizeof ( *mgm ) ),
+ 0, mgm, index, NULL );
+}
+
+static inline int
+arbel_cmd_mgid_hash ( struct arbel *arbel, const struct ib_gid *gid,
+ struct arbelprm_mgm_hash *hash ) {
+ return arbel_cmd ( arbel,
+ ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH,
+ 1, sizeof ( *gid ),
+ 0, sizeof ( *hash ) ),
+ 0, gid, 0, hash );
+}
+
/***************************************************************************
*
* Completion queue operations
}
}
+/***************************************************************************
+ *
+ * Multicast group operations
+ *
+ ***************************************************************************
+ */
+
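+/* The Arbel multicast group (MGM) table is a hash table: the MGID_HASH
+ * command maps a multicast GID to a table index, and READ_MGM/WRITE_MGM
+ * read or update the entry at that index.  Attaching records the QP
+ * number in a free entry; detaching clears the entry again.
+ */
+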
+/**
+ * Attach to multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ * @ret rc Return status code
+ */
+static int arbel_mcast_attach ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct ib_gid *gid ) {
+ struct arbel *arbel = ibdev->dev_priv;
+ struct arbelprm_mgm_hash hash;
+ struct arbelprm_mgm_entry mgm;
+ unsigned int index;
+ int rc;
+
+ /* Generate hash table index */
+ if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
+ arbel, strerror ( rc ) );
+ return rc;
+ }
+ index = MLX_GET ( &hash, hash );
+
+ /* Check for existing hash table entry */
+ if ( ( rc = arbel_cmd_read_mgm ( arbel, index, &mgm ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p could not read MGM %#x: %s\n",
+ arbel, index, strerror ( rc ) );
+ return rc;
+ }
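+ /* mgmqp_0.qi is set when the first QP slot of this entry is in use */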
+ if ( MLX_GET ( &mgm, mgmqp_0.qi ) != 0 ) {
+ /* FIXME: this implementation allows only a single QP
+ * per multicast group, and doesn't handle hash
+ * collisions. Sufficient for IPoIB but may need to
+ * be extended in future.
+ */
+ DBGC ( arbel, "Arbel %p MGID index %#x already in use\n",
+ arbel, index );
+ return -EBUSY;
+ }
+
+ /* Update hash table entry */
+ MLX_FILL_2 ( &mgm, 8,
+ mgmqp_0.qpn_i, qp->qpn,
+ mgmqp_0.qi, 1 );
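+ /* The multicast GID occupies dwords 4-7 of the MGM entry */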
+ memcpy ( &mgm.u.dwords[4], gid, sizeof ( *gid ) );
+ if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
+ arbel, index, strerror ( rc ) );
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * Detach from multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ */
+static void arbel_mcast_detach ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp __unused,
+ struct ib_gid *gid ) {
+ struct arbel *arbel = ibdev->dev_priv;
+ struct arbelprm_mgm_hash hash;
+ struct arbelprm_mgm_entry mgm;
+ unsigned int index;
+ int rc;
+
+ /* Generate hash table index */
+ if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
+ arbel, strerror ( rc ) );
+ return;
+ }
+ index = MLX_GET ( &hash, hash );
+
+ /* Clear hash table entry */
+ memset ( &mgm, 0, sizeof ( mgm ) );
+ if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
+ arbel, index, strerror ( rc ) );
+ return;
+ }
+}
+
/** Arbel Infiniband operations */
static struct ib_device_operations arbel_ib_operations = {
.create_cq = arbel_create_cq,
.post_send = arbel_post_send,
.post_recv = arbel_post_recv,
.poll_cq = arbel_poll_cq,
+ .mcast_attach = arbel_mcast_attach,
+ .mcast_detach = arbel_mcast_detach,
};
/**
return -EIO;
}
mlx->own_qp->owner_priv = netdev;
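+
+ /* Attach our queue pair to the broadcast multicast group */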
+ struct ib_gid *bcast_gid = ( struct ib_gid * ) &ib_data.bcast_gid;
+ if ( ( rc = ib_mcast_attach ( ibdev, mlx->own_qp,
+ bcast_gid ) ) != 0 ) {
+ DBG ( "Could not attach to broadcast GID: %s\n",
+ strerror ( rc ) );
+ return rc;
+ }
+
mac = ( ( struct ib_mac * ) netdev->ll_addr );
mac->qpn = htonl ( mlx->own_qp->qpn );
struct ib_completion_queue *cq,
ib_completer_t complete_send,
ib_completer_t complete_recv );
+ /**
+ * Attach to multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ * @ret rc Return status code
+ */
+ int ( * mcast_attach ) ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct ib_gid *gid );
+ /**
+ * Detach from multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ */
+ void ( * mcast_detach ) ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct ib_gid *gid );
};
/** An Infiniband device */
extern struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,
unsigned long qpn, int is_send );
+/**
+ * Attach to multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ * @ret rc Return status code
+ */
+static inline __attribute__ (( always_inline )) int
+ib_mcast_attach ( struct ib_device *ibdev, struct ib_queue_pair *qp,
+ struct ib_gid *gid ) {
+ return ibdev->op->mcast_attach ( ibdev, qp, gid );
+}
+
+/**
+ * Detach from multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ */
+static inline __attribute__ (( always_inline )) void
+ib_mcast_detach ( struct ib_device *ibdev, struct ib_queue_pair *qp,
+ struct ib_gid *gid ) {
+ ibdev->op->mcast_detach ( ibdev, qp, gid );
+}
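+
+/* A consumer typically attaches a queue pair to the broadcast GID with
+ * ib_mcast_attach() when bringing the link up (as the driver code above
+ * does) and releases it again with ib_mcast_detach() once membership is
+ * no longer needed.
+ */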
extern struct ll_protocol infiniband_protocol;