*
*/
+/** Arbel device limits */
+struct arbel_dev_limits {
+ /** Number of reserved CQs */
+ unsigned long reserved_cqs;
+};
+
/** Alignment of Arbel send work queue entries */
#define ARBEL_SEND_WQE_ALIGN 128
struct arbel_recv_work_queue recv;
};
+/** Maximum number of allocatable completion queues
+ *
+ * This is a policy decision, not a device limit.
+ */
+#define ARBEL_MAX_CQS 8
+
/** An Arbel completion queue */
struct arbel_completion_queue {
/** Infiniband completion queue */
union arbelprm_completion_entry *cqe;
};
+/** An Arbel resource bitmask */
+typedef uint32_t arbel_bitmask_t;
+
+/** Size of an Arbel resource bitmask */
+#define ARBEL_BITMASK_SIZE(max_entries) \
+ ( ( (max_entries) + ( 8 * sizeof ( arbel_bitmask_t ) ) - 1 ) / \
+ ( 8 * sizeof ( arbel_bitmask_t ) ) )
+
/** An Arbel device */
struct arbel {
/** Configuration registers */
* Used to get unrestricted memory access.
*/
unsigned long reserved_lkey;
+
+ /** Completion queue in-use bitmask */
+ arbel_bitmask_t cq_inuse[ ARBEL_BITMASK_SIZE ( ARBEL_MAX_CQS ) ];
+ /** Device limits */
+ struct arbel_dev_limits limits;
};
/*
#define ARBEL_HCR_OUT_CMD( _opcode, _out_mbox, _out_len ) \
ARBEL_HCR_CMD ( _opcode, 0, 0, _out_mbox, _out_len )
+/*
+ * Doorbell record allocation
+ *
+ * The doorbell record map looks like:
+ *
+ * ARBEL_MAX_CQS * Arm completion queue doorbell
+ * ARBEL_MAX_QPS * Send work request doorbell
+ * Group separator
+ * ...(empty space)...
+ * ARBEL_MAX_QPS * Receive work request doorbell
+ * ARBEL_MAX_CQS * Completion queue consumer counter update doorbell
+ */
+
+#define ARBEL_MAX_DOORBELL_RECORDS 512
+#define ARBEL_GROUP_SEPARATOR_DOORBELL ( ARBEL_MAX_CQS + ARBEL_MAX_QPS )
+
+/**
+ * Get arm completion queue doorbell index
+ *
+ * @v cqn_offset Completion queue number offset
+ * @ret doorbell_idx Doorbell index
+ */
+static inline unsigned int
+arbel_arm_cq_doorbell_idx ( unsigned int cqn_offset ) {
+ return cqn_offset;
+}
+
+/**
+ * Get send work request doorbell index
+ *
+ * @v qpn_offset Queue pair number offset
+ * @ret doorbell_idx Doorbell index
+ */
+static inline unsigned int
+arbel_send_doorbell_idx ( unsigned int qpn_offset ) {
+ return ( ARBEL_MAX_CQS + qpn_offset );
+}
+
+/**
+ * Get receive work request doorbell index
+ *
+ * @v qpn_offset Queue pair number offset
+ * @ret doorbell_idx Doorbell index
+ */
+static inline unsigned int
+arbel_recv_doorbell_idx ( unsigned int qpn_offset ) {
+ return ( ARBEL_MAX_DOORBELL_RECORDS - ARBEL_MAX_CQS - qpn_offset - 1 );
+}
+
+/**
+ * Get commpletion queue consumer counter doorbell index
+ *
+ * @v cqn_offset Completion queue number offset
+ * @ret doorbell_idx Doorbell index
+ */
+static inline unsigned int
+arbel_cq_ci_doorbell_idx ( unsigned int cqn_offset ) {
+ return ( ARBEL_MAX_DOORBELL_RECORDS - cqn_offset - 1 );
+}
+
#endif /* _ARBEL_H */
#include <errno.h>
#include <gpxe/pci.h>
+#include <gpxe/malloc.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/infiniband.h>
.irq = mlx_irq,
};
+
+
+
+/**
+ * Allocate queue number
+ *
+ * @v q_inuse Queue usage bitmask
+ * @v max_inuse Maximum number of in-use queues
+ * @ret qn_offset Free queue number offset, or negative error
+ */
+static int arbel_alloc_qn_offset ( arbel_bitmask_t *q_inuse,
+ unsigned int max_inuse ) {
+ unsigned int qn_offset = 0;
+ arbel_bitmask_t mask = 1;
+
+ while ( qn_offset < max_inuse ) {
+ if ( ( mask & *q_inuse ) == 0 ) {
+ *q_inuse |= mask;
+ return qn_offset;
+ }
+ qn_offset++;
+ mask <<= 1;
+ if ( ! mask ) {
+ mask = 1;
+ q_inuse++;
+ }
+ }
+ return -ENFILE;
+}
+
+/**
+ * Free queue number
+ *
+ * @v q_inuse Queue usage bitmask
+ * @v qn_offset Queue number offset
+ */
+static void arbel_free_qn_offset ( arbel_bitmask_t *q_inuse, int qn_offset ) {
+ arbel_bitmask_t mask;
+
+ mask = ( 1 << ( qn_offset % ( 8 * sizeof ( mask ) ) ) );
+ q_inuse += ( qn_offset / ( 8 * sizeof ( mask ) ) );
+ *q_inuse &= ~mask;
+}
+
/***************************************************************************
*
* HCA commands
* @v ibdev Infiniband device
* @v
*/
-static int arbel_create_cq ( struct ib_device *ibdev,
+static int arbel_create_cq ( struct ib_device *ibdev, unsigned int num_cqes,
struct ib_completion_queue **new_cq ) {
struct arbel *arbel = ibdev->priv;
+ struct arbel_completion_queue *arbel_cq;
struct arbelprm_completion_queue_context cqctx;
- struct ib_completion_queue *cq;
+ int cqn_offset;
+ unsigned int cqn;
+ size_t cqe_size;
+ unsigned int i;
+ int rc;
- cq = zalloc ( sizeof ( *cq ) );
- if ( ! cq )
- return -ENOMEM;
+ /* Find a free completion queue number */
+ cqn_offset = arbel_alloc_qn_offset ( arbel->cq_inuse, ARBEL_MAX_CQS );
+ if ( cqn_offset < 0 ) {
+ rc = cqn_offset;
+ goto err_cqn_offset;
+ }
+ cqn = ( arbel->limits.reserved_cqs + cqn_offset );
-
+ /* Allocate control structures */
+ arbel_cq = zalloc ( sizeof ( *arbel_cq ) );
+ if ( ! arbel_cq ) {
+ rc = -ENOMEM;
+ goto err_arbel_cq;
+ }
+ arbel_cq->cq.cqn = cqn;
+ arbel_cq->cq.num_cqes = num_cqes;
+ INIT_LIST_HEAD ( &arbel_cq->cq.work_queues );
+ arbel_cq->doorbell_idx = arbel_cq_ci_doorbell_idx ( cqn_offset );
+
+ /* Allocate completion queue itself */
+ cqe_size = ( num_cqes * sizeof ( arbel_cq->cqe[0] ) );
+ arbel_cq->cqe = malloc_dma ( cqe_size, sizeof ( arbel_cq->cqe[0] ) );
+ if ( ! arbel_cq->cqe ) {
+ rc = -ENOMEM;
+ goto err_cqe;
+ }
+ memset ( arbel_cq->cqe, 0, cqe_size );
+ for ( i = 0 ; i < num_cqes ; i++ ) {
+ MLX_FILL_1 ( &arbel_cq->cqe[i].normal, 7, owner, 1 );
+ }
+ barrier();
+
+ /* Initialise doorbell records */
+ // ...
+ /* Hand queue over to hardware */
memset ( &cqctx, 0, sizeof ( cqctx ) );
-
+ MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
+ MLX_FILL_1 ( &cqctx, 2, start_address_l,
+ virt_to_bus ( arbel_cq->cqe ) );
+ /// ....
+
+ if ( ( rc = arbel_cmd_sw2hw_cq ( arbel, cqn, &cqctx ) ) != 0 ) {
+ // ...
+ }
- return arbel_cmd_sw2hw_cq ( arbel, 0, &cqctx );
+
+ // completion queue number
+ // doorbell index
+
+ *new_cq = &arbel_cq->cq;
+
+
+ return 0;
+
+ err_cqe:
+ free ( arbel_cq );
+ err_arbel_cq:
+ arbel_free_qn_offset ( arbel->cq_inuse, cqn_offset );
+ err_cqn_offset:
+ return rc;
}
static int arbel_probe ( struct pci_device *pci,
const struct pci_device_id *id __unused ) {
struct net_device *netdev;
+ struct arbelprm_query_dev_lim dev_lim;
+ struct arbel *arbel = &static_arbel;
struct mlx_nic *mlx;
struct ib_mac *mac;
udqp_t qph;
list_add ( &static_ipoib_qp.qp.recv.list,
&static_ipoib_recv_cq.cq.work_queues );
- struct arbelprm_query_dev_lim dev_lim;
- memset ( &dev_lim, 0xaa, sizeof ( dev_lim ) );
- if ( ( rc = arbel_cmd_query_dev_lim ( &static_arbel,
- &dev_lim ) ) != 0 ) {
- DBG ( "QUERY_DEV_LIM failed: %s\n", strerror ( rc ) );
+ /* Get device limits */
+ if ( ( rc = arbel_cmd_query_dev_lim ( arbel, &dev_lim ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
+ arbel, strerror ( rc ) );
+ goto err_query_dev_lim;
}
+ arbel->limits.reserved_cqs =
+ ( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
DBG ( "Device limits:\n ");
DBG_HD ( &dev_lim, sizeof ( dev_lim ) );
return 0;
+ err_query_dev_lim:
err_register_netdev:
err_ipoib_init:
ib_driver_close ( 0 );
/* uar context indexes */
enum {
- MADS_RCV_CQ_ARM_DB_IDX,
MADS_SND_CQ_ARM_DB_IDX,
- IPOIB_RCV_CQ_ARM_DB_IDX,
+ MADS_RCV_CQ_ARM_DB_IDX,
IPOIB_SND_CQ_ARM_DB_IDX,
+ IPOIB_RCV_CQ_ARM_DB_IDX,
MADS_SND_QP_DB_IDX,
IPOIB_SND_QP_DB_IDX,
GROUP_SEP_IDX,
unmapped doorbell records
-------------------------- */
END_UNMAPPED_DB_IDX = 505,
- MADS_RCV_QP_DB_IDX = 506,
- IPOIB_RCV_QP_DB_IDX = 507,
- MADS_RCV_CQ_CI_DB_IDX = 508,
- MADS_SND_CQ_CI_DB_IDX = 509,
- IPOIB_RCV_CQ_CI_DB_IDX = 510,
- IPOIB_SND_CQ_CI_DB_IDX = 511
+ IPOIB_RCV_QP_DB_IDX = 506,
+ MADS_RCV_QP_DB_IDX = 507,
+ IPOIB_RCV_CQ_CI_DB_IDX = 508,
+ IPOIB_SND_CQ_CI_DB_IDX = 509,
+ MADS_RCV_CQ_CI_DB_IDX = 510,
+ MADS_SND_CQ_CI_DB_IDX = 511,
};
/* uar resources types */