#ifndef _GPXE_INFINIBAND_H
#define _GPXE_INFINIBAND_H

/** @file
 *
 * Infiniband protocol
 *
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <gpxe/refcnt.h>
#include <gpxe/device.h>
#include <gpxe/ib_packet.h>
#include <gpxe/ib_mad.h>

/** Subnet management QPN */
#define IB_QPN_SMA 0

/** Subnet management queue key */
#define IB_QKEY_SMA 0x80010000UL

/** General management QPN */
#define IB_QPN_GMA 1

/** General management queue key */
#define IB_QKEY_GMA 0x80010000UL

/** Broadcast QPN */
#define IB_QPN_BROADCAST 0xffffffUL

/** Default Infiniband partition key */
#define IB_PKEY_NONE 0xffff

/**
 * Maximum payload size
 *
 * This is currently hard-coded in various places (drivers, subnet
 * management agent, etc.) to 2048.
 */
#define IB_MAX_PAYLOAD_SIZE 2048

struct ib_device;
struct ib_queue_pair;
struct ib_address_vector;
struct ib_completion_queue;

/** An Infiniband Work Queue */
struct ib_work_queue {
        /** Containing queue pair */
        struct ib_queue_pair *qp;
        /** "Is a send queue" flag */
        int is_send;
        /** Associated completion queue */
        struct ib_completion_queue *cq;
        /** List of work queues on this completion queue */
        struct list_head list;
        /** Number of work queue entries */
        unsigned int num_wqes;
        /** Number of occupied work queue entries */
        unsigned int fill;
        /** Next work queue entry index
         *
         * This is the index of the next entry to be filled (i.e. the
         * first empty entry).  This value is not bounded by num_wqes;
         * users must logical-AND with (num_wqes-1) to generate an
         * array index.
         */
        unsigned long next_idx;
        /** I/O buffers assigned to work queue */
        struct io_buffer **iobufs;
        /** Driver private data */
        void *drv_priv;
};

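/* The next_idx field above is a free-running counter.  An illustrative
 * sketch (not part of this API) of how a driver might derive the ring
 * index from it when filling a work queue entry; fill_hw_wqe() is a
 * hypothetical hardware-specific helper:
 *
 *      static void example_fill ( struct ib_work_queue *wq,
 *                                 struct io_buffer *iobuf ) {
 *              unsigned int idx = ( wq->next_idx & ( wq->num_wqes - 1 ) );
 *
 *              wq->iobufs[idx] = iobuf;
 *              fill_hw_wqe ( wq, idx, iobuf );
 *              wq->next_idx++;
 *      }
 */
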
/** An Infiniband multicast GID */
struct ib_multicast_gid {
        /** List of multicast GIDs on this QP */
        struct list_head list;
        /** Multicast GID */
        struct ib_gid gid;
};

/** An Infiniband queue pair type */
enum ib_queue_pair_type {
        IB_QPT_SMA,
        IB_QPT_GMA,
        IB_QPT_UD,
        IB_QPT_RC,
};

/** An Infiniband Queue Pair */
struct ib_queue_pair {
        /** Containing Infiniband device */
        struct ib_device *ibdev;
        /** List of queue pairs on this Infiniband device */
        struct list_head list;
        /** Queue pair number */
        unsigned long qpn;
        /** Externally-visible queue pair number
         *
         * This may differ from the real queue pair number (e.g. when
         * the HCA cannot use the management QPNs 0 and 1 as hardware
         * QPNs and needs to remap them).
         */
        unsigned long ext_qpn;
        /** Queue pair type */
        enum ib_queue_pair_type type;
        /** Queue key */
        unsigned long qkey;
        /** Send queue */
        struct ib_work_queue send;
        /** Receive queue */
        struct ib_work_queue recv;
        /** List of multicast GIDs */
        struct list_head mgids;
        /** Driver private data */
        void *drv_priv;
        /** Queue owner private data */
        void *owner_priv;
};

/** Infiniband queue pair modification flags */
enum ib_queue_pair_mods {
        IB_MODIFY_QKEY = 0x0001,
};

/** An Infiniband Address Vector */
struct ib_address_vector {
        /** Queue Pair Number */
        unsigned long qpn;
        /** Queue key
         *
         * Not specified for received packets.
         */
        unsigned long qkey;
        /** Local ID */
        unsigned int lid;
        /** Rate
         *
         * Not specified for received packets.
         */
        unsigned int rate;
        /** Service level */
        unsigned int sl;
        /** GID is present */
        unsigned int gid_present;
        /** GID, if present */
        struct ib_gid gid;
};

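/* Illustrative sketch (not part of this header): filling an address
 * vector for an unreliable datagram send.  dest_qpn, dest_lid and
 * dest_gid are hypothetical values known to the caller.
 *
 *      struct ib_address_vector av;
 *
 *      memset ( &av, 0, sizeof ( av ) );
 *      av.qpn = dest_qpn;
 *      av.qkey = IB_QKEY_GMA;
 *      av.lid = dest_lid;
 *      av.gid_present = 1;
 *      memcpy ( &av.gid, &dest_gid, sizeof ( av.gid ) );
 */
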
/** Infiniband transmission rates */
enum ib_rate {
        IB_RATE_2_5 = 2,
        IB_RATE_10 = 3,
        IB_RATE_30 = 4,
        IB_RATE_5 = 5,
        IB_RATE_20 = 6,
        IB_RATE_40 = 7,
        IB_RATE_60 = 8,
        IB_RATE_80 = 9,
        IB_RATE_120 = 10,
};

/** Infiniband completion queue operations */
struct ib_completion_queue_operations {
        /**
         * Complete Send WQE
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @v iobuf             I/O buffer
         * @v rc                Completion status code
         */
        void ( * complete_send ) ( struct ib_device *ibdev,
                                   struct ib_queue_pair *qp,
                                   struct io_buffer *iobuf, int rc );
        /**
         * Complete Receive WQE
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @v av                Address vector, or NULL
         * @v iobuf             I/O buffer
         * @v rc                Completion status code
         */
        void ( * complete_recv ) ( struct ib_device *ibdev,
                                   struct ib_queue_pair *qp,
                                   struct ib_address_vector *av,
                                   struct io_buffer *iobuf, int rc );
};

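/* Illustrative sketch (not part of this header): a queue owner defines
 * its completion handlers and passes them to ib_create_cq() (declared
 * below).  The "my_*" names are hypothetical.
 *
 *      static void my_complete_send ( struct ib_device *ibdev,
 *                                     struct ib_queue_pair *qp,
 *                                     struct io_buffer *iobuf, int rc ) {
 *              free_iob ( iobuf );
 *      }
 *
 *      static void my_complete_recv ( struct ib_device *ibdev,
 *                                     struct ib_queue_pair *qp,
 *                                     struct ib_address_vector *av,
 *                                     struct io_buffer *iobuf, int rc ) {
 *              ... process or discard iobuf, then free_iob ( iobuf ) ...
 *      }
 *
 *      static struct ib_completion_queue_operations my_cq_op = {
 *              .complete_send = my_complete_send,
 *              .complete_recv = my_complete_recv,
 *      };
 *
 *      cq = ib_create_cq ( ibdev, 32, &my_cq_op );
 */
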
/** An Infiniband Completion Queue */
struct ib_completion_queue {
        /** Containing Infiniband device */
        struct ib_device *ibdev;
        /** List of completion queues on this Infiniband device */
        struct list_head list;
        /** Completion queue number */
        unsigned long cqn;
        /** Number of completion queue entries */
        unsigned int num_cqes;
        /** Next completion queue entry index
         *
         * This is the index of the next entry to be filled (i.e. the
         * first empty entry).  This value is not bounded by num_cqes;
         * users must logical-AND with (num_cqes-1) to generate an
         * array index.
         */
        unsigned long next_idx;
        /** List of work queues completing to this queue */
        struct list_head work_queues;
        /** Completion queue operations */
        struct ib_completion_queue_operations *op;
        /** Driver private data */
        void *drv_priv;
};

/**
 * Infiniband device operations
 *
 * These represent a subset of the Infiniband Verbs.
 */
struct ib_device_operations {
        /** Create completion queue
         *
         * @v ibdev             Infiniband device
         * @v cq                Completion queue
         * @ret rc              Return status code
         */
        int ( * create_cq ) ( struct ib_device *ibdev,
                              struct ib_completion_queue *cq );
        /** Destroy completion queue
         *
         * @v ibdev             Infiniband device
         * @v cq                Completion queue
         */
        void ( * destroy_cq ) ( struct ib_device *ibdev,
                                struct ib_completion_queue *cq );
        /** Create queue pair
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @ret rc              Return status code
         */
        int ( * create_qp ) ( struct ib_device *ibdev,
                              struct ib_queue_pair *qp );
        /** Modify queue pair
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @v mod_list          Modification list
         * @ret rc              Return status code
         */
        int ( * modify_qp ) ( struct ib_device *ibdev,
                              struct ib_queue_pair *qp,
                              unsigned long mod_list );
        /** Destroy queue pair
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         */
        void ( * destroy_qp ) ( struct ib_device *ibdev,
                                struct ib_queue_pair *qp );
        /** Post send work queue entry
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @v av                Address vector
         * @v iobuf             I/O buffer
         * @ret rc              Return status code
         *
         * If this method returns success, the I/O buffer remains
         * owned by the queue pair.  If this method returns failure,
         * the I/O buffer is immediately released; the failure is
         * interpreted as "failure to enqueue buffer".
         */
        int ( * post_send ) ( struct ib_device *ibdev,
                              struct ib_queue_pair *qp,
                              struct ib_address_vector *av,
                              struct io_buffer *iobuf );
        /** Post receive work queue entry
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @v iobuf             I/O buffer
         * @ret rc              Return status code
         *
         * If this method returns success, the I/O buffer remains
         * owned by the queue pair.  If this method returns failure,
         * the I/O buffer is immediately released; the failure is
         * interpreted as "failure to enqueue buffer".
         */
        int ( * post_recv ) ( struct ib_device *ibdev,
                              struct ib_queue_pair *qp,
                              struct io_buffer *iobuf );
        /** Poll completion queue
         *
         * @v ibdev             Infiniband device
         * @v cq                Completion queue
         *
         * The relevant completion handler (specified at completion
         * queue creation time) takes ownership of the I/O buffer.
         */
        void ( * poll_cq ) ( struct ib_device *ibdev,
                             struct ib_completion_queue *cq );
        /** Poll event queue
         *
         * @v ibdev             Infiniband device
         */
        void ( * poll_eq ) ( struct ib_device *ibdev );
        /** Open port
         *
         * @v ibdev             Infiniband device
         * @ret rc              Return status code
         */
        int ( * open ) ( struct ib_device *ibdev );
        /** Close port
         *
         * @v ibdev             Infiniband device
         */
        void ( * close ) ( struct ib_device *ibdev );
        /** Attach to multicast group
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @v gid               Multicast GID
         * @ret rc              Return status code
         */
        int ( * mcast_attach ) ( struct ib_device *ibdev,
                                 struct ib_queue_pair *qp,
                                 struct ib_gid *gid );
        /** Detach from multicast group
         *
         * @v ibdev             Infiniband device
         * @v qp                Queue pair
         * @v gid               Multicast GID
         */
        void ( * mcast_detach ) ( struct ib_device *ibdev,
                                  struct ib_queue_pair *qp,
                                  struct ib_gid *gid );
        /** Set port information
         *
         * @v ibdev             Infiniband device
         * @v port_info         New port information
         *
         * This method is required only by adapters that do not have
         * an embedded SMA.
         */
        int ( * set_port_info ) ( struct ib_device *ibdev,
                                  const struct ib_port_info *port_info );
};

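/* Illustrative sketch (not part of this header): a driver implements
 * the methods above and publishes them through a static operations
 * table; the mydev_* functions are hypothetical.
 *
 *      static struct ib_device_operations mydev_ib_operations = {
 *              .create_cq      = mydev_create_cq,
 *              .destroy_cq     = mydev_destroy_cq,
 *              .create_qp      = mydev_create_qp,
 *              .modify_qp      = mydev_modify_qp,
 *              .destroy_qp     = mydev_destroy_qp,
 *              .post_send      = mydev_post_send,
 *              .post_recv      = mydev_post_recv,
 *              .poll_cq        = mydev_poll_cq,
 *              .poll_eq        = mydev_poll_eq,
 *              .open           = mydev_open,
 *              .close          = mydev_close,
 *              .mcast_attach   = mydev_mcast_attach,
 *              .mcast_detach   = mydev_mcast_detach,
 *      };
 *
 * The driver would typically point ibdev->op at this table before
 * calling register_ibdev() (declared below).
 */
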
/** An Infiniband device */
struct ib_device {
        /** Reference counter */
        struct refcnt refcnt;
        /** List of Infiniband devices */
        struct list_head list;
        /** Underlying device */
        struct device *dev;
        /** List of completion queues */
        struct list_head cqs;
        /** List of queue pairs */
        struct list_head qps;
        /** Infiniband operations */
        struct ib_device_operations *op;
        /** Port number */
        unsigned int port;
        /** Port open request counter */
        unsigned int open_count;

        /** Port state */
        uint8_t port_state;
        /** Link width */
        uint8_t link_width;
        /** Link speed */
        uint8_t link_speed;
        /** Port GID */
        struct ib_gid gid;
        /** Port LID */
        unsigned int lid;
        /** Subnet manager LID */
        unsigned int sm_lid;
        /** Subnet manager SL */
        unsigned int sm_sl;
        /** Partition key */
        unsigned int pkey;

        /** Outbound packet sequence number */
        uint32_t psn;

        /** Subnet management agent */
        /** General management agent */

        /** Driver private data */
        void *drv_priv;
        /** Owner private data */
        void *owner_priv;
};

extern struct ib_completion_queue *
ib_create_cq ( struct ib_device *ibdev, unsigned int num_cqes,
               struct ib_completion_queue_operations *op );
extern void ib_destroy_cq ( struct ib_device *ibdev,
                            struct ib_completion_queue *cq );
extern void ib_poll_cq ( struct ib_device *ibdev,
                         struct ib_completion_queue *cq );
extern struct ib_queue_pair *
ib_create_qp ( struct ib_device *ibdev, enum ib_queue_pair_type type,
               unsigned int num_send_wqes, struct ib_completion_queue *send_cq,
               unsigned int num_recv_wqes, struct ib_completion_queue *recv_cq,
               unsigned long qkey );
extern int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp,
                          unsigned long mod_list, unsigned long qkey );
extern void ib_destroy_qp ( struct ib_device *ibdev,
                            struct ib_queue_pair *qp );
extern struct ib_queue_pair * ib_find_qp_qpn ( struct ib_device *ibdev,
                                               unsigned long qpn );
extern struct ib_queue_pair * ib_find_qp_mgid ( struct ib_device *ibdev,
                                                struct ib_gid *gid );
extern struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,
                                           unsigned long qpn, int is_send );
extern int ib_post_send ( struct ib_device *ibdev, struct ib_queue_pair *qp,
                          struct ib_address_vector *av,
                          struct io_buffer *iobuf );
extern int ib_post_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp,
                          struct io_buffer *iobuf );
extern void ib_complete_send ( struct ib_device *ibdev,
                               struct ib_queue_pair *qp,
                               struct io_buffer *iobuf, int rc );
extern void ib_complete_recv ( struct ib_device *ibdev,
                               struct ib_queue_pair *qp,
                               struct ib_address_vector *av,
                               struct io_buffer *iobuf, int rc );
extern void ib_refill_recv ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp );
extern int ib_open ( struct ib_device *ibdev );
extern void ib_close ( struct ib_device *ibdev );
extern int ib_mcast_attach ( struct ib_device *ibdev, struct ib_queue_pair *qp,
                             struct ib_gid *gid );
extern void ib_mcast_detach ( struct ib_device *ibdev,
                              struct ib_queue_pair *qp, struct ib_gid *gid );
extern int ib_get_hca_info ( struct ib_device *ibdev,
                             struct ib_gid_half *hca_guid );
extern int ib_set_port_info ( struct ib_device *ibdev,
                              const struct ib_port_info *port_info );
extern struct ib_device * alloc_ibdev ( size_t priv_size );
extern int register_ibdev ( struct ib_device *ibdev );
extern void unregister_ibdev ( struct ib_device *ibdev );
extern void ib_link_state_changed ( struct ib_device *ibdev );
extern void ib_poll_eq ( struct ib_device *ibdev );
extern struct list_head ib_devices;

/** Iterate over all Infiniband devices */
#define for_each_ibdev( ibdev ) \
        list_for_each_entry ( (ibdev), &ib_devices, list )

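/* Illustrative sketch (not part of this header) of a typical queue
 * owner call sequence.  my_cq_op is the hypothetical completion queue
 * operations table from the earlier example; my_qkey, av and iobuf are
 * assumed to have been prepared by the caller, and error handling is
 * omitted.
 *
 *      struct ib_completion_queue *cq;
 *      struct ib_queue_pair *qp;
 *
 *      ib_open ( ibdev );
 *      cq = ib_create_cq ( ibdev, 32, &my_cq_op );
 *      qp = ib_create_qp ( ibdev, IB_QPT_UD, 8, cq, 8, cq, my_qkey );
 *      ib_refill_recv ( ibdev, qp );
 *      ib_post_send ( ibdev, qp, &av, iobuf );
 *      ib_poll_cq ( ibdev, cq );
 */
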
/**
 * Check link state
 *
 * @v ibdev             Infiniband device
 * @ret link_up         Link is up
 */
static inline __always_inline int
ib_link_ok ( struct ib_device *ibdev ) {
        return ( ibdev->port_state == IB_PORT_STATE_ACTIVE );
}

/**
 * Get reference to Infiniband device
 *
 * @v ibdev             Infiniband device
 * @ret ibdev           Infiniband device
 */
static inline __always_inline struct ib_device *
ibdev_get ( struct ib_device *ibdev ) {
        ref_get ( &ibdev->refcnt );
        return ibdev;
}

/**
 * Drop reference to Infiniband device
 *
 * @v ibdev             Infiniband device
 */
static inline __always_inline void
ibdev_put ( struct ib_device *ibdev ) {
        ref_put ( &ibdev->refcnt );
}

/**
 * Set Infiniband work queue driver-private data
 *
 * @v wq                Work queue
 * @v priv              Private data
 */
static inline __always_inline void
ib_wq_set_drvdata ( struct ib_work_queue *wq, void *priv ) {
        wq->drv_priv = priv;
}

/**
 * Get Infiniband work queue driver-private data
 *
 * @v wq                Work queue
 * @ret priv            Private data
 */
static inline __always_inline void *
ib_wq_get_drvdata ( struct ib_work_queue *wq ) {
        return wq->drv_priv;
}

/**
 * Set Infiniband queue pair driver-private data
 *
 * @v qp                Queue pair
 * @v priv              Private data
 */
static inline __always_inline void
ib_qp_set_drvdata ( struct ib_queue_pair *qp, void *priv ) {
        qp->drv_priv = priv;
}

/**
 * Get Infiniband queue pair driver-private data
 *
 * @v qp                Queue pair
 * @ret priv            Private data
 */
static inline __always_inline void *
ib_qp_get_drvdata ( struct ib_queue_pair *qp ) {
        return qp->drv_priv;
}

/**
 * Set Infiniband queue pair owner-private data
 *
 * @v qp                Queue pair
 * @v priv              Private data
 */
static inline __always_inline void
ib_qp_set_ownerdata ( struct ib_queue_pair *qp, void *priv ) {
        qp->owner_priv = priv;
}

/**
 * Get Infiniband queue pair owner-private data
 *
 * @v qp                Queue pair
 * @ret priv            Private data
 */
static inline __always_inline void *
ib_qp_get_ownerdata ( struct ib_queue_pair *qp ) {
        return qp->owner_priv;
}

/**
 * Set Infiniband completion queue driver-private data
 *
 * @v cq                Completion queue
 * @v priv              Private data
 */
static inline __always_inline void
ib_cq_set_drvdata ( struct ib_completion_queue *cq, void *priv ) {
        cq->drv_priv = priv;
}

/**
 * Get Infiniband completion queue driver-private data
 *
 * @v cq                Completion queue
 * @ret priv            Private data
 */
static inline __always_inline void *
ib_cq_get_drvdata ( struct ib_completion_queue *cq ) {
        return cq->drv_priv;
}

/**
 * Set Infiniband device driver-private data
 *
 * @v ibdev             Infiniband device
 * @v priv              Private data
 */
static inline __always_inline void
ib_set_drvdata ( struct ib_device *ibdev, void *priv ) {
        ibdev->drv_priv = priv;
}

/**
 * Get Infiniband device driver-private data
 *
 * @v ibdev             Infiniband device
 * @ret priv            Private data
 */
static inline __always_inline void *
ib_get_drvdata ( struct ib_device *ibdev ) {
        return ibdev->drv_priv;
}

/**
 * Set Infiniband device owner-private data
 *
 * @v ibdev             Infiniband device
 * @v priv              Private data
 */
static inline __always_inline void
ib_set_ownerdata ( struct ib_device *ibdev, void *priv ) {
        ibdev->owner_priv = priv;
}

/**
 * Get Infiniband device owner-private data
 *
 * @v ibdev             Infiniband device
 * @ret priv            Private data
 */
static inline __always_inline void *
ib_get_ownerdata ( struct ib_device *ibdev ) {
        return ibdev->owner_priv;
}

#endif /* _GPXE_INFINIBAND_H */