#ifndef _GPXE_INFINIBAND_H
#define _GPXE_INFINIBAND_H

#include <stdint.h>
#include <gpxe/device.h>

/** Subnet administrator QPN */
#define IB_SA_QPN 1

/** Broadcast QPN */
#define IB_BROADCAST_QPN 0xffffffUL

/** Subnet administrator queue key */
#define IB_GLOBAL_QKEY 0x80010000UL

/** An Infiniband Global Identifier */
struct ib_gid {
	union {
		uint8_t bytes[16];
		uint16_t words[8];
		uint32_t dwords[4];
	} u;
};

/** An Infiniband Global Route Header */
struct ib_global_route_header {
	/** IP version, traffic class, and flow label
	 *
	 *  4 bits : Version of the GRH
	 *  8 bits : Traffic class
	 * 20 bits : Flow label
	 */
	uint32_t ipver_tclass_flowlabel;
	/** Destination GID */
	struct ib_gid dgid;
} __attribute__ (( packed ));

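/*
 * Example (editorial sketch, not part of the original header): the
 * ipver_tclass_flowlabel field packs the three values described above
 * into a single big-endian word.  The IP version value (6), the helper
 * name and the use of htonl() are illustrative assumptions.
 *
 *	uint32_t grh_ipver_tclass_flowlabel ( unsigned int tclass,
 *					      unsigned int flowlabel ) {
 *		return htonl ( ( 6UL << 28 ) |
 *			       ( ( tclass & 0xffUL ) << 20 ) |
 *			       ( flowlabel & 0xfffffUL ) );
 *	}
 */
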
struct ib_device;
struct ib_queue_pair;
struct ib_completion_queue;

/** An Infiniband Work Queue */
struct ib_work_queue {
	/** Containing queue pair */
	struct ib_queue_pair *qp;
	/** "Is a send queue" flag */
	int is_send;
	/** Associated completion queue */
	struct ib_completion_queue *cq;
	/** List of work queues on this completion queue */
	struct list_head list;
	/** Number of work queue entries */
	unsigned int num_wqes;
	/** Next work queue entry index
	 *
	 * This is the index of the next entry to be filled (i.e. the
	 * first empty entry).  This value is not bounded by num_wqes;
	 * users must logical-AND with (num_wqes-1) to generate an
	 * array index.
	 */
	unsigned long next_idx;
	/** I/O buffers assigned to work queue */
	struct io_buffer **iobufs;
	/** Device private data */
	void *dev_priv;
};

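/*
 * Example (editorial sketch): because next_idx is free-running, callers
 * derive an array index by masking with ( num_wqes - 1 ), which assumes
 * num_wqes is a power of two.  A driver filling the next send entry
 * might do something like:
 *
 *	struct ib_work_queue *wq = &qp->send;
 *	unsigned int idx;
 *
 *	idx = ( wq->next_idx & ( wq->num_wqes - 1 ) );
 *	wq->iobufs[idx] = iobuf;
 *	wq->next_idx++;
 */
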
/** An Infiniband Queue Pair */
struct ib_queue_pair {
	/** Queue Pair Number */
	unsigned long qpn;
	/** Send queue */
	struct ib_work_queue send;
	/** Receive queue */
	struct ib_work_queue recv;
	/** Device private data */
	void *dev_priv;
	/** Queue owner private data */
	void *owner_priv;
};

/** An Infiniband Completion Queue */
struct ib_completion_queue {
	/** Completion queue number */
	unsigned long cqn;
	/** Number of completion queue entries */
	unsigned int num_cqes;
	/** Next completion queue entry index
	 *
	 * This is the index of the next entry to be filled (i.e. the
	 * first empty entry).  This value is not bounded by num_cqes;
	 * users must logical-AND with (num_cqes-1) to generate an
	 * array index.
	 */
	unsigned long next_idx;
	/** List of work queues completing to this queue */
	struct list_head work_queues;
	/** Device private data */
	void *dev_priv;
};

/** An Infiniband completion */
struct ib_completion {
	/** Syndrome
	 *
	 * If non-zero, then the completion is in error.
	 */
	unsigned int syndrome;
	/** Length */
	size_t len;
};

/** An Infiniband completion handler
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v completion	Completion
 * @v iobuf		I/O buffer
 */
typedef void ( * ib_completer_t ) ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp,
				    struct ib_completion *completion,
				    struct io_buffer *iobuf );

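/*
 * Example (editorial sketch): a receive completion handler matching
 * ib_completer_t.  The handler takes ownership of the I/O buffer; the
 * handler name is hypothetical and free_iob() is assumed to come from
 * gpxe/iobuf.h.
 *
 *	static void my_complete_recv ( struct ib_device *ibdev,
 *				       struct ib_queue_pair *qp,
 *				       struct ib_completion *completion,
 *				       struct io_buffer *iobuf ) {
 *		if ( completion->syndrome ) {
 *			// Completion is in error; discard the buffer
 *			free_iob ( iobuf );
 *			return;
 *		}
 *		// ... process the received data, then release the buffer ...
 *		free_iob ( iobuf );
 *	}
 */
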
/** An Infiniband Address Vector */
struct ib_address_vector {
	/** Destination Queue Pair */
	unsigned int dest_qp;
	/** Destination Local ID */
	unsigned int dlid;
	/** GID is present */
	unsigned int gid_present;
	/** GID */
	struct ib_gid gid;
};

/**
 * Infiniband device operations
 *
 * These represent a subset of the Infiniband Verbs.
 */
struct ib_device_operations {
	/** Create completion queue
	 *
	 * @v ibdev		Infiniband device
	 * @v cq		Completion queue
	 * @ret rc		Return status code
	 */
	int ( * create_cq ) ( struct ib_device *ibdev,
			      struct ib_completion_queue *cq );
	/** Destroy completion queue
	 *
	 * @v ibdev		Infiniband device
	 * @v cq		Completion queue
	 */
	void ( * destroy_cq ) ( struct ib_device *ibdev,
				struct ib_completion_queue *cq );
	/** Create queue pair
	 *
	 * @v ibdev		Infiniband device
	 * @v qp		Queue pair
	 * @ret rc		Return status code
	 */
	int ( * create_qp ) ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp );
	/** Destroy queue pair
	 *
	 * @v ibdev		Infiniband device
	 * @v qp		Queue pair
	 */
	void ( * destroy_qp ) ( struct ib_device *ibdev,
				struct ib_queue_pair *qp );
	/** Post send work queue entry
	 *
	 * @v ibdev		Infiniband device
	 * @v qp		Queue pair
	 * @v av		Address vector
	 * @v iobuf		I/O buffer
	 * @ret rc		Return status code
	 *
	 * If this method returns success, the I/O buffer remains
	 * owned by the queue pair.  If this method returns failure,
	 * the I/O buffer is immediately released; the failure is
	 * interpreted as "failure to enqueue buffer".
	 */
	int ( * post_send ) ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *av,
			      struct io_buffer *iobuf );
	/** Post receive work queue entry
	 *
	 * @v ibdev		Infiniband device
	 * @v qp		Queue pair
	 * @v iobuf		I/O buffer
	 * @ret rc		Return status code
	 *
	 * If this method returns success, the I/O buffer remains
	 * owned by the queue pair.  If this method returns failure,
	 * the I/O buffer is immediately released; the failure is
	 * interpreted as "failure to enqueue buffer".
	 */
	int ( * post_recv ) ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct io_buffer *iobuf );
	/** Poll completion queue
	 *
	 * @v ibdev		Infiniband device
	 * @v cq		Completion queue
	 * @v complete_send	Send completion handler
	 * @v complete_recv	Receive completion handler
	 *
	 * The completion handler takes ownership of the I/O buffer.
	 */
	void ( * poll_cq ) ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq,
			     ib_completer_t complete_send,
			     ib_completer_t complete_recv );
	/** Open port
	 *
	 * @v ibdev		Infiniband device
	 * @ret rc		Return status code
	 */
	int ( * open ) ( struct ib_device *ibdev );
	/** Close port
	 *
	 * @v ibdev		Infiniband device
	 */
	void ( * close ) ( struct ib_device *ibdev );
	/** Attach to multicast group
	 *
	 * @v ibdev		Infiniband device
	 * @v qp		Queue pair
	 * @v gid		Multicast GID
	 * @ret rc		Return status code
	 */
	int ( * mcast_attach ) ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp,
				 struct ib_gid *gid );
	/** Detach from multicast group
	 *
	 * @v ibdev		Infiniband device
	 * @v qp		Queue pair
	 * @v gid		Multicast GID
	 */
	void ( * mcast_detach ) ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp,
				  struct ib_gid *gid );
	/** Issue management datagram
	 *
	 * @v ibdev		Infiniband device
	 * @v mad		Management datagram
	 * @v len		Length of management datagram
	 * @ret rc		Return status code
	 */
	int ( * mad ) ( struct ib_device *ibdev, struct ib_mad_hdr *mad,
			size_t len );
};

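/*
 * Example (editorial sketch): a driver publishes its verbs as a static
 * operations table.  The mydev_* functions are hypothetical and would
 * have the prototypes documented above.
 *
 *	static struct ib_device_operations mydev_ib_operations = {
 *		.create_cq	= mydev_create_cq,
 *		.destroy_cq	= mydev_destroy_cq,
 *		.create_qp	= mydev_create_qp,
 *		.destroy_qp	= mydev_destroy_qp,
 *		.post_send	= mydev_post_send,
 *		.post_recv	= mydev_post_recv,
 *		.poll_cq	= mydev_poll_cq,
 *		.open		= mydev_open,
 *		.close		= mydev_close,
 *		.mcast_attach	= mydev_mcast_attach,
 *		.mcast_detach	= mydev_mcast_detach,
 *		.mad		= mydev_mad,
 *	};
 */
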
/** An Infiniband device */
struct ib_device {
	/** Underlying device */
	struct device *dev;
	/** Infiniband operations */
	struct ib_device_operations *op;
	/** Port GID */
	struct ib_gid port_gid;
	/** Subnet manager LID */
	unsigned long sm_lid;
	/** Device private data */
	void *dev_priv;
	/** Owner private data */
	void *owner_priv;
};

extern struct ib_completion_queue * ib_create_cq ( struct ib_device *ibdev,
						   unsigned int num_cqes );
extern void ib_destroy_cq ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq );
extern struct ib_queue_pair *
ib_create_qp ( struct ib_device *ibdev, unsigned int num_send_wqes,
	       struct ib_completion_queue *send_cq, unsigned int num_recv_wqes,
	       struct ib_completion_queue *recv_cq, unsigned long qkey );
extern void ib_destroy_qp ( struct ib_device *ibdev,
			    struct ib_queue_pair *qp );
extern struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,
					   unsigned long qpn, int is_send );
extern struct ib_device * alloc_ibdev ( size_t priv_size );
extern int register_ibdev ( struct ib_device *ibdev );
extern void unregister_ibdev ( struct ib_device *ibdev );
extern void free_ibdev ( struct ib_device *ibdev );

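/*
 * Example (editorial sketch): a typical driver-side lifecycle using the
 * declarations above.  The names struct my_priv and mydev_ib_operations,
 * the queue sizes and the use of IB_GLOBAL_QKEY are illustrative
 * assumptions; error handling is abbreviated.
 *
 *	struct ib_device *ibdev;
 *	struct ib_completion_queue *cq;
 *	struct ib_queue_pair *qp;
 *
 *	ibdev = alloc_ibdev ( sizeof ( struct my_priv ) );
 *	ibdev->op = &mydev_ib_operations;
 *	register_ibdev ( ibdev );
 *
 *	cq = ib_create_cq ( ibdev, 32 );
 *	qp = ib_create_qp ( ibdev, 8, cq, 8, cq, IB_GLOBAL_QKEY );
 *	...
 *	ib_destroy_qp ( ibdev, qp );
 *	ib_destroy_cq ( ibdev, cq );
 *	unregister_ibdev ( ibdev );
 *	free_ibdev ( ibdev );
 */
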
/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
ib_post_send ( struct ib_device *ibdev, struct ib_queue_pair *qp,
	       struct ib_address_vector *av, struct io_buffer *iobuf ) {
	return ibdev->op->post_send ( ibdev, qp, av, iobuf );
}

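/*
 * Example (editorial sketch): posting a datagram send.  The destination
 * values (IB_BROADCAST_QPN, dest_lid) are placeholders; a non-zero
 * return means the buffer was not enqueued, as described for the
 * post_send method above.
 *
 *	struct ib_address_vector av;
 *	int rc;
 *
 *	memset ( &av, 0, sizeof ( av ) );
 *	av.dest_qp = IB_BROADCAST_QPN;
 *	av.dlid = dest_lid;
 *	if ( ( rc = ib_post_send ( ibdev, qp, &av, iobuf ) ) != 0 ) {
 *		// Buffer was not enqueued
 *	}
 */
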
/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
ib_post_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp,
	       struct io_buffer *iobuf ) {
	return ibdev->op->post_recv ( ibdev, qp, iobuf );
}

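/*
 * Example (editorial sketch): keeping the receive queue topped up.  The
 * buffer size and the alloc_iob()/free_iob() calls (from gpxe/iobuf.h)
 * are assumptions made for illustration.
 *
 *	struct io_buffer *iobuf;
 *
 *	iobuf = alloc_iob ( 2048 );
 *	if ( iobuf && ( ib_post_recv ( ibdev, qp, iobuf ) != 0 ) ) {
 *		// Not enqueued; free the buffer ourselves
 *		free_iob ( iobuf );
 *	}
 */
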
/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v complete_send	Send completion handler
 * @v complete_recv	Receive completion handler
 */
static inline __attribute__ (( always_inline )) void
ib_poll_cq ( struct ib_device *ibdev, struct ib_completion_queue *cq,
	     ib_completer_t complete_send, ib_completer_t complete_recv ) {
	ibdev->op->poll_cq ( ibdev, cq, complete_send, complete_recv );
}

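/*
 * Example (editorial sketch): completions are gathered by polling, e.g.
 * from a driver's periodic step routine; my_complete_send and
 * my_complete_recv are hypothetical handlers of type ib_completer_t.
 *
 *	ib_poll_cq ( ibdev, cq, my_complete_send, my_complete_recv );
 */
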
/**
 * Open port
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
ib_open ( struct ib_device *ibdev ) {
	return ibdev->op->open ( ibdev );
}

/**
 * Close port
 *
 * @v ibdev		Infiniband device
 */
static inline __attribute__ (( always_inline )) void
ib_close ( struct ib_device *ibdev ) {
	ibdev->op->close ( ibdev );
}

/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
ib_mcast_attach ( struct ib_device *ibdev, struct ib_queue_pair *qp,
		  struct ib_gid *gid ) {
	return ibdev->op->mcast_attach ( ibdev, qp, gid );
}

/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static inline __attribute__ (( always_inline )) void
ib_mcast_detach ( struct ib_device *ibdev, struct ib_queue_pair *qp,
		  struct ib_gid *gid ) {
	ibdev->op->mcast_detach ( ibdev, qp, gid );
}

/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @v len		Length of management datagram
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
ib_mad ( struct ib_device *ibdev, struct ib_mad_hdr *mad, size_t len ) {
	return ibdev->op->mad ( ibdev, mad, len );
}

/**
 * Set Infiniband owner-private data
 *
 * @v ibdev		Infiniband device
 * @v owner_priv	Owner private data
 */
static inline __attribute__ (( always_inline )) void
ib_set_ownerdata ( struct ib_device *ibdev, void *owner_priv ) {
	ibdev->owner_priv = owner_priv;
}

/**
 * Get Infiniband owner-private data
 *
 * @v ibdev		Infiniband device
 * @ret owner_priv	Owner private data
 */
static inline __attribute__ (( always_inline )) void *
ib_get_ownerdata ( struct ib_device *ibdev ) {
	return ibdev->owner_priv;
}

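/*
 * Example (editorial sketch): an upper-layer protocol driver can stash
 * its own state pointer in the device; "struct my_state" is
 * hypothetical.
 *
 *	struct my_state *state;
 *
 *	ib_set_ownerdata ( ibdev, state );
 *	...
 *	state = ib_get_ownerdata ( ibdev );
 */
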
/*****************************************************************************
 *
 * Management datagrams
 *
 * Portions Copyright (c) 2004 Mellanox Technologies Ltd.  All rights
 * reserved.
 *
 *****************************************************************************/

/* Management base version */
#define IB_MGMT_BASE_VERSION 1

/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81
#define IB_MGMT_CLASS_SUBN_ADM 0x03
#define IB_MGMT_CLASS_PERF_MGMT 0x04
#define IB_MGMT_CLASS_BM 0x05
#define IB_MGMT_CLASS_DEVICE_MGMT 0x06
#define IB_MGMT_CLASS_CM 0x07
#define IB_MGMT_CLASS_SNMP 0x08
#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30
#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F

/* Management methods */
#define IB_MGMT_METHOD_GET 0x01
#define IB_MGMT_METHOD_SET 0x02
#define IB_MGMT_METHOD_GET_RESP 0x81
#define IB_MGMT_METHOD_SEND 0x03
#define IB_MGMT_METHOD_TRAP 0x05
#define IB_MGMT_METHOD_REPORT 0x06
#define IB_MGMT_METHOD_REPORT_RESP 0x86
#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
#define IB_MGMT_METHOD_DELETE 0x15
#define IB_MGMT_METHOD_RESP 0x80

/* Subnet management attributes */
#define IB_SMP_ATTR_NOTICE 0x0002
#define IB_SMP_ATTR_NODE_DESC 0x0010
#define IB_SMP_ATTR_NODE_INFO 0x0011
#define IB_SMP_ATTR_SWITCH_INFO 0x0012
#define IB_SMP_ATTR_GUID_INFO 0x0014
#define IB_SMP_ATTR_PORT_INFO 0x0015
#define IB_SMP_ATTR_PKEY_TABLE 0x0016
#define IB_SMP_ATTR_SL_TO_VL_TABLE 0x0017
#define IB_SMP_ATTR_VL_ARB_TABLE 0x0018
#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE 0x0019
#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE 0x001A
#define IB_SMP_ATTR_MCAST_FORWARD_TABLE 0x001B
#define IB_SMP_ATTR_SM_INFO 0x0020
#define IB_SMP_ATTR_VENDOR_DIAG 0x0030
#define IB_SMP_ATTR_LED_INFO 0x0031
#define IB_SMP_ATTR_VENDOR_MASK 0xFF00

/* Subnet administration attributes */
#define IB_SA_ATTR_MC_MEMBER_REC 0x38
#define IB_SA_ATTR_PATH_REC 0x35

/* Multicast member record component mask bits */
#define IB_SA_MCMEMBER_REC_MGID (1<<0)
#define IB_SA_MCMEMBER_REC_PORT_GID (1<<1)
#define IB_SA_MCMEMBER_REC_QKEY (1<<2)
#define IB_SA_MCMEMBER_REC_MLID (1<<3)
#define IB_SA_MCMEMBER_REC_MTU_SELECTOR (1<<4)
#define IB_SA_MCMEMBER_REC_MTU (1<<5)
#define IB_SA_MCMEMBER_REC_TRAFFIC_CLASS (1<<6)
#define IB_SA_MCMEMBER_REC_PKEY (1<<7)
#define IB_SA_MCMEMBER_REC_RATE_SELECTOR (1<<8)
#define IB_SA_MCMEMBER_REC_RATE (1<<9)
#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR (1<<10)
#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME (1<<11)
#define IB_SA_MCMEMBER_REC_SL (1<<12)
#define IB_SA_MCMEMBER_REC_FLOW_LABEL (1<<13)
#define IB_SA_MCMEMBER_REC_HOP_LIMIT (1<<14)
#define IB_SA_MCMEMBER_REC_SCOPE (1<<15)
#define IB_SA_MCMEMBER_REC_JOIN_STATE (1<<16)
#define IB_SA_MCMEMBER_REC_PROXY_JOIN (1<<17)

/* Path record component mask bits */
#define IB_SA_PATH_REC_DGID (1<<2)
#define IB_SA_PATH_REC_SGID (1<<3)

struct ib_mad_hdr {
	uint8_t base_version;
	uint8_t mgmt_class;
	uint8_t class_version;
	uint8_t method;
	uint16_t status;
	uint16_t class_specific;
	uint32_t tid[2];
	uint16_t attr_id;
	uint16_t resv;
	uint32_t attr_mod;
} __attribute__ (( packed ));

struct ib_sa_hdr {
	uint32_t sm_key[2];
	uint16_t reserved;
	uint16_t attrib_offset;
	uint32_t comp_mask[2];
} __attribute__ (( packed ));

struct ib_rmpp_hdr {
	uint32_t raw[3];
} __attribute__ (( packed ));

struct ib_mad_data {
	struct ib_mad_hdr mad_hdr;
	uint8_t data[232];
} __attribute__ (( packed ));

struct ib_mad_guid_info {
	struct ib_mad_hdr mad_hdr;
	uint32_t mkey[2];
	uint32_t reserved[8];
	uint8_t gid_local[8];
} __attribute__ (( packed ));

struct ib_mad_port_info {
	struct ib_mad_hdr mad_hdr;
	uint32_t mkey[2];
	uint32_t reserved[8];
	uint32_t mkey2[2];
	uint8_t gid_prefix[8];
	uint16_t lid;
	uint16_t mastersm_lid;
	uint32_t cap_mask;
	uint16_t diag_code;
	uint16_t mkey_lease_period;
	uint8_t local_port_num;
	uint8_t link_width_enabled;
	uint8_t link_width_supported;
	uint8_t link_width_active;
	uint8_t port_state__link_speed_supported;
	uint8_t link_down_def_state__port_phys_state;
	uint8_t lmc__r1__mkey_prot_bits;
	uint8_t link_speed_enabled__link_speed_active;
} __attribute__ (( packed ));

struct ib_mad_pkey_table {
	struct ib_mad_hdr mad_hdr;
	uint32_t mkey[2];
	uint32_t reserved[8];
	uint16_t pkey[16][2];
} __attribute__ (( packed ));

struct ib_mad_path_record {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	struct ib_sa_hdr sa_hdr;
	uint32_t reserved0[2];
	struct ib_gid dgid;
	struct ib_gid sgid;
	uint16_t dlid;
	uint16_t slid;
	uint32_t hop_limit__flow_label__raw_traffic;
	uint32_t pkey__numb_path__reversible__tclass;
	uint8_t reserved1;
	uint8_t reserved__sl;
	uint8_t mtu_selector__mtu;
	uint8_t rate_selector__rate;
	uint32_t preference__packet_lifetime__packet_lifetime_selector;
	uint32_t reserved2[35];
} __attribute__ (( packed ));

struct ib_mad_mc_member_record {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	struct ib_sa_hdr sa_hdr;
	struct ib_gid mgid;
	struct ib_gid port_gid;
	uint32_t qkey;
	uint16_t mlid;
	uint8_t mtu_selector__mtu;
	uint8_t tclass;
	uint16_t pkey;
	uint8_t rate_selector__rate;
	uint8_t packet_lifetime_selector__packet_lifetime;
	uint32_t sl__flow_label__hop_limit;
	uint8_t scope__join_state;
	uint8_t proxy_join__reserved;
	uint16_t reserved0;
	uint32_t reserved1[37];
} __attribute__ (( packed ));

union ib_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_mad_data data;
	struct ib_mad_guid_info guid_info;
	struct ib_mad_port_info port_info;
	struct ib_mad_pkey_table pkey_table;
	struct ib_mad_path_record path_record;
	struct ib_mad_mc_member_record mc_member_record;
} __attribute__ (( packed ));

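/*
 * Example (editorial sketch): building a PortInfo GET request and
 * issuing it through ib_mad().  Field usage follows the common MAD
 * header above; the byte-order handling (htons) and the zero
 * transaction ID are illustrative assumptions.
 *
 *	union ib_mad mad;
 *	int rc;
 *
 *	memset ( &mad, 0, sizeof ( mad ) );
 *	mad.mad_hdr.base_version = IB_MGMT_BASE_VERSION;
 *	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
 *	mad.mad_hdr.class_version = 1;
 *	mad.mad_hdr.method = IB_MGMT_METHOD_GET;
 *	mad.mad_hdr.attr_id = htons ( IB_SMP_ATTR_PORT_INFO );
 *	if ( ( rc = ib_mad ( ibdev, &mad.mad_hdr, sizeof ( mad ) ) ) != 0 ) {
 *		// MAD transaction failed
 *	}
 */
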
#endif /* _GPXE_INFINIBAND_H */