 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <gpxe/infiniband.h>
#include <gpxe/iobuf.h>
#include <gpxe/ib_gma.h>
/** @file
 *
 * Infiniband General Management Agent
 *
 */
40 struct ib_mad_request {
43 /** List of outstanding MAD requests */
44 struct list_head list;
46 struct retry_timer timer;
47 /** Destination address */
48 struct ib_address_vector av;
/** GMA number of send WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_SEND_WQES 4

/** GMA number of receive WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_RECV_WQES 2

/** GMA number of completion queue entries
 *
 * This is a policy decision
 */
#define IB_GMA_NUM_CQES 8

/** TID magic signature ("gPXE" in the high word of locally-issued TIDs) */
#define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )

/** TID to use for next MAD request (monotonically incremented) */
static unsigned int next_request_tid;
/*****************************************************************************
 *
 * Subnet management MAD handlers
 *
 *****************************************************************************
 */
85 * Get node information
87 * @v gma General management agent
90 static void ib_sma_get_node_info ( struct ib_gma *gma,
92 struct ib_device *ibdev = gma->ibdev;
93 struct ib_node_info *node_info = &mad->smp.smp_data.node_info;
95 memset ( node_info, 0, sizeof ( *node_info ) );
96 node_info->base_version = IB_MGMT_BASE_VERSION;
97 node_info->class_version = IB_SMP_CLASS_VERSION;
98 node_info->node_type = IB_NODE_TYPE_HCA;
99 node_info->num_ports = ib_get_hca_info ( ibdev, &node_info->sys_guid );
100 memcpy ( &node_info->node_guid, &node_info->sys_guid,
101 sizeof ( node_info->node_guid ) );
102 memcpy ( &node_info->port_guid, &ibdev->gid.u.half[1],
103 sizeof ( node_info->port_guid ) );
104 node_info->partition_cap = htons ( 1 );
105 node_info->local_port_num = ibdev->port;
109 * Get node description
111 * @v gma General management agent
114 static void ib_sma_get_node_desc ( struct ib_gma *gma,
115 union ib_mad *mad ) {
116 struct ib_device *ibdev = gma->ibdev;
117 struct ib_node_desc *node_desc = &mad->smp.smp_data.node_desc;
118 struct ib_gid_half *guid = &ibdev->gid.u.half[1];
120 memset ( node_desc, 0, sizeof ( *node_desc ) );
121 snprintf ( node_desc->node_string, sizeof ( node_desc->node_string ),
122 "gPXE %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x (%s)",
123 guid->bytes[0], guid->bytes[1], guid->bytes[2],
124 guid->bytes[3], guid->bytes[4], guid->bytes[5],
125 guid->bytes[6], guid->bytes[7], ibdev->dev->name );
129 * Get GUID information
131 * @v gma General management agent
134 static void ib_sma_get_guid_info ( struct ib_gma *gma,
135 union ib_mad *mad ) {
136 struct ib_device *ibdev = gma->ibdev;
137 struct ib_guid_info *guid_info = &mad->smp.smp_data.guid_info;
139 memset ( guid_info, 0, sizeof ( *guid_info ) );
140 memcpy ( guid_info->guid[0], &ibdev->gid.u.half[1],
141 sizeof ( guid_info->guid[0] ) );
145 * Get port information
147 * @v gma General management agent
150 static void ib_sma_get_port_info ( struct ib_gma *gma,
151 union ib_mad *mad ) {
152 struct ib_device *ibdev = gma->ibdev;
153 struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
155 memset ( port_info, 0, sizeof ( *port_info ) );
156 memcpy ( port_info->gid_prefix, &ibdev->gid.u.half[0],
157 sizeof ( port_info->gid_prefix ) );
158 port_info->lid = ntohs ( ibdev->lid );
159 port_info->mastersm_lid = ntohs ( ibdev->sm_lid );
160 port_info->local_port_num = ibdev->port;
161 port_info->link_width_enabled = ibdev->link_width;
162 port_info->link_width_supported = ibdev->link_width;
163 port_info->link_width_active = ibdev->link_width;
164 port_info->link_speed_supported__port_state =
165 ( ( ibdev->link_speed << 4 ) | ibdev->port_state );
166 port_info->port_phys_state__link_down_def_state =
167 ( ( IB_PORT_PHYS_STATE_POLLING << 4 ) |
168 IB_PORT_PHYS_STATE_POLLING );
169 port_info->link_speed_active__link_speed_enabled =
170 ( ( ibdev->link_speed << 4 ) | ibdev->link_speed );
171 port_info->neighbour_mtu__mastersm_sl =
172 ( ( IB_MTU_2048 << 4 ) | ibdev->sm_sl );
173 port_info->vl_cap__init_type = ( IB_VL_0 << 4 );
174 port_info->init_type_reply__mtu_cap = IB_MTU_2048;
175 port_info->operational_vls__enforcement = ( IB_VL_0 << 4 );
176 port_info->guid_cap = 1;
180 * Set port information
182 * @v gma General management agent
185 static void ib_sma_set_port_info ( struct ib_gma *gma,
186 union ib_mad *mad ) {
187 struct ib_device *ibdev = gma->ibdev;
188 const struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
191 memcpy ( &ibdev->gid.u.half[0], port_info->gid_prefix,
192 sizeof ( ibdev->gid.u.half[0] ) );
193 ibdev->lid = ntohs ( port_info->lid );
194 ibdev->sm_lid = ntohs ( port_info->mastersm_lid );
195 ibdev->sm_sl = ( port_info->neighbour_mtu__mastersm_sl & 0xf );
197 if ( ( rc = ib_set_port_info ( ibdev, port_info ) ) != 0 ) {
198 DBGC ( ibdev, "IBDEV %p could not set port information: %s\n",
199 ibdev, strerror ( rc ) );
201 htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
204 ib_sma_get_port_info ( gma, mad );
208 * Get partition key table
210 * @v gma General management agent
213 static void ib_sma_get_pkey_table ( struct ib_gma *gma,
214 union ib_mad *mad ) {
215 struct ib_device *ibdev = gma->ibdev;
216 struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
218 memset ( pkey_table, 0, sizeof ( *pkey_table ) );
219 pkey_table->pkey[0] = htons ( ibdev->pkey );
223 * Set partition key table
225 * @v gma General management agent
228 static void ib_sma_set_pkey_table ( struct ib_gma *gma,
229 union ib_mad *mad ) {
230 struct ib_device *ibdev = gma->ibdev;
231 struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
233 ibdev->pkey = ntohs ( pkey_table->pkey[0] );
235 ib_sma_get_pkey_table ( gma, mad );
238 /** List of attribute handlers */
239 struct ib_gma_handler ib_sma_handlers[] __ib_gma_handler = {
241 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
242 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
243 .class_version = IB_SMP_CLASS_VERSION,
244 .method = IB_MGMT_METHOD_GET,
245 .resp_method = IB_MGMT_METHOD_GET_RESP,
246 .attr_id = htons ( IB_SMP_ATTR_NODE_INFO ),
247 .handle = ib_sma_get_node_info,
250 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
251 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
252 .class_version = IB_SMP_CLASS_VERSION,
253 .method = IB_MGMT_METHOD_GET,
254 .resp_method = IB_MGMT_METHOD_GET_RESP,
255 .attr_id = htons ( IB_SMP_ATTR_NODE_DESC ),
256 .handle = ib_sma_get_node_desc,
259 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
260 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
261 .class_version = IB_SMP_CLASS_VERSION,
262 .method = IB_MGMT_METHOD_GET,
263 .resp_method = IB_MGMT_METHOD_GET_RESP,
264 .attr_id = htons ( IB_SMP_ATTR_GUID_INFO ),
265 .handle = ib_sma_get_guid_info,
268 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
269 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
270 .class_version = IB_SMP_CLASS_VERSION,
271 .method = IB_MGMT_METHOD_GET,
272 .resp_method = IB_MGMT_METHOD_GET_RESP,
273 .attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
274 .handle = ib_sma_get_port_info,
277 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
278 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
279 .class_version = IB_SMP_CLASS_VERSION,
280 .method = IB_MGMT_METHOD_SET,
281 .resp_method = IB_MGMT_METHOD_GET_RESP,
282 .attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
283 .handle = ib_sma_set_port_info,
286 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
287 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
288 .class_version = IB_SMP_CLASS_VERSION,
289 .method = IB_MGMT_METHOD_GET,
290 .resp_method = IB_MGMT_METHOD_GET_RESP,
291 .attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
292 .handle = ib_sma_get_pkey_table,
295 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
296 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
297 .class_version = IB_SMP_CLASS_VERSION,
298 .method = IB_MGMT_METHOD_SET,
299 .resp_method = IB_MGMT_METHOD_GET_RESP,
300 .attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
301 .handle = ib_sma_set_pkey_table,
/*****************************************************************************
 *
 * General management agent
 *
 *****************************************************************************
 */
313 * Call attribute handler
315 * @v gma General management agent
318 static void ib_handle_mad ( struct ib_gma *gma, union ib_mad *mad ) {
319 struct ib_mad_hdr *hdr = &mad->hdr;
320 struct ib_gma_handler *handler;
322 for_each_table_entry ( handler, IB_GMA_HANDLERS ) {
323 if ( ( ( handler->mgmt_class & ~handler->mgmt_class_ignore ) ==
324 ( hdr->mgmt_class & ~handler->mgmt_class_ignore ) ) &&
325 ( handler->class_version == hdr->class_version ) &&
326 ( handler->method == hdr->method ) &&
327 ( handler->attr_id == hdr->attr_id ) ) {
328 hdr->method = handler->resp_method;
329 handler->handle ( gma, mad );
334 hdr->method = IB_MGMT_METHOD_TRAP;
335 hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
339 * Complete GMA receive
342 * @v ibdev Infiniband device
344 * @v av Address vector
345 * @v iobuf I/O buffer
346 * @v rc Completion status code
348 static void ib_gma_complete_recv ( struct ib_device *ibdev,
349 struct ib_queue_pair *qp,
350 struct ib_address_vector *av,
351 struct io_buffer *iobuf, int rc ) {
352 struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
353 struct ib_mad_request *request;
355 struct ib_mad_hdr *hdr;
356 unsigned int hop_pointer;
357 unsigned int hop_count;
361 DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
366 if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
367 DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
368 gma, iob_len ( iobuf ) );
369 DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
374 if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
375 DBGC ( gma, "GMA %p unsupported base version %x\n",
376 gma, hdr->base_version );
377 DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
380 DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
381 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
382 hdr->mgmt_class, hdr->class_version, hdr->method,
383 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
384 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
386 /* Dequeue request if applicable */
387 list_for_each_entry ( request, &gma->requests, list ) {
388 if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
389 sizeof ( request->mad.hdr.tid ) ) == 0 ) {
390 stop_timer ( &request->timer );
391 list_del ( &request->list );
398 ib_handle_mad ( gma, mad );
400 /* Finish processing if we have no response to send */
404 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x) status "
405 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
406 hdr->mgmt_class, hdr->class_version, hdr->method,
407 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
408 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
410 /* Set response fields for directed route SMPs */
411 if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
412 struct ib_mad_smp *smp = &mad->smp;
414 hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
415 hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
416 hop_count = smp->mad_hdr.class_specific.smp.hop_count;
417 assert ( hop_count == hop_pointer );
418 if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
419 sizeof ( smp->return_path.hops[0] ) ) ) {
420 smp->return_path.hops[hop_pointer] = ibdev->port;
422 DBGC ( gma, "GMA %p invalid hop pointer %d\n",
428 /* Send MAD response, if applicable */
429 if ( ( rc = ib_post_send ( ibdev, qp, av,
430 iob_disown ( iobuf ) ) ) != 0 ) {
431 DBGC ( gma, "GMA %p could not send MAD response: %s\n",
432 gma, strerror ( rc ) );
440 /** GMA completion operations */
441 static struct ib_completion_queue_operations ib_gma_completion_ops = {
442 .complete_recv = ib_gma_complete_recv,
446 * Transmit MAD request
448 * @v gma General management agent
449 * @v request MAD request
450 * @ret rc Return status code
452 static int ib_gma_send ( struct ib_gma *gma, struct ib_mad_request *request ) {
453 struct io_buffer *iobuf;
456 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
457 gma, ntohl ( request->mad.hdr.tid[0] ),
458 ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
459 request->mad.hdr.class_version, request->mad.hdr.method,
460 ntohs ( request->mad.hdr.attr_id ) );
461 DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );
463 /* Construct I/O buffer */
464 iobuf = alloc_iob ( sizeof ( request->mad ) );
466 DBGC ( gma, "GMA %p could not allocate buffer for TID "
467 "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
468 ntohl ( request->mad.hdr.tid[1] ) );
471 memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
472 sizeof ( request->mad ) );
474 /* Send I/O buffer */
475 if ( ( rc = ib_post_send ( gma->ibdev, gma->qp, &request->av,
477 DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
478 gma, ntohl ( request->mad.hdr.tid[0] ),
479 ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
488 * Handle MAD request timer expiry
490 * @v timer Retry timer
491 * @v expired Failure indicator
493 static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
494 struct ib_mad_request *request =
495 container_of ( timer, struct ib_mad_request, timer );
496 struct ib_gma *gma = request->gma;
498 /* Abandon TID if we have tried too many times */
500 DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
501 gma, ntohl ( request->mad.hdr.tid[0] ),
502 ntohl ( request->mad.hdr.tid[1] ) );
503 list_del ( &request->list );
508 /* Restart retransmission timer */
509 start_timer ( timer );
512 ib_gma_send ( gma, request );
518 * @v gma General management agent
520 * @v av Destination address, or NULL for SM
521 * @v retry Request should be retried until a response arrives
522 * @ret rc Return status code
524 int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
525 struct ib_address_vector *av, int retry ) {
526 struct ib_device *ibdev = gma->ibdev;
527 struct ib_mad_request *request;
529 /* Allocate and initialise structure */
530 request = zalloc ( sizeof ( *request ) );
532 DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
536 request->timer.expired = ib_gma_timer_expired;
538 /* Determine address vector */
540 memcpy ( &request->av, av, sizeof ( request->av ) );
542 request->av.lid = ibdev->sm_lid;
543 request->av.sl = ibdev->sm_sl;
544 request->av.qpn = IB_QPN_GMA;
545 request->av.qkey = IB_QKEY_GMA;
549 memcpy ( &request->mad, mad, sizeof ( request->mad ) );
552 request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
553 request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
555 /* Send initial request. Ignore errors; the retry timer will
556 * take care of those we care about.
558 ib_gma_send ( gma, request );
560 /* Add to list and start timer if applicable */
562 list_add ( &request->list, &gma->requests );
563 start_timer ( &request->timer );
574 * @v ibdev Infiniband device
575 * @v type Queue pair type
576 * @ret gma General management agent, or NULL
578 struct ib_gma * ib_create_gma ( struct ib_device *ibdev,
579 enum ib_queue_pair_type type ) {
583 /* Allocate and initialise fields */
584 gma = zalloc ( sizeof ( *gma ) );
588 INIT_LIST_HEAD ( &gma->requests );
590 /* Create completion queue */
591 gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
592 &ib_gma_completion_ops );
594 DBGC ( gma, "GMA %p could not allocate completion queue\n",
599 /* Create queue pair */
600 qkey = ( ( type == IB_QPT_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
601 gma->qp = ib_create_qp ( ibdev, type, IB_GMA_NUM_SEND_WQES, gma->cq,
602 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
604 DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
607 ib_qp_set_ownerdata ( gma->qp, gma );
609 DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );
611 /* Fill receive ring */
612 ib_refill_recv ( ibdev, gma->qp );
615 ib_destroy_qp ( ibdev, gma->qp );
617 ib_destroy_cq ( ibdev, gma->cq );
627 * @v gma General management agent
629 void ib_destroy_gma ( struct ib_gma *gma ) {
630 struct ib_device *ibdev = gma->ibdev;
631 struct ib_mad_request *request;
632 struct ib_mad_request *tmp;
634 /* Flush any outstanding requests */
635 list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
636 stop_timer ( &request->timer );
637 list_del ( &request->list );
641 ib_destroy_qp ( ibdev, gma->qp );
642 ib_destroy_cq ( ibdev, gma->cq );