/*
 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
19 FILE_LICENCE ( GPL2_OR_LATER );
28 #include <gpxe/infiniband.h>
29 #include <gpxe/iobuf.h>
30 #include <gpxe/ib_gma.h>
35 * Infiniband General Management Agent
40 struct ib_mad_request {
43 /** List of outstanding MAD requests */
44 struct list_head list;
46 struct retry_timer timer;
47 /** Destination address */
48 struct ib_address_vector av;
/** GMA number of send WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_SEND_WQES 4

/** GMA number of receive WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_RECV_WQES 2

/** GMA number of completion queue entries
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_CQES 8

/** TID magic signature ("gPXE" in ASCII, marks TIDs that we issued) */
#define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )

/** TID to use for next MAD request */
static unsigned int next_request_tid;
/*****************************************************************************
 *
 * Subnet management MAD handlers
 *
 *****************************************************************************
 */
85 * Construct directed route response, if necessary
87 * @v gma General management agent
88 * @v mad MAD response without DR fields filled in
89 * @ret mad MAD response with DR fields filled in
91 static union ib_mad * ib_sma_dr_response ( struct ib_gma *gma,
93 struct ib_mad_hdr *hdr = &mad->hdr;
94 struct ib_mad_smp *smp = &mad->smp;
95 unsigned int hop_pointer;
96 unsigned int hop_count;
98 /* Set response fields for directed route SMPs */
99 if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
100 hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
101 hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
102 hop_count = smp->mad_hdr.class_specific.smp.hop_count;
103 assert ( hop_count == hop_pointer );
104 if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
105 sizeof ( smp->return_path.hops[0] ) ) ) {
106 smp->return_path.hops[hop_pointer] = gma->ibdev->port;
108 DBGC ( gma, "GMA %p invalid hop pointer %d\n",
118 * Get node information
120 * @v gma General management agent
122 * @ret response MAD response
124 static union ib_mad * ib_sma_get_node_info ( struct ib_gma *gma,
125 union ib_mad *mad ) {
126 struct ib_device *ibdev = gma->ibdev;
127 struct ib_node_info *node_info = &mad->smp.smp_data.node_info;
129 memset ( node_info, 0, sizeof ( *node_info ) );
130 node_info->base_version = IB_MGMT_BASE_VERSION;
131 node_info->class_version = IB_SMP_CLASS_VERSION;
132 node_info->node_type = IB_NODE_TYPE_HCA;
133 node_info->num_ports = ib_get_hca_info ( ibdev, &node_info->sys_guid );
134 memcpy ( &node_info->node_guid, &node_info->sys_guid,
135 sizeof ( node_info->node_guid ) );
136 memcpy ( &node_info->port_guid, &ibdev->gid.u.half[1],
137 sizeof ( node_info->port_guid ) );
138 node_info->partition_cap = htons ( 1 );
139 node_info->local_port_num = ibdev->port;
141 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
142 return ib_sma_dr_response ( gma, mad );
146 * Get node description
148 * @v gma General management agent
150 * @ret response MAD response
152 static union ib_mad * ib_sma_get_node_desc ( struct ib_gma *gma,
153 union ib_mad *mad ) {
154 struct ib_device *ibdev = gma->ibdev;
155 struct ib_node_desc *node_desc = &mad->smp.smp_data.node_desc;
156 struct ib_gid_half *guid = &ibdev->gid.u.half[1];
158 memset ( node_desc, 0, sizeof ( *node_desc ) );
159 snprintf ( node_desc->node_string, sizeof ( node_desc->node_string ),
160 "gPXE %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x (%s)",
161 guid->bytes[0], guid->bytes[1], guid->bytes[2],
162 guid->bytes[3], guid->bytes[4], guid->bytes[5],
163 guid->bytes[6], guid->bytes[7], ibdev->dev->name );
165 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
166 return ib_sma_dr_response ( gma, mad );
170 * Get GUID information
172 * @v gma General management agent
174 * @ret response MAD response
176 static union ib_mad * ib_sma_get_guid_info ( struct ib_gma *gma,
177 union ib_mad *mad ) {
178 struct ib_device *ibdev = gma->ibdev;
179 struct ib_guid_info *guid_info = &mad->smp.smp_data.guid_info;
181 memset ( guid_info, 0, sizeof ( *guid_info ) );
182 memcpy ( guid_info->guid[0], &ibdev->gid.u.half[1],
183 sizeof ( guid_info->guid[0] ) );
185 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
186 return ib_sma_dr_response ( gma, mad );
190 * Get port information
192 * @v gma General management agent
194 * @ret response MAD response
196 static union ib_mad * ib_sma_get_port_info ( struct ib_gma *gma,
197 union ib_mad *mad ) {
198 struct ib_device *ibdev = gma->ibdev;
199 struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
201 memset ( port_info, 0, sizeof ( *port_info ) );
202 memcpy ( port_info->gid_prefix, &ibdev->gid.u.half[0],
203 sizeof ( port_info->gid_prefix ) );
204 port_info->lid = ntohs ( ibdev->lid );
205 port_info->mastersm_lid = ntohs ( ibdev->sm_lid );
206 port_info->local_port_num = ibdev->port;
207 port_info->link_width_enabled = ibdev->link_width;
208 port_info->link_width_supported = ibdev->link_width;
209 port_info->link_width_active = ibdev->link_width;
210 port_info->link_speed_supported__port_state =
211 ( ( ibdev->link_speed << 4 ) | ibdev->port_state );
212 port_info->port_phys_state__link_down_def_state =
213 ( ( IB_PORT_PHYS_STATE_POLLING << 4 ) |
214 IB_PORT_PHYS_STATE_POLLING );
215 port_info->link_speed_active__link_speed_enabled =
216 ( ( ibdev->link_speed << 4 ) | ibdev->link_speed );
217 port_info->neighbour_mtu__mastersm_sl =
218 ( ( IB_MTU_2048 << 4 ) | ibdev->sm_sl );
219 port_info->vl_cap__init_type = ( IB_VL_0 << 4 );
220 port_info->init_type_reply__mtu_cap = IB_MTU_2048;
221 port_info->operational_vls__enforcement = ( IB_VL_0 << 4 );
222 port_info->guid_cap = 1;
224 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
225 return ib_sma_dr_response ( gma, mad );
229 * Set port information
231 * @v gma General management agent
233 * @ret response MAD response
235 static union ib_mad * ib_sma_set_port_info ( struct ib_gma *gma,
236 union ib_mad *mad ) {
237 struct ib_device *ibdev = gma->ibdev;
238 const struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
241 memcpy ( &ibdev->gid.u.half[0], port_info->gid_prefix,
242 sizeof ( ibdev->gid.u.half[0] ) );
243 ibdev->lid = ntohs ( port_info->lid );
244 ibdev->sm_lid = ntohs ( port_info->mastersm_lid );
245 ibdev->sm_sl = ( port_info->neighbour_mtu__mastersm_sl & 0xf );
247 if ( ( rc = ib_set_port_info ( ibdev, port_info ) ) != 0 ) {
248 DBGC ( ibdev, "IBDEV %p could not set port information: %s\n",
249 ibdev, strerror ( rc ) );
251 htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
254 return ib_sma_get_port_info ( gma, mad );
258 * Get partition key table
260 * @v gma General management agent
262 * @ret response MAD response
264 static union ib_mad * ib_sma_get_pkey_table ( struct ib_gma *gma,
265 union ib_mad *mad ) {
266 struct ib_device *ibdev = gma->ibdev;
267 struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
269 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
270 memset ( pkey_table, 0, sizeof ( *pkey_table ) );
271 pkey_table->pkey[0] = htons ( ibdev->pkey );
273 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
274 return ib_sma_dr_response ( gma, mad );
278 * Set partition key table
280 * @v gma General management agent
282 * @ret response MAD response
284 static union ib_mad * ib_sma_set_pkey_table ( struct ib_gma *gma,
285 union ib_mad *mad ) {
286 struct ib_device *ibdev = gma->ibdev;
287 struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
289 ibdev->pkey = ntohs ( pkey_table->pkey[0] );
291 return ib_sma_get_pkey_table ( gma, mad );
294 /** List of attribute handlers */
295 struct ib_gma_handler ib_sma_handlers[] __ib_gma_handler = {
297 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
298 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
299 .class_version = IB_SMP_CLASS_VERSION,
300 .method = IB_MGMT_METHOD_GET,
301 .attr_id = htons ( IB_SMP_ATTR_NODE_INFO ),
302 .handle = ib_sma_get_node_info,
305 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
306 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
307 .class_version = IB_SMP_CLASS_VERSION,
308 .method = IB_MGMT_METHOD_GET,
309 .attr_id = htons ( IB_SMP_ATTR_NODE_DESC ),
310 .handle = ib_sma_get_node_desc,
313 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
314 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
315 .class_version = IB_SMP_CLASS_VERSION,
316 .method = IB_MGMT_METHOD_GET,
317 .attr_id = htons ( IB_SMP_ATTR_GUID_INFO ),
318 .handle = ib_sma_get_guid_info,
321 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
322 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
323 .class_version = IB_SMP_CLASS_VERSION,
324 .method = IB_MGMT_METHOD_GET,
325 .attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
326 .handle = ib_sma_get_port_info,
329 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
330 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
331 .class_version = IB_SMP_CLASS_VERSION,
332 .method = IB_MGMT_METHOD_SET,
333 .attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
334 .handle = ib_sma_set_port_info,
337 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
338 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
339 .class_version = IB_SMP_CLASS_VERSION,
340 .method = IB_MGMT_METHOD_GET,
341 .attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
342 .handle = ib_sma_get_pkey_table,
345 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
346 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
347 .class_version = IB_SMP_CLASS_VERSION,
348 .method = IB_MGMT_METHOD_SET,
349 .attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
350 .handle = ib_sma_set_pkey_table,
/*****************************************************************************
 *
 * General management agent
 *
 *****************************************************************************
 */
362 * Call attribute handler
364 * @v gma General management agent
366 * @ret mad MAD response
368 static union ib_mad * ib_handle_mad ( struct ib_gma *gma, union ib_mad *mad ) {
369 struct ib_mad_hdr *hdr = &mad->hdr;
370 struct ib_gma_handler *handler;
372 for_each_table_entry ( handler, IB_GMA_HANDLERS ) {
373 if ( ( ( handler->mgmt_class & ~handler->mgmt_class_ignore ) ==
374 ( hdr->mgmt_class & ~handler->mgmt_class_ignore ) ) &&
375 ( handler->class_version == hdr->class_version ) &&
376 ( handler->method == hdr->method ) &&
377 ( handler->attr_id == hdr->attr_id ) ) {
378 return handler->handle ( gma, mad );
382 hdr->method = IB_MGMT_METHOD_TRAP;
383 hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
388 * Complete GMA receive
391 * @v ibdev Infiniband device
393 * @v av Address vector
394 * @v iobuf I/O buffer
395 * @v rc Completion status code
397 static void ib_gma_complete_recv ( struct ib_device *ibdev,
398 struct ib_queue_pair *qp,
399 struct ib_address_vector *av,
400 struct io_buffer *iobuf, int rc ) {
401 struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
402 struct ib_mad_request *request;
404 struct ib_mad_hdr *hdr;
405 union ib_mad *response;
409 DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
414 if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
415 DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
416 gma, iob_len ( iobuf ) );
417 DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
422 if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
423 DBGC ( gma, "GMA %p unsupported base version %x\n",
424 gma, hdr->base_version );
425 DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
428 DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
429 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
430 hdr->mgmt_class, hdr->class_version, hdr->method,
431 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
432 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
434 /* Dequeue request if applicable */
435 list_for_each_entry ( request, &gma->requests, list ) {
436 if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
437 sizeof ( request->mad.hdr.tid ) ) == 0 ) {
438 stop_timer ( &request->timer );
439 list_del ( &request->list );
446 if ( ( response = ib_handle_mad ( gma, mad ) ) == NULL )
449 /* Re-use I/O buffer for response */
450 memcpy ( mad, response, sizeof ( *mad ) );
451 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x) status "
452 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
453 hdr->mgmt_class, hdr->class_version, hdr->method,
454 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
455 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
457 /* Send MAD response, if applicable */
458 if ( ( rc = ib_post_send ( ibdev, qp, av,
459 iob_disown ( iobuf ) ) ) != 0 ) {
460 DBGC ( gma, "GMA %p could not send MAD response: %s\n",
461 gma, strerror ( rc ) );
469 /** GMA completion operations */
470 static struct ib_completion_queue_operations ib_gma_completion_ops = {
471 .complete_recv = ib_gma_complete_recv,
475 * Transmit MAD request
477 * @v gma General management agent
478 * @v request MAD request
479 * @ret rc Return status code
481 static int ib_gma_send ( struct ib_gma *gma, struct ib_mad_request *request ) {
482 struct io_buffer *iobuf;
485 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
486 gma, ntohl ( request->mad.hdr.tid[0] ),
487 ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
488 request->mad.hdr.class_version, request->mad.hdr.method,
489 ntohs ( request->mad.hdr.attr_id ) );
490 DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );
492 /* Construct I/O buffer */
493 iobuf = alloc_iob ( sizeof ( request->mad ) );
495 DBGC ( gma, "GMA %p could not allocate buffer for TID "
496 "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
497 ntohl ( request->mad.hdr.tid[1] ) );
500 memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
501 sizeof ( request->mad ) );
503 /* Send I/O buffer */
504 if ( ( rc = ib_post_send ( gma->ibdev, gma->qp, &request->av,
506 DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
507 gma, ntohl ( request->mad.hdr.tid[0] ),
508 ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
517 * Handle MAD request timer expiry
519 * @v timer Retry timer
520 * @v expired Failure indicator
522 static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
523 struct ib_mad_request *request =
524 container_of ( timer, struct ib_mad_request, timer );
525 struct ib_gma *gma = request->gma;
527 /* Abandon TID if we have tried too many times */
529 DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
530 gma, ntohl ( request->mad.hdr.tid[0] ),
531 ntohl ( request->mad.hdr.tid[1] ) );
532 list_del ( &request->list );
537 /* Restart retransmission timer */
538 start_timer ( timer );
541 ib_gma_send ( gma, request );
547 * @v gma General management agent
549 * @v av Destination address, or NULL for SM
550 * @v retry Request should be retried until a response arrives
551 * @ret rc Return status code
553 int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
554 struct ib_address_vector *av, int retry ) {
555 struct ib_device *ibdev = gma->ibdev;
556 struct ib_mad_request *request;
558 /* Allocate and initialise structure */
559 request = zalloc ( sizeof ( *request ) );
561 DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
565 request->timer.expired = ib_gma_timer_expired;
567 /* Determine address vector */
569 memcpy ( &request->av, av, sizeof ( request->av ) );
571 request->av.lid = ibdev->sm_lid;
572 request->av.sl = ibdev->sm_sl;
573 request->av.qpn = IB_QPN_GMA;
574 request->av.qkey = IB_QKEY_GMA;
578 memcpy ( &request->mad, mad, sizeof ( request->mad ) );
581 request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
582 request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
584 /* Send initial request. Ignore errors; the retry timer will
585 * take care of those we care about.
587 ib_gma_send ( gma, request );
589 /* Add to list and start timer if applicable */
591 list_add ( &request->list, &gma->requests );
592 start_timer ( &request->timer );
603 * @v ibdev Infiniband device
604 * @v type Queue pair type
605 * @ret gma General management agent, or NULL
607 struct ib_gma * ib_create_gma ( struct ib_device *ibdev,
608 enum ib_queue_pair_type type ) {
612 /* Allocate and initialise fields */
613 gma = zalloc ( sizeof ( *gma ) );
617 INIT_LIST_HEAD ( &gma->requests );
619 /* Create completion queue */
620 gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
621 &ib_gma_completion_ops );
623 DBGC ( gma, "GMA %p could not allocate completion queue\n",
628 /* Create queue pair */
629 qkey = ( ( type == IB_QPT_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
630 gma->qp = ib_create_qp ( ibdev, type, IB_GMA_NUM_SEND_WQES, gma->cq,
631 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
633 DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
636 ib_qp_set_ownerdata ( gma->qp, gma );
638 DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );
640 /* Fill receive ring */
641 ib_refill_recv ( ibdev, gma->qp );
644 ib_destroy_qp ( ibdev, gma->qp );
646 ib_destroy_cq ( ibdev, gma->cq );
656 * @v gma General management agent
658 void ib_destroy_gma ( struct ib_gma *gma ) {
659 struct ib_device *ibdev = gma->ibdev;
660 struct ib_mad_request *request;
661 struct ib_mad_request *tmp;
663 /* Flush any outstanding requests */
664 list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
665 stop_timer ( &request->timer );
666 list_del ( &request->list );
670 ib_destroy_qp ( ibdev, gma->qp );
671 ib_destroy_cq ( ibdev, gma->cq );