2 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 FILE_LICENCE ( GPL2_OR_LATER );
28 #include <gpxe/infiniband.h>
29 #include <gpxe/iobuf.h>
30 #include <gpxe/ib_gma.h>
35 * Infiniband General Management Agent
/* An outstanding MAD request awaiting a response.
 * NOTE(review): this view of the file has lines missing (embedded
 * numbering is discontinuous); fields referenced elsewhere in this file
 * (e.g. "gma" owner pointer and the "mad" payload used by
 * ib_gma_send()/ib_gma_timer_expired()) are presumably declared here —
 * confirm against the full source.
 */
40 struct ib_mad_request {
43 /** List of outstanding MAD requests */
44 struct list_head list;
/* Retransmission timer; expiry handled by ib_gma_timer_expired() */
46 struct retry_timer timer;
47 /** Destination address */
48 struct ib_address_vector av;
53 /** GMA number of send WQEs
55 * This is a policy decision.
57 #define IB_GMA_NUM_SEND_WQES 4
59 /** GMA number of receive WQEs
61 * This is a policy decision.
63 #define IB_GMA_NUM_RECV_WQES 2
65 /** GMA number of completion queue entries
67 * This is a policy decision
69 #define IB_GMA_NUM_CQES 8
/* Spells "gPXE" in big-endian byte order; placed in tid[0] so our
 * requests' transaction IDs are recognisable on the wire and can be
 * matched against responses in ib_gma_complete_recv().
 */
71 /** TID magic signature */
72 #define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )
/* Monotonically increasing counter stored in tid[1] of each new request */
74 /** TID to use for next MAD request */
75 static unsigned int next_request_tid;
77 /*****************************************************************************
79 * Subnet management MAD handlers
81 *****************************************************************************
85 * Construct directed route response, if necessary
87 * @v gma General management agent
88 * @v mad MAD response without DR fields filled in
89 * @ret mad MAD response with DR fields filled in
/* NOTE(review): this view is incomplete — the second parameter
 * (presumably "union ib_mad *mad") and the closing braces are not
 * visible; confirm against the full source before editing.
 */
91 static union ib_mad * ib_sma_dr_response ( struct ib_gma *gma,
93 struct ib_mad_hdr *hdr = &mad->hdr;
94 struct ib_mad_smp *smp = &mad->smp;
95 unsigned int hop_pointer;
96 unsigned int hop_count;
98 /* Set response fields for directed route SMPs */
99 if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
/* Direction bit: marks this SMP as travelling back inbound */
100 hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
101 hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
102 hop_count = smp->mad_hdr.class_specific.smp.hop_count;
/* At the end of the outbound path the hop pointer should have
 * advanced to equal the hop count.
 */
103 assert ( hop_count == hop_pointer );
/* Record our egress port in the return path, guarding the
 * array index against a corrupt hop pointer.
 */
104 if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
105 sizeof ( smp->return_path.hops[0] ) ) ) {
106 smp->return_path.hops[hop_pointer] = gma->ibdev->port;
108 DBGC ( gma, "GMA %p invalid hop pointer %d\n",
118 * Get node information
120 * @v gma General management agent
122 * @ret response MAD response
/* Handles SubnGet(NodeInfo): fills in the NodeInfo attribute for this
 * HCA and converts the MAD into a GetResp.
 * NOTE(review): closing brace not visible in this view of the file.
 */
124 static union ib_mad * ib_sma_get_node_info ( struct ib_gma *gma,
125 union ib_mad *mad ) {
126 struct ib_device *ibdev = gma->ibdev;
127 struct ib_node_info *node_info = &mad->smp.smp_data.node_info;
129 memset ( node_info, 0, sizeof ( *node_info ) );
130 node_info->base_version = IB_MGMT_BASE_VERSION;
131 node_info->class_version = IB_SMP_CLASS_VERSION;
132 node_info->node_type = IB_NODE_TYPE_HCA;
/* ib_get_hca_info() returns the port count and fills in sys_guid */
133 node_info->num_ports = ib_get_hca_info ( ibdev, &node_info->sys_guid );
/* Node GUID is reported identical to the system image GUID */
134 memcpy ( &node_info->node_guid, &node_info->sys_guid,
135 sizeof ( node_info->node_guid ) );
/* Port GUID is the low half of the port GID */
136 memcpy ( &node_info->port_guid, &ibdev->gid.u.half[1],
137 sizeof ( node_info->port_guid ) );
138 node_info->partition_cap = htons ( 1 );
139 node_info->local_port_num = ibdev->port;
141 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
/* Fill in directed-route fields if this was a DR SMP */
142 return ib_sma_dr_response ( gma, mad );
146 * Get node description
148 * @v gma General management agent
150 * @ret response MAD response
/* Handles SubnGet(NodeDescription): builds a human-readable node string
 * from the port GUID and device name.
 * NOTE(review): closing brace not visible in this view of the file.
 */
152 static union ib_mad * ib_sma_get_node_desc ( struct ib_gma *gma,
153 union ib_mad *mad ) {
154 struct ib_device *ibdev = gma->ibdev;
155 struct ib_node_desc *node_desc = &mad->smp.smp_data.node_desc;
156 struct ib_gid_half *guid = &ibdev->gid.u.half[1];
158 memset ( node_desc, 0, sizeof ( *node_desc ) );
/* e.g. "gPXE 00:11:22:33:44:55:66:77 (netX)"; snprintf bounds the
 * write to the fixed-size node_string field.
 */
159 snprintf ( node_desc->node_string, sizeof ( node_desc->node_string ),
160 "gPXE %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x (%s)",
161 guid->bytes[0], guid->bytes[1], guid->bytes[2],
162 guid->bytes[3], guid->bytes[4], guid->bytes[5],
163 guid->bytes[6], guid->bytes[7], ibdev->dev->name );
165 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
166 return ib_sma_dr_response ( gma, mad );
170 * Get GUID information
172 * @v gma General management agent
174 * @ret response MAD response
/* Handles SubnGet(GUIDInfo): reports a single GUID (guid_cap is 1, per
 * ib_sma_get_port_info()), taken from the low half of the port GID.
 * NOTE(review): closing brace not visible in this view of the file.
 */
176 static union ib_mad * ib_sma_get_guid_info ( struct ib_gma *gma,
177 union ib_mad *mad ) {
178 struct ib_device *ibdev = gma->ibdev;
179 struct ib_guid_info *guid_info = &mad->smp.smp_data.guid_info;
181 memset ( guid_info, 0, sizeof ( *guid_info ) );
182 memcpy ( guid_info->guid[0], &ibdev->gid.u.half[1],
183 sizeof ( guid_info->guid[0] ) );
185 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
186 return ib_sma_dr_response ( gma, mad );
190 * Get port information
192 * @v gma General management agent
194 * @ret response MAD response
/* Handles SubnGet(PortInfo): reports this port's LID, SM LID, link
 * width/speed state and fixed capabilities.
 * NOTE(review): closing brace not visible in this view of the file.
 */
196 static union ib_mad * ib_sma_get_port_info ( struct ib_gma *gma,
197 union ib_mad *mad ) {
198 struct ib_device *ibdev = gma->ibdev;
199 struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
201 memset ( port_info, 0, sizeof ( *port_info ) );
/* GID prefix is the high half of the port GID */
202 memcpy ( port_info->gid_prefix, &ibdev->gid.u.half[0],
203 sizeof ( port_info->gid_prefix ) );
/* NOTE(review): ntohs() used on host-order values being placed into
 * the wire-format structure; on the relevant platforms this byte swap
 * is symmetric with htons(), but confirm intent against the full
 * source.
 */
204 port_info->lid = ntohs ( ibdev->lid );
205 port_info->mastersm_lid = ntohs ( ibdev->sm_lid );
206 port_info->local_port_num = ibdev->port;
207 port_info->link_width_enabled = ibdev->link_width_enabled;
208 port_info->link_width_supported = ibdev->link_width_supported;
209 port_info->link_width_active = ibdev->link_width_active;
/* Several PortInfo fields pack two 4-bit values into one byte */
210 port_info->link_speed_supported__port_state =
211 ( ( ibdev->link_speed_supported << 4 ) | ibdev->port_state );
212 port_info->port_phys_state__link_down_def_state =
213 ( ( IB_PORT_PHYS_STATE_POLLING << 4 ) |
214 IB_PORT_PHYS_STATE_POLLING );
215 port_info->link_speed_active__link_speed_enabled =
216 ( ( ibdev->link_speed_active << 4 ) |
217 ibdev->link_speed_enabled );
218 port_info->neighbour_mtu__mastersm_sl =
219 ( ( IB_MTU_2048 << 4 ) | ibdev->sm_sl );
220 port_info->vl_cap__init_type = ( IB_VL_0 << 4 );
221 port_info->init_type_reply__mtu_cap = IB_MTU_2048;
222 port_info->operational_vls__enforcement = ( IB_VL_0 << 4 );
/* Matches the single GUID reported by ib_sma_get_guid_info() */
223 port_info->guid_cap = 1;
225 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
226 return ib_sma_dr_response ( gma, mad );
230 * Set port information
232 * @v gma General management agent
234 * @ret response MAD response
/* Handles SubnSet(PortInfo): records the SM-assigned LID, SM LID, SL
 * and enabled link width/speed, pushes the update to the device driver,
 * then replies with the (updated) port information.
 * NOTE(review): this view is incomplete — the declaration of "rc" and
 * the left-hand side of the status assignment near the end are not
 * visible (presumably "mad->hdr.status ="); confirm against the full
 * source before editing.
 */
236 static union ib_mad * ib_sma_set_port_info ( struct ib_gma *gma,
237 union ib_mad *mad ) {
238 struct ib_device *ibdev = gma->ibdev;
239 const struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
240 unsigned int link_width_enabled;
241 unsigned int link_speed_enabled;
244 memcpy ( &ibdev->gid.u.half[0], port_info->gid_prefix,
245 sizeof ( ibdev->gid.u.half[0] ) );
246 ibdev->lid = ntohs ( port_info->lid );
247 ibdev->sm_lid = ntohs ( port_info->mastersm_lid );
/* A zero value means "no change" for the enabled width/speed */
248 if ( ( link_width_enabled = port_info->link_width_enabled ) )
249 ibdev->link_width_enabled = link_width_enabled;
250 if ( ( link_speed_enabled =
251 ( port_info->link_speed_active__link_speed_enabled & 0xf ) ) )
252 ibdev->link_speed_enabled = link_speed_enabled;
253 ibdev->sm_sl = ( port_info->neighbour_mtu__mastersm_sl & 0xf );
254 DBGC ( gma, "GMA %p set LID %04x SMLID %04x link width %02x speed "
255 "%02x\n", gma, ibdev->lid, ibdev->sm_lid,
256 ibdev->link_width_enabled, ibdev->link_speed_enabled );
/* Propagate the new settings to the underlying device */
258 if ( ( rc = ib_set_port_info ( ibdev, mad ) ) != 0 ) {
259 DBGC ( gma, "GMA %p could not set port information: %s\n",
260 gma, strerror ( rc ) );
262 htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
/* Respond with the current port information */
265 return ib_sma_get_port_info ( gma, mad );
269 * Get partition key table
271 * @v gma General management agent
273 * @ret response MAD response
/* Handles SubnGet(P_KeyTable): reports a single-entry partition key
 * table containing the device's current pkey.
 * NOTE(review): "mad->hdr.method = IB_MGMT_METHOD_GET_RESP" appears
 * twice in this function — the first assignment is redundant (harmless,
 * but a candidate for removal once the full source can be verified).
 * Closing brace not visible in this view of the file.
 */
275 static union ib_mad * ib_sma_get_pkey_table ( struct ib_gma *gma,
276 union ib_mad *mad ) {
277 struct ib_device *ibdev = gma->ibdev;
278 struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
280 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
281 memset ( pkey_table, 0, sizeof ( *pkey_table ) );
282 pkey_table->pkey[0] = htons ( ibdev->pkey );
284 mad->hdr.method = IB_MGMT_METHOD_GET_RESP;
285 return ib_sma_dr_response ( gma, mad );
289 * Set partition key table
291 * @v gma General management agent
293 * @ret response MAD response
/* Handles SubnSet(P_KeyTable): adopts the first pkey from the supplied
 * table, then replies via the Get handler so the response reflects the
 * stored value.
 * NOTE(review): closing brace not visible in this view of the file.
 */
295 static union ib_mad * ib_sma_set_pkey_table ( struct ib_gma *gma,
296 union ib_mad *mad ) {
297 struct ib_device *ibdev = gma->ibdev;
298 struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
300 ibdev->pkey = ntohs ( pkey_table->pkey[0] );
302 return ib_sma_get_pkey_table ( gma, mad );
/* Dispatch table consumed by ib_handle_mad(); entries are matched on
 * (mgmt_class & ~ignore, class_version, method, attr_id).
 * IB_SMP_CLASS_IGNORE makes LID-routed and directed-route subnet
 * management classes match the same handlers.
 * NOTE(review): this view is incomplete — the per-entry brace/comma
 * separators are not visible; confirm against the full source before
 * editing.
 */
305 /** List of attribute handlers */
306 struct ib_gma_handler ib_sma_handlers[] __ib_gma_handler = {
308 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
309 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
310 .class_version = IB_SMP_CLASS_VERSION,
311 .method = IB_MGMT_METHOD_GET,
312 .attr_id = htons ( IB_SMP_ATTR_NODE_INFO ),
313 .handle = ib_sma_get_node_info,
316 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
317 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
318 .class_version = IB_SMP_CLASS_VERSION,
319 .method = IB_MGMT_METHOD_GET,
320 .attr_id = htons ( IB_SMP_ATTR_NODE_DESC ),
321 .handle = ib_sma_get_node_desc,
324 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
325 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
326 .class_version = IB_SMP_CLASS_VERSION,
327 .method = IB_MGMT_METHOD_GET,
328 .attr_id = htons ( IB_SMP_ATTR_GUID_INFO ),
329 .handle = ib_sma_get_guid_info,
332 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
333 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
334 .class_version = IB_SMP_CLASS_VERSION,
335 .method = IB_MGMT_METHOD_GET,
336 .attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
337 .handle = ib_sma_get_port_info,
340 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
341 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
342 .class_version = IB_SMP_CLASS_VERSION,
343 .method = IB_MGMT_METHOD_SET,
344 .attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
345 .handle = ib_sma_set_port_info,
348 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
349 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
350 .class_version = IB_SMP_CLASS_VERSION,
351 .method = IB_MGMT_METHOD_GET,
352 .attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
353 .handle = ib_sma_get_pkey_table,
356 .mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
357 .mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
358 .class_version = IB_SMP_CLASS_VERSION,
359 .method = IB_MGMT_METHOD_SET,
360 .attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
361 .handle = ib_sma_set_pkey_table,
365 /*****************************************************************************
367 * General management agent
369 *****************************************************************************
373 * Call attribute handler
375 * @v gma General management agent
377 * @ret mad MAD response
/* Dispatches an incoming MAD to the first matching handler in the
 * IB_GMA_HANDLERS linker table.  mgmt_class_ignore masks off class bits
 * so LID-routed and directed-route SMP classes share handlers.  If no
 * handler matches, the MAD is turned into a Trap with an
 * "unsupported method/attribute" status.
 * NOTE(review): the closing braces and the final return statement are
 * not visible in this view of the file; confirm against the full
 * source.
 */
379 static union ib_mad * ib_handle_mad ( struct ib_gma *gma, union ib_mad *mad ) {
380 struct ib_mad_hdr *hdr = &mad->hdr;
381 struct ib_gma_handler *handler;
383 for_each_table_entry ( handler, IB_GMA_HANDLERS ) {
384 if ( ( ( handler->mgmt_class & ~handler->mgmt_class_ignore ) ==
385 ( hdr->mgmt_class & ~handler->mgmt_class_ignore ) ) &&
386 ( handler->class_version == hdr->class_version ) &&
387 ( handler->method == hdr->method ) &&
388 ( handler->attr_id == hdr->attr_id ) ) {
389 return handler->handle ( gma, mad );
/* No handler matched: reply with a trap/unsupported status */
393 hdr->method = IB_MGMT_METHOD_TRAP;
394 hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
399 * Complete GMA receive
402 * @v ibdev Infiniband device
404 * @v av Address vector
405 * @v iobuf I/O buffer
406 * @v rc Completion status code
/* Receive completion handler for the GMA queue pair.  Validates the
 * incoming MAD (size, base version), matches its TID against any
 * outstanding request (stopping that request's retry timer), dispatches
 * it via ib_handle_mad(), and sends the response by reusing the
 * received I/O buffer.
 * NOTE(review): this view is incomplete — the declaration/assignment of
 * "mad" and "hdr" from iobuf->data, several early-exit paths, the final
 * buffer free and closing braces are not visible; confirm against the
 * full source before editing.
 */
408 static void ib_gma_complete_recv ( struct ib_device *ibdev,
409 struct ib_queue_pair *qp,
410 struct ib_address_vector *av,
411 struct io_buffer *iobuf, int rc ) {
412 struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
413 struct ib_mad_request *request;
415 struct ib_mad_hdr *hdr;
416 union ib_mad *response;
/* Hardware reported a receive error */
420 DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
/* MADs are fixed-size; reject anything else */
425 if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
426 DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
427 gma, iob_len ( iobuf ) );
428 DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
433 if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
434 DBGC ( gma, "GMA %p unsupported base version %x\n",
435 gma, hdr->base_version );
436 DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
439 DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
440 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
441 hdr->mgmt_class, hdr->class_version, hdr->method,
442 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
443 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
445 /* Dequeue request if applicable */
446 list_for_each_entry ( request, &gma->requests, list ) {
/* Match the full 64-bit TID (magic + sequence number) */
447 if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
448 sizeof ( request->mad.hdr.tid ) ) == 0 ) {
449 stop_timer ( &request->timer );
450 list_del ( &request->list );
/* A NULL response means no reply should be sent */
457 if ( ( response = ib_handle_mad ( gma, mad ) ) == NULL )
460 /* Re-use I/O buffer for response */
461 memcpy ( mad, response, sizeof ( *mad ) );
462 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x) status "
463 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
464 hdr->mgmt_class, hdr->class_version, hdr->method,
465 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
466 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
468 /* Send MAD response, if applicable */
469 if ( ( rc = ib_post_send ( ibdev, qp, av,
470 iob_disown ( iobuf ) ) ) != 0 ) {
471 DBGC ( gma, "GMA %p could not send MAD response: %s\n",
472 gma, strerror ( rc ) );
/* Only receive completions need handling; send completions are
 * presumably defaulted/ignored — the remainder of this initialiser is
 * not visible in this view of the file.
 */
480 /** GMA completion operations */
481 static struct ib_completion_queue_operations ib_gma_completion_ops = {
482 .complete_recv = ib_gma_complete_recv,
486 * Transmit MAD request
488 * @v gma General management agent
489 * @v request MAD request
490 * @ret rc Return status code
/* Copies the request MAD into a freshly allocated I/O buffer and posts
 * it on the GMA queue pair using the request's address vector.
 * NOTE(review): this view is incomplete — the declaration of "rc", the
 * allocation-failure return, the iob_disown() argument of
 * ib_post_send(), error-path buffer free and closing braces are not
 * visible; confirm against the full source before editing.
 */
492 static int ib_gma_send ( struct ib_gma *gma, struct ib_mad_request *request ) {
493 struct io_buffer *iobuf;
496 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
497 gma, ntohl ( request->mad.hdr.tid[0] ),
498 ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
499 request->mad.hdr.class_version, request->mad.hdr.method,
500 ntohs ( request->mad.hdr.attr_id ) );
501 DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );
503 /* Construct I/O buffer */
504 iobuf = alloc_iob ( sizeof ( request->mad ) );
506 DBGC ( gma, "GMA %p could not allocate buffer for TID "
507 "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
508 ntohl ( request->mad.hdr.tid[1] ) );
511 memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
512 sizeof ( request->mad ) );
514 /* Send I/O buffer */
515 if ( ( rc = ib_post_send ( gma->ibdev, gma->qp, &request->av,
517 DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
518 gma, ntohl ( request->mad.hdr.tid[0] ),
519 ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
528 * Handle MAD request timer expiry
530 * @v timer Retry timer
531 * @v expired Failure indicator
/* Retry-timer callback: on final expiry the request is abandoned
 * (removed from the list and presumably freed — the free is not visible
 * in this view); otherwise the timer is restarted and the request is
 * retransmitted.
 * NOTE(review): the condition guarding the abandon path and the closing
 * braces are not visible in this view of the file; confirm against the
 * full source.
 */
533 static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
534 struct ib_mad_request *request =
535 container_of ( timer, struct ib_mad_request, timer );
536 struct ib_gma *gma = request->gma;
538 /* Abandon TID if we have tried too many times */
540 DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
541 gma, ntohl ( request->mad.hdr.tid[0] ),
542 ntohl ( request->mad.hdr.tid[1] ) );
543 list_del ( &request->list );
548 /* Restart retransmission timer */
549 start_timer ( timer );
/* Errors are ignored; the next expiry will retry again */
552 ib_gma_send ( gma, request );
/* Issue a MAD request via the GMA.
 * Allocates a request structure, fills in the destination (the supplied
 * address vector, or the subnet manager when av is NULL), stamps a
 * unique TID (magic signature + incrementing counter) and transmits it.
 * If retry is requested, the request is queued and its retry timer
 * started; response matching/dequeueing happens in
 * ib_gma_complete_recv().
 * NOTE(review): this view is incomplete — allocation-failure return,
 * the request->gma/mad assignments, the av==NULL branch structure, the
 * non-retry cleanup path and the final return are not visible; confirm
 * against the full source before editing.
 */
558 * @v gma General management agent
560 * @v av Destination address, or NULL for SM
561 * @v retry Request should be retried until a response arrives
562 * @ret rc Return status code
564 int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
565 struct ib_address_vector *av, int retry ) {
566 struct ib_device *ibdev = gma->ibdev;
567 struct ib_mad_request *request;
569 /* Allocate and initialise structure */
570 request = zalloc ( sizeof ( *request ) );
572 DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
576 request->timer.expired = ib_gma_timer_expired;
578 /* Determine address vector */
580 memcpy ( &request->av, av, sizeof ( request->av ) );
/* Default destination: the subnet manager's GMA QP */
582 request->av.lid = ibdev->sm_lid;
583 request->av.sl = ibdev->sm_sl;
584 request->av.qpn = IB_QPN_GMA;
585 request->av.qkey = IB_QKEY_GMA;
589 memcpy ( &request->mad, mad, sizeof ( request->mad ) );
/* TID = recognisable magic in the high word, sequence in the low */
592 request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
593 request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
595 /* Send initial request. Ignore errors; the retry timer will
596 * take care of those we care about.
598 ib_gma_send ( gma, request );
600 /* Add to list and start timer if applicable */
602 list_add ( &request->list, &gma->requests );
603 start_timer ( &request->timer );
/* Create a general management agent: allocates the GMA structure, a
 * completion queue and a queue pair (SMA or GMA qkey depending on
 * type), then pre-fills the receive ring.
 * NOTE(review): this view is incomplete — the "gma"/"qkey"
 * declarations, NULL-checks after each allocation, the success return
 * and the error-path labels around the destroy calls are not visible;
 * confirm against the full source before editing.
 */
614 * @v ibdev Infiniband device
615 * @v type Queue pair type
616 * @ret gma General management agent, or NULL
618 struct ib_gma * ib_create_gma ( struct ib_device *ibdev,
619 enum ib_queue_pair_type type ) {
623 /* Allocate and initialise fields */
624 gma = zalloc ( sizeof ( *gma ) );
628 INIT_LIST_HEAD ( &gma->requests );
630 /* Create completion queue */
631 gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
632 &ib_gma_completion_ops );
634 DBGC ( gma, "GMA %p could not allocate completion queue\n",
639 /* Create queue pair */
640 qkey = ( ( type == IB_QPT_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
641 gma->qp = ib_create_qp ( ibdev, type, IB_GMA_NUM_SEND_WQES, gma->cq,
642 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
644 DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
/* Allow ib_gma_complete_recv() to recover the GMA from the QP */
647 ib_qp_set_ownerdata ( gma->qp, gma );
649 DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );
651 /* Fill receive ring */
652 ib_refill_recv ( ibdev, gma->qp );
/* Error unwind path: tear down in reverse order of creation */
655 ib_destroy_qp ( ibdev, gma->qp );
657 ib_destroy_cq ( ibdev, gma->cq );
/* Destroy a general management agent: cancels all outstanding requests
 * (the _safe iterator permits deletion during traversal), then tears
 * down the queue pair and completion queue.
 * NOTE(review): the per-request free and the final free of gma itself
 * are not visible in this view of the file; confirm against the full
 * source.
 */
667 * @v gma General management agent
669 void ib_destroy_gma ( struct ib_gma *gma ) {
670 struct ib_device *ibdev = gma->ibdev;
671 struct ib_mad_request *request;
672 struct ib_mad_request *tmp;
674 /* Flush any outstanding requests */
675 list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
676 stop_timer ( &request->timer );
677 list_del ( &request->list );
681 ib_destroy_qp ( ibdev, gma->qp );
682 ib_destroy_cq ( ibdev, gma->cq );