/*
 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
FILE_LICENCE ( GPL2_OR_LATER );
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <gpxe/infiniband.h>
#include <gpxe/iobuf.h>
#include <gpxe/ib_gma.h>
/** @file
 *
 * Infiniband General Management Agent
 *
 */
/** A MAD request */
struct ib_mad_request {
	/** Associated GMA */
	struct ib_gma *gma;
	/** List of outstanding MAD requests */
	struct list_head list;
	/** Retry timer */
	struct retry_timer timer;
	/** Destination address */
	struct ib_address_vector av;
	/** MAD request */
	union ib_mad mad;
};
/** GMA number of send WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_SEND_WQES 4

/** GMA number of receive WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_RECV_WQES 2

/** GMA number of completion queue entries
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_CQES 8
/** GMA TID magic signature */
#define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )

/** TID to use for next MAD request */
static unsigned int next_request_tid;
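
/* Each outgoing request therefore carries a 64-bit TID of the form
 * { 'gPXE', counter }: for example, the first request issued will have
 * tid[0] = 0x67505845 and tid[1] = 1.  Responses are matched back to
 * outstanding requests by comparing the full TID (see
 * ib_gma_complete_recv() below).
 */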
/**
 * Handle MAD
 *
 * @v ibdev		Infiniband device
 * @v mad		MAD
 * @ret rc		Return status code
 *
 * Identifies the attribute handler registered for this MAD's
 * management class, class version, method and attribute ID (in
 * network byte order), and invokes it.
 */
static int ib_handle_mad ( struct ib_device *ibdev,
			   union ib_mad *mad ) {
	struct ib_mad_hdr *hdr = &mad->hdr;
	struct ib_mad_handler *handler;
	for_each_table_entry ( handler, IB_MAD_HANDLERS ) {
		if ( ( handler->mgmt_class == hdr->mgmt_class ) &&
		     ( handler->class_version == hdr->class_version ) &&
		     ( handler->method == hdr->method ) &&
		     ( handler->attr_id == hdr->attr_id ) ) {
			hdr->method = handler->resp_method;
			return handler->handle ( ibdev, mad );
		}
	}

	/* No handler found: flag the method/attribute as unsupported */
	hdr->method = IB_MGMT_METHOD_TRAP;
	hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
	return -ENOTSUP;
}
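
/* Handlers are registered via the IB_MAD_HANDLERS linker table.  As a
 * rough sketch (the declaration macro and field names follow struct
 * ib_mad_handler; ib_example_get_node_info is a hypothetical handler
 * function), a subnet management Get(NodeInfo) handler might look like:
 *
 *	static struct ib_mad_handler ib_example_handler __ib_mad_handler = {
 *		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
 *		.class_version = 1,
 *		.method = IB_MGMT_METHOD_GET,
 *		.resp_method = IB_MGMT_METHOD_GET_RESP,
 *		.attr_id = htons ( IB_SMP_ATTR_NODE_INFO ),
 *		.handle = ib_example_get_node_info,
 *	};
 *
 * ib_handle_mad() rewrites the method to resp_method and invokes the
 * handler whenever a matching MAD arrives.
 */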
/**
 * Complete GMA receive
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void ib_gma_complete_recv ( struct ib_device *ibdev,
				   struct ib_queue_pair *qp,
				   struct ib_address_vector *av,
				   struct io_buffer *iobuf, int rc ) {
	struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
	struct ib_mad_request *request;
	union ib_mad *mad;
	struct ib_mad_hdr *hdr;
	unsigned int hop_pointer;
	unsigned int hop_count;
	/* Ignore errors */
	if ( rc != 0 ) {
		DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
		goto out;
	}
	/* Sanity checks */
	if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
		DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
		       gma, iob_len ( iobuf ) );
		DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
		goto out;
	}
	mad = iobuf->data;
	hdr = &mad->hdr;
	if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
		DBGC ( gma, "GMA %p unsupported base version %x\n",
		       gma, hdr->base_version );
		DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
		goto out;
	}
	DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
	       "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
	       hdr->mgmt_class, hdr->class_version, hdr->method,
	       ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
	DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
	/* Dequeue request if applicable */
	list_for_each_entry ( request, &gma->requests, list ) {
		if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
			      sizeof ( request->mad.hdr.tid ) ) == 0 ) {
			stop_timer ( &request->timer );
			list_del ( &request->list );
			free ( request );
			break;
		}
	}
	/* Handle MAD, if possible */
	if ( ( rc = ib_handle_mad ( ibdev, mad ) ) != 0 ) {
		DBGC ( gma, "GMA %p could not handle TID %08x%08x: %s\n",
		       gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
		       strerror ( rc ) );
		/* Do not abort; we may want to send an error response */
	}

	/* Finish processing if we have no response to send */
	if ( ! hdr->method )
		goto out;
	DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n", gma,
	       ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ), hdr->mgmt_class,
	       hdr->class_version, hdr->method, ntohs ( hdr->attr_id ) );
	DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
	/* Set response fields for directed route SMPs */
	if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
		struct ib_mad_smp *smp = &mad->smp;

		hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
		hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
		hop_count = smp->mad_hdr.class_specific.smp.hop_count;
		assert ( hop_count == hop_pointer );
		if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
				     sizeof ( smp->return_path.hops[0] ) ) ) {
			smp->return_path.hops[hop_pointer] = ibdev->port;
		} else {
			DBGC ( gma, "GMA %p invalid hop pointer %d\n",
			       gma, hop_pointer );
			goto out;
		}
	}
	/* Send MAD response, if applicable */
	if ( ( rc = ib_post_send ( ibdev, qp, av,
				   iob_disown ( iobuf ) ) ) != 0 ) {
		DBGC ( gma, "GMA %p could not send MAD response: %s\n",
		       gma, strerror ( rc ) );
	}

 out:
	/* iob_disown() clears our pointer once the buffer has been
	 * handed to ib_post_send(), so this frees the buffer only on
	 * paths that never reached the send.
	 */
	free_iob ( iobuf );
}
/**
 * Complete GMA send
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void ib_gma_complete_send ( struct ib_device *ibdev __unused,
				   struct ib_queue_pair *qp,
				   struct io_buffer *iobuf, int rc ) {
	struct ib_gma *gma = ib_qp_get_ownerdata ( qp );

	if ( rc != 0 ) {
		DBGC ( gma, "GMA %p send completion error: %s\n",
		       gma, strerror ( rc ) );
	}
	free_iob ( iobuf );
}
/** GMA completion operations */
static struct ib_completion_queue_operations ib_gma_completion_ops = {
	.complete_send = ib_gma_complete_send,
	.complete_recv = ib_gma_complete_recv,
};
/**
 * Transmit MAD request
 *
 * @v gma		General management agent
 * @v request		MAD request
 * @ret rc		Return status code
 */
static int ib_gma_send ( struct ib_gma *gma, struct ib_mad_request *request ) {
	struct io_buffer *iobuf;
	int rc;

	DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
	       gma, ntohl ( request->mad.hdr.tid[0] ),
	       ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
	       request->mad.hdr.class_version, request->mad.hdr.method,
	       ntohs ( request->mad.hdr.attr_id ) );
	DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );
	/* Construct I/O buffer */
	iobuf = alloc_iob ( sizeof ( request->mad ) );
	if ( ! iobuf ) {
		DBGC ( gma, "GMA %p could not allocate buffer for TID "
		       "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
		       ntohl ( request->mad.hdr.tid[1] ) );
		return -ENOMEM;
	}
	memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
		 sizeof ( request->mad ) );
	/* Send I/O buffer */
	if ( ( rc = ib_post_send ( gma->ibdev, gma->qp, &request->av,
				   iobuf ) ) != 0 ) {
		DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
		       gma, ntohl ( request->mad.hdr.tid[0] ),
		       ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
		free_iob ( iobuf );
		return rc;
	}

	return 0;
}
/**
 * Handle MAD request timer expiry
 *
 * @v timer		Retry timer
 * @v expired		Failure indicator
 */
static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
	struct ib_mad_request *request =
		container_of ( timer, struct ib_mad_request, timer );
	struct ib_gma *gma = request->gma;
	/* Abandon TID if we have tried too many times */
	if ( expired ) {
		DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
		       gma, ntohl ( request->mad.hdr.tid[0] ),
		       ntohl ( request->mad.hdr.tid[1] ) );
		list_del ( &request->list );
		free ( request );
		return;
	}

	/* Restart retransmission timer */
	start_timer ( timer );

	/* Resend request */
	ib_gma_send ( gma, request );
}
/**
 * Issue MAD request
 *
 * @v gma		General management agent
 * @v mad		MAD request
 * @v av		Destination address, or NULL for SM
 * @v retry		Request should be retried until a response arrives
 * @ret rc		Return status code
 */
int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
		     struct ib_address_vector *av, int retry ) {
	struct ib_device *ibdev = gma->ibdev;
	struct ib_mad_request *request;
	/* Allocate and initialise structure */
	request = zalloc ( sizeof ( *request ) );
	if ( ! request ) {
		DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
		return -ENOMEM;
	}
	request->gma = gma;
	request->timer.expired = ib_gma_timer_expired;
	/* Determine address vector */
	if ( av ) {
		memcpy ( &request->av, av, sizeof ( request->av ) );
	} else {
		request->av.lid = ibdev->sm_lid;
		request->av.sl = ibdev->sm_sl;
		request->av.qpn = IB_QPN_GMA;
		request->av.qkey = IB_QKEY_GMA;
	}
	/* Copy in the caller's MAD */
	memcpy ( &request->mad, mad, sizeof ( request->mad ) );

	/* Assign a fresh transaction ID */
	request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
	request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
	/* Send initial request.  Ignore errors; the retry timer will
	 * take care of those we care about.
	 */
	ib_gma_send ( gma, request );

	/* Add to list and start timer if applicable */
	if ( retry ) {
		list_add ( &request->list, &gma->requests );
		start_timer ( &request->timer );
	} else {
		free ( request );
	}

	return 0;
}
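
/* A caller typically fills in a MAD and hands it to ib_gma_request().
 * As an illustrative sketch (the subnet administration constants and
 * the chosen attribute are examples, not taken from this file),
 * querying the subnet manager might look like:
 *
 *	union ib_mad mad;
 *
 *	memset ( &mad, 0, sizeof ( mad ) );
 *	mad.hdr.base_version = IB_MGMT_BASE_VERSION;
 *	mad.hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
 *	mad.hdr.class_version = 2;
 *	mad.hdr.method = IB_MGMT_METHOD_GET;
 *	mad.hdr.attr_id = htons ( IB_SA_ATTR_PATH_REC );
 *	ib_gma_request ( gma, &mad, NULL, 1 );
 *
 * Passing av=NULL directs the request to the subnet manager, and
 * retry=1 keeps the retry timer running until a response with a
 * matching TID arrives or the request is abandoned.
 */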
/**
 * Create GMA
 *
 * @v gma		General management agent
 * @v ibdev		Infiniband device
 * @v qkey		Queue key
 * @ret rc		Return status code
 */
int ib_create_gma ( struct ib_gma *gma, struct ib_device *ibdev,
		    unsigned long qkey ) {
	int rc;

	/* Initialise fields */
	memset ( gma, 0, sizeof ( *gma ) );
	gma->ibdev = ibdev;
	INIT_LIST_HEAD ( &gma->requests );
	/* Create completion queue */
	gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
				 &ib_gma_completion_ops );
	if ( ! gma->cq ) {
		DBGC ( gma, "GMA %p could not allocate completion queue\n",
		       gma );
		rc = -ENOMEM;
		goto err_create_cq;
	}
	/* Create queue pair */
	gma->qp = ib_create_qp ( ibdev, IB_GMA_NUM_SEND_WQES, gma->cq,
				 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
	if ( ! gma->qp ) {
		DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
		rc = -ENOMEM;
		goto err_create_qp;
	}
	ib_qp_set_ownerdata ( gma->qp, gma );

	DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );
	/* Fill receive ring */
	ib_refill_recv ( ibdev, gma->qp );
	return 0;

	ib_destroy_qp ( ibdev, gma->qp );
 err_create_qp:
	ib_destroy_cq ( ibdev, gma->cq );
 err_create_cq:
	return rc;
}
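
/* A GMA is created once per Infiniband device, typically during device
 * registration.  A rough usage sketch (the surrounding code is
 * illustrative, not taken from this file):
 *
 *	struct ib_gma gma;
 *	int rc;
 *
 *	if ( ( rc = ib_create_gma ( &gma, ibdev, IB_QKEY_GMA ) ) != 0 )
 *		return rc;
 *	...
 *	ib_destroy_gma ( &gma );
 *
 * The queue key passed here becomes the queue pair's qkey; IB_QKEY_GMA
 * is the well-known key used for general management datagrams.
 */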
/**
 * Destroy GMA
 *
 * @v gma		General management agent
 */
void ib_destroy_gma ( struct ib_gma *gma ) {
	struct ib_device *ibdev = gma->ibdev;
	struct ib_mad_request *request;
	struct ib_mad_request *tmp;
	/* Flush any outstanding requests */
	list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
		stop_timer ( &request->timer );
		list_del ( &request->list );
		free ( request );
	}

	/* Tear down queue pair and completion queue */
	ib_destroy_qp ( ibdev, gma->qp );
	ib_destroy_cq ( ibdev, gma->cq );
}