2 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 FILE_LICENCE ( GPL2_OR_LATER );
28 #include <gpxe/infiniband.h>
29 #include <gpxe/iobuf.h>
30 #include <gpxe/ib_gma.h>
/** @file
 *
 * Infiniband General Management Agent
 *
 */
/** An outstanding MAD request awaiting a response */
40 struct ib_mad_request {
	/** List of outstanding MAD requests
	 *
	 * Links this request into the owning GMA's requests list.
	 */
44 struct list_head list;
	/** Retransmission timer used to resend the request */
46 struct retry_timer timer;
	/** Destination address vector for the request */
48 struct ib_address_vector av;
	/* NOTE(review): this definition appears truncated — fields
	 * referenced elsewhere in this file (request->gma, request->mad)
	 * and the closing brace are not visible here; verify against the
	 * upstream source.
	 */
/** GMA number of send WQEs
 *
 * This is a policy decision.
 */
57 #define IB_GMA_NUM_SEND_WQES 4
/** GMA number of receive WQEs
 *
 * This is a policy decision.
 */
63 #define IB_GMA_NUM_RECV_WQES 2
/** GMA number of completion queue entries
 *
 * This is a policy decision.
 */
69 #define IB_GMA_NUM_CQES 8
/** GMA transaction ID magic signature
 *
 * Spells "gPXE" across the high word of the TID, marking transaction
 * IDs generated by this agent.
 */
72 #define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )
/** Monotonically increasing counter used as the low word of the TID
 * for the next MAD request (see ib_gma_request()).
 */
75 static unsigned int next_request_tid;
/**
 * Handle MAD by dispatching to a registered attribute handler
 *
 * @v ibdev		Infiniband device
 *
 * Walks the IB_MAD_HANDLERS linker table looking for a handler whose
 * management class, class version, method and attribute ID (attribute
 * ID compared in network byte order) all match the received MAD
 * header.  On a match, the header's method is rewritten to the
 * handler's response method and the handler is invoked.  If no
 * handler matches, the MAD is converted into a trap with an
 * "unsupported method/attribute" status.
 *
 * NOTE(review): this view is truncated — the remainder of the
 * parameter list (presumably including the MAD itself, referenced
 * below as "mad"), some closing braces and the final return statement
 * are not visible here; verify against the upstream source.
 */
86 static int ib_handle_mad ( struct ib_device *ibdev,
88 struct ib_mad_hdr *hdr = &mad->hdr;
89 struct ib_mad_handler *handler;
	/* Try each registered MAD handler in turn */
91 for_each_table_entry ( handler, IB_MAD_HANDLERS ) {
92 if ( ( handler->mgmt_class == hdr->mgmt_class ) &&
93 ( handler->class_version == hdr->class_version ) &&
94 ( handler->method == hdr->method ) &&
95 ( handler->attr_id == hdr->attr_id ) ) {
	/* Matching handler found: rewrite method for the response
	 * and delegate to the handler.
	 */
96 hdr->method = handler->resp_method;
97 return handler->handle ( ibdev, mad );
	/* No handler matched: report an unsupported method/attribute
	 * via a trap.
	 */
101 hdr->method = IB_MGMT_METHOD_TRAP;
102 hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
/**
 * Complete GMA receive
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 *
 * Validates the received MAD (size and base version), dequeues any
 * outstanding request with a matching TID, dispatches the MAD to its
 * handler via ib_handle_mad(), fixes up directed-route SMP response
 * fields, and posts the response MAD back on the same queue pair.
 *
 * NOTE(review): this view is truncated — error-exit paths (e.g. the
 * buffer free / early returns after the validation failures), several
 * closing braces and some declarations (e.g. "mad") are not visible
 * here; verify against the upstream source.
 */
116 static void ib_gma_complete_recv ( struct ib_device *ibdev,
117 struct ib_queue_pair *qp,
118 struct ib_address_vector *av,
119 struct io_buffer *iobuf, int rc ) {
120 struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
121 struct ib_mad_request *request;
123 struct ib_mad_hdr *hdr;
124 unsigned int hop_pointer;
125 unsigned int hop_count;
	/* Report hardware-level receive errors */
129 DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
	/* Sanity check: a MAD must be exactly one MAD-sized datagram */
134 if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
135 DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
136 gma, iob_len ( iobuf ) );
137 DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
	/* Reject MADs with an unsupported base version */
142 if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
143 DBGC ( gma, "GMA %p unsupported base version %x\n",
144 gma, hdr->base_version );
145 DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
148 DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
149 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
150 hdr->mgmt_class, hdr->class_version, hdr->method,
151 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
152 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
	/* Dequeue any outstanding request whose TID matches this MAD:
	 * the response has arrived, so stop retransmitting.
	 */
155 list_for_each_entry ( request, &gma->requests, list ) {
156 if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
157 sizeof ( request->mad.hdr.tid ) ) == 0 ) {
158 stop_timer ( &request->timer );
159 list_del ( &request->list );
	/* Handle MAD, if possible */
166 if ( ( rc = ib_handle_mad ( ibdev, mad ) ) != 0 ) {
167 DBGC ( gma, "GMA %p could not handle TID %08x%08x: %s\n",
168 gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
	/* Do not abort; we may want to send an error response */
	/* Finish processing if we have no response to send */
177 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n", gma,
178 ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ), hdr->mgmt_class,
179 hdr->class_version, hdr->method, ntohs ( hdr->attr_id ) );
180 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
	/* Set response fields for directed route SMPs: mark the status
	 * direction bit as inbound and record this port in the return
	 * path at the current hop pointer.
	 */
183 if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
184 struct ib_mad_smp *smp = &mad->smp;
186 hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
187 hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
188 hop_count = smp->mad_hdr.class_specific.smp.hop_count;
189 assert ( hop_count == hop_pointer );
	/* Bounds-check the hop pointer before indexing return_path */
190 if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
191 sizeof ( smp->return_path.hops[0] ) ) ) {
192 smp->return_path.hops[hop_pointer] = ibdev->port;
194 DBGC ( gma, "GMA %p invalid hop pointer %d\n",
	/* Send MAD response, reusing the receive buffer; iob_disown()
	 * transfers buffer ownership to ib_post_send().
	 */
201 if ( ( rc = ib_post_send ( ibdev, qp, av,
202 iob_disown ( iobuf ) ) ) != 0 ) {
203 DBGC ( gma, "GMA %p could not send MAD response: %s\n",
204 gma, strerror ( rc ) );
/**
 * Complete GMA send
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 *
 * Send completions require no action beyond logging errors; the
 * buffer disposal is presumably handled after this view's truncation
 * point — TODO confirm against upstream source.
 */
221 static void ib_gma_complete_send ( struct ib_device *ibdev __unused,
222 struct ib_queue_pair *qp,
223 struct io_buffer *iobuf, int rc ) {
224 struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
	/* Log transmit errors for debugging */
227 DBGC ( gma, "GMA %p send completion error: %s\n",
228 gma, strerror ( rc ) );
/** GMA completion operations
 *
 * Completion queue callbacks wiring GMA send/receive completions to
 * the handlers above.
 */
234 static struct ib_completion_queue_operations ib_gma_completion_ops = {
235 .complete_send = ib_gma_complete_send,
236 .complete_recv = ib_gma_complete_recv,
	/* NOTE(review): closing "};" not visible in this truncated view */
/**
 * Handle MAD request timer expiry
 *
 * @v timer		Retry timer
 * @v expired		Failure indicator
 *
 * (Re)transmits an outstanding MAD request: abandons the request if
 * it has been retried too many times, otherwise restarts the timer,
 * copies the stored MAD into a freshly allocated I/O buffer and posts
 * it on the GMA queue pair.
 *
 * NOTE(review): this view is truncated — the retry-count condition,
 * the "rc" declaration, several closing braces and the trailing
 * argument of ib_post_send() are not visible here; verify against
 * the upstream source.
 */
245 static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
246 struct ib_mad_request *request =
247 container_of ( timer, struct ib_mad_request, timer );
248 struct ib_gma *gma = request->gma;
249 struct ib_device *ibdev = gma->ibdev;
250 struct io_buffer *iobuf;
	/* Abandon TID if we have tried too many times */
255 DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
256 gma, ntohl ( request->mad.hdr.tid[0] ),
257 ntohl ( request->mad.hdr.tid[1] ) );
258 list_del ( &request->list );
	/* Log the (re)transmission attempt */
263 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
264 gma, ntohl ( request->mad.hdr.tid[0] ),
265 ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
266 request->mad.hdr.class_version, request->mad.hdr.method,
267 ntohs ( request->mad.hdr.attr_id ) );
268 DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );
	/* Restart retransmission timer */
271 start_timer ( timer );
	/* Construct I/O buffer holding a copy of the stored MAD */
274 iobuf = alloc_iob ( sizeof ( request->mad ) );
276 DBGC ( gma, "GMA %p could not allocate buffer for TID "
277 "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
278 ntohl ( request->mad.hdr.tid[1] ) );
281 memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
282 sizeof ( request->mad ) );
	/* Post send request */
285 if ( ( rc = ib_post_send ( ibdev, gma->qp, &request->av,
287 DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
288 gma, ntohl ( request->mad.hdr.tid[0] ),
289 ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
/**
 * Issue a MAD request
 *
 * @v gma		General management agent
 * @v mad		MAD to send
 * @v av		Destination address, or NULL for SM
 * @ret rc		Return status code
 *
 * Queues a copy of the MAD as an outstanding request, stamps it with
 * a freshly generated TID (magic signature plus an incrementing
 * counter), and starts the retry timer with no delay so that the
 * first transmission happens from ib_gma_timer_expired().
 *
 * NOTE(review): this view is truncated — the allocation-failure
 * branch body, the NULL-av branch structure around the SM defaults,
 * and the final return statement are not visible here; verify
 * against the upstream source.
 */
303 int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
304 struct ib_address_vector *av ) {
305 struct ib_device *ibdev = gma->ibdev;
306 struct ib_mad_request *request;
	/* Allocate and initialise structure */
309 request = zalloc ( sizeof ( *request ) );
311 DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
315 list_add ( &request->list, &gma->requests );
316 request->timer.expired = ib_gma_timer_expired;
	/* Determine address vector: copy the caller's, or default to
	 * the subnet manager's LID/SL and the well-known GMA QPN/QKey.
	 */
320 memcpy ( &request->av, av, sizeof ( request->av ) );
322 request->av.lid = ibdev->sm_lid;
323 request->av.sl = ibdev->sm_sl;
324 request->av.qpn = IB_QPN_GMA;
325 request->av.qkey = IB_QKEY_GMA;
	/* Copy MAD and stamp it with a unique transaction ID */
329 memcpy ( &request->mad, mad, sizeof ( request->mad ) );
332 request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
333 request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
	/* Start timer to initiate transmission */
336 start_timer_nodelay ( &request->timer );
/**
 * Create GMA
 *
 * @v gma		General management agent
 * @v ibdev		Infiniband device
 * @v qkey		Queue key
 * @ret rc		Return status code
 *
 * Zeroes the GMA structure, creates its completion queue and queue
 * pair, attaches the GMA as the queue pair's owner data, and fills
 * the receive ring.  The trailing ib_destroy_qp()/ib_destroy_cq()
 * calls are the error-unwind path (their goto labels are not visible
 * in this truncated view).
 *
 * NOTE(review): error-branch bodies, goto labels and return
 * statements are missing from this view; verify against the
 * upstream source.
 */
349 int ib_create_gma ( struct ib_gma *gma, struct ib_device *ibdev,
350 unsigned long qkey ) {
	/* Initialise fields */
354 memset ( gma, 0, sizeof ( *gma ) );
356 INIT_LIST_HEAD ( &gma->requests );
	/* Create completion queue */
359 gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
360 &ib_gma_completion_ops );
362 DBGC ( gma, "GMA %p could not allocate completion queue\n",
	/* Create queue pair (send and receive both use the same CQ) */
369 gma->qp = ib_create_qp ( ibdev, IB_GMA_NUM_SEND_WQES, gma->cq,
370 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
372 DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
	/* Allow completion handlers to recover the GMA from the QP */
376 ib_qp_set_ownerdata ( gma->qp, gma );
378 DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );
	/* Fill receive ring */
381 ib_refill_recv ( ibdev, gma->qp );
	/* Error unwind: tear down QP then CQ */
384 ib_destroy_qp ( ibdev, gma->qp );
386 ib_destroy_cq ( ibdev, gma->cq );
/**
 * Destroy GMA
 *
 * @v gma		General management agent
 *
 * Stops and unlinks every outstanding MAD request, then destroys the
 * queue pair and completion queue.
 *
 * NOTE(review): the body of the flush loop is truncated — the
 * request free and closing brace are not visible here; verify
 * against the upstream source.
 */
396 void ib_destroy_gma ( struct ib_gma *gma ) {
397 struct ib_device *ibdev = gma->ibdev;
398 struct ib_mad_request *request;
399 struct ib_mad_request *tmp;
	/* Flush any outstanding requests (safe iteration: entries are
	 * removed from the list while walking it).
	 */
402 list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
403 stop_timer ( &request->timer );
404 list_del ( &request->list );
	/* Tear down QP then CQ */
408 ib_destroy_qp ( ibdev, gma->qp );
409 ib_destroy_cq ( ibdev, gma->cq );