/*
 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <gpxe/infiniband.h>
#include <gpxe/iobuf.h>
#include <gpxe/ib_gma.h>
/** @file
 *
 * Infiniband General Management Agent
 *
 */
40 struct ib_mad_request {
43 /** List of outstanding MAD requests */
44 struct list_head list;
46 struct retry_timer timer;
47 /** Destination address */
48 struct ib_address_vector av;
/** GMA number of send WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_SEND_WQES 4

/** GMA number of receive WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_RECV_WQES 2

/** GMA number of completion queue entries
 *
 * This is a policy decision
 */
#define IB_GMA_NUM_CQES 8

/** GMA TID magic signature ("gPXE" packed into 32 bits) */
#define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )

/** TID to use for next MAD request
 *
 * Incremented on each request; combined with IB_GMA_TID_MAGIC to form
 * a TID that is recognisably ours.
 */
static unsigned int next_request_tid;
78 * Identify attribute handler
80 * @v mgmt_class Management class
81 * @v class_version Class version
83 * @v attr_id Attribute ID (in network byte order)
84 * @ret handler Attribute handler (or NULL)
86 static int ib_handle_mad ( struct ib_device *ibdev,
88 struct ib_mad_hdr *hdr = &mad->hdr;
89 struct ib_mad_handler *handler;
91 for_each_table_entry ( handler, IB_MAD_HANDLERS ) {
92 if ( ( handler->mgmt_class == hdr->mgmt_class ) &&
93 ( handler->class_version == hdr->class_version ) &&
94 ( handler->method == hdr->method ) &&
95 ( handler->attr_id == hdr->attr_id ) ) {
96 hdr->method = handler->resp_method;
97 return handler->handle ( ibdev, mad );
101 hdr->method = IB_MGMT_METHOD_TRAP;
102 hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
107 * Complete GMA receive
110 * @v ibdev Infiniband device
112 * @v av Address vector
113 * @v iobuf I/O buffer
114 * @v rc Completion status code
116 static void ib_gma_complete_recv ( struct ib_device *ibdev,
117 struct ib_queue_pair *qp,
118 struct ib_address_vector *av,
119 struct io_buffer *iobuf, int rc ) {
120 struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
121 struct ib_mad_request *request;
123 struct ib_mad_hdr *hdr;
124 unsigned int hop_pointer;
125 unsigned int hop_count;
129 DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
134 if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
135 DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
136 gma, iob_len ( iobuf ) );
137 DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
142 if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
143 DBGC ( gma, "GMA %p unsupported base version %x\n",
144 gma, hdr->base_version );
145 DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
148 DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
149 "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
150 hdr->mgmt_class, hdr->class_version, hdr->method,
151 ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
152 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
154 /* Dequeue request if applicable */
155 list_for_each_entry ( request, &gma->requests, list ) {
156 if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
157 sizeof ( request->mad.hdr.tid ) ) == 0 ) {
158 stop_timer ( &request->timer );
159 list_del ( &request->list );
165 /* Handle MAD, if possible */
166 if ( ( rc = ib_handle_mad ( ibdev, mad ) ) != 0 ) {
167 DBGC ( gma, "GMA %p could not handle TID %08x%08x: %s\n",
168 gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
170 /* Do not abort; we may want to send an error response */
173 /* Finish processing if we have no response to send */
177 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n", gma,
178 ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ), hdr->mgmt_class,
179 hdr->class_version, hdr->method, ntohs ( hdr->attr_id ) );
180 DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );
182 /* Set response fields for directed route SMPs */
183 if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
184 struct ib_mad_smp *smp = &mad->smp;
186 hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
187 hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
188 hop_count = smp->mad_hdr.class_specific.smp.hop_count;
189 assert ( hop_count == hop_pointer );
190 if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
191 sizeof ( smp->return_path.hops[0] ) ) ) {
192 smp->return_path.hops[hop_pointer] = ibdev->port;
194 DBGC ( gma, "GMA %p invalid hop pointer %d\n",
200 /* Construct return address */
201 av->qkey = ( ( av->qpn == IB_QPN_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
202 av->rate = IB_RATE_2_5;
204 /* Send MAD response, if applicable */
205 if ( ( rc = ib_post_send ( ibdev, qp, av,
206 iob_disown ( iobuf ) ) ) != 0 ) {
207 DBGC ( gma, "GMA %p could not send MAD response: %s\n",
208 gma, strerror ( rc ) );
220 * @v ibdev Infiniband device
222 * @v iobuf I/O buffer
223 * @v rc Completion status code
225 static void ib_gma_complete_send ( struct ib_device *ibdev __unused,
226 struct ib_queue_pair *qp,
227 struct io_buffer *iobuf, int rc ) {
228 struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
231 DBGC ( gma, "GMA %p send completion error: %s\n",
232 gma, strerror ( rc ) );
237 /** GMA completion operations */
238 static struct ib_completion_queue_operations ib_gma_completion_ops = {
239 .complete_send = ib_gma_complete_send,
240 .complete_recv = ib_gma_complete_recv,
244 * Handle MAD request timer expiry
246 * @v timer Retry timer
247 * @v expired Failure indicator
249 static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
250 struct ib_mad_request *request =
251 container_of ( timer, struct ib_mad_request, timer );
252 struct ib_gma *gma = request->gma;
253 struct ib_device *ibdev = gma->ibdev;
254 struct io_buffer *iobuf;
257 /* Abandon TID if we have tried too many times */
259 DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
260 gma, ntohl ( request->mad.hdr.tid[0] ),
261 ntohl ( request->mad.hdr.tid[1] ) );
262 list_del ( &request->list );
267 DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
268 gma, ntohl ( request->mad.hdr.tid[0] ),
269 ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
270 request->mad.hdr.class_version, request->mad.hdr.method,
271 ntohs ( request->mad.hdr.attr_id ) );
272 DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );
274 /* Restart retransmission timer */
275 start_timer ( timer );
277 /* Construct I/O buffer */
278 iobuf = alloc_iob ( sizeof ( request->mad ) );
280 DBGC ( gma, "GMA %p could not allocate buffer for TID "
281 "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
282 ntohl ( request->mad.hdr.tid[1] ) );
285 memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
286 sizeof ( request->mad ) );
288 /* Post send request */
289 if ( ( rc = ib_post_send ( ibdev, gma->qp, &request->av,
291 DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
292 gma, ntohl ( request->mad.hdr.tid[0] ),
293 ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
302 * @v gma General management agent
304 * @v av Destination address, or NULL for SM
305 * @ret rc Return status code
307 int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
308 struct ib_address_vector *av ) {
309 struct ib_device *ibdev = gma->ibdev;
310 struct ib_mad_request *request;
312 /* Allocate and initialise structure */
313 request = zalloc ( sizeof ( *request ) );
315 DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
319 list_add ( &request->list, &gma->requests );
320 request->timer.expired = ib_gma_timer_expired;
322 /* Determine address vector */
324 memcpy ( &request->av, av, sizeof ( request->av ) );
326 request->av.lid = ibdev->sm_lid;
327 request->av.sl = ibdev->sm_sl;
328 request->av.qpn = IB_QPN_GMA;
329 request->av.qkey = IB_QKEY_GMA;
333 memcpy ( &request->mad, mad, sizeof ( request->mad ) );
336 request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
337 request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
339 /* Start timer to initiate transmission */
340 start_timer_nodelay ( &request->timer );
348 * @v gma General management agent
349 * @v ibdev Infiniband device
351 * @ret rc Return status code
353 int ib_create_gma ( struct ib_gma *gma, struct ib_device *ibdev,
354 unsigned long qkey ) {
357 /* Initialise fields */
358 memset ( gma, 0, sizeof ( *gma ) );
360 INIT_LIST_HEAD ( &gma->requests );
362 /* Create completion queue */
363 gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
364 &ib_gma_completion_ops );
366 DBGC ( gma, "GMA %p could not allocate completion queue\n",
372 /* Create queue pair */
373 gma->qp = ib_create_qp ( ibdev, IB_GMA_NUM_SEND_WQES, gma->cq,
374 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
376 DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
380 ib_qp_set_ownerdata ( gma->qp, gma );
382 DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );
384 /* Fill receive ring */
385 ib_refill_recv ( ibdev, gma->qp );
388 ib_destroy_qp ( ibdev, gma->qp );
390 ib_destroy_cq ( ibdev, gma->cq );
398 * @v gma General management agent
400 void ib_destroy_gma ( struct ib_gma *gma ) {
401 struct ib_device *ibdev = gma->ibdev;
402 struct ib_mad_request *request;
403 struct ib_mad_request *tmp;
405 /* Flush any outstanding requests */
406 list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
407 stop_timer ( &request->timer );
408 list_del ( &request->list );
412 ib_destroy_qp ( ibdev, gma->qp );
413 ib_destroy_cq ( ibdev, gma->cq );