1 /*
2  * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
3  * Copyright (C) 2008 Vladislav Bolkhovitin <vst@vlnb.net>
4  * Copyright (C) 2008 - 2009 Bart Van Assche <bart.vanassche@gmail.com>
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  */
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/err.h>
40 #include <linux/ctype.h>
41 #include <linux/string.h>
42 #include <linux/kthread.h>
43 #include <asm/atomic.h>
44 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
45 #include <linux/proc_fs.h>
46 #include <linux/seq_file.h>
47 #endif
48 #include "ib_srpt.h"
49 #include "scst_debug.h"
50
51 /* Name of this kernel module. */
52 #define DRV_NAME                "ib_srpt"
53 /* Prefix for printk() kernel messages. */
54 #define PFX                     DRV_NAME ": "
55 #define DRV_VERSION             "1.0.1"
56 #define DRV_RELDATE             "July 10, 2008"
57 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
58 /* Flags to be used in SCST debug tracing statements. */
59 #define DEFAULT_SRPT_TRACE_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR \
60                                   | TRACE_MGMT | TRACE_SPECIAL)
61 /* Name of the entry that will be created under /proc/scsi_tgt/ib_srpt. */
62 #define SRPT_PROC_TRACE_LEVEL_NAME      "trace_level"
63 #endif
64
65 #define MELLANOX_SRPT_ID_STRING "Mellanox OFED SRP target"
66
67 MODULE_AUTHOR("Vu Pham");
68 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
69                    "v" DRV_VERSION " (" DRV_RELDATE ")");
70 MODULE_LICENSE("Dual BSD/GPL");
71
72 struct srpt_thread {
73         /* Protects thread_ioctx_list. */
74         spinlock_t thread_lock;
75         /* I/O contexts to be processed by the kernel thread. */
76         struct list_head thread_ioctx_list;
77         /* SRPT kernel thread. */
78         struct task_struct *thread;
79 };
80
81 /*
82  * Global Variables
83  */
84
85 static u64 global_ioc_guid;
86 /* Number of srpt_device structures. */
87 static atomic_t srpt_device_count;
88 static int thread;
89 static int one_guid_per_ioc;
90 static struct srpt_thread srpt_thread;
91 static DECLARE_WAIT_QUEUE_HEAD(ioctx_list_waitQ);
92 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
93 static unsigned long trace_flag = DEFAULT_SRPT_TRACE_FLAGS;
94 module_param(trace_flag, long, 0644);
95 MODULE_PARM_DESC(trace_flag,
96                  "Trace flags for the ib_srpt kernel module.");
97 #endif
98
99 module_param(thread, int, 0444);
100 MODULE_PARM_DESC(thread,
101                  "Process I/O contexts in kernel thread context instead of "
102                  "soft IRQ context where possible (default: 0).");
103 module_param(one_guid_per_ioc, bool, 0444);
104 MODULE_PARM_DESC(one_guid_per_ioc,
105                  "Assign a unique GUID to each IOC instead of using one GUID "
106                  "for all IOCs.");
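/*
 * Example (an assumed invocation, not taken from this file): loading the
 * module with per-IOC GUIDs and thread-context processing enabled:
 *
 *   modprobe ib_srpt one_guid_per_ioc=1 thread=1
 */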
107
108 static void srpt_add_one(struct ib_device *device);
109 static void srpt_remove_one(struct ib_device *device);
110 static void srpt_unregister_mad_agent(struct srpt_device *sdev);
111 static void srpt_unregister_procfs_entry(struct scst_tgt_template *tgt);
112
113 static struct ib_client srpt_client = {
114         .name = DRV_NAME,
115         .add = srpt_add_one,
116         .remove = srpt_remove_one
117 };
118
119 /**
120  * Atomically test and set the channel state.
121  * @ch: RDMA channel.
122  * @old: channel state to compare with.
123  * @new: state to change the channel state to if the current state matches the
124  *       argument 'old'.
125  *
126  * Returns true if the channel state matched old upon entry of this function,
127  * and false otherwise.
128  */
129 static bool srpt_test_and_set_channel_state(struct srpt_rdma_ch *ch,
130                                             enum rdma_ch_state old,
131                                             enum rdma_ch_state new)
132 {
133         unsigned long flags;
134         enum rdma_ch_state cur;
135
136         spin_lock_irqsave(&ch->spinlock, flags);
137         cur = ch->state;
138         if (cur == old)
139                 ch->state = new;
140         spin_unlock_irqrestore(&ch->spinlock, flags);
141
142         return cur == old;
143 }
144
145 /*
146  * Callback function called by the InfiniBand core when an asynchronous IB
147  * event occurs. This callback may occur in interrupt context. See also
148  * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
149  * Architecture Specification.
150  */
151 static void srpt_event_handler(struct ib_event_handler *handler,
152                                struct ib_event *event)
153 {
154         struct srpt_device *sdev;
155         struct srpt_port *sport;
156
157         sdev = ib_get_client_data(event->device, &srpt_client);
158         if (!sdev || sdev->device != event->device)
159                 return;
160
161         TRACE_DBG("ASYNC event= %d on device= %s",
162                   event->event, sdev->device->name);
163
164         switch (event->event) {
165         case IB_EVENT_PORT_ERR:
166                 if (event->element.port_num <= sdev->device->phys_port_cnt) {
167                         sport = &sdev->port[event->element.port_num - 1];
168                         sport->lid = 0;
169                         sport->sm_lid = 0;
170                 }
171                 break;
172         case IB_EVENT_PORT_ACTIVE:
173         case IB_EVENT_LID_CHANGE:
174         case IB_EVENT_PKEY_CHANGE:
175         case IB_EVENT_SM_CHANGE:
176         case IB_EVENT_CLIENT_REREGISTER:
177                 /*
178                  * Refresh port data asynchronously. Note: it is safe to call
179                  * schedule_work() even if &sport->work is already on the
180                  * global workqueue because schedule_work() tests for the
181                  * work_pending() condition before adding &sport->work to the
182                  * global work queue.
183                  */
184                 if (event->element.port_num <= sdev->device->phys_port_cnt) {
185                         sport = &sdev->port[event->element.port_num - 1];
186                         if (!sport->lid && !sport->sm_lid)
187                                 schedule_work(&sport->work);
188                 }
189                 break;
190         default:
191                 break;
192         }
193
194 }
195
196 /*
197  * Callback function called by the InfiniBand core for SRQ (shared receive
198  * queue) events.
199  */
200 static void srpt_srq_event(struct ib_event *event, void *ctx)
201 {
202         TRACE_DBG("SRQ event %d", event->event);
203 }
204
205 /*
206  * Callback function called by the InfiniBand core for QP (queue pair) events.
207  */
208 static void srpt_qp_event(struct ib_event *event, void *ctx)
209 {
210         struct srpt_rdma_ch *ch = ctx;
211
212         TRACE_DBG("QP event %d on cm_id=%p sess_name=%s state=%d",
213                   event->event, ch->cm_id, ch->sess_name, ch->state);
214
215         switch (event->event) {
216         case IB_EVENT_COMM_EST:
217 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) || defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
218                 ib_cm_notify(ch->cm_id, event->event);
219 #else
220                 /* Vanilla 2.6.19 kernel (or before) without OFED. */
221                 printk(KERN_ERR PFX "how to perform ib_cm_notify() on a"
222                         " vanilla 2.6.18 kernel ???\n");
223 #endif
224                 break;
225         case IB_EVENT_QP_LAST_WQE_REACHED:
226                 if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_LIVE,
227                                         RDMA_CHANNEL_DISCONNECTING)) {
228                         TRACE_DBG("%s", "Disconnecting channel.");
229                         ib_send_cm_dreq(ch->cm_id, NULL, 0);
230                 }
231                 break;
232         default:
233                 break;
234         }
235 }
236
237 /*
238  * Helper function for filling in an InfiniBand IOUnitInfo structure. Copies
239  * the lowest four bits of value in element slot of the array of four bit
240  * elements called c_list (controller list). The index slot is one-based.
241  *
242  * @pre 1 <= slot && 0 <= value && value < 16
243  */
244 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
245 {
246         u16 id;
247         u8 tmp;
248
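        /*
         * Two controller entries are packed per byte of c_list: odd-numbered
         * slots occupy the upper nibble and even-numbered slots the lower
         * nibble of byte (slot - 1) / 2.
         */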
249         id = (slot - 1) / 2;
250         if (slot & 0x1) {
251                 tmp = c_list[id] & 0xf;
252                 c_list[id] = (value << 4) | tmp;
253         } else {
254                 tmp = c_list[id] & 0xf0;
255                 c_list[id] = (value & 0xf) | tmp;
256         }
257 }
258
259 /*
260  * Write InfiniBand ClassPortInfo to mad. See also section 16.3.3.1
261  * ClassPortInfo in the InfiniBand Architecture Specification.
262  */
263 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
264 {
265         struct ib_class_port_info *cif;
266
267         cif = (struct ib_class_port_info *)mad->data;
268         memset(cif, 0, sizeof *cif);
269         cif->base_version = 1;
270         cif->class_version = 1;
271         cif->resp_time_value = 20;
272
273         mad->mad_hdr.status = 0;
274 }
275
276 /*
277  * Write IOUnitInfo to mad. See also section 16.3.3.3 IOUnitInfo in the
278  * InfiniBand Architecture Specification. See also section B.7,
279  * table B.6 in the T10 SRP r16a document.
280  */
281 static void srpt_get_iou(struct ib_dm_mad *mad)
282 {
283         struct ib_dm_iou_info *ioui;
284         u8 slot;
285         int i;
286
287         ioui = (struct ib_dm_iou_info *)mad->data;
288         ioui->change_id = 1;
289         ioui->max_controllers = 16;
290
291         /* set present for slot 1 and empty for the rest */
292         srpt_set_ioc(ioui->controller_list, 1, 1);
293         for (i = 1, slot = 2; i < 16; i++, slot++)
294                 srpt_set_ioc(ioui->controller_list, slot, 0);
295
296         mad->mad_hdr.status = 0;
297 }
298
299 /**
300  * Return the GUID that will be communicated to the initiator for identifying
301  * the SRPT target. Depending on the mode variable one_guid_per_ioc, either the
302  * GUID of the specified IOC is returned or the GUID of the first IOC.
303  */
304 static u64 srpt_get_ioc_guid(struct ib_device *device)
305 {
306         BUG_ON(!device);
307         WARN_ON(!global_ioc_guid);
308         WARN_ON(!device->node_guid);
309
310         return one_guid_per_ioc ? device->node_guid : global_ioc_guid;
311 }
312
313 /*
314  * Write IOControllerprofile to mad for I/O controller (sdev, slot). See also
315  * section 16.3.3.4 IOControllerProfile in the InfiniBand Architecture
316  * Specification. See also section B.7, table B.7 in the T10 SRP r16a
317  * document.
318  */
319 static void srpt_get_ioc(struct srpt_device *sdev, u32 slot,
320                          struct ib_dm_mad *mad)
321 {
322         struct ib_dm_ioc_profile *iocp;
323
324         iocp = (struct ib_dm_ioc_profile *)mad->data;
325
326         if (!slot || slot > 16) {
327                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
328                 return;
329         }
330
331         if (slot > 2) {
332                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
333                 return;
334         }
335
336         memset(iocp, 0, sizeof *iocp);
337         strcpy(iocp->id_string, MELLANOX_SRPT_ID_STRING);
338         iocp->guid = cpu_to_be64(srpt_get_ioc_guid(sdev->device));
339         iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
340         iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
341         iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
342         iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
343         iocp->subsys_device_id = 0x0;
344         iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
345         iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
346         iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
347         iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
348         iocp->send_queue_depth = cpu_to_be16(SRPT_SRQ_SIZE);
349         iocp->rdma_read_depth = 4;
350         iocp->send_size = cpu_to_be32(MAX_MESSAGE_SIZE);
351         iocp->rdma_size = cpu_to_be32(MAX_RDMA_SIZE);
352         iocp->num_svc_entries = 1;
353         iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
354                 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
355
356         mad->mad_hdr.status = 0;
357 }
358
359 /*
360  * Device management: write ServiceEntries to mad for the given slot. See also
361  * section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
362  * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
363  */
364 static void srpt_get_svc_entries(u64 ioc_guid,
365                                  u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
366 {
367         struct ib_dm_svc_entries *svc_entries;
368
369         WARN_ON(!ioc_guid);
370
371         if (!slot || slot > 16) {
372                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
373                 return;
374         }
375
376         if (slot > 2 || lo > hi || hi > 1) {
377                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
378                 return;
379         }
380
381         svc_entries = (struct ib_dm_svc_entries *)mad->data;
382         memset(svc_entries, 0, sizeof *svc_entries);
383         svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
384         snprintf(svc_entries->service_entries[0].name,
385                  sizeof(svc_entries->service_entries[0].name),
386                  "%s%016llx",
387                  SRP_SERVICE_NAME_PREFIX,
388                  (unsigned long long)ioc_guid);
389
390         mad->mad_hdr.status = 0;
391 }
392
393 /*
394  * Actual processing of a received MAD *rq_mad received through source port *sp
395  * (MAD = InfiniBand management datagram). The response to be sent back is
396  * written to *rsp_mad.
397  */
398 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
399                                  struct ib_dm_mad *rsp_mad)
400 {
401         u16 attr_id;
402         u32 slot;
403         u8 hi, lo;
404
405         attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
406         switch (attr_id) {
407         case DM_ATTR_CLASS_PORT_INFO:
408                 srpt_get_class_port_info(rsp_mad);
409                 break;
410         case DM_ATTR_IOU_INFO:
411                 srpt_get_iou(rsp_mad);
412                 break;
413         case DM_ATTR_IOC_PROFILE:
414                 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
415                 srpt_get_ioc(sp->sdev, slot, rsp_mad);
416                 break;
417         case DM_ATTR_SVC_ENTRIES:
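                /*
                 * The attribute modifier encodes the controller slot in bits
                 * 31:16 and the highest and lowest requested service-entry
                 * indices in bits 15:8 and 7:0 respectively.
                 */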
418                 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
419                 hi = (u8) ((slot >> 8) & 0xff);
420                 lo = (u8) (slot & 0xff);
421                 slot = (u16) ((slot >> 16) & 0xffff);
422                 srpt_get_svc_entries(srpt_get_ioc_guid(sp->sdev->device),
423                                      slot, hi, lo, rsp_mad);
424                 break;
425         default:
426                 rsp_mad->mad_hdr.status =
427                     cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
428                 break;
429         }
430 }
431
432 /*
433  * Callback function that is called by the InfiniBand core after transmission of
434  * a MAD. (MAD = management datagram; AH = address handle.)
435  */
436 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
437                                   struct ib_mad_send_wc *mad_wc)
438 {
439         ib_destroy_ah(mad_wc->send_buf->ah);
440         ib_free_send_mad(mad_wc->send_buf);
441 }
442
443 /*
444  * Callback function that is called by the InfiniBand core after reception of
445  * a MAD (management datagram).
446  */
447 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
448                                   struct ib_mad_recv_wc *mad_wc)
449 {
450         struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
451         struct ib_ah *ah;
452         struct ib_mad_send_buf *rsp;
453         struct ib_dm_mad *dm_mad;
454
455         if (!mad_wc || !mad_wc->recv_buf.mad)
456                 return;
457
458         ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
459                                   mad_wc->recv_buf.grh, mad_agent->port_num);
460         if (IS_ERR(ah))
461                 goto err;
462
463         BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
464
465         rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
466                                  mad_wc->wc->pkey_index, 0,
467                                  IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
468                                  GFP_KERNEL);
469         if (IS_ERR(rsp))
470                 goto err_rsp;
471
472         rsp->ah = ah;
473
474         dm_mad = rsp->mad;
475         memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
476         dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
477         dm_mad->mad_hdr.status = 0;
478
479         switch (mad_wc->recv_buf.mad->mad_hdr.method) {
480         case IB_MGMT_METHOD_GET:
481                 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
482                 break;
483         case IB_MGMT_METHOD_SET:
484                 dm_mad->mad_hdr.status =
485                     cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
486                 break;
487         default:
488                 dm_mad->mad_hdr.status =
489                     cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
490                 break;
491         }
492
493         if (!ib_post_send_mad(rsp, NULL)) {
494                 ib_free_recv_mad(mad_wc);
495                 /* will destroy_ah & free_send_mad in send completion */
496                 return;
497         }
498
499         ib_free_send_mad(rsp);
500
501 err_rsp:
502         ib_destroy_ah(ah);
503 err:
504         ib_free_recv_mad(mad_wc);
505 }
506
507 /*
508  * Enable InfiniBand management datagram processing, update the cached sm_lid,
509  * lid and gid values, and register a callback function for processing MADs
510  * on the specified port. It is safe to call this function more than once for
511  * the same port.
512  */
513 static int srpt_refresh_port(struct srpt_port *sport)
514 {
515         struct ib_mad_reg_req reg_req;
516         struct ib_port_modify port_modify;
517         struct ib_port_attr port_attr;
518         int ret;
519
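        /*
         * Advertise the DeviceManagementSupported port capability bit so that
         * the SM and initiators know that this port responds to device
         * management MADs.
         */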
520         memset(&port_modify, 0, sizeof port_modify);
521         port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
522         port_modify.clr_port_cap_mask = 0;
523
524         ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
525         if (ret)
526                 goto err_mod_port;
527
528         ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
529         if (ret)
530                 goto err_query_port;
531
532         sport->sm_lid = port_attr.sm_lid;
533         sport->lid = port_attr.lid;
534
535         ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
536         if (ret)
537                 goto err_query_port;
538
539         if (!sport->mad_agent) {
540                 memset(&reg_req, 0, sizeof reg_req);
541                 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
542                 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
543                 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
544                 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
545
546                 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
547                                                          sport->port,
548                                                          IB_QPT_GSI,
549                                                          &reg_req, 0,
550                                                          srpt_mad_send_handler,
551                                                          srpt_mad_recv_handler,
552                                                          sport);
553                 if (IS_ERR(sport->mad_agent)) {
554                         ret = PTR_ERR(sport->mad_agent);
555                         sport->mad_agent = NULL;
556                         goto err_query_port;
557                 }
558         }
559
560         return 0;
561
562 err_query_port:
563
564         port_modify.set_port_cap_mask = 0;
565         port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
566         ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
567
568 err_mod_port:
569
570         return ret;
571 }
572
573 /*
574  * Unregister the callback function for processing MADs and disable MAD
575  * processing for all ports of the specified device. It is safe to call this
576  * function more than once for the same device.
577  */
578 static void srpt_unregister_mad_agent(struct srpt_device *sdev)
579 {
580         struct ib_port_modify port_modify = {
581                 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
582         };
583         struct srpt_port *sport;
584         int i;
585
586         for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
587                 sport = &sdev->port[i - 1];
588                 WARN_ON(sport->port != i);
589                 if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
590                         printk(KERN_ERR PFX "disabling MAD processing"
591                                " failed.\n");
592                 if (sport->mad_agent) {
593                         ib_unregister_mad_agent(sport->mad_agent);
594                         sport->mad_agent = NULL;
595                 }
596         }
597 }
598
599 /*
600  * Allocate and initialize an SRPT I/O context structure.
601  */
602 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev)
603 {
604         struct srpt_ioctx *ioctx;
605
606         ioctx = kmalloc(sizeof *ioctx, GFP_KERNEL);
607         if (!ioctx)
608                 goto out;
609
610         ioctx->buf = kzalloc(MAX_MESSAGE_SIZE, GFP_KERNEL);
611         if (!ioctx->buf)
612                 goto out_free_ioctx;
613
614         ioctx->dma = dma_map_single(sdev->device->dma_device, ioctx->buf,
615                                     MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
617         if (dma_mapping_error(sdev->device->dma_device, ioctx->dma))
618 #else
619         if (dma_mapping_error(ioctx->dma))
620 #endif
621                 goto out_free_buf;
622
623         return ioctx;
624
625 out_free_buf:
626         kfree(ioctx->buf);
627 out_free_ioctx:
628         kfree(ioctx);
629 out:
630         return NULL;
631 }
632
633 /*
634  * Deallocate an SRPT I/O context structure.
635  */
636 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
637 {
638         if (!ioctx)
639                 return;
640
641         dma_unmap_single(sdev->device->dma_device, ioctx->dma,
642                          MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
643         kfree(ioctx->buf);
644         kfree(ioctx);
645 }
646
647 /*
648  * Associate a ring of SRPT I/O context structures with the specified device.
649  */
650 static int srpt_alloc_ioctx_ring(struct srpt_device *sdev)
651 {
652         int i;
653
654         for (i = 0; i < SRPT_SRQ_SIZE; ++i) {
655                 sdev->ioctx_ring[i] = srpt_alloc_ioctx(sdev);
656
657                 if (!sdev->ioctx_ring[i])
658                         goto err;
659
660                 sdev->ioctx_ring[i]->index = i;
661         }
662
663         return 0;
664
665 err:
666         while (--i >= 0) {
667                 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
668                 sdev->ioctx_ring[i] = NULL;
669         }
670         return -ENOMEM;
671 }
672
673 /* Free the ring of SRPT I/O context structures. */
674 static void srpt_free_ioctx_ring(struct srpt_device *sdev)
675 {
676         int i;
677
678         for (i = 0; i < SRPT_SRQ_SIZE; ++i) {
679                 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
680                 sdev->ioctx_ring[i] = NULL;
681         }
682 }
683
684 /*
685  * Post a receive request on the SRQ (shared receive queue) of device 'sdev'.
686  */
687 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
688 {
689         struct ib_sge list;
690         struct ib_recv_wr wr, *bad_wr;
691
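        /*
         * Tag the work request with SRPT_OP_RECV so that the completion
         * handler can tell receive completions apart from send and RDMA
         * completions, which carry a plain ioctx index in wr_id.
         */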
692         wr.wr_id = ioctx->index | SRPT_OP_RECV;
693
694         list.addr = ioctx->dma;
695         list.length = MAX_MESSAGE_SIZE;
696         list.lkey = sdev->mr->lkey;
697
698         wr.next = NULL;
699         wr.sg_list = &list;
700         wr.num_sge = 1;
701
702         return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
703 }
704
705 /*
706  * Post an IB send request.
707  * @ch: RDMA channel to post the send request on.
708  * @ioctx: I/O context of the send request.
709  * @len: length of the request to be sent in bytes.
710  *
711  * Returns zero upon success and a non-zero value upon failure.
712  */
713 static int srpt_post_send(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
714                           int len)
715 {
716         struct ib_sge list;
717         struct ib_send_wr wr, *bad_wr;
718         struct srpt_device *sdev = ch->sport->sdev;
719
720         dma_sync_single_for_device(sdev->device->dma_device, ioctx->dma,
721                                    MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
722
723         list.addr = ioctx->dma;
724         list.length = len;
725         list.lkey = sdev->mr->lkey;
726
727         wr.next = NULL;
728         wr.wr_id = ioctx->index;
729         wr.sg_list = &list;
730         wr.num_sge = 1;
731         wr.opcode = IB_WR_SEND;
732         wr.send_flags = IB_SEND_SIGNALED;
733
734         return ib_post_send(ch->qp, &wr, &bad_wr);
735 }
736
737 static int srpt_get_desc_tbl(struct srpt_ioctx *ioctx, struct srp_cmd *srp_cmd,
738                              int *ind)
739 {
740         struct srp_indirect_buf *idb;
741         struct srp_direct_buf *db;
742
743         *ind = 0;
744         if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
745             ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
746                 ioctx->n_rbuf = 1;
747                 ioctx->rbufs = &ioctx->single_rbuf;
748
749                 db = (void *)srp_cmd->add_data;
750                 memcpy(ioctx->rbufs, db, sizeof *db);
751                 ioctx->data_len = be32_to_cpu(db->len);
752         } else {
753                 idb = (void *)srp_cmd->add_data;
754
755                 ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
756
757                 if (ioctx->n_rbuf >
758                     (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
759                         *ind = 1;
760                         ioctx->n_rbuf = 0;
761                         goto out;
762                 }
763
764                 if (ioctx->n_rbuf == 1)
765                         ioctx->rbufs = &ioctx->single_rbuf;
766                 else
767                         ioctx->rbufs =
768                                 kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
769                 if (!ioctx->rbufs) {
770                         ioctx->n_rbuf = 0;
771                         return -ENOMEM;
772                 }
773
774                 db = idb->desc_list;
775                 memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
776                 ioctx->data_len = be32_to_cpu(idb->len);
777         }
778 out:
779         return 0;
780 }
781
782 /*
783  * Modify the attributes of queue pair 'qp': allow local write, remote read,
784  * and remote write. Also transition 'qp' to state IB_QPS_INIT.
785  */
786 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
787 {
788         struct ib_qp_attr *attr;
789         int ret;
790
791         attr = kzalloc(sizeof *attr, GFP_KERNEL);
792         if (!attr)
793                 return -ENOMEM;
794
795         attr->qp_state = IB_QPS_INIT;
796         attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
797             IB_ACCESS_REMOTE_WRITE;
798         attr->port_num = ch->sport->port;
799         attr->pkey_index = 0;
800
801         ret = ib_modify_qp(qp, attr,
802                            IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
803                            IB_QP_PKEY_INDEX);
804
805         kfree(attr);
806         return ret;
807 }
808
809 /**
810  * Change the state of a channel to 'ready to receive' (RTR).
811  * @ch: channel of the queue pair.
812  * @qp: queue pair to change the state of.
813  *
814  * Returns zero upon success and a negative value upon failure.
815  */
816 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
817 {
818         struct ib_qp_attr qp_attr;
819         int attr_mask;
820         int ret;
821
822         qp_attr.qp_state = IB_QPS_RTR;
823         ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
824         if (ret)
825                 goto out;
826
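        /*
         * ib_cm_init_qp_attr() has filled in the QP attributes negotiated by
         * the IB CM for the transition to RTR; only the inbound RDMA
         * read/atomic depth is overridden here.
         */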
827         qp_attr.max_dest_rd_atomic = 4;
828
829         ret = ib_modify_qp(qp, &qp_attr, attr_mask);
830
831 out:
832         return ret;
833 }
834
835 /**
836  * Change the state of a channel to 'ready to send' (RTS).
837  * @ch: channel of the queue pair.
838  * @qp: queue pair to change the state of.
839  *
840  * Returns zero upon success and a negative value upon failure.
841  */
842 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
843 {
844         struct ib_qp_attr qp_attr;
845         int attr_mask;
846         int ret;
847
848         qp_attr.qp_state = IB_QPS_RTS;
849         ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
850         if (ret)
851                 goto out;
852
853         qp_attr.max_rd_atomic = 4;
854
855         ret = ib_modify_qp(qp, &qp_attr, attr_mask);
856
857 out:
858         return ret;
859 }
860
861 static void srpt_reset_ioctx(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx)
862 {
863         int i;
864
865         if (ioctx->n_rdma_ius > 0 && ioctx->rdma_ius) {
866                 struct rdma_iu *riu = ioctx->rdma_ius;
867
868                 for (i = 0; i < ioctx->n_rdma_ius; ++i, ++riu)
869                         kfree(riu->sge);
870                 kfree(ioctx->rdma_ius);
871         }
872
873         if (ioctx->n_rbuf > 1)
874                 kfree(ioctx->rbufs);
875
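        /*
         * Reposting the receive buffer grants the initiator one more request
         * credit; the credit is accumulated in req_lim_delta and reported to
         * the initiator in the req_lim_delta field of the next SRP_RSP.
         */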
876         if (srpt_post_recv(ch->sport->sdev, ioctx))
877                 printk(KERN_ERR PFX "SRQ post_recv failed - this is serious\n");
878                 /* we should queue it back to free_ioctx queue */
879         else
880                 atomic_inc(&ch->req_lim_delta);
881 }
882
883 static void srpt_abort_scst_cmd(struct srpt_device *sdev,
884                                 struct scst_cmd *scmnd,
885                                 bool tell_initiator)
886 {
887         struct srpt_ioctx *ioctx;
888         scst_data_direction dir;
889
890         ioctx = scst_cmd_get_tgt_priv(scmnd);
891         BUG_ON(!ioctx);
892         dir = scst_cmd_get_data_direction(scmnd);
893         if (dir != SCST_DATA_NONE) {
894                 dma_unmap_sg(sdev->device->dma_device,
895                              scst_cmd_get_sg(scmnd),
896                              scst_cmd_get_sg_cnt(scmnd),
897                              scst_to_tgt_dma_dir(dir));
898
899 #if 1
900                 switch (scmnd->state) {
901                 case SCST_CMD_STATE_DATA_WAIT:
902                         WARN_ON(ioctx->state != SRPT_STATE_NEED_DATA);
903                         break;
904                 case SCST_CMD_STATE_XMIT_WAIT:
905                         WARN_ON(ioctx->state != SRPT_STATE_PROCESSED);
906                         break;
907                 default:
908                         WARN_ON(ioctx->state == SRPT_STATE_NEED_DATA ||
909                                 ioctx->state == SRPT_STATE_PROCESSED);
910                 }
911 #endif
912
913                 if (ioctx->state == SRPT_STATE_NEED_DATA) {
914                         scst_rx_data(scmnd,
915                                      tell_initiator ? SCST_RX_STATUS_ERROR
916                                      : SCST_RX_STATUS_ERROR_FATAL,
917                                      SCST_CONTEXT_THREAD);
918                         goto out;
919                 } else if (ioctx->state == SRPT_STATE_PROCESSED)
920                         ;
921                 else {
922                         printk(KERN_ERR PFX
923                                "unexpected cmd state %d (SCST) %d (SRPT)\n",
924                                scmnd->state, ioctx->state);
925                         WARN_ON("unexpected cmd state");
926                 }
927         }
928
929         scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_FAILED);
930         scst_tgt_cmd_done(scmnd, scst_estimate_context());
931 out:
932         return;
933 }
934
935 static void srpt_handle_err_comp(struct srpt_rdma_ch *ch, struct ib_wc *wc)
936 {
937         struct srpt_ioctx *ioctx;
938         struct srpt_device *sdev = ch->sport->sdev;
939
940         if (wc->wr_id & SRPT_OP_RECV) {
941                 ioctx = sdev->ioctx_ring[wc->wr_id & ~SRPT_OP_RECV];
942                 printk(KERN_ERR PFX "This is serious - SRQ is in bad state\n");
943         } else {
944                 ioctx = sdev->ioctx_ring[wc->wr_id];
945
946                 if (ioctx->scmnd)
947                         srpt_abort_scst_cmd(sdev, ioctx->scmnd, true);
948                 else
949                         srpt_reset_ioctx(ch, ioctx);
950         }
951 }
952
953 static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
954                                   struct srpt_ioctx *ioctx,
955                                   enum scst_exec_context context)
956 {
957         if (ioctx->scmnd) {
958                 scst_data_direction dir =
959                         scst_cmd_get_data_direction(ioctx->scmnd);
960
961                 if (dir != SCST_DATA_NONE)
962                         dma_unmap_sg(ch->sport->sdev->device->dma_device,
963                                      scst_cmd_get_sg(ioctx->scmnd),
964                                      scst_cmd_get_sg_cnt(ioctx->scmnd),
965                                      scst_to_tgt_dma_dir(dir));
966
967                 scst_tgt_cmd_done(ioctx->scmnd, context);
968         } else
969                 srpt_reset_ioctx(ch, ioctx);
970 }
971
972 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
973                                   struct srpt_ioctx *ioctx)
974 {
975         if (!ioctx->scmnd) {
976                 srpt_reset_ioctx(ch, ioctx);
977                 return;
978         }
979
980         if (scst_cmd_get_data_direction(ioctx->scmnd) == SCST_DATA_WRITE)
981                 scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_SUCCESS,
982                         scst_estimate_context());
983 }
984
985 /**
986  * Build an SRP_RSP response PDU.
987  * @ch: RDMA channel through which the request has been received.
988  * @ioctx: I/O context in which the SRP_RSP PDU will be built.
989  * @s_key: sense key that will be stored in the response.
990  * @s_code: value that will be stored in the asc_ascq field of the sense data.
991  * @tag: tag of the request for which this response is being generated.
992  *
993  * Returns the size in bytes of the SRP_RSP response PDU.
994  *
995  * An SRP_RSP PDU contains a SCSI status or service response. See also
996  * section 6.9 in the T10 SRP r16a document for the format of an SRP_RSP PDU.
997  * See also SPC-2 for more information about sense data.
998  */
999 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1000                               struct srpt_ioctx *ioctx, u8 s_key, u8 s_code,
1001                               u64 tag)
1002 {
1003         struct srp_rsp *srp_rsp;
1004         struct sense_data *sense;
1005         int limit_delta;
1006         int sense_data_len = 0;
1007
1008         srp_rsp = ioctx->buf;
1009         memset(srp_rsp, 0, sizeof *srp_rsp);
1010
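        /*
         * Consume the request-limit credits that have accumulated since the
         * last response and pass them on to the initiator via the
         * req_lim_delta field of this SRP_RSP.
         */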
1011         limit_delta = atomic_read(&ch->req_lim_delta);
1012         atomic_sub(limit_delta, &ch->req_lim_delta);
1013
1014         srp_rsp->opcode = SRP_RSP;
1015         srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
1016         srp_rsp->tag = tag;
1017
1018         if (s_key != NO_SENSE) {
1019                 sense_data_len = sizeof *sense + (sizeof *sense % 4);
1020                 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1021                 srp_rsp->status = SAM_STAT_CHECK_CONDITION;
1022                 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1023
1024                 sense = (struct sense_data *)(srp_rsp + 1);
1025                 sense->err_code = 0x70;
1026                 sense->key = s_key;
1027                 sense->asc_ascq = s_code;
1028         }
1029
1030         return sizeof(*srp_rsp) + sense_data_len;
1031 }
1032
1033 /**
1034  * Build a task management response, which is a specific SRP_RSP response PDU.
1035  * @ch: RDMA channel through which the request has been received.
1036  * @ioctx: I/O context in which the SRP_RSP PDU will be built.
1037  * @rsp_code: RSP_CODE that will be stored in the response.
1038  * @tag: tag of the request for which this response is being generated.
1039  *
1040  * Returns the size in bytes of the SRP_RSP response PDU.
1041  *
1042  * An SRP_RSP PDU contains a SCSI status or service response. See also
1043  * section 6.9 in the T10 SRP r16a document for the format of an SRP_RSP PDU.
1044  */
1045 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1046                                   struct srpt_ioctx *ioctx, u8 rsp_code,
1047                                   u64 tag)
1048 {
1049         struct srp_rsp *srp_rsp;
1050         int limit_delta;
1051         int resp_data_len = 0;
1052
1053         dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
1054                                 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
1055
1056         srp_rsp = ioctx->buf;
1057         memset(srp_rsp, 0, sizeof *srp_rsp);
1058
1059         limit_delta = atomic_read(&ch->req_lim_delta);
1060         atomic_sub(limit_delta, &ch->req_lim_delta);
1061
1062         srp_rsp->opcode = SRP_RSP;
1063         srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
1064         srp_rsp->tag = tag;
1065
1066         if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
1067                 resp_data_len = 4;
1068                 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1069                 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1070                 srp_rsp->data[3] = rsp_code;
1071         }
1072
1073         return sizeof(*srp_rsp) + resp_data_len;
1074 }
1075
1076 /*
1077  * Process SRP_CMD.
1078  */
1079 static int srpt_handle_cmd(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx)
1080 {
1081         struct scst_cmd *scmnd;
1082         struct srp_cmd *srp_cmd;
1083         struct srp_rsp *srp_rsp;
1084         scst_data_direction dir = SCST_DATA_NONE;
1085         int indirect_desc = 0;
1086         int ret;
1087         unsigned long flags;
1088
1089         srp_cmd = ioctx->buf;
1090         srp_rsp = ioctx->buf;
1091
1092         if (srp_cmd->buf_fmt) {
1093                 ret = srpt_get_desc_tbl(ioctx, srp_cmd, &indirect_desc);
1094                 if (ret) {
1095                         srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1096                                            NO_ADD_SENSE, srp_cmd->tag);
1097                         srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1098                         goto err;
1099                 }
1100
1101                 if (indirect_desc) {
1102                         srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1103                                            NO_ADD_SENSE, srp_cmd->tag);
1104                         srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1105                         goto err;
1106                 }
1107
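                /*
                 * The lower nibble of buf_fmt describes the data-in buffer
                 * (data flows from target to initiator, i.e. a read) and the
                 * upper nibble the data-out buffer (a write).
                 */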
1108                 if (srp_cmd->buf_fmt & 0xf)
1109                         dir = SCST_DATA_READ;
1110                 else if (srp_cmd->buf_fmt >> 4)
1111                         dir = SCST_DATA_WRITE;
1112                 else
1113                         dir = SCST_DATA_NONE;
1114         } else
1115                 dir = SCST_DATA_NONE;
1116
1117         scmnd = scst_rx_cmd(ch->scst_sess, (u8 *) &srp_cmd->lun,
1118                             sizeof srp_cmd->lun, srp_cmd->cdb, 16,
1119                             thread ? SCST_NON_ATOMIC : SCST_ATOMIC);
1120         if (!scmnd) {
1121                 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1122                                    NO_ADD_SENSE, srp_cmd->tag);
1123                 srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1124                 goto err;
1125         }
1126
1127         ioctx->scmnd = scmnd;
1128
1129         switch (srp_cmd->task_attr) {
1130         case SRP_CMD_HEAD_OF_Q:
1131                 scmnd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1132                 break;
1133         case SRP_CMD_ORDERED_Q:
1134                 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
1135                 break;
1136         case SRP_CMD_SIMPLE_Q:
1137                 scmnd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1138                 break;
1139         case SRP_CMD_ACA:
1140                 scmnd->queue_type = SCST_CMD_QUEUE_ACA;
1141                 break;
1142         default:
1143                 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
1144                 break;
1145         }
1146
1147         scst_cmd_set_tag(scmnd, srp_cmd->tag);
1148         scst_cmd_set_tgt_priv(scmnd, ioctx);
1149         scst_cmd_set_expected(scmnd, dir, ioctx->data_len);
1150
1151         spin_lock_irqsave(&ch->spinlock, flags);
1152         list_add_tail(&ioctx->scmnd_list, &ch->active_scmnd_list);
1153         ch->active_scmnd_cnt++;
1154         spin_unlock_irqrestore(&ch->spinlock, flags);
1155
1156         scst_cmd_init_done(scmnd, scst_estimate_context());
1157
1158         return 0;
1159
1160 err:
1161         WARN_ON(srp_rsp->opcode != SRP_RSP);
1162
1163         return -1;
1164 }
1165
1166 /*
1167  * Process an SRP_TSK_MGMT request PDU.
1168  *
1169  * Returns 0 upon success and -1 upon failure.
1170  *
1171  * Each task management function is performed by calling one of the
1172  * scst_rx_mgmt_fn*() functions. These functions will either report failure
1173  * or process the task management function asynchronously. The function
1174  * srpt_tsk_mgmt_done() will be called by the SCST core upon completion of the
1175  * task management function. When srpt_handle_tsk_mgmt() reports failure
1176  * (i.e. returns -1) a response PDU will have been built in ioctx->buf. This
1177  * PDU has to be sent back by the caller.
1178  *
1179  * For more information about SRP_TSK_MGMT PDU's, see also section 6.7 in
1180  * the T10 SRP r16a document.
1181  */
1182 static int srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1183                                 struct srpt_ioctx *ioctx)
1184 {
1185         struct srp_tsk_mgmt *srp_tsk;
1186         struct srpt_mgmt_ioctx *mgmt_ioctx;
1187         int ret;
1188
1189         srp_tsk = ioctx->buf;
1190
1191         TRACE_DBG("recv_tsk_mgmt= %d for task_tag= %lld"
1192                   " using tag= %lld cm_id= %p sess= %p",
1193                   srp_tsk->tsk_mgmt_func,
1194                   (unsigned long long) srp_tsk->task_tag,
1195                   (unsigned long long) srp_tsk->tag,
1196                   ch->cm_id, ch->scst_sess);
1197
1198         mgmt_ioctx = kmalloc(sizeof *mgmt_ioctx, GFP_ATOMIC);
1199         if (!mgmt_ioctx) {
1200                 srpt_build_tskmgmt_rsp(ch, ioctx, SRP_TSK_MGMT_FAILED,
1201                                        srp_tsk->tag);
1202                 goto err;
1203         }
1204
1205         mgmt_ioctx->ioctx = ioctx;
1206         mgmt_ioctx->ch = ch;
1207         mgmt_ioctx->tag = srp_tsk->tag;
1208
1209         switch (srp_tsk->tsk_mgmt_func) {
1210         case SRP_TSK_ABORT_TASK:
1211                 TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK");
1212                 ret = scst_rx_mgmt_fn_tag(ch->scst_sess,
1213                                           SCST_ABORT_TASK,
1214                                           srp_tsk->task_tag,
1215                                           thread ?
1216                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1217                                           mgmt_ioctx);
1218                 break;
1219         case SRP_TSK_ABORT_TASK_SET:
1220                 TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK_SET");
1221                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1222                                           SCST_ABORT_TASK_SET,
1223                                           (u8 *) &srp_tsk->lun,
1224                                           sizeof srp_tsk->lun,
1225                                           thread ?
1226                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1227                                           mgmt_ioctx);
1228                 break;
1229         case SRP_TSK_CLEAR_TASK_SET:
1230                 TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_TASK_SET");
1231                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1232                                           SCST_CLEAR_TASK_SET,
1233                                           (u8 *) &srp_tsk->lun,
1234                                           sizeof srp_tsk->lun,
1235                                           thread ?
1236                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1237                                           mgmt_ioctx);
1238                 break;
1239         case SRP_TSK_LUN_RESET:
1240                 TRACE_DBG("%s", "Processing SRP_TSK_LUN_RESET");
1241                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1242                                           SCST_LUN_RESET,
1243                                           (u8 *) &srp_tsk->lun,
1244                                           sizeof srp_tsk->lun,
1245                                           thread ?
1246                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1247                                           mgmt_ioctx);
1248                 break;
1249         case SRP_TSK_CLEAR_ACA:
1250                 TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_ACA");
1251                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1252                                           SCST_CLEAR_ACA,
1253                                           (u8 *) &srp_tsk->lun,
1254                                           sizeof srp_tsk->lun,
1255                                           thread ?
1256                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1257                                           mgmt_ioctx);
1258                 break;
1259         default:
1260                 TRACE_DBG("%s", "Unsupported task management function.");
1261                 srpt_build_tskmgmt_rsp(ch, ioctx,
1262                                        SRP_TSK_MGMT_FUNC_NOT_SUPP,
1263                                        srp_tsk->tag);
1264                 goto err;
1265         }
1266
1267         if (ret) {
1268                 TRACE_DBG("%s", "Processing task management function failed.");
1269                 srpt_build_tskmgmt_rsp(ch, ioctx, SRP_TSK_MGMT_FAILED,
1270                                        srp_tsk->tag);
1271                 goto err;
1272         }
1273
1274         WARN_ON(srp_tsk->opcode == SRP_RSP);
1275
1276         return 0;
1277
1278 err:
1279         WARN_ON(srp_tsk->opcode != SRP_RSP);
1280
1281         kfree(mgmt_ioctx);
1282         return -1;
1283 }
1284
1285 /**
1286  * Process a receive completion event.
1287  * @ch: RDMA channel for which the completion event has been received.
1288  * @ioctx: SRPT I/O context for which the completion event has been received.
1289  */
1290 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1291                                struct srpt_ioctx *ioctx)
1292 {
1293         struct srp_cmd *srp_cmd;
1294         struct srp_rsp *srp_rsp;
1295         unsigned long flags;
1296         int len;
1297
1298         spin_lock_irqsave(&ch->spinlock, flags);
1299         if (ch->state != RDMA_CHANNEL_LIVE) {
1300                 if (ch->state == RDMA_CHANNEL_CONNECTING) {
1301                         list_add_tail(&ioctx->wait_list, &ch->cmd_wait_list);
1302                         spin_unlock_irqrestore(&ch->spinlock, flags);
1303                         return;
1304                 } else {
1305                         spin_unlock_irqrestore(&ch->spinlock, flags);
1306                         srpt_reset_ioctx(ch, ioctx);
1307                         return;
1308                 }
1309         }
1310         spin_unlock_irqrestore(&ch->spinlock, flags);
1311
1312         dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
1313                                 MAX_MESSAGE_SIZE, DMA_FROM_DEVICE);
1314
1315         ioctx->data_len = 0;
1316         ioctx->n_rbuf = 0;
1317         ioctx->rbufs = NULL;
1318         ioctx->n_rdma = 0;
1319         ioctx->n_rdma_ius = 0;
1320         ioctx->rdma_ius = NULL;
1321         ioctx->scmnd = NULL;
1322         ioctx->state = SRPT_STATE_NEW;
1323
1324         srp_cmd = ioctx->buf;
1325         srp_rsp = ioctx->buf;
1326
1327         switch (srp_cmd->opcode) {
1328         case SRP_CMD:
1329                 if (srpt_handle_cmd(ch, ioctx) < 0)
1330                         goto err;
1331                 break;
1332
1333         case SRP_TSK_MGMT:
1334                 if (srpt_handle_tsk_mgmt(ch, ioctx) < 0)
1335                         goto err;
1336                 break;
1337
1338         case SRP_I_LOGOUT:
1339         case SRP_AER_REQ:
1340         default:
1341                 srpt_build_cmd_rsp(ch, ioctx, ILLEGAL_REQUEST, INVALID_CDB,
1342                                    srp_cmd->tag);
1343                 goto err;
1344         }
1345
1346         dma_sync_single_for_device(ch->sport->sdev->device->dma_device,
1347                                    ioctx->dma, MAX_MESSAGE_SIZE,
1348                                    DMA_FROM_DEVICE);
1349
1350         return;
1351
1352 err:
1353         WARN_ON(srp_rsp->opcode != SRP_RSP);
1354         len = (sizeof *srp_rsp) + be32_to_cpu(srp_rsp->sense_data_len);
1355
1356         if (ch->state != RDMA_CHANNEL_LIVE) {
1357                 /* Give up if another thread modified the channel state. */
1358                 printk(KERN_ERR PFX "%s: channel is in state %d",
1359                        __func__, ch->state);
1360                 srpt_reset_ioctx(ch, ioctx);
1361         } else if (srpt_post_send(ch, ioctx, len)) {
1362                 printk(KERN_ERR PFX "%s: sending SRP_RSP PDU failed",
1363                        __func__);
1364                 srpt_reset_ioctx(ch, ioctx);
1365         }
1366 }
1367
1368 /*
1369  * Returns true if the ioctx list is non-empty or if the ib_srpt kernel thread
1370  * should stop.
1371  * @pre thread != 0
1372  */
1373 static inline int srpt_test_ioctx_list(void)
1374 {
1375         int res = (!list_empty(&srpt_thread.thread_ioctx_list) ||
1376                    unlikely(kthread_should_stop()));
1377         return res;
1378 }
1379
1380 /*
1381  * Add 'ioctx' to the tail of the ioctx list and wake up the kernel thread.
1382  *
1383  * @pre thread != 0
1384  */
1385 static inline void srpt_schedule_thread(struct srpt_ioctx *ioctx)
1386 {
1387         unsigned long flags;
1388
1389         spin_lock_irqsave(&srpt_thread.thread_lock, flags);
1390         list_add_tail(&ioctx->comp_list, &srpt_thread.thread_ioctx_list);
1391         spin_unlock_irqrestore(&srpt_thread.thread_lock, flags);
1392         wake_up(&ioctx_list_waitQ);
1393 }
1394
1395 /**
1396  * InfiniBand completion queue callback function.
1397  * @cq: completion queue.
1398  * @ctx: completion queue context, which was passed as the fourth argument of
1399  *       the function ib_create_cq().
1400  */
1401 static void srpt_completion(struct ib_cq *cq, void *ctx)
1402 {
1403         struct srpt_rdma_ch *ch = ctx;
1404         struct srpt_device *sdev = ch->sport->sdev;
1405         struct ib_wc wc;
1406         struct srpt_ioctx *ioctx;
1407
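        /*
         * Re-arm the completion queue before draining it so that completions
         * arriving while the loop below runs trigger another invocation of
         * this handler instead of being missed.
         */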
1408         ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1409         while (ib_poll_cq(ch->cq, 1, &wc) > 0) {
1410                 if (wc.status) {
1411                         printk(KERN_ERR PFX "failed %s status= %d\n",
1412                                wc.wr_id & SRPT_OP_RECV ? "receive" : "send",
1413                                wc.status);
1414                         srpt_handle_err_comp(ch, &wc);
1415                         break;
1416                 }
1417
1418                 if (wc.wr_id & SRPT_OP_RECV) {
1419                         ioctx = sdev->ioctx_ring[wc.wr_id & ~SRPT_OP_RECV];
1420                         if (thread) {
1421                                 ioctx->ch = ch;
1422                                 ioctx->op = IB_WC_RECV;
1423                                 srpt_schedule_thread(ioctx);
1424                         } else
1425                                 srpt_handle_new_iu(ch, ioctx);
1426                         continue;
1427                 } else
1428                         ioctx = sdev->ioctx_ring[wc.wr_id];
1429
1430                 if (thread) {
1431                         ioctx->ch = ch;
1432                         ioctx->op = wc.opcode;
1433                         srpt_schedule_thread(ioctx);
1434                 } else {
1435                         switch (wc.opcode) {
1436                         case IB_WC_SEND:
1437                                 srpt_handle_send_comp(ch, ioctx,
1438                                         scst_estimate_context());
1439                                 break;
1440                         case IB_WC_RDMA_WRITE:
1441                         case IB_WC_RDMA_READ:
1442                                 srpt_handle_rdma_comp(ch, ioctx);
1443                                 break;
1444                         default:
1445                                 break;
1446                         }
1447                 }
1448         }
1449 }
1450
1451 /*
1452  * Create a completion queue and a queue pair (QP) for the specified channel.
1453  */
1454 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1455 {
1456         struct ib_qp_init_attr *qp_init;
1457         struct srpt_device *sdev = ch->sport->sdev;
1458         int cqe;
1459         int ret;
1460
1461         qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
1462         if (!qp_init)
1463                 return -ENOMEM;
1464
1465         /* Create a completion queue (CQ). */
1466
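        /*
         * A single CQ serves both the send queue and the receive queue of
         * this channel, so it must be able to hold completions for both.
         */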
1467         cqe = SRPT_RQ_SIZE + SRPT_SQ_SIZE - 1;
1468 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(RHEL_RELEASE_CODE)
1469         ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe);
1470 #else
1471         ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe, 0);
1472 #endif
1473         if (IS_ERR(ch->cq)) {
1474                 ret = PTR_ERR(ch->cq);
1475                 printk(KERN_ERR PFX "failed to create_cq cqe= %d ret= %d\n",
1476                         cqe, ret);
1477                 goto out;
1478         }
1479
1480         /* Request completion notification. */
1481
1482         ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1483
1484         /* Create a queue pair (QP). */
1485
1486         qp_init->qp_context = (void *)ch;
1487         qp_init->event_handler = srpt_qp_event;
1488         qp_init->send_cq = ch->cq;
1489         qp_init->recv_cq = ch->cq;
1490         qp_init->srq = sdev->srq;
1491         qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1492         qp_init->qp_type = IB_QPT_RC;
1493         qp_init->cap.max_send_wr = SRPT_SQ_SIZE;
1494         qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
1495
1496         ch->qp = ib_create_qp(sdev->pd, qp_init);
1497         if (IS_ERR(ch->qp)) {
1498                 ret = PTR_ERR(ch->qp);
1499                 ib_destroy_cq(ch->cq);
1500                 printk(KERN_ERR PFX "failed to create_qp ret= %d\n", ret);
1501                 goto out;
1502         }
1503
1504         TRACE_DBG("%s: max_cqe= %d max_sge= %d cm_id= %p",
1505                __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1506                ch->cm_id);
1507
1508         /* Modify the attributes and the state of queue pair ch->qp. */
1509
1510         ret = srpt_init_ch_qp(ch, ch->qp);
1511         if (ret) {
1512                 ib_destroy_qp(ch->qp);
1513                 ib_destroy_cq(ch->cq);
1514                 goto out;
1515         }
1516
1517         atomic_set(&ch->req_lim_delta, SRPT_RQ_SIZE);
1518 out:
1519         kfree(qp_init);
1520         return ret;
1521 }
1522
1523 /**
1524  * Look up the RDMA channel that corresponds to the specified cm_id. If 'del'
1525  * is true, the matching channel is also removed from the channel list.
1526  * Return NULL if no matching RDMA channel has been found.
1527  */
1528 static struct srpt_rdma_ch *srpt_find_channel(struct ib_cm_id *cm_id, bool del)
1529 {
1530         struct srpt_device *sdev = cm_id->context;
1531         struct srpt_rdma_ch *ch;
1532
1533         spin_lock_irq(&sdev->spinlock);
1534         list_for_each_entry(ch, &sdev->rch_list, list) {
1535                 if (ch->cm_id == cm_id) {
1536                         if (del)
1537                                 list_del(&ch->list);
1538                         spin_unlock_irq(&sdev->spinlock);
1539                         return ch;
1540                 }
1541         }
1542
1543         spin_unlock_irq(&sdev->spinlock);
1544
1545         return NULL;
1546 }
1547
1548 /**
1549  * Release all resources associated with the specified RDMA channel.
1550  *
1551  * Note: the caller must have removed the channel from the channel list
1552  * before calling this function.
1553  */
1554 static void srpt_release_channel(struct srpt_rdma_ch *ch, int destroy_cmid)
1555 {
1556         TRACE_ENTRY();
1557
1558         WARN_ON(srpt_find_channel(ch->cm_id, false) == ch);
1559
1560         if (ch->cm_id && destroy_cmid) {
1561                 TRACE_DBG("%s: destroy cm_id= %p", __func__, ch->cm_id);
1562                 ib_destroy_cm_id(ch->cm_id);
1563                 ch->cm_id = NULL;
1564         }
1565
1566         ib_destroy_qp(ch->qp);
1567         ib_destroy_cq(ch->cq);
1568
1569         if (ch->scst_sess) {
1570                 struct srpt_ioctx *ioctx, *ioctx_tmp;
1571
1572                 TRACE_DBG("%s: release sess= %p sess_name= %s active_cmd= %d",
1573                           __func__, ch->scst_sess, ch->sess_name,
1574                           ch->active_scmnd_cnt);
1575
1576                 spin_lock_irq(&ch->spinlock);
1577                 list_for_each_entry_safe(ioctx, ioctx_tmp,
1578                                          &ch->active_scmnd_list, scmnd_list) {
1579                         spin_unlock_irq(&ch->spinlock);
1580
1581                         if (ioctx->scmnd)
1582                                 srpt_abort_scst_cmd(ch->sport->sdev,
1583                                                     ioctx->scmnd, true);
1584
1585                         spin_lock_irq(&ch->spinlock);
1586                 }
1587                 WARN_ON(!list_empty(&ch->active_scmnd_list));
1588                 WARN_ON(ch->active_scmnd_cnt != 0);
1589                 spin_unlock_irq(&ch->spinlock);
1590
1591                 scst_unregister_session(ch->scst_sess, 0, NULL);
1592                 ch->scst_sess = NULL;
1593         }
1594
1595         kfree(ch);
1596
1597         TRACE_EXIT();
1598 }
1599
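/*
 * Process an SRP_LOGIN_REQ request received via an IB CM REQ message: validate
 * the login parameters, allocate and set up an RDMA channel, register an SCST
 * session, and send back either an SRP_LOGIN_RSP or an SRP_LOGIN_REJ response.
 */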
1600 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
1601                             struct ib_cm_req_event_param *param,
1602                             void *private_data)
1603 {
1604         struct srpt_device *sdev = cm_id->context;
1605         struct srp_login_req *req;
1606         struct srp_login_rsp *rsp;
1607         struct srp_login_rej *rej;
1608         struct ib_cm_rep_param *rep_param;
1609         struct srpt_rdma_ch *ch, *tmp_ch;
1610         u32 it_iu_len;
1611         u64 ioc_guid;
1612         int ret = 0;
1613
1614         if (!sdev || !private_data)
1615                 return -EINVAL;
1616
1617         rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
1618         rej = kzalloc(sizeof *rej, GFP_KERNEL);
1619         rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
1620
1621         if (!rsp || !rej || !rep_param) {
1622                 ret = -ENOMEM;
1623                 goto out;
1624         }
1625
1626         req = (struct srp_login_req *)private_data;
1627
1628         it_iu_len = be32_to_cpu(req->req_it_iu_len);
1629
1630         TRACE_DBG("Host login i_port_id=0x%llx:0x%llx t_port_id=0x%llx:0x%llx"
1631             " it_iu_len=%d",
1632             (unsigned long long)be64_to_cpu(*(u64 *)&req->initiator_port_id[0]),
1633             (unsigned long long)be64_to_cpu(*(u64 *)&req->initiator_port_id[8]),
1634             (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[0]),
1635             (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[8]),
1636             it_iu_len);
1637
1638         if (it_iu_len > MAX_MESSAGE_SIZE || it_iu_len < 64) {
1639                 rej->reason =
1640                     cpu_to_be32(SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
1641                 ret = -EINVAL;
1642                 TRACE_DBG("Reject invalid it_iu_len=%d", it_iu_len);
1643                 goto reject;
1644         }
1645
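        /*
         * If the initiator requested the "single channel" policy
         * (SRP_MULTICHAN_SINGLE), terminate any existing RDMA channel between
         * the same initiator port and target port before accepting the new
         * login.
         */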
1646         if ((req->req_flags & 0x3) == SRP_MULTICHAN_SINGLE) {
1647                 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
1648
1649                 spin_lock_irq(&sdev->spinlock);
1650
1651                 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
1652                         if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
1653                             && !memcmp(ch->t_port_id, req->target_port_id, 16)
1654                             && param->port == ch->sport->port
1655                             && param->listen_id == ch->sport->sdev->cm_id
1656                             && ch->cm_id) {
1657                                 enum rdma_ch_state prev_state;
1658
1659                                 /* found an existing channel */
1660                                 TRACE_DBG("Found existing channel name= %s"
1661                                           " cm_id= %p state= %d",
1662                                           ch->sess_name, ch->cm_id, ch->state);
1663
1664                                 prev_state = ch->state;
1665                                 if (ch->state == RDMA_CHANNEL_LIVE)
1666                                         ch->state = RDMA_CHANNEL_DISCONNECTING;
1667                                 else if (ch->state == RDMA_CHANNEL_CONNECTING)
1668                                         list_del(&ch->list);
1669
1670                                 spin_unlock_irq(&sdev->spinlock);
1671
1672                                 rsp->rsp_flags =
1673                                         SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
1674
1675                                 if (prev_state == RDMA_CHANNEL_LIVE)
1676                                         ib_send_cm_dreq(ch->cm_id, NULL, 0);
1677                                 else if (prev_state ==
1678                                          RDMA_CHANNEL_CONNECTING) {
1679                                         ib_send_cm_rej(ch->cm_id,
1680                                                        IB_CM_REJ_NO_RESOURCES,
1681                                                        NULL, 0, NULL, 0);
1682                                         srpt_release_channel(ch, 1);
1683                                 }
1684
1685                                 spin_lock_irq(&sdev->spinlock);
1686                         }
1687                 }
1688
1689                 spin_unlock_irq(&sdev->spinlock);
1690
1691         } else
1692                 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
1693
1694         ioc_guid = srpt_get_ioc_guid(sdev->device);
1695
1696         if (((u64) (*(u64 *) req->target_port_id) != cpu_to_be64(ioc_guid)) ||
1697             ((u64) (*(u64 *) (req->target_port_id + 8)) !=
1698              cpu_to_be64(ioc_guid))) {
1699                 rej->reason =
1700                     cpu_to_be32(SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
1701                 ret = -EINVAL;
1702                 TRACE_DBG("%s", "Reject invalid target_port_id");
1703                 goto reject;
1704         }
1705
1706         ch = kzalloc(sizeof *ch, GFP_KERNEL);
1707         if (!ch) {
1708                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1709                 TRACE_DBG("%s", "Reject failed allocate rdma_ch");
1710                 ret = -ENOMEM;
1711                 goto reject;
1712         }
1713
1714         spin_lock_init(&ch->spinlock);
1715         memcpy(ch->i_port_id, req->initiator_port_id, 16);
1716         memcpy(ch->t_port_id, req->target_port_id, 16);
1717         ch->sport = &sdev->port[param->port - 1];
1718         ch->cm_id = cm_id;
1719         ch->state = RDMA_CHANNEL_CONNECTING;
1720         INIT_LIST_HEAD(&ch->cmd_wait_list);
1721         INIT_LIST_HEAD(&ch->active_scmnd_list);
1722
1723         ret = srpt_create_ch_ib(ch);
1724         if (ret) {
1725                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1726                 TRACE_DBG("%s", "Reject failed to create rdma_ch");
1727                 goto free_ch;
1728         }
1729
1730         ret = srpt_ch_qp_rtr(ch, ch->qp);
1731         if (ret) {
1732                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1733                 TRACE_DBG("Reject failed qp to rtr/rts ret=%d", ret);
1734                 goto destroy_ib;
1735         }
1736
1737         snprintf(ch->sess_name, sizeof(ch->sess_name),
1738                  "0x%016llx%016llx",
1739                  (unsigned long long)be64_to_cpu(*(u64 *)ch->i_port_id),
1740                  (unsigned long long)be64_to_cpu(*(u64 *)(ch->i_port_id + 8)));
1741
1742         TRACE_DBG("registering session %s", ch->sess_name);
1743
1744         BUG_ON(!sdev->scst_tgt);
1745         ch->scst_sess = scst_register_session(sdev->scst_tgt, 0, ch->sess_name,
1746                                   NULL, NULL);
1747         if (!ch->scst_sess) {
1748                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1749                 TRACE_DBG("%s", "Failed to create scst sess");
1750                 goto destroy_ib;
1751         }
1752
1753         TRACE_DBG("Establish connection sess=%p name=%s cm_id=%p",
1754                   ch->scst_sess, ch->sess_name, ch->cm_id);
1755
1756         scst_sess_set_tgt_priv(ch->scst_sess, ch);
1757
1758         /* create srp_login_response */
1759         rsp->opcode = SRP_LOGIN_RSP;
1760         rsp->tag = req->tag;
1761         rsp->max_it_iu_len = req->req_it_iu_len;
1762         rsp->max_ti_iu_len = req->req_it_iu_len;
1763         rsp->buf_fmt =
1764             cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1765         rsp->req_lim_delta = cpu_to_be32(SRPT_RQ_SIZE);
1766         atomic_set(&ch->req_lim_delta, 0);
1767
1768         /* create cm reply */
1769         rep_param->qp_num = ch->qp->qp_num;
1770         rep_param->private_data = (void *)rsp;
1771         rep_param->private_data_len = sizeof *rsp;
1772         rep_param->rnr_retry_count = 7;
1773         rep_param->flow_control = 1;
1774         rep_param->failover_accepted = 0;
1775         rep_param->srq = 1;
1776         rep_param->responder_resources = 4;
1777         rep_param->initiator_depth = 4;
1778
1779         ret = ib_send_cm_rep(cm_id, rep_param);
1780         if (ret)
1781                 goto release_channel;
1782
1783         spin_lock_irq(&sdev->spinlock);
1784         list_add_tail(&ch->list, &sdev->rch_list);
1785         spin_unlock_irq(&sdev->spinlock);
1786
1787         goto out;
1788
1789 release_channel:
1790         scst_unregister_session(ch->scst_sess, 0, NULL);
1791         ch->scst_sess = NULL;
1792
1793 destroy_ib:
1794         ib_destroy_qp(ch->qp);
1795         ib_destroy_cq(ch->cq);
1796
1797 free_ch:
1798         kfree(ch);
1799
1800 reject:
1801         rej->opcode = SRP_LOGIN_REJ;
1802         rej->tag = req->tag;
1803         rej->buf_fmt =
1804             cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1805
1806         ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1807                              (void *)rej, sizeof *rej);
1808
1809 out:
1810         kfree(rep_param);
1811         kfree(rsp);
1812         kfree(rej);
1813
1814         return ret;
1815 }
1816
1817 /**
1818  * Release the channel with the specified cm_id.
1819  *
1820  * Note: this function does not destroy the cm_id itself; destruction of the
1821  * cm_id is left to the caller of srpt_cm_handler().
1822  */
1823 static void srpt_find_and_release_channel(struct ib_cm_id *cm_id)
1824 {
1825         struct srpt_rdma_ch *ch;
1826
1827         ch = srpt_find_channel(cm_id, true);
1828         if (ch)
1829                 srpt_release_channel(ch, 0);
1830 }
1831
1832 static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
1833 {
1834         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1835         srpt_find_and_release_channel(cm_id);
1836 }
1837
1838 /**
1839  * Process an IB_CM_RTU_RECEIVED or IB_CM_USER_ESTABLISHED event.
1840  *
1841  * An IB_CM_RTU_RECEIVED message indicates that the connection is established
1842  * and that the recipient may begin transmitting (RTU = ready to use).
1843  */
1844 static int srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
1845 {
1846         struct srpt_rdma_ch *ch;
1847         int ret;
1848
1849         ch = srpt_find_channel(cm_id, false);
1850         if (!ch)
1851                 return -EINVAL;
1852
1853         if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_CONNECTING,
1854                                             RDMA_CHANNEL_LIVE)) {
1855                 struct srpt_ioctx *ioctx, *ioctx_tmp;
1856
1857                 ret = srpt_ch_qp_rts(ch, ch->qp);
1858
1859                 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
1860                                          wait_list) {
1861                         list_del(&ioctx->wait_list);
1862                         srpt_handle_new_iu(ch, ioctx);
1863                 }
1864                 if (ret && srpt_test_and_set_channel_state(ch,
1865                                         RDMA_CHANNEL_LIVE,
1866                                         RDMA_CHANNEL_DISCONNECTING)) {
1867                         TRACE_DBG("cm_id=%p sess_name=%s state=%d",
1868                                   cm_id, ch->sess_name, ch->state);
1869                         ib_send_cm_dreq(ch->cm_id, NULL, 0);
1870                 }
1871         } else if (ch->state == RDMA_CHANNEL_DISCONNECTING) {
1872                 TRACE_DBG("cm_id=%p sess_name=%s state=%d",
1873                           cm_id, ch->sess_name, ch->state);
1874                 ib_send_cm_dreq(ch->cm_id, NULL, 0);
1875                 ret = -EAGAIN;
1876         } else
1877                 ret = 0;
1878
1879         return ret;
1880 }
1881
1882 static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
1883 {
1884         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1885         srpt_find_and_release_channel(cm_id);
1886 }
1887
1888 static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
1889 {
1890         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1891         srpt_find_and_release_channel(cm_id);
1892 }
1893
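/*
 * Process an IB CM DREQ (disconnect request): send back a DREP while the
 * channel is still connecting or live, and ignore the request otherwise.
 */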
1894 static int srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
1895 {
1896         struct srpt_rdma_ch *ch;
1897
1898         ch = srpt_find_channel(cm_id, false);
1899         if (!ch)
1900                 return -EINVAL;
1901
1902         TRACE_DBG("%s: cm_id= %p ch->state= %d",
1903                  __func__, cm_id, ch->state);
1904
1905         switch (ch->state) {
1906         case RDMA_CHANNEL_LIVE:
1907         case RDMA_CHANNEL_CONNECTING:
1908                 ib_send_cm_drep(ch->cm_id, NULL, 0);
1909                 break;
1910         case RDMA_CHANNEL_DISCONNECTING:
1911         default:
1912                 break;
1913         }
1914
1915         return 0;
1916 }
1917
1918 static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
1919 {
1920         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1921         srpt_find_and_release_channel(cm_id);
1922 }
1923
1924 /**
1925  * IB connection manager callback function.
1926  *
1927  * A non-zero return value will make the caller destroy the CM ID.
1928  *
1929  * Note: srpt_add_one passes a struct srpt_device* as the third argument to
1930  * the ib_create_cm_id() call.
1931  */
1932 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1933 {
1934         int ret = 0;
1935
1936         switch (event->event) {
1937         case IB_CM_REQ_RECEIVED:
1938                 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
1939                                        event->private_data);
1940                 break;
1941         case IB_CM_REJ_RECEIVED:
1942                 srpt_cm_rej_recv(cm_id);
1943                 ret = -EINVAL;
1944                 break;
1945         case IB_CM_RTU_RECEIVED:
1946         case IB_CM_USER_ESTABLISHED:
1947                 ret = srpt_cm_rtu_recv(cm_id);
1948                 break;
1949         case IB_CM_DREQ_RECEIVED:
1950                 ret = srpt_cm_dreq_recv(cm_id);
1951                 break;
1952         case IB_CM_DREP_RECEIVED:
1953                 srpt_cm_drep_recv(cm_id);
1954                 ret = -EINVAL;
1955                 break;
1956         case IB_CM_TIMEWAIT_EXIT:
1957                 srpt_cm_timewait_exit(cm_id);
1958                 ret = -EINVAL;
1959                 break;
1960         case IB_CM_REP_ERROR:
1961                 srpt_cm_rep_error(cm_id);
1962                 ret = -EINVAL;
1963                 break;
1964         default:
1965                 break;
1966         }
1967
1968         return ret;
1969 }
1970
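/*
 * DMA-map the scatter/gather list of an SCST command and distribute the mapped
 * buffer fragments over the ib_sge arrays of the rdma_iu structures of 'ioctx',
 * one rdma_iu per RDMA work request that will be posted later.
 */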
1971 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1972                                  struct srpt_ioctx *ioctx,
1973                                  struct scst_cmd *scmnd)
1974 {
1975         struct scatterlist *scat;
1976         scst_data_direction dir;
1977         struct rdma_iu *riu;
1978         struct srp_direct_buf *db;
1979         dma_addr_t dma_addr;
1980         struct ib_sge *sge;
1981         u64 raddr;
1982         u32 rsize;
1983         u32 tsize;
1984         u32 dma_len;
1985         int count, nrdma;
1986         int i, j, k;
1987
1988         scat = scst_cmd_get_sg(scmnd);
1989         dir = scst_cmd_get_data_direction(scmnd);
1990         count = dma_map_sg(ch->sport->sdev->device->dma_device, scat,
1991                            scst_cmd_get_sg_cnt(scmnd),
1992                            scst_to_tgt_dma_dir(dir));
1993         if (unlikely(!count))
1994                 return -EBUSY;
1995
1996         if (ioctx->rdma_ius && ioctx->n_rdma_ius)
1997                 nrdma = ioctx->n_rdma_ius;
1998         else {
1999                 nrdma = count / SRPT_DEF_SG_PER_WQE + ioctx->n_rbuf;
2000
2001                 ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu,
2002                                           scst_cmd_atomic(scmnd)
2003                                           ? GFP_ATOMIC : GFP_KERNEL);
2004                 if (!ioctx->rdma_ius) {
2005                         dma_unmap_sg(ch->sport->sdev->device->dma_device,
2006                                      scat, scst_cmd_get_sg_cnt(scmnd),
2007                                      scst_to_tgt_dma_dir(dir));
2008                         return -ENOMEM;
2009                 }
2010
2011                 ioctx->n_rdma_ius = nrdma;
2012         }
2013
2014         db = ioctx->rbufs;
2015         tsize = (dir == SCST_DATA_READ) ?
2016                 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
2017         dma_len = sg_dma_len(&scat[0]);
2018         riu = ioctx->rdma_ius;
2019
2020         /*
2021          * For each remote descriptor, calculate the number of ib_sge entries
2022          * needed to describe its data. If a descriptor needs at most
2023          * SRPT_DEF_SG_PER_WQE ib_sge entries, a single rdma_iu (one RDMA work
2024          * request) is enough; otherwise additional rdma_iu structures are
2025          * allocated to carry the extra ib_sge entries in further RDMA work
2026          * requests.
2027          */
2028         for (i = 0, j = 0;
2029              j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
2030                 rsize = be32_to_cpu(db->len);
2031                 raddr = be64_to_cpu(db->va);
2032                 riu->raddr = raddr;
2033                 riu->rkey = be32_to_cpu(db->key);
2034                 riu->sge_cnt = 0;
2035
2036                 /* calculate how many sge required for this remote_buf */
2037                 while (rsize > 0 && tsize > 0) {
2038
2039                         if (rsize >= dma_len) {
2040                                 tsize -= dma_len;
2041                                 rsize -= dma_len;
2042                                 raddr += dma_len;
2043
2044                                 if (tsize > 0) {
2045                                         ++j;
2046                                         if (j < count)
2047                                                 dma_len = sg_dma_len(&scat[j]);
2048                                 }
2049                         } else {
2050                                 tsize -= rsize;
2051                                 dma_len -= rsize;
2052                                 rsize = 0;
2053                         }
2054
2055                         ++riu->sge_cnt;
2056
2057                         if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
2058                                 riu->sge =
2059                                     kmalloc(riu->sge_cnt * sizeof *riu->sge,
2060                                             scst_cmd_atomic(scmnd)
2061                                             ? GFP_ATOMIC : GFP_KERNEL);
2062                                 if (!riu->sge)
2063                                         goto free_mem;
2064
2065                                 ++ioctx->n_rdma;
2066                                 ++riu;
2067                                 riu->sge_cnt = 0;
2068                                 riu->raddr = raddr;
2069                                 riu->rkey = be32_to_cpu(db->key);
2070                         }
2071                 }
2072
2073                 riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
2074                                    scst_cmd_atomic(scmnd)
2075                                    ? GFP_ATOMIC : GFP_KERNEL);
2076
2077                 if (!riu->sge)
2078                         goto free_mem;
2079
2080                 ++ioctx->n_rdma;
2081         }
2082
2083         db = ioctx->rbufs;
2084         scat = scst_cmd_get_sg(scmnd);
2085         tsize = (dir == SCST_DATA_READ) ?
2086                 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
2087         riu = ioctx->rdma_ius;
2088         dma_len = sg_dma_len(&scat[0]);
2089         dma_addr = sg_dma_address(&scat[0]);
2090
2091         /* The second loop maps the DMA addresses of the scatterlist onto the rdma_iu->sge entries. */
2092         for (i = 0, j = 0;
2093              j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
2094                 rsize = be32_to_cpu(db->len);
2095                 sge = riu->sge;
2096                 k = 0;
2097
2098                 while (rsize > 0 && tsize > 0) {
2099                         sge->addr = dma_addr;
2100                         sge->lkey = ch->sport->sdev->mr->lkey;
2101
2102                         if (rsize >= dma_len) {
2103                                 sge->length =
2104                                         (tsize < dma_len) ? tsize : dma_len;
2105                                 tsize -= dma_len;
2106                                 rsize -= dma_len;
2107
2108                                 if (tsize > 0) {
2109                                         ++j;
2110                                         if (j < count) {
2111                                                 dma_len = sg_dma_len(&scat[j]);
2112                                                 dma_addr =
2113                                                     sg_dma_address(&scat[j]);
2114                                         }
2115                                 }
2116                         } else {
2117                                 sge->length = (tsize < rsize) ? tsize : rsize;
2118                                 tsize -= rsize;
2119                                 dma_len -= rsize;
2120                                 dma_addr += rsize;
2121                                 rsize = 0;
2122                         }
2123
2124                         ++k;
2125                         if (k == riu->sge_cnt && rsize > 0) {
2126                                 ++riu;
2127                                 sge = riu->sge;
2128                                 k = 0;
2129                         } else if (rsize > 0)
2130                                 ++sge;
2131                 }
2132         }
2133
2134         return 0;
2135
2136 free_mem:
2137         while (ioctx->n_rdma)
2138                 kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
2139
2140         kfree(ioctx->rdma_ius);
2141
2142         dma_unmap_sg(ch->sport->sdev->device->dma_device,
2143                      scat, scst_cmd_get_sg_cnt(scmnd),
2144                      scst_to_tgt_dma_dir(dir));
2145
2146         return -ENOMEM;
2147 }
2148
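/*
 * Post one RDMA work request per rdma_iu of 'ioctx' on queue pair ch->qp:
 * RDMA writes for data-in (SCST read) commands and RDMA reads for data-out
 * (SCST write) commands.
 */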
2149 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
2150                               scst_data_direction dir)
2151 {
2152         struct ib_send_wr wr;
2153         struct ib_send_wr *bad_wr;
2154         struct rdma_iu *riu;
2155         int i;
2156         int ret = 0;
2157
2158         riu = ioctx->rdma_ius;
2159         memset(&wr, 0, sizeof wr);
2160
2161         for (i = 0; i < ioctx->n_rdma; ++i, ++riu) {
2162                 wr.opcode = (dir == SCST_DATA_READ) ?
2163                     IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
2164                 wr.next = NULL;
2165                 wr.wr_id = ioctx->index;
2166                 wr.wr.rdma.remote_addr = riu->raddr;
2167                 wr.wr.rdma.rkey = riu->rkey;
2168                 wr.num_sge = riu->sge_cnt;
2169                 wr.sg_list = riu->sge;
2170
2171                 /* Request a completion only for the last work request of a data-out (RDMA read) transfer. */
2172                 if (i == (ioctx->n_rdma - 1) && dir == SCST_DATA_WRITE)
2173                         wr.send_flags = IB_SEND_SIGNALED;
2174
2175                 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2176                 if (ret)
2177                         break;
2178         }
2179
2180         return ret;
2181 }
2182
2183 /*
2184  * Start a data transfer between initiator and target via RDMA. Must not block.
2185  */
2186 static int srpt_xfer_data(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
2187                           struct scst_cmd *scmnd)
2188 {
2189         int ret;
2190
2191         ret = srpt_map_sg_to_ib_sge(ch, ioctx, scmnd);
2192         if (ret) {
2193                 printk(KERN_ERR PFX "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2194                 ret = SCST_TGT_RES_QUEUE_FULL;
2195                 goto out;
2196         }
2197
2198         ret = srpt_perform_rdmas(ch, ioctx, scst_cmd_get_data_direction(scmnd));
2199         if (ret) {
2200                 printk(KERN_ERR PFX "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2201                 if (ret == -EAGAIN || ret == -ENOMEM)
2202                         ret = SCST_TGT_RES_QUEUE_FULL;
2203                 else
2204                         ret = SCST_TGT_RES_FATAL_ERROR;
2205                 goto out;
2206         }
2207
2208         ret = SCST_TGT_RES_SUCCESS;
2209
2210 out:
2211         return ret;
2212 }
2213
2214 /*
2215  * Called by the SCST core to inform ib_srpt that data reception should start.
2216  * Must not block.
2217  */
2218 static int srpt_rdy_to_xfer(struct scst_cmd *scmnd)
2219 {
2220         struct srpt_rdma_ch *ch;
2221         struct srpt_ioctx *ioctx;
2222
2223         ioctx = scst_cmd_get_tgt_priv(scmnd);
2224         BUG_ON(!ioctx);
2225
2226         ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2227         BUG_ON(!ch);
2228
2229         if (ch->state == RDMA_CHANNEL_DISCONNECTING)
2230                 return SCST_TGT_RES_FATAL_ERROR;
2231         else if (ch->state == RDMA_CHANNEL_CONNECTING)
2232                 return SCST_TGT_RES_QUEUE_FULL;
2233
2234         ioctx->state = SRPT_STATE_NEED_DATA;
2235
2236         return srpt_xfer_data(ch, ioctx, scmnd);
2237 }
2238
2239 /*
2240  * Called by the SCST core. Transmits the response buffer and status held in
2241  * 'scmnd'. Must not block.
2242  */
2243 static int srpt_xmit_response(struct scst_cmd *scmnd)
2244 {
2245         struct srpt_rdma_ch *ch;
2246         struct srpt_ioctx *ioctx;
2247         struct srp_rsp *srp_rsp;
2248         u64 tag;
2249         int ret = SCST_TGT_RES_SUCCESS;
2250         int dir;
2251         int status;
2252
2253         ioctx = scst_cmd_get_tgt_priv(scmnd);
2254         BUG_ON(!ioctx);
2255
2256         ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2257         BUG_ON(!ch);
2258
2259         tag = scst_cmd_get_tag(scmnd);
2260
2261         if (ch->state != RDMA_CHANNEL_LIVE) {
2262                 printk(KERN_ERR PFX
2263                        "%s: tag= %lld channel in bad state %d\n",
2264                        __func__, (unsigned long long)tag, ch->state);
2265
2266                 if (ch->state == RDMA_CHANNEL_DISCONNECTING)
2267                         ret = SCST_TGT_RES_FATAL_ERROR;
2268                 else if (ch->state == RDMA_CHANNEL_CONNECTING)
2269                         ret = SCST_TGT_RES_QUEUE_FULL;
2270
2271                 if (unlikely(scst_cmd_aborted(scmnd)))
2272                         goto out_aborted;
2273
2274                 goto out;
2275         }
2276
2277         dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
2278                                 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
2279
2280         srp_rsp = ioctx->buf;
2281
2282         if (unlikely(scst_cmd_aborted(scmnd))) {
2283                 printk(KERN_ERR PFX
2284                        "%s: tag= %lld has already been aborted\n",
2285                        __func__, (unsigned long long)tag);
2286                 goto out_aborted;
2287         }
2288
2289         dir = scst_cmd_get_data_direction(scmnd);
2290         status = scst_cmd_get_status(scmnd) & 0xff;
2291
2292         srpt_build_cmd_rsp(ch, ioctx, NO_SENSE, NO_ADD_SENSE, tag);
2293
2294         if (SCST_SENSE_VALID(scst_cmd_get_sense_buffer(scmnd))) {
2295                 srp_rsp->sense_data_len = scst_cmd_get_sense_buffer_len(scmnd);
2296                 if (srp_rsp->sense_data_len >
2297                     (MAX_MESSAGE_SIZE - sizeof *srp_rsp))
2298                         srp_rsp->sense_data_len =
2299                             MAX_MESSAGE_SIZE - sizeof *srp_rsp;
2300
2301                 memcpy((u8 *) (srp_rsp + 1), scst_cmd_get_sense_buffer(scmnd),
2302                        srp_rsp->sense_data_len);
2303
2304                 srp_rsp->sense_data_len = cpu_to_be32(srp_rsp->sense_data_len);
2305                 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
2306
2307                 if (!status)
2308                         status = SAM_STAT_CHECK_CONDITION;
2309         }
2310
2311         srp_rsp->status = status;
2312
2313         /* transfer read data if any */
2314         if (dir == SCST_DATA_READ && scst_cmd_get_resp_data_len(scmnd)) {
2315                 ret = srpt_xfer_data(ch, ioctx, scmnd);
2316                 if (ret != SCST_TGT_RES_SUCCESS) {
2317                         printk(KERN_ERR PFX
2318                                "%s: tag= %lld xfer_data failed\n",
2319                                __func__, (unsigned long long)tag);
2320                         goto out;
2321                 }
2322         }
2323
2324         ioctx->state = SRPT_STATE_PROCESSED;
2325
2326         if (srpt_post_send(ch, ioctx,
2327                            sizeof *srp_rsp +
2328                            be32_to_cpu(srp_rsp->sense_data_len))) {
2329                 printk(KERN_ERR PFX "%s: ch->state= %d tag= %lld\n",
2330                        __func__, ch->state,
2331                        (unsigned long long)tag);
2332                 ret = SCST_TGT_RES_FATAL_ERROR;
2333         }
2334
2335 out:
2336         return ret;
2337
2338 out_aborted:
2339         ret = SCST_TGT_RES_SUCCESS;
2340         scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_ABORTED);
2341         ioctx->state = SRPT_STATE_ABORTED;
2342         scst_tgt_cmd_done(scmnd, SCST_CONTEXT_SAME);
2343         goto out;
2344 }
2345
2346 /*
2347  * Called by the SCST core to inform ib_srpt that a received task management
2348  * function has been completed. Must not block.
2349  */
2350 static void srpt_tsk_mgmt_done(struct scst_mgmt_cmd *mcmnd)
2351 {
2352         struct srpt_rdma_ch *ch;
2353         struct srpt_mgmt_ioctx *mgmt_ioctx;
2354         struct srpt_ioctx *ioctx;
2355         int rsp_len;
2356
2357         mgmt_ioctx = scst_mgmt_cmd_get_tgt_priv(mcmnd);
2358         BUG_ON(!mgmt_ioctx);
2359
2360         ch = mgmt_ioctx->ch;
2361         BUG_ON(!ch);
2362
2363         ioctx = mgmt_ioctx->ioctx;
2364         BUG_ON(!ioctx);
2365
2366         TRACE_DBG("%s: tsk_mgmt_done for tag= %lld status=%d\n",
2367                   __func__, (unsigned long long)mgmt_ioctx->tag,
2368                   scst_mgmt_cmd_get_status(mcmnd));
2369
2370         rsp_len = srpt_build_tskmgmt_rsp(ch, ioctx,
2371                                          (scst_mgmt_cmd_get_status(mcmnd) ==
2372                                           SCST_MGMT_STATUS_SUCCESS) ?
2373                                          SRP_TSK_MGMT_SUCCESS :
2374                                          SRP_TSK_MGMT_FAILED,
2375                                          mgmt_ioctx->tag);
2376         srpt_post_send(ch, ioctx, rsp_len);
2377
2378         scst_mgmt_cmd_set_tgt_priv(mcmnd, NULL);
2379
2380         kfree(mgmt_ioctx);
2381 }
2382
2383 /*
2384  * Called by the SCST core to inform ib_srpt that the command 'scmnd' is about
2385  * to be freed. May be called in IRQ context.
2386  */
2387 static void srpt_on_free_cmd(struct scst_cmd *scmnd)
2388 {
2389         struct srpt_rdma_ch *ch;
2390         struct srpt_ioctx *ioctx;
2391
2392         ioctx = scst_cmd_get_tgt_priv(scmnd);
2393         BUG_ON(!ioctx);
2394
2395         ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2396         BUG_ON(!ch);
2397
2398         spin_lock_irq(&ch->spinlock);
2399         list_del(&ioctx->scmnd_list);
2400         ch->active_scmnd_cnt--;
2401         spin_unlock_irq(&ch->spinlock);
2402
2403         srpt_reset_ioctx(ch, ioctx);
2404         scst_cmd_set_tgt_priv(scmnd, NULL);
2405 }
2406
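/* Work queue callback that refreshes the port information of an SRPT port. */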
2407 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
2408 /* A vanilla 2.6.19 or older kernel without backported OFED kernel headers. */
2409 static void srpt_refresh_port_work(void *ctx)
2410 #else
2411 static void srpt_refresh_port_work(struct work_struct *work)
2412 #endif
2413 {
2414 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
2415         struct srpt_port *sport = (struct srpt_port *)ctx;
2416 #else
2417         struct srpt_port *sport = container_of(work, struct srpt_port, work);
2418 #endif
2419
2420         srpt_refresh_port(sport);
2421 }
2422
2423 /*
2424  * Called by the SCST core to detect target adapters. Returns the number of
2425  * detected target adapters.
2426  */
2427 static int srpt_detect(struct scst_tgt_template *tp)
2428 {
2429         int device_count;
2430
2431         TRACE_ENTRY();
2432
2433         device_count = atomic_read(&srpt_device_count);
2434
2435         TRACE_EXIT_RES(device_count);
2436
2437         return device_count;
2438 }
2439
2440 /*
2441  * Callback function called by the SCST core from scst_unregister() to free up
2442  * the resources associated with device scst_tgt.
2443  */
2444 static int srpt_release(struct scst_tgt *scst_tgt)
2445 {
2446         struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
2447         struct srpt_rdma_ch *ch, *tmp_ch;
2448
2449         TRACE_ENTRY();
2450
2451         BUG_ON(!scst_tgt);
2452 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
2453         WARN_ON(!sdev);
2454         if (!sdev)
2455                 return -ENODEV;
2456 #else
2457         if (WARN_ON(!sdev))
2458                 return -ENODEV;
2459 #endif
2460
2461         srpt_unregister_procfs_entry(scst_tgt->tgtt);
2462
2463         spin_lock_irq(&sdev->spinlock);
2464         list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2465                 list_del(&ch->list);
2466                 spin_unlock_irq(&sdev->spinlock);
2467                 srpt_release_channel(ch, 1);
2468                 spin_lock_irq(&sdev->spinlock);
2469         }
2470         spin_unlock_irq(&sdev->spinlock);
2471
2472         srpt_unregister_mad_agent(sdev);
2473
2474         scst_tgt_set_tgt_priv(scst_tgt, NULL);
2475
2476         TRACE_EXIT();
2477
2478         return 0;
2479 }
2480
2481 /*
2482  * Entry point for ib_srpt's kernel thread. This kernel thread is only created
2483  * when the module parameter 'thread' is not zero (the default is zero).
2484  * This thread processes the ioctx list srpt_thread.thread_ioctx_list.
2485  *
2486  * @pre thread != 0
2487  */
2488 static int srpt_ioctx_thread(void *arg)
2489 {
2490         struct srpt_ioctx *ioctx;
2491
2492         /* Hibernation / freezing of the SRPT kernel thread is not supported. */
2493         current->flags |= PF_NOFREEZE;
2494
2495         spin_lock_irq(&srpt_thread.thread_lock);
2496         while (!kthread_should_stop()) {
2497                 wait_queue_t wait;
2498                 init_waitqueue_entry(&wait, current);
2499
2500                 if (!srpt_test_ioctx_list()) {
2501                         add_wait_queue_exclusive(&ioctx_list_waitQ, &wait);
2502
2503                         for (;;) {
2504                                 set_current_state(TASK_INTERRUPTIBLE);
2505                                 if (srpt_test_ioctx_list())
2506                                         break;
2507                                 spin_unlock_irq(&srpt_thread.thread_lock);
2508                                 schedule();
2509                                 spin_lock_irq(&srpt_thread.thread_lock);
2510                         }
2511                         set_current_state(TASK_RUNNING);
2512                         remove_wait_queue(&ioctx_list_waitQ, &wait);
2513                 }
2514
2515                 while (!list_empty(&srpt_thread.thread_ioctx_list)) {
2516                         ioctx = list_entry(srpt_thread.thread_ioctx_list.next,
2517                                            struct srpt_ioctx, comp_list);
2518
2519                         list_del(&ioctx->comp_list);
2520
2521                         spin_unlock_irq(&srpt_thread.thread_lock);
2522                         switch (ioctx->op) {
2523                         case IB_WC_SEND:
2524                                 srpt_handle_send_comp(ioctx->ch, ioctx,
2525                                         SCST_CONTEXT_DIRECT);
2526                                 break;
2527                         case IB_WC_RDMA_WRITE:
2528                         case IB_WC_RDMA_READ:
2529                                 srpt_handle_rdma_comp(ioctx->ch, ioctx);
2530                                 break;
2531                         case IB_WC_RECV:
2532                                 srpt_handle_new_iu(ioctx->ch, ioctx);
2533                                 break;
2534                         default:
2535                                 break;
2536                         }
2537                         spin_lock_irq(&srpt_thread.thread_lock);
2538                 }
2539         }
2540         spin_unlock_irq(&srpt_thread.thread_lock);
2541
2542         return 0;
2543 }
2544
2545 /* SCST target template for the SRP target implementation. */
2546 static struct scst_tgt_template srpt_template = {
2547         .name = DRV_NAME,
2548         .sg_tablesize = SRPT_DEF_SG_TABLESIZE,
2549         .xmit_response_atomic = 1,
2550         .rdy_to_xfer_atomic = 1,
2551         .no_proc_entry = 0,
2552         .detect = srpt_detect,
2553         .release = srpt_release,
2554         .xmit_response = srpt_xmit_response,
2555         .rdy_to_xfer = srpt_rdy_to_xfer,
2556         .on_free_cmd = srpt_on_free_cmd,
2557         .task_mgmt_fn_done = srpt_tsk_mgmt_done
2558 };
2559
2560 /*
2561  * The callback function srpt_release_class_dev() is called whenever a
2562  * device is removed from the /sys/class/infiniband_srpt device class.
2563  */
2564 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2565 static void srpt_release_class_dev(struct class_device *class_dev)
2566 #else
2567 static void srpt_release_class_dev(struct device *dev)
2568 #endif
2569 {
2570 }
2571
2572 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
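/* Display the currently enabled trace flags. */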
2573 static int srpt_trace_level_show(struct seq_file *seq, void *v)
2574 {
2575         return scst_proc_log_entry_read(seq, trace_flag, NULL);
2576 }
2577
2578 static ssize_t srpt_proc_trace_level_write(struct file *file,
2579         const char __user *buf, size_t length, loff_t *off)
2580 {
2581         return scst_proc_log_entry_write(file, buf, length, &trace_flag,
2582                 DEFAULT_SRPT_TRACE_FLAGS, NULL);
2583 }
2584
2585 static struct scst_proc_data srpt_log_proc_data = {
2586         SCST_DEF_RW_SEQ_OP(srpt_proc_trace_level_write)
2587         .show = srpt_trace_level_show,
2588 };
2589 #endif
2590
2591 static struct class_attribute srpt_class_attrs[] = {
2592         __ATTR_NULL,
2593 };
2594
2595 static struct class srpt_class = {
2596         .name = "infiniband_srpt",
2597 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2598         .release = srpt_release_class_dev,
2599 #else
2600         .dev_release = srpt_release_class_dev,
2601 #endif
2602         .class_attrs = srpt_class_attrs,
2603 };
2604
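/*
 * Device attribute callback: print, for each port of the HCA, the target login
 * parameters that an SRP initiator needs in order to connect to this target
 * (target ID extension, I/O controller GUID, partition key, destination GID
 * and service ID).
 */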
2605 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2606 static ssize_t show_login_info(struct class_device *class_dev, char *buf)
2607 #else
2608 static ssize_t show_login_info(struct device *dev,
2609                                struct device_attribute *attr, char *buf)
2610 #endif
2611 {
2612         struct srpt_device *sdev =
2613 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2614                 container_of(class_dev, struct srpt_device, class_dev);
2615 #else
2616                 container_of(dev, struct srpt_device, dev);
2617 #endif
2618         struct srpt_port *sport;
2619         u64 ioc_guid;
2620         int i;
2621         int len = 0;
2622
2623         for (i = 0; i < sdev->device->phys_port_cnt; i++) {
2624                 sport = &sdev->port[i];
2625
2626                 ioc_guid = srpt_get_ioc_guid(sdev->device);
2627                 len += sprintf(buf + len,
2628                                "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
2629                                "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
2630                                "service_id=%016llx\n",
2631                                (unsigned long long) ioc_guid,
2632                                (unsigned long long) ioc_guid,
2633                                be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
2634                                be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
2635                                be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
2636                                be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
2637                                be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
2638                                be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
2639                                be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
2640                                be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
2641                                (unsigned long long) ioc_guid);
2642         }
2643
2644         return len;
2645 }
2646
2647 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2648 static CLASS_DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
2649 #else
2650 static DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
2651 #endif
2652
2653 /*
2654  * Callback function called by the InfiniBand core when either an InfiniBand
2655  * device has been added or during the ib_register_client() call for each
2656  * registered InfiniBand device.
2657  */
2658 static void srpt_add_one(struct ib_device *device)
2659 {
2660         struct srpt_device *sdev;
2661         struct srpt_port *sport;
2662         struct ib_srq_init_attr srq_attr;
2663         int i;
2664
2665         TRACE_ENTRY();
2666
2667         sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
2668         if (!sdev)
2669                 return;
2670
2671         sdev->device = device;
2672
2673 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2674         sdev->class_dev.class = &srpt_class;
2675         sdev->class_dev.dev = device->dma_device;
2676         snprintf(sdev->class_dev.class_id, BUS_ID_SIZE,
2677                  "srpt-%s", device->name);
2678 #else
2679         sdev->dev.class = &srpt_class;
2680         sdev->dev.parent = device->dma_device;
2681 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
2682         snprintf(sdev->dev.bus_id, BUS_ID_SIZE, "srpt-%s", device->name);
2683 #else
2684         snprintf(sdev->init_name, sizeof(sdev->init_name),
2685                  "srpt-%s", device->name);
2686         sdev->dev.init_name = sdev->init_name;
2687 #endif
2688 #endif
2689
2690 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2691         if (class_device_register(&sdev->class_dev))
2692                 goto free_dev;
2693         if (class_device_create_file(&sdev->class_dev,
2694                                      &class_device_attr_login_info))
2695                 goto err_dev;
2696 #else
2697         if (device_register(&sdev->dev))
2698                 goto free_dev;
2699         if (device_create_file(&sdev->dev, &dev_attr_login_info))
2700                 goto err_dev;
2701 #endif
2702
2703         if (ib_query_device(device, &sdev->dev_attr))
2704                 goto err_dev;
2705
2706         sdev->pd = ib_alloc_pd(device);
2707         if (IS_ERR(sdev->pd))
2708                 goto err_dev;
2709
2710         sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
2711         if (IS_ERR(sdev->mr))
2712                 goto err_pd;
2713
2714         srq_attr.event_handler = srpt_srq_event;
2715         srq_attr.srq_context = (void *)sdev;
2716         srq_attr.attr.max_wr = min(SRPT_SRQ_SIZE, sdev->dev_attr.max_srq_wr);
2717         srq_attr.attr.max_sge = 1;
2718         srq_attr.attr.srq_limit = 0;
2719
2720         sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
2721         if (IS_ERR(sdev->srq))
2722                 goto err_mr;
2723
2724         TRACE_DBG("%s: create SRQ #wr= %d max_allow=%d dev= %s",
2725                __func__, srq_attr.attr.max_wr,
2726               sdev->dev_attr.max_srq_wr, device->name);
2727
2728         if (!global_ioc_guid)
2729                 global_ioc_guid = be64_to_cpu(device->node_guid);
2730
2731         sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
2732         if (IS_ERR(sdev->cm_id))
2733                 goto err_srq;
2734
2735         /* print out target login information */
2736         TRACE_DBG("Target login info: id_ext=%016llx,"
2737                   "ioc_guid=%016llx,pkey=ffff,service_id=%016llx",
2738                   (unsigned long long) srpt_get_ioc_guid(sdev->device),
2739                   (unsigned long long) srpt_get_ioc_guid(sdev->device),
2740                   (unsigned long long) srpt_get_ioc_guid(sdev->device));
2741
2742         /*
2743          * We do not have a consistent service_id (i.e. also the id_ext of the
2744          * target_id) with which to identify this target. We currently use the
2745          * GUID of the first HCA in the system as the service_id; therefore the
2746          * target_id will change if this HCA fails and is replaced by a different one.
2747          */
2748         if (ib_cm_listen(sdev->cm_id, cpu_to_be64(global_ioc_guid), 0, NULL))
2749                 goto err_cm;
2750
2751         INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
2752                               srpt_event_handler);
2753         if (ib_register_event_handler(&sdev->event_handler))
2754                 goto err_cm;
2755
2756         if (srpt_alloc_ioctx_ring(sdev))
2757                 goto err_event;
2758
2759         INIT_LIST_HEAD(&sdev->rch_list);
2760         spin_lock_init(&sdev->spinlock);
2761
2762         for (i = 0; i < SRPT_SRQ_SIZE; ++i)
2763                 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
2764
2765         ib_set_client_data(device, &srpt_client, sdev);
2766
2767         sdev->scst_tgt = scst_register(&srpt_template, NULL);
2768         if (!sdev->scst_tgt) {
2769                 printk(KERN_ERR PFX "SCST registration failed for %s.\n",
2770                         sdev->device->name);
2771                 goto err_ring;
2772         }
2773
2774         scst_tgt_set_tgt_priv(sdev->scst_tgt, sdev);
2775
2776         for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
2777                 sport = &sdev->port[i - 1];
2778                 sport->sdev = sdev;
2779                 sport->port = i;
2780 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
2781                 /*
2782                  * A vanilla 2.6.19 or older kernel without backported OFED
2783                  * kernel headers.
2784                  */
2785                 INIT_WORK(&sport->work, srpt_refresh_port_work, sport);
2786 #else
2787                 INIT_WORK(&sport->work, srpt_refresh_port_work);
2788 #endif
2789                 if (srpt_refresh_port(sport)) {
2790                         printk(KERN_ERR PFX "MAD registration failed"
2791                                " for %s-%d.\n", sdev->device->name, i);
2792                         goto err_refresh_port;
2793                 }
2794         }
2795
2796         atomic_inc(&srpt_device_count);
2797
2798         TRACE_EXIT();
2799
2800         return;
2801
2802 err_refresh_port:
2803         scst_unregister(sdev->scst_tgt);
2804 err_ring:
2805         ib_set_client_data(device, &srpt_client, NULL);
2806         srpt_free_ioctx_ring(sdev);
2807 err_event:
2808         ib_unregister_event_handler(&sdev->event_handler);
2809 err_cm:
2810         ib_destroy_cm_id(sdev->cm_id);
2811 err_srq:
2812         ib_destroy_srq(sdev->srq);
2813 err_mr:
2814         ib_dereg_mr(sdev->mr);
2815 err_pd:
2816         ib_dealloc_pd(sdev->pd);
2817 err_dev:
2818 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2819         class_device_unregister(&sdev->class_dev);
2820 #else
2821         device_unregister(&sdev->dev);
2822 #endif
2823 free_dev:
2824         kfree(sdev);
2825
2826         TRACE_EXIT();
2827 }
2828
2829 /*
2830  * Callback function called by the InfiniBand core when either an InfiniBand
2831  * device has been removed or during the ib_unregister_client() call for each
2832  * registered InfiniBand device.
2833  */
2834 static void srpt_remove_one(struct ib_device *device)
2835 {
2836         int i;
2837         struct srpt_device *sdev;
2838
2839         TRACE_ENTRY();
2840
2841         sdev = ib_get_client_data(device, &srpt_client);
2842 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
2843         WARN_ON(!sdev);
2844         if (!sdev)
2845                 return;
2846 #else
2847         if (WARN_ON(!sdev))
2848                 return;
2849 #endif
2850
2851         /*
2852          * Cancel the port refresh work if it is queued, and wait until
2853          * srpt_refresh_port_work() has finished if it is currently running.
2854          */
2855         for (i = 0; i < sdev->device->phys_port_cnt; i++)
2856 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
2857                 cancel_work_sync(&sdev->port[i].work);
2858 #else
2859                 /*
2860                  * cancel_work_sync() was introduced in kernel 2.6.22. Older
2861                  * kernels do not have a facility to cancel scheduled work.
2862                  */
2863                 printk(KERN_ERR PFX
2864                        "your kernel does not provide cancel_work_sync().\n");
2865 #endif
2866
2867         scst_unregister(sdev->scst_tgt);
2868         sdev->scst_tgt = NULL;
2869
2870         ib_unregister_event_handler(&sdev->event_handler);
2871         ib_destroy_cm_id(sdev->cm_id);
2872         ib_destroy_srq(sdev->srq);
2873         ib_dereg_mr(sdev->mr);
2874         ib_dealloc_pd(sdev->pd);
2875 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2876         class_device_unregister(&sdev->class_dev);
2877 #else
2878         device_unregister(&sdev->dev);
2879 #endif
2880
2881         srpt_free_ioctx_ring(sdev);
2882         kfree(sdev);
2883
2884         TRACE_EXIT();
2885 }
2886
2887 /**
2888  * Create procfs entries for srpt. Currently the only procfs entry created
2889  * by this function is the "trace_level" entry.
2890  */
2891 static int srpt_register_procfs_entry(struct scst_tgt_template *tgt)
2892 {
2893         int res = 0;
2894 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2895         struct proc_dir_entry *p, *root;
2896
2897         root = scst_proc_get_tgt_root(tgt);
2898         WARN_ON(!root);
2899         if (root) {
2900                 /*
2901                  * Fill in the scst_proc_data::data pointer, which is used in
2902                  * a printk(KERN_INFO ...) statement in
2903                  * scst_proc_log_entry_write() in scst_proc.c.
2904                  */
2905                 srpt_log_proc_data.data = (char *)tgt->name;
2906                 p = scst_create_proc_entry(root, SRPT_PROC_TRACE_LEVEL_NAME,
2907                                            &srpt_log_proc_data);
2908                 if (!p)
2909                         res = -ENOMEM;
2910         } else
2911                 res = -ENOMEM;
2912
2913 #endif
2914         return res;
2915 }
2916
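/* Remove the "trace_level" procfs entry created by srpt_register_procfs_entry(). */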
2917 static void srpt_unregister_procfs_entry(struct scst_tgt_template *tgt)
2918 {
2919 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2920         struct proc_dir_entry *root;
2921
2922         root = scst_proc_get_tgt_root(tgt);
2923         WARN_ON(!root);
2924         if (root)
2925                 remove_proc_entry(SRPT_PROC_TRACE_LEVEL_NAME, root);
2926 #endif
2927 }
2928
2929 /*
2930  * Module initialization.
2931  *
2932  * Note: since ib_register_client() registers callback functions, and since at
2933  * least one of these callback functions (srpt_add_one()) calls SCST functions,
2934  * the SCST target template must be registered before ib_register_client() is
2935  * called.
2936  */
2937 static int __init srpt_init_module(void)
2938 {
2939         int ret;
2940
2941         ret = class_register(&srpt_class);
2942         if (ret) {
2943                 printk(KERN_ERR PFX "couldn't register class ib_srpt\n");
2944                 goto out;
2945         }
2946
2947         ret = scst_register_target_template(&srpt_template);
2948         if (ret < 0) {
2949                 printk(KERN_ERR PFX "couldn't register with scst\n");
2950                 ret = -ENODEV;
2951                 goto out_unregister_class;
2952         }
2953
2954         ret = srpt_register_procfs_entry(&srpt_template);
2955         if (ret) {
2956                 printk(KERN_ERR PFX "couldn't register procfs entry\n");
2957                 goto out_unregister_target;
2958         }
2959
2960         ret = ib_register_client(&srpt_client);
2961         if (ret) {
2962                 printk(KERN_ERR PFX "couldn't register IB client\n");
2963                 goto out_unregister_target;
2964         }
2965
2966         if (thread) {
2967                 spin_lock_init(&srpt_thread.thread_lock);
2968                 INIT_LIST_HEAD(&srpt_thread.thread_ioctx_list);
2969                 srpt_thread.thread = kthread_run(srpt_ioctx_thread,
2970                                                  NULL, "srpt_thread");
2971                 if (IS_ERR(srpt_thread.thread)) {
2972                         srpt_thread.thread = NULL;
2973                         thread = 0;
2974                 }
2975         }
2976
2977         return 0;
2978
2979 out_unregister_target:
2980         /*
2981          * Note: the procfs entry is unregistered in srpt_release(), which is
2982          * called by scst_unregister_target_template().
2983          */
2984         scst_unregister_target_template(&srpt_template);
2985 out_unregister_class:
2986         class_unregister(&srpt_class);
2987 out:
2988         return ret;
2989 }
2990
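/*
 * Module cleanup: stop the SRPT kernel thread if it is running and unregister
 * from the IB core, SCST and the device class, in reverse order of
 * initialization.
 */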
2991 static void __exit srpt_cleanup_module(void)
2992 {
2993         TRACE_ENTRY();
2994
2995         if (srpt_thread.thread)
2996                 kthread_stop(srpt_thread.thread);
2997         ib_unregister_client(&srpt_client);
2998         scst_unregister_target_template(&srpt_template);
2999         class_unregister(&srpt_class);
3000
3001         TRACE_EXIT();
3002 }
3003
3004 module_init(srpt_init_module);
3005 module_exit(srpt_cleanup_module);