1 /*
2  * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
3  * Copyright (C) 2008 Vladislav Bolkhovitin <vst@vlnb.net>
4  * Copyright (C) 2008 - 2009 Bart Van Assche <bart.vanassche@gmail.com>
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  */
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/err.h>
40 #include <linux/ctype.h>
41 #include <linux/string.h>
42 #include <linux/kthread.h>
43 #include <asm/atomic.h>
44 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
45 #include <linux/proc_fs.h>
46 #include <linux/seq_file.h>
47 #endif
48 #include "ib_srpt.h"
49 #include "scst_debug.h"
50
51 /* Name of this kernel module. */
52 #define DRV_NAME                "ib_srpt"
53 /* Prefix for printk() kernel messages. */
54 #define PFX                     DRV_NAME ": "
55 #define DRV_VERSION             "1.0.1"
56 #define DRV_RELDATE             "July 10, 2008"
57 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
58 /* Flags to be used in SCST debug tracing statements. */
59 #define DEFAULT_SRPT_TRACE_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR \
60                                   | TRACE_MGMT | TRACE_SPECIAL)
61 /* Name of the entry that will be created under /proc/scsi_tgt/ib_srpt. */
62 #define SRPT_PROC_TRACE_LEVEL_NAME      "trace_level"
63 #endif
64
65 #define MELLANOX_SRPT_ID_STRING "Mellanox OFED SRP target"
66
67 MODULE_AUTHOR("Vu Pham");
68 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
69                    "v" DRV_VERSION " (" DRV_RELDATE ")");
70 MODULE_LICENSE("Dual BSD/GPL");
71
72 struct srpt_thread {
73         /* Protects thread_ioctx_list. */
74         spinlock_t thread_lock;
75         /* I/O contexts to be processed by the kernel thread. */
76         struct list_head thread_ioctx_list;
77         /* SRPT kernel thread. */
78         struct task_struct *thread;
79 };
80
81 /*
82  * Global Variables
83  */
84
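/* GUID reported for all I/O controllers when the one_guid_per_ioc parameter is zero. */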
85 static u64 global_ioc_guid;
86 /* Number of srpt_device structures. */
87 static atomic_t srpt_device_count;
88 static int thread;
89 static int one_guid_per_ioc;
90 static struct srpt_thread srpt_thread;
91 static DECLARE_WAIT_QUEUE_HEAD(ioctx_list_waitQ);
92 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
93 static unsigned long trace_flag = DEFAULT_SRPT_TRACE_FLAGS;
94 module_param(trace_flag, long, 0644);
95 MODULE_PARM_DESC(trace_flag,
96                  "Trace flags for the ib_srpt kernel module.");
97 #endif
98
99 module_param(thread, int, 0444);
100 MODULE_PARM_DESC(thread,
101                  "Execute ioctx in thread context if nonzero. Default is 0, i.e. "
102                  "process in soft IRQ context where possible.");
103 module_param(one_guid_per_ioc, bool, 0444);
104 MODULE_PARM_DESC(one_guid_per_ioc,
105                  "Assign a unique GUID to each IOC instead of using one GUID "
106                  "for all IOCs.");
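/*
 * Example (illustrative only, not from the original source): both parameters
 * above can be set at module load time, e.g.
 * "modprobe ib_srpt thread=1 one_guid_per_ioc=1".
 */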
107
108 static void srpt_add_one(struct ib_device *device);
109 static void srpt_remove_one(struct ib_device *device);
110 static void srpt_unregister_mad_agent(struct srpt_device *sdev);
111 static void srpt_unregister_procfs_entry(struct scst_tgt_template *tgt);
112
113 static struct ib_client srpt_client = {
114         .name = DRV_NAME,
115         .add = srpt_add_one,
116         .remove = srpt_remove_one
117 };
118
119 /**
120  * Atomically test and set the channel state.
121  * @ch: RDMA channel.
122  * @old: channel state to compare with.
123  * @new: state to change the channel state to if the current state matches the
124  *       argument 'old'.
125  *
126  * Returns true if the channel state matched old upon entry of this function,
127  * and false otherwise.
128  */
129 static bool srpt_test_and_set_channel_state(struct srpt_rdma_ch *ch,
130                                             enum rdma_ch_state old,
131                                             enum rdma_ch_state new)
132 {
133         unsigned long flags;
134         enum rdma_ch_state cur;
135
136         spin_lock_irqsave(&ch->spinlock, flags);
137         cur = ch->state;
138         if (cur == old)
139                 ch->state = new;
140         spin_unlock_irqrestore(&ch->spinlock, flags);
141
142         return cur == old;
143 }
144
145 /*
146  * Callback function called by the InfiniBand core when an asynchronous IB
147  * event occurs. This callback may occur in interrupt context. See also
148  * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
149  * Architecture Specification.
150  */
151 static void srpt_event_handler(struct ib_event_handler *handler,
152                                struct ib_event *event)
153 {
154         struct srpt_device *sdev;
155         struct srpt_port *sport;
156
157         sdev = ib_get_client_data(event->device, &srpt_client);
158         if (!sdev || sdev->device != event->device)
159                 return;
160
161         TRACE_DBG("ASYNC event= %d on device= %s",
162                   event->event, sdev->device->name);
163
164         switch (event->event) {
165         case IB_EVENT_PORT_ERR:
166                 if (event->element.port_num <= sdev->device->phys_port_cnt) {
167                         sport = &sdev->port[event->element.port_num - 1];
168                         sport->lid = 0;
169                         sport->sm_lid = 0;
170                 }
171                 break;
172         case IB_EVENT_PORT_ACTIVE:
173         case IB_EVENT_LID_CHANGE:
174         case IB_EVENT_PKEY_CHANGE:
175         case IB_EVENT_SM_CHANGE:
176         case IB_EVENT_CLIENT_REREGISTER:
177                 /*
178                  * Refresh port data asynchronously. Note: it is safe to call
179                  * schedule_work() even if &sport->work is already on the
180                  * global workqueue because schedule_work() tests for the
181                  * work_pending() condition before adding &sport->work to the
182                  * global work queue.
183                  */
184                 if (event->element.port_num <= sdev->device->phys_port_cnt) {
185                         sport = &sdev->port[event->element.port_num - 1];
186                         if (!sport->lid && !sport->sm_lid)
187                                 schedule_work(&sport->work);
188                 }
189                 break;
190         default:
191                 break;
192         }
193
194 }
195
196 /*
197  * Callback function called by the InfiniBand core for SRQ (shared receive
198  * queue) events.
199  */
200 static void srpt_srq_event(struct ib_event *event, void *ctx)
201 {
202         TRACE_DBG("SRQ event %d", event->event);
203 }
204
205 /*
206  * Callback function called by the InfiniBand core for QP (queue pair) events.
207  */
208 static void srpt_qp_event(struct ib_event *event, void *ctx)
209 {
210         struct srpt_rdma_ch *ch = ctx;
211
212         TRACE_DBG("QP event %d on cm_id=%p sess_name=%s state=%d",
213                   event->event, ch->cm_id, ch->sess_name, ch->state);
214
215         switch (event->event) {
216         case IB_EVENT_COMM_EST:
217 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) || defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
218                 ib_cm_notify(ch->cm_id, event->event);
219 #else
220                 /* Vanilla 2.6.19 kernel (or before) without OFED. */
221                 printk(KERN_ERR PFX "ib_cm_notify() is not available on"
222                         " vanilla kernels older than 2.6.20.\n");
223 #endif
224                 break;
225         case IB_EVENT_QP_LAST_WQE_REACHED:
226                 if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_LIVE,
227                                         RDMA_CHANNEL_DISCONNECTING)) {
228                         TRACE_DBG("%s", "Disconnecting channel.");
229                         ib_send_cm_dreq(ch->cm_id, NULL, 0);
230                 }
231                 break;
232         default:
233                 break;
234         }
235 }
236
237 /*
238  * Helper function for filling in an InfiniBand IOUnitInfo structure: copies
239  * the lowest four bits of 'value' into element 'slot' of the array of
240  * four-bit elements 'c_list' (the controller list). The slot index is one-based.
241  *
242  * @pre 1 <= slot && 0 <= value && value < 16
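 *
 * Example: slot 1 maps to the high nibble of c_list[0], slot 2 to the low
 * nibble of c_list[0], slot 3 to the high nibble of c_list[1], and so on.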
243  */
244 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
245 {
246         u16 id;
247         u8 tmp;
248
249         id = (slot - 1) / 2;
250         if (slot & 0x1) {
251                 tmp = c_list[id] & 0xf;
252                 c_list[id] = (value << 4) | tmp;
253         } else {
254                 tmp = c_list[id] & 0xf0;
255                 c_list[id] = (value & 0xf) | tmp;
256         }
257 }
258
259 /*
260  * Write InfiniBand ClassPortInfo to mad. See also section 16.3.3.1
261  * ClassPortInfo in the InfiniBand Architecture Specification.
262  */
263 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
264 {
265         struct ib_class_port_info *cif;
266
267         cif = (struct ib_class_port_info *)mad->data;
268         memset(cif, 0, sizeof *cif);
269         cif->base_version = 1;
270         cif->class_version = 1;
271         cif->resp_time_value = 20;
272
273         mad->mad_hdr.status = 0;
274 }
275
276 /*
277  * Write IOUnitInfo to mad. See also section 16.3.3.3 IOUnitInfo in the
278  * InfiniBand Architecture Specification. See also section B.7,
279  * table B.6 in the T10 SRP r16a document.
280  */
281 static void srpt_get_iou(struct ib_dm_mad *mad)
282 {
283         struct ib_dm_iou_info *ioui;
284         u8 slot;
285         int i;
286
287         ioui = (struct ib_dm_iou_info *)mad->data;
288         ioui->change_id = 1;
289         ioui->max_controllers = 16;
290
291         /* set present for slot 1 and empty for the rest */
292         srpt_set_ioc(ioui->controller_list, 1, 1);
293         for (i = 1, slot = 2; i < 16; i++, slot++)
294                 srpt_set_ioc(ioui->controller_list, slot, 0);
295
296         mad->mad_hdr.status = 0;
297 }
298
299 /**
300  * Return the GUID that will be communicated to the initiator for identifying
301  * the SRPT target. Depending on the mode variable one_guid_per_ioc, either the
302  * GUID of the specified IOC is returned or the GUID of the first IOC.
303  */
304 static u64 srpt_get_ioc_guid(struct ib_device *device)
305 {
306         BUG_ON(!device);
307         WARN_ON(!global_ioc_guid);
308         WARN_ON(!device->node_guid);
309
310         return one_guid_per_ioc ? be64_to_cpu(device->node_guid)
311                 : global_ioc_guid;
312 }
313
314 /*
315  * Write IOControllerprofile to mad for I/O controller (sdev, slot). See also
316  * section 16.3.3.4 IOControllerProfile in the InfiniBand Architecture
317  * Specification. See also section B.7, table B.7 in the T10 SRP r16a
318  * document.
319  */
320 static void srpt_get_ioc(struct srpt_device *sdev, u32 slot,
321                          struct ib_dm_mad *mad)
322 {
323         struct ib_dm_ioc_profile *iocp;
324
325         iocp = (struct ib_dm_ioc_profile *)mad->data;
326
327         if (!slot || slot > 16) {
328                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
329                 return;
330         }
331
332         if (slot > 2) {
333                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
334                 return;
335         }
336
337         memset(iocp, 0, sizeof *iocp);
338         strcpy(iocp->id_string, MELLANOX_SRPT_ID_STRING);
339         iocp->guid = cpu_to_be64(srpt_get_ioc_guid(sdev->device));
340         iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
341         iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
342         iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
343         iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
344         iocp->subsys_device_id = 0x0;
345         iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
346         iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
347         iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
348         iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
349         iocp->send_queue_depth = cpu_to_be16(SRPT_SRQ_SIZE);
350         iocp->rdma_read_depth = 4;
351         iocp->send_size = cpu_to_be32(MAX_MESSAGE_SIZE);
352         iocp->rdma_size = cpu_to_be32(MAX_RDMA_SIZE);
353         iocp->num_svc_entries = 1;
354         iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
355                 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
356
357         mad->mad_hdr.status = 0;
358 }
359
360 /*
361  * Device management: write ServiceEntries to mad for the given slot. See also
362  * section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
363  * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
364  */
365 static void srpt_get_svc_entries(u64 ioc_guid,
366                                  u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
367 {
368         struct ib_dm_svc_entries *svc_entries;
369
370         WARN_ON(!ioc_guid);
371
372         if (!slot || slot > 16) {
373                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
374                 return;
375         }
376
377         if (slot > 2 || lo > hi || hi > 1) {
378                 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
379                 return;
380         }
381
382         svc_entries = (struct ib_dm_svc_entries *)mad->data;
383         memset(svc_entries, 0, sizeof *svc_entries);
384         svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
385         snprintf(svc_entries->service_entries[0].name,
386                  sizeof(svc_entries->service_entries[0].name),
387                  "%s%016llx",
388                  SRP_SERVICE_NAME_PREFIX,
389                  (unsigned long long)ioc_guid);
390
391         mad->mad_hdr.status = 0;
392 }
393
394 /*
395  * Actual processing of a received MAD *rq_mad received through source port *sp
396  * (MAD = InfiniBand management datagram). The response to be sent back is
397  * written to *rsp_mad.
398  */
399 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
400                                  struct ib_dm_mad *rsp_mad)
401 {
402         u16 attr_id;
403         u32 slot;
404         u8 hi, lo;
405
406         attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
407         switch (attr_id) {
408         case DM_ATTR_CLASS_PORT_INFO:
409                 srpt_get_class_port_info(rsp_mad);
410                 break;
411         case DM_ATTR_IOU_INFO:
412                 srpt_get_iou(rsp_mad);
413                 break;
414         case DM_ATTR_IOC_PROFILE:
415                 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
416                 srpt_get_ioc(sp->sdev, slot, rsp_mad);
417                 break;
418         case DM_ATTR_SVC_ENTRIES:
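                /*
                 * The attribute modifier encodes the controller slot in its
                 * upper 16 bits and the requested range of service entries
                 * in its lower 16 bits (hi = bits 15:8, lo = bits 7:0).
                 */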
419                 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
420                 hi = (u8) ((slot >> 8) & 0xff);
421                 lo = (u8) (slot & 0xff);
422                 slot = (u16) ((slot >> 16) & 0xffff);
423                 srpt_get_svc_entries(srpt_get_ioc_guid(sp->sdev->device),
424                                      slot, hi, lo, rsp_mad);
425                 break;
426         default:
427                 rsp_mad->mad_hdr.status =
428                     cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
429                 break;
430         }
431 }
432
433 /*
434  * Callback function that is called by the InfiniBand core after transmission of
435  * a MAD. (MAD = management datagram; AH = address handle.)
436  */
437 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
438                                   struct ib_mad_send_wc *mad_wc)
439 {
440         ib_destroy_ah(mad_wc->send_buf->ah);
441         ib_free_send_mad(mad_wc->send_buf);
442 }
443
444 /*
445  * Callback function that is called by the InfiniBand core after reception of
446  * a MAD (management datagram).
447  */
448 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
449                                   struct ib_mad_recv_wc *mad_wc)
450 {
451         struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
452         struct ib_ah *ah;
453         struct ib_mad_send_buf *rsp;
454         struct ib_dm_mad *dm_mad;
455
456         if (!mad_wc || !mad_wc->recv_buf.mad)
457                 return;
458
459         ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
460                                   mad_wc->recv_buf.grh, mad_agent->port_num);
461         if (IS_ERR(ah))
462                 goto err;
463
464         BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
465
466         rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
467                                  mad_wc->wc->pkey_index, 0,
468                                  IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
469                                  GFP_KERNEL);
470         if (IS_ERR(rsp))
471                 goto err_rsp;
472
473         rsp->ah = ah;
474
475         dm_mad = rsp->mad;
476         memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
477         dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
478         dm_mad->mad_hdr.status = 0;
479
480         switch (mad_wc->recv_buf.mad->mad_hdr.method) {
481         case IB_MGMT_METHOD_GET:
482                 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
483                 break;
484         case IB_MGMT_METHOD_SET:
485                 dm_mad->mad_hdr.status =
486                     cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
487                 break;
488         default:
489                 dm_mad->mad_hdr.status =
490                     cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
491                 break;
492         }
493
494         if (!ib_post_send_mad(rsp, NULL)) {
495                 ib_free_recv_mad(mad_wc);
496                 /* will destroy_ah & free_send_mad in send completion */
497                 return;
498         }
499
500         ib_free_send_mad(rsp);
501
502 err_rsp:
503         ib_destroy_ah(ah);
504 err:
505         ib_free_recv_mad(mad_wc);
506 }
507
508 /*
509  * Enable InfiniBand management datagram processing, update the cached sm_lid,
510  * lid and gid values, and register a callback function for processing MADs
511  * on the specified port. It is safe to call this function more than once for
512  * the same port.
513  */
514 static int srpt_refresh_port(struct srpt_port *sport)
515 {
516         struct ib_mad_reg_req reg_req;
517         struct ib_port_modify port_modify;
518         struct ib_port_attr port_attr;
519         int ret;
520
521         memset(&port_modify, 0, sizeof port_modify);
522         port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
523         port_modify.clr_port_cap_mask = 0;
524
525         ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
526         if (ret)
527                 goto err_mod_port;
528
529         ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
530         if (ret)
531                 goto err_query_port;
532
533         sport->sm_lid = port_attr.sm_lid;
534         sport->lid = port_attr.lid;
535
536         ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
537         if (ret)
538                 goto err_query_port;
539
540         if (!sport->mad_agent) {
541                 memset(&reg_req, 0, sizeof reg_req);
542                 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
543                 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
544                 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
545                 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
546
547                 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
548                                                          sport->port,
549                                                          IB_QPT_GSI,
550                                                          &reg_req, 0,
551                                                          srpt_mad_send_handler,
552                                                          srpt_mad_recv_handler,
553                                                          sport);
554                 if (IS_ERR(sport->mad_agent)) {
555                         ret = PTR_ERR(sport->mad_agent);
556                         sport->mad_agent = NULL;
557                         goto err_query_port;
558                 }
559         }
560
561         return 0;
562
563 err_query_port:
564
565         port_modify.set_port_cap_mask = 0;
566         port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
567         ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
568
569 err_mod_port:
570
571         return ret;
572 }
573
574 /*
575  * Unregister the callback function for processing MADs and disable MAD
576  * processing for all ports of the specified device. It is safe to call this
577  * function more than once for the same device.
578  */
579 static void srpt_unregister_mad_agent(struct srpt_device *sdev)
580 {
581         struct ib_port_modify port_modify = {
582                 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
583         };
584         struct srpt_port *sport;
585         int i;
586
587         for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
588                 sport = &sdev->port[i - 1];
589                 WARN_ON(sport->port != i);
590                 if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
591                         printk(KERN_ERR PFX "disabling MAD processing"
592                                " failed.\n");
593                 if (sport->mad_agent) {
594                         ib_unregister_mad_agent(sport->mad_agent);
595                         sport->mad_agent = NULL;
596                 }
597         }
598 }
599
600 /*
601  * Allocate and initialize an SRPT I/O context structure.
602  */
603 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev)
604 {
605         struct srpt_ioctx *ioctx;
606
607         ioctx = kmalloc(sizeof *ioctx, GFP_KERNEL);
608         if (!ioctx)
609                 goto out;
610
611         ioctx->buf = kzalloc(MAX_MESSAGE_SIZE, GFP_KERNEL);
612         if (!ioctx->buf)
613                 goto out_free_ioctx;
614
615         ioctx->dma = dma_map_single(sdev->device->dma_device, ioctx->buf,
616                                     MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
618         if (dma_mapping_error(sdev->device->dma_device, ioctx->dma))
619 #else
620         if (dma_mapping_error(ioctx->dma))
621 #endif
622                 goto out_free_buf;
623
624         return ioctx;
625
626 out_free_buf:
627         kfree(ioctx->buf);
628 out_free_ioctx:
629         kfree(ioctx);
630 out:
631         return NULL;
632 }
633
634 /*
635  * Deallocate an SRPT I/O context structure.
636  */
637 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
638 {
639         if (!ioctx)
640                 return;
641
642         dma_unmap_single(sdev->device->dma_device, ioctx->dma,
643                          MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
644         kfree(ioctx->buf);
645         kfree(ioctx);
646 }
647
648 /*
649  * Associate a ring of SRPT I/O context structures with the specified device.
650  */
651 static int srpt_alloc_ioctx_ring(struct srpt_device *sdev)
652 {
653         int i;
654
655         for (i = 0; i < SRPT_SRQ_SIZE; ++i) {
656                 sdev->ioctx_ring[i] = srpt_alloc_ioctx(sdev);
657
658                 if (!sdev->ioctx_ring[i])
659                         goto err;
660
661                 sdev->ioctx_ring[i]->index = i;
662         }
663
664         return 0;
665
666 err:
667         while (--i >= 0) {
668                 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
669                 sdev->ioctx_ring[i] = NULL;
670         }
671         return -ENOMEM;
672 }
673
674 /* Free the ring of SRPT I/O context structures. */
675 static void srpt_free_ioctx_ring(struct srpt_device *sdev)
676 {
677         int i;
678
679         for (i = 0; i < SRPT_SRQ_SIZE; ++i) {
680                 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
681                 sdev->ioctx_ring[i] = NULL;
682         }
683 }
684
685 /*
686  * Post a receive request on the SRQ (shared receive queue) of InfiniBand device 'sdev'.
687  */
688 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
689 {
690         struct ib_sge list;
691         struct ib_recv_wr wr, *bad_wr;
692
693         wr.wr_id = ioctx->index | SRPT_OP_RECV;
694
695         list.addr = ioctx->dma;
696         list.length = MAX_MESSAGE_SIZE;
697         list.lkey = sdev->mr->lkey;
698
699         wr.next = NULL;
700         wr.sg_list = &list;
701         wr.num_sge = 1;
702
703         return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
704 }
705
706 /*
707  * Post an IB send request.
708  * @ch: RDMA channel to post the send request on.
709  * @ioctx: I/O context of the send request.
710  * @len: length of the request to be sent in bytes.
711  *
712  * Returns zero upon success and a non-zero value upon failure.
713  */
714 static int srpt_post_send(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
715                           int len)
716 {
717         struct ib_sge list;
718         struct ib_send_wr wr, *bad_wr;
719         struct srpt_device *sdev = ch->sport->sdev;
720
721         dma_sync_single_for_device(sdev->device->dma_device, ioctx->dma,
722                                    MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
723
724         list.addr = ioctx->dma;
725         list.length = len;
726         list.lkey = sdev->mr->lkey;
727
728         wr.next = NULL;
729         wr.wr_id = ioctx->index;
730         wr.sg_list = &list;
731         wr.num_sge = 1;
732         wr.opcode = IB_WR_SEND;
733         wr.send_flags = IB_SEND_SIGNALED;
734
735         return ib_post_send(ch->qp, &wr, &bad_wr);
736 }
737
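/*
 * Parse the data buffer descriptors of SRP_CMD 'srp_cmd' into ioctx->rbufs
 * and ioctx->n_rbuf and set ioctx->data_len. *ind is set to one if the
 * indirect descriptor table contains more entries than are present in the
 * command itself; the caller treats that case as a failure.
 */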
738 static int srpt_get_desc_tbl(struct srpt_ioctx *ioctx, struct srp_cmd *srp_cmd,
739                              int *ind)
740 {
741         struct srp_indirect_buf *idb;
742         struct srp_direct_buf *db;
743
744         *ind = 0;
745         if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
746             ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
747                 ioctx->n_rbuf = 1;
748                 ioctx->rbufs = &ioctx->single_rbuf;
749
750                 db = (void *)srp_cmd->add_data;
751                 memcpy(ioctx->rbufs, db, sizeof *db);
752                 ioctx->data_len = be32_to_cpu(db->len);
753         } else {
754                 idb = (void *)srp_cmd->add_data;
755
756                 ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
757
758                 if (ioctx->n_rbuf >
759                     (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
760                         *ind = 1;
761                         ioctx->n_rbuf = 0;
762                         goto out;
763                 }
764
765                 if (ioctx->n_rbuf == 1)
766                         ioctx->rbufs = &ioctx->single_rbuf;
767                 else
768                         ioctx->rbufs =
769                                 kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
770                 if (!ioctx->rbufs) {
771                         ioctx->n_rbuf = 0;
772                         return -ENOMEM;
773                 }
774
775                 db = idb->desc_list;
776                 memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
777                 ioctx->data_len = be32_to_cpu(idb->len);
778         }
779 out:
780         return 0;
781 }
782
783 /*
784  * Modify the attributes of queue pair 'qp': allow local write, remote read,
785  * and remote write. Also transition 'qp' to state IB_QPS_INIT.
786  */
787 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
788 {
789         struct ib_qp_attr *attr;
790         int ret;
791
792         attr = kzalloc(sizeof *attr, GFP_KERNEL);
793         if (!attr)
794                 return -ENOMEM;
795
796         attr->qp_state = IB_QPS_INIT;
797         attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
798             IB_ACCESS_REMOTE_WRITE;
799         attr->port_num = ch->sport->port;
800         attr->pkey_index = 0;
801
802         ret = ib_modify_qp(qp, attr,
803                            IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
804                            IB_QP_PKEY_INDEX);
805
806         kfree(attr);
807         return ret;
808 }
809
810 /**
811  * Change the state of a channel to 'ready to receive' (RTR).
812  * @ch: channel of the queue pair.
813  * @qp: queue pair to change the state of.
814  *
815  * Returns zero upon success and a negative value upon failure.
816  */
817 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
818 {
819         struct ib_qp_attr qp_attr;
820         int attr_mask;
821         int ret;
822
823         qp_attr.qp_state = IB_QPS_RTR;
824         ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
825         if (ret)
826                 goto out;
827
828         qp_attr.max_dest_rd_atomic = 4;
829
830         ret = ib_modify_qp(qp, &qp_attr, attr_mask);
831
832 out:
833         return ret;
834 }
835
836 /**
837  * Change the state of a channel to 'ready to send' (RTS).
838  * @ch: channel of the queue pair.
839  * @qp: queue pair to change the state of.
840  *
841  * Returns zero upon success and a negative value upon failure.
842  */
843 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
844 {
845         struct ib_qp_attr qp_attr;
846         int attr_mask;
847         int ret;
848
849         qp_attr.qp_state = IB_QPS_RTS;
850         ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
851         if (ret)
852                 goto out;
853
854         qp_attr.max_rd_atomic = 4;
855
856         ret = ib_modify_qp(qp, &qp_attr, attr_mask);
857
858 out:
859         return ret;
860 }
861
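/*
 * Release the per-command resources of 'ioctx' (RDMA information units and
 * the indirect buffer descriptor list) and repost the I/O context on the SRQ
 * so that it can receive a new SRP information unit.
 */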
862 static void srpt_reset_ioctx(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx)
863 {
864         int i;
865
866         if (ioctx->n_rdma_ius > 0 && ioctx->rdma_ius) {
867                 struct rdma_iu *riu = ioctx->rdma_ius;
868
869                 for (i = 0; i < ioctx->n_rdma_ius; ++i, ++riu)
870                         kfree(riu->sge);
871                 kfree(ioctx->rdma_ius);
872         }
873
874         if (ioctx->n_rbuf > 1)
875                 kfree(ioctx->rbufs);
876
877         if (srpt_post_recv(ch->sport->sdev, ioctx))
878                 printk(KERN_ERR PFX "SRQ post_recv failed - this is serious\n");
879                 /* we should queue it back to free_ioctx queue */
880         else
881                 atomic_inc(&ch->req_lim_delta);
882 }
883
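/*
 * Abort the SCST command 'scmnd': unmap its data buffers and, if the command
 * is still waiting for data (SRPT_STATE_NEED_DATA), report the abort via
 * scst_rx_data(); otherwise complete the command with delivery status
 * SCST_CMD_DELIVERY_FAILED.
 */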
884 static void srpt_abort_scst_cmd(struct srpt_device *sdev,
885                                 struct scst_cmd *scmnd,
886                                 bool tell_initiator)
887 {
888         struct srpt_ioctx *ioctx;
889         scst_data_direction dir;
890
891         ioctx = scst_cmd_get_tgt_priv(scmnd);
892         BUG_ON(!ioctx);
893         dir = scst_cmd_get_data_direction(scmnd);
894         if (dir != SCST_DATA_NONE) {
895                 dma_unmap_sg(sdev->device->dma_device,
896                              scst_cmd_get_sg(scmnd),
897                              scst_cmd_get_sg_cnt(scmnd),
898                              scst_to_tgt_dma_dir(dir));
899
900 #if 1
901                 switch (scmnd->state) {
902                 case SCST_CMD_STATE_DATA_WAIT:
903                         WARN_ON(ioctx->state != SRPT_STATE_NEED_DATA);
904                         break;
905                 case SCST_CMD_STATE_XMIT_WAIT:
906                         WARN_ON(ioctx->state != SRPT_STATE_PROCESSED);
907                         break;
908                 default:
909                         WARN_ON(ioctx->state == SRPT_STATE_NEED_DATA ||
910                                 ioctx->state == SRPT_STATE_PROCESSED);
911                 }
912 #endif
913
914                 if (ioctx->state == SRPT_STATE_NEED_DATA) {
915                         scst_rx_data(scmnd,
916                                      tell_initiator ? SCST_RX_STATUS_ERROR
917                                      : SCST_RX_STATUS_ERROR_FATAL,
918                                      SCST_CONTEXT_THREAD);
919                         goto out;
920                 } else if (ioctx->state == SRPT_STATE_PROCESSED)
921                         ;
922                 else {
923                         printk(KERN_ERR PFX
924                                "unexpected cmd state %d (SCST) %d (SRPT)\n",
925                                scmnd->state, ioctx->state);
926                         WARN_ON("unexpected cmd state");
927                 }
928         }
929
930         scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_FAILED);
931         scst_tgt_cmd_done(scmnd, scst_estimate_context());
932 out:
933         return;
934 }
935
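/*
 * Process a work completion that finished with an error status: a failed
 * receive means the SRQ is in a bad state; for a failed send either abort the
 * associated SCST command or repost the I/O context.
 */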
936 static void srpt_handle_err_comp(struct srpt_rdma_ch *ch, struct ib_wc *wc)
937 {
938         struct srpt_ioctx *ioctx;
939         struct srpt_device *sdev = ch->sport->sdev;
940
941         if (wc->wr_id & SRPT_OP_RECV) {
942                 ioctx = sdev->ioctx_ring[wc->wr_id & ~SRPT_OP_RECV];
943                 printk(KERN_ERR PFX "This is serious - SRQ is in bad state\n");
944         } else {
945                 ioctx = sdev->ioctx_ring[wc->wr_id];
946
947                 if (ioctx->scmnd)
948                         srpt_abort_scst_cmd(sdev, ioctx->scmnd, true);
949                 else
950                         srpt_reset_ioctx(ch, ioctx);
951         }
952 }
953
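/*
 * Process a send completion: unmap the data buffers and report completion to
 * SCST, or repost the I/O context if no SCST command is associated with it.
 */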
954 static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
955                                   struct srpt_ioctx *ioctx,
956                                   enum scst_exec_context context)
957 {
958         if (ioctx->scmnd) {
959                 scst_data_direction dir =
960                         scst_cmd_get_data_direction(ioctx->scmnd);
961
962                 if (dir != SCST_DATA_NONE)
963                         dma_unmap_sg(ch->sport->sdev->device->dma_device,
964                                      scst_cmd_get_sg(ioctx->scmnd),
965                                      scst_cmd_get_sg_cnt(ioctx->scmnd),
966                                      scst_to_tgt_dma_dir(dir));
967
968                 scst_tgt_cmd_done(ioctx->scmnd, context);
969         } else
970                 srpt_reset_ioctx(ch, ioctx);
971 }
972
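/*
 * Process an RDMA completion: for a write command, notify SCST that the
 * data transfer from the initiator has finished.
 */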
973 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
974                                   struct srpt_ioctx *ioctx)
975 {
976         if (!ioctx->scmnd) {
977                 srpt_reset_ioctx(ch, ioctx);
978                 return;
979         }
980
981         if (scst_cmd_get_data_direction(ioctx->scmnd) == SCST_DATA_WRITE)
982                 scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_SUCCESS,
983                         scst_estimate_context());
984 }
985
986 /**
987  * Build an SRP_RSP response PDU.
988  * @ch: RDMA channel through which the request has been received.
989  * @ioctx: I/O context in which the SRP_RSP PDU will be built.
990  * @s_key: sense key that will be stored in the response.
991  * @s_code: value that will be stored in the asc_ascq field of the sense data.
992  * @tag: tag of the request for which this response is being generated.
993  *
994  * Returns the size in bytes of the SRP_RSP response PDU.
995  *
996  * An SRP_RSP PDU contains a SCSI status or service response. See also
997  * section 6.9 in the T10 SRP r16a document for the format of an SRP_RSP PDU.
998  * See also SPC-2 for more information about sense data.
999  */
1000 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1001                               struct srpt_ioctx *ioctx, u8 s_key, u8 s_code,
1002                               u64 tag)
1003 {
1004         struct srp_rsp *srp_rsp;
1005         struct sense_data *sense;
1006         int limit_delta;
1007         int sense_data_len = 0;
1008
1009         srp_rsp = ioctx->buf;
1010         memset(srp_rsp, 0, sizeof *srp_rsp);
1011
1012         limit_delta = atomic_read(&ch->req_lim_delta);
1013         atomic_sub(limit_delta, &ch->req_lim_delta);
1014
1015         srp_rsp->opcode = SRP_RSP;
1016         srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
1017         srp_rsp->tag = tag;
1018
1019         if (s_key != NO_SENSE) {
1020                 sense_data_len = sizeof *sense + (sizeof *sense % 4);
1021                 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1022                 srp_rsp->status = SAM_STAT_CHECK_CONDITION;
1023                 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1024
1025                 sense = (struct sense_data *)(srp_rsp + 1);
1026                 sense->err_code = 0x70;
1027                 sense->key = s_key;
1028                 sense->asc_ascq = s_code;
1029         }
1030
1031         return sizeof(*srp_rsp) + sense_data_len;
1032 }
1033
1034 /**
1035  * Build a task management response, which is a specific SRP_RSP response PDU.
1036  * @ch: RDMA channel through which the request has been received.
1037  * @ioctx: I/O context in which the SRP_RSP PDU will be built.
1038  * @rsp_code: RSP_CODE that will be stored in the response.
1039  * @tag: tag of the request for which this response is being generated.
1040  *
1041  * Returns the size in bytes of the SRP_RSP response PDU.
1042  *
1043  * An SRP_RSP PDU contains a SCSI status or service response. See also
1044  * section 6.9 in the T10 SRP r16a document for the format of an SRP_RSP PDU.
1045  */
1046 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1047                                   struct srpt_ioctx *ioctx, u8 rsp_code,
1048                                   u64 tag)
1049 {
1050         struct srp_rsp *srp_rsp;
1051         int limit_delta;
1052         int resp_data_len = 0;
1053
1054         dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
1055                                 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
1056
1057         srp_rsp = ioctx->buf;
1058         memset(srp_rsp, 0, sizeof *srp_rsp);
1059
1060         limit_delta = atomic_read(&ch->req_lim_delta);
1061         atomic_sub(limit_delta, &ch->req_lim_delta);
1062
1063         srp_rsp->opcode = SRP_RSP;
1064         srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
1065         srp_rsp->tag = tag;
1066
1067         if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
1068                 resp_data_len = 4;
1069                 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1070                 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1071                 srp_rsp->data[3] = rsp_code;
1072         }
1073
1074         return sizeof(*srp_rsp) + resp_data_len;
1075 }
1076
1077 /*
1078  * Process SRP_CMD.
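 *
 * Returns zero upon success. Upon failure a response PDU with status
 * SAM_STAT_TASK_SET_FULL has been built in ioctx->buf and -1 is returned;
 * the caller is responsible for sending that PDU back to the initiator.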
1079  */
1080 static int srpt_handle_cmd(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx)
1081 {
1082         struct scst_cmd *scmnd;
1083         struct srp_cmd *srp_cmd;
1084         struct srp_rsp *srp_rsp;
1085         scst_data_direction dir = SCST_DATA_NONE;
1086         int indirect_desc = 0;
1087         int ret;
1088         unsigned long flags;
1089
1090         srp_cmd = ioctx->buf;
1091         srp_rsp = ioctx->buf;
1092
1093         if (srp_cmd->buf_fmt) {
1094                 ret = srpt_get_desc_tbl(ioctx, srp_cmd, &indirect_desc);
1095                 if (ret) {
1096                         srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1097                                            NO_ADD_SENSE, srp_cmd->tag);
1098                         srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1099                         goto err;
1100                 }
1101
1102                 if (indirect_desc) {
1103                         srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1104                                            NO_ADD_SENSE, srp_cmd->tag);
1105                         srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1106                         goto err;
1107                 }
1108
1109                 if (srp_cmd->buf_fmt & 0xf)
1110                         dir = SCST_DATA_READ;
1111                 else if (srp_cmd->buf_fmt >> 4)
1112                         dir = SCST_DATA_WRITE;
1113                 else
1114                         dir = SCST_DATA_NONE;
1115         } else
1116                 dir = SCST_DATA_NONE;
1117
1118         scmnd = scst_rx_cmd(ch->scst_sess, (u8 *) &srp_cmd->lun,
1119                             sizeof srp_cmd->lun, srp_cmd->cdb, 16,
1120                             thread ? SCST_NON_ATOMIC : SCST_ATOMIC);
1121         if (!scmnd) {
1122                 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1123                                    NO_ADD_SENSE, srp_cmd->tag);
1124                 srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1125                 goto err;
1126         }
1127
1128         ioctx->scmnd = scmnd;
1129
1130         switch (srp_cmd->task_attr) {
1131         case SRP_CMD_HEAD_OF_Q:
1132                 scmnd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1133                 break;
1134         case SRP_CMD_ORDERED_Q:
1135                 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
1136                 break;
1137         case SRP_CMD_SIMPLE_Q:
1138                 scmnd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1139                 break;
1140         case SRP_CMD_ACA:
1141                 scmnd->queue_type = SCST_CMD_QUEUE_ACA;
1142                 break;
1143         default:
1144                 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
1145                 break;
1146         }
1147
1148         scst_cmd_set_tag(scmnd, srp_cmd->tag);
1149         scst_cmd_set_tgt_priv(scmnd, ioctx);
1150         scst_cmd_set_expected(scmnd, dir, ioctx->data_len);
1151
1152         spin_lock_irqsave(&ch->spinlock, flags);
1153         list_add_tail(&ioctx->scmnd_list, &ch->active_scmnd_list);
1154         ch->active_scmnd_cnt++;
1155         spin_unlock_irqrestore(&ch->spinlock, flags);
1156
1157         scst_cmd_init_done(scmnd, scst_estimate_context());
1158
1159         return 0;
1160
1161 err:
1162         WARN_ON(srp_rsp->opcode != SRP_RSP);
1163
1164         return -1;
1165 }
1166
1167 /*
1168  * Process an SRP_TSK_MGMT request PDU.
1169  *
1170  * Returns 0 upon success and -1 upon failure.
1171  *
1172  * Each task management function is performed by calling one of the
1173  * scst_rx_mgmt_fn*() functions. These functions will either report failure
1174  * or process the task management function asynchronously. The function
1175  * srpt_tsk_mgmt_done() will be called by the SCST core upon completion of the
1176  * task management function. When srpt_handle_tsk_mgmt() reports failure
1177  * (i.e. returns -1) a response PDU will have been built in ioctx->buf. This
1178  * PDU has to be sent back by the caller.
1179  *
1180  * For more information about SRP_TSK_MGMT PDU's, see also section 6.7 in
1181  * the T10 SRP r16a document.
1182  */
1183 static int srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1184                                 struct srpt_ioctx *ioctx)
1185 {
1186         struct srp_tsk_mgmt *srp_tsk;
1187         struct srpt_mgmt_ioctx *mgmt_ioctx;
1188         int ret;
1189
1190         srp_tsk = ioctx->buf;
1191
1192         TRACE_DBG("recv_tsk_mgmt= %d for task_tag= %lld"
1193                   " using tag= %lld cm_id= %p sess= %p",
1194                   srp_tsk->tsk_mgmt_func,
1195                   (unsigned long long) srp_tsk->task_tag,
1196                   (unsigned long long) srp_tsk->tag,
1197                   ch->cm_id, ch->scst_sess);
1198
1199         mgmt_ioctx = kmalloc(sizeof *mgmt_ioctx, GFP_ATOMIC);
1200         if (!mgmt_ioctx) {
1201                 srpt_build_tskmgmt_rsp(ch, ioctx, SRP_TSK_MGMT_FAILED,
1202                                        srp_tsk->tag);
1203                 goto err;
1204         }
1205
1206         mgmt_ioctx->ioctx = ioctx;
1207         mgmt_ioctx->ch = ch;
1208         mgmt_ioctx->tag = srp_tsk->tag;
1209
1210         switch (srp_tsk->tsk_mgmt_func) {
1211         case SRP_TSK_ABORT_TASK:
1212                 TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK");
1213                 ret = scst_rx_mgmt_fn_tag(ch->scst_sess,
1214                                           SCST_ABORT_TASK,
1215                                           srp_tsk->task_tag,
1216                                           thread ?
1217                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1218                                           mgmt_ioctx);
1219                 break;
1220         case SRP_TSK_ABORT_TASK_SET:
1221                 TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK_SET");
1222                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1223                                           SCST_ABORT_TASK_SET,
1224                                           (u8 *) &srp_tsk->lun,
1225                                           sizeof srp_tsk->lun,
1226                                           thread ?
1227                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1228                                           mgmt_ioctx);
1229                 break;
1230         case SRP_TSK_CLEAR_TASK_SET:
1231                 TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_TASK_SET");
1232                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1233                                           SCST_CLEAR_TASK_SET,
1234                                           (u8 *) &srp_tsk->lun,
1235                                           sizeof srp_tsk->lun,
1236                                           thread ?
1237                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1238                                           mgmt_ioctx);
1239                 break;
1240         case SRP_TSK_LUN_RESET:
1241                 TRACE_DBG("%s", "Processing SRP_TSK_LUN_RESET");
1242                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1243                                           SCST_LUN_RESET,
1244                                           (u8 *) &srp_tsk->lun,
1245                                           sizeof srp_tsk->lun,
1246                                           thread ?
1247                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1248                                           mgmt_ioctx);
1249                 break;
1250         case SRP_TSK_CLEAR_ACA:
1251                 TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_ACA");
1252                 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1253                                           SCST_CLEAR_ACA,
1254                                           (u8 *) &srp_tsk->lun,
1255                                           sizeof srp_tsk->lun,
1256                                           thread ?
1257                                           SCST_NON_ATOMIC : SCST_ATOMIC,
1258                                           mgmt_ioctx);
1259                 break;
1260         default:
1261                 TRACE_DBG("%s", "Unsupported task management function.");
1262                 srpt_build_tskmgmt_rsp(ch, ioctx,
1263                                        SRP_TSK_MGMT_FUNC_NOT_SUPP,
1264                                        srp_tsk->tag);
1265                 goto err;
1266         }
1267
1268         if (ret) {
1269                 TRACE_DBG("%s", "Processing task management function failed.");
1270                 srpt_build_tskmgmt_rsp(ch, ioctx, SRP_TSK_MGMT_FAILED,
1271                                        srp_tsk->tag);
1272                 goto err;
1273         }
1274
1275         WARN_ON(srp_tsk->opcode == SRP_RSP);
1276
1277         return 0;
1278
1279 err:
1280         WARN_ON(srp_tsk->opcode != SRP_RSP);
1281
1282         kfree(mgmt_ioctx);
1283         return -1;
1284 }
1285
1286 /**
1287  * Process a receive completion event.
1288  * @ch: RDMA channel for which the completion event has been received.
1289  * @ioctx: SRPT I/O context for which the completion event has been received.
1290  */
1291 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1292                                struct srpt_ioctx *ioctx)
1293 {
1294         struct srp_cmd *srp_cmd;
1295         struct srp_rsp *srp_rsp;
1296         unsigned long flags;
1297         int len;
1298
1299         spin_lock_irqsave(&ch->spinlock, flags);
1300         if (ch->state != RDMA_CHANNEL_LIVE) {
1301                 if (ch->state == RDMA_CHANNEL_CONNECTING) {
1302                         list_add_tail(&ioctx->wait_list, &ch->cmd_wait_list);
1303                         spin_unlock_irqrestore(&ch->spinlock, flags);
1304                         return;
1305                 } else {
1306                         spin_unlock_irqrestore(&ch->spinlock, flags);
1307                         srpt_reset_ioctx(ch, ioctx);
1308                         return;
1309                 }
1310         }
1311         spin_unlock_irqrestore(&ch->spinlock, flags);
1312
1313         dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
1314                                 MAX_MESSAGE_SIZE, DMA_FROM_DEVICE);
1315
1316         ioctx->data_len = 0;
1317         ioctx->n_rbuf = 0;
1318         ioctx->rbufs = NULL;
1319         ioctx->n_rdma = 0;
1320         ioctx->n_rdma_ius = 0;
1321         ioctx->rdma_ius = NULL;
1322         ioctx->scmnd = NULL;
1323         ioctx->state = SRPT_STATE_NEW;
1324
1325         srp_cmd = ioctx->buf;
1326         srp_rsp = ioctx->buf;
1327
1328         switch (srp_cmd->opcode) {
1329         case SRP_CMD:
1330                 if (srpt_handle_cmd(ch, ioctx) < 0)
1331                         goto err;
1332                 break;
1333
1334         case SRP_TSK_MGMT:
1335                 if (srpt_handle_tsk_mgmt(ch, ioctx) < 0)
1336                         goto err;
1337                 break;
1338
1339         case SRP_I_LOGOUT:
1340         case SRP_AER_REQ:
1341         default:
1342                 srpt_build_cmd_rsp(ch, ioctx, ILLEGAL_REQUEST, INVALID_CDB,
1343                                    srp_cmd->tag);
1344                 goto err;
1345         }
1346
1347         dma_sync_single_for_device(ch->sport->sdev->device->dma_device,
1348                                    ioctx->dma, MAX_MESSAGE_SIZE,
1349                                    DMA_FROM_DEVICE);
1350
1351         return;
1352
1353 err:
1354         WARN_ON(srp_rsp->opcode != SRP_RSP);
1355         len = sizeof *srp_rsp + be32_to_cpu(srp_rsp->resp_data_len) + be32_to_cpu(srp_rsp->sense_data_len);
1356
1357         if (ch->state != RDMA_CHANNEL_LIVE) {
1358                 /* Give up if another thread modified the channel state. */
1359                 printk(KERN_ERR PFX "%s: channel is in state %d",
1360                        __func__, ch->state);
1361                 srpt_reset_ioctx(ch, ioctx);
1362         } else if (srpt_post_send(ch, ioctx, len)) {
1363                 printk(KERN_ERR PFX "%s: sending SRP_RSP PDU failed",
1364                        __func__);
1365                 srpt_reset_ioctx(ch, ioctx);
1366         }
1367 }
1368
1369 /*
1370  * Returns true if the ioctx list is non-empty or if the ib_srpt kernel thread
1371  * should stop.
1372  * @pre thread != 0
1373  */
1374 static inline int srpt_test_ioctx_list(void)
1375 {
1376         int res = (!list_empty(&srpt_thread.thread_ioctx_list) ||
1377                    unlikely(kthread_should_stop()));
1378         return res;
1379 }
1380
1381 /*
1382  * Add 'ioctx' to the tail of the ioctx list and wake up the kernel thread.
1383  *
1384  * @pre thread != 0
1385  */
1386 static inline void srpt_schedule_thread(struct srpt_ioctx *ioctx)
1387 {
1388         unsigned long flags;
1389
1390         spin_lock_irqsave(&srpt_thread.thread_lock, flags);
1391         list_add_tail(&ioctx->comp_list, &srpt_thread.thread_ioctx_list);
1392         spin_unlock_irqrestore(&srpt_thread.thread_lock, flags);
1393         wake_up(&ioctx_list_waitQ);
1394 }
1395
1396 /**
1397  * InfiniBand completion queue callback function.
1398  * @cq: completion queue.
1399  * @ctx: completion queue context, which was passed as the fourth argument of
1400  *       the function ib_create_cq().
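 *
 * Depending on the 'thread' module parameter, work completions are either
 * processed immediately in this callback or queued for processing by the
 * ib_srpt kernel thread.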
1401  */
1402 static void srpt_completion(struct ib_cq *cq, void *ctx)
1403 {
1404         struct srpt_rdma_ch *ch = ctx;
1405         struct srpt_device *sdev = ch->sport->sdev;
1406         struct ib_wc wc;
1407         struct srpt_ioctx *ioctx;
1408
1409         ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1410         while (ib_poll_cq(ch->cq, 1, &wc) > 0) {
1411                 if (wc.status) {
1412                         printk(KERN_ERR PFX "failed %s status= %d\n",
1413                                wc.wr_id & SRPT_OP_RECV ? "receive" : "send",
1414                                wc.status);
1415                         srpt_handle_err_comp(ch, &wc);
1416                         break;
1417                 }
1418
1419                 if (wc.wr_id & SRPT_OP_RECV) {
1420                         ioctx = sdev->ioctx_ring[wc.wr_id & ~SRPT_OP_RECV];
1421                         if (thread) {
1422                                 ioctx->ch = ch;
1423                                 ioctx->op = IB_WC_RECV;
1424                                 srpt_schedule_thread(ioctx);
1425                         } else
1426                                 srpt_handle_new_iu(ch, ioctx);
1427                         continue;
1428                 } else
1429                         ioctx = sdev->ioctx_ring[wc.wr_id];
1430
1431                 if (thread) {
1432                         ioctx->ch = ch;
1433                         ioctx->op = wc.opcode;
1434                         srpt_schedule_thread(ioctx);
1435                 } else {
1436                         switch (wc.opcode) {
1437                         case IB_WC_SEND:
1438                                 srpt_handle_send_comp(ch, ioctx,
1439                                         scst_estimate_context());
1440                                 break;
1441                         case IB_WC_RDMA_WRITE:
1442                         case IB_WC_RDMA_READ:
1443                                 srpt_handle_rdma_comp(ch, ioctx);
1444                                 break;
1445                         default:
1446                                 break;
1447                         }
1448                 }
1449         }
1450 }
1451
1452 /*
1453  * Create a completion queue (CQ) and a queue pair (QP) for the specified RDMA channel, and bring the QP into the INIT state.
1454  */
1455 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1456 {
1457         struct ib_qp_init_attr *qp_init;
1458         struct srpt_device *sdev = ch->sport->sdev;
1459         int cqe;
1460         int ret;
1461
1462         qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
1463         if (!qp_init)
1464                 return -ENOMEM;
1465
1466         /* Create a completion queue (CQ). */
1467
1468         cqe = SRPT_RQ_SIZE + SRPT_SQ_SIZE - 1;
1469 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(RHEL_RELEASE_CODE)
1470         ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe);
1471 #else
1472         ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe, 0);
1473 #endif
1474         if (IS_ERR(ch->cq)) {
1475                 ret = PTR_ERR(ch->cq);
1476                 printk(KERN_ERR PFX "failed to create_cq cqe= %d ret= %d\n",
1477                         cqe, ret);
1478                 goto out;
1479         }
1480
1481         /* Request completion notification. */
1482
1483         ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1484
1485         /* Create a queue pair (QP). */
1486
1487         qp_init->qp_context = (void *)ch;
1488         qp_init->event_handler = srpt_qp_event;
1489         qp_init->send_cq = ch->cq;
1490         qp_init->recv_cq = ch->cq;
1491         qp_init->srq = sdev->srq;
1492         qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1493         qp_init->qp_type = IB_QPT_RC;
1494         qp_init->cap.max_send_wr = SRPT_SQ_SIZE;
1495         qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
1496
1497         ch->qp = ib_create_qp(sdev->pd, qp_init);
1498         if (IS_ERR(ch->qp)) {
1499                 ret = PTR_ERR(ch->qp);
1500                 ib_destroy_cq(ch->cq);
1501                 printk(KERN_ERR PFX "failed to create_qp ret= %d\n", ret);
1502                 goto out;
1503         }
1504
1505         TRACE_DBG("%s: max_cqe= %d max_sge= %d cm_id= %p",
1506                __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1507                ch->cm_id);
1508
1509         /* Modify the attributes and the state of queue pair ch->qp. */
1510
1511         ret = srpt_init_ch_qp(ch, ch->qp);
1512         if (ret) {
1513                 ib_destroy_qp(ch->qp);
1514                 ib_destroy_cq(ch->cq);
1515                 goto out;
1516         }
1517
1518         atomic_set(&ch->req_lim_delta, SRPT_RQ_SIZE);
1519 out:
1520         kfree(qp_init);
1521         return ret;
1522 }
1523
1524 /**
1525  * Look up the RDMA channel that corresponds to the specified cm_id.
1526  * If 'del' is true, the matching channel is also removed from the channel
1527  * list. Returns NULL if no matching RDMA channel has been found.
1528  */
1529 static struct srpt_rdma_ch *srpt_find_channel(struct ib_cm_id *cm_id, bool del)
1530 {
1531         struct srpt_device *sdev = cm_id->context;
1532         struct srpt_rdma_ch *ch;
1533
1534         spin_lock_irq(&sdev->spinlock);
1535         list_for_each_entry(ch, &sdev->rch_list, list) {
1536                 if (ch->cm_id == cm_id) {
1537                         if (del)
1538                                 list_del(&ch->list);
1539                         spin_unlock_irq(&sdev->spinlock);
1540                         return ch;
1541                 }
1542         }
1543
1544         spin_unlock_irq(&sdev->spinlock);
1545
1546         return NULL;
1547 }
1548
1549 /**
1550  * Release all resources associated with the specified RDMA channel.
1551  *
1552  * Note: the caller must have removed the channel from the channel list
1553  * before calling this function.
1554  */
1555 static void srpt_release_channel(struct srpt_rdma_ch *ch, int destroy_cmid)
1556 {
1557         TRACE_ENTRY();
1558
1559         WARN_ON(srpt_find_channel(ch->cm_id, false) == ch);
1560
1561         if (ch->cm_id && destroy_cmid) {
1562                 TRACE_DBG("%s: destroy cm_id= %p", __func__, ch->cm_id);
1563                 ib_destroy_cm_id(ch->cm_id);
1564                 ch->cm_id = NULL;
1565         }
1566
1567         ib_destroy_qp(ch->qp);
1568         ib_destroy_cq(ch->cq);
1569
1570         if (ch->scst_sess) {
1571                 struct srpt_ioctx *ioctx, *ioctx_tmp;
1572
1573                 TRACE_DBG("%s: release sess= %p sess_name= %s active_cmd= %d",
1574                           __func__, ch->scst_sess, ch->sess_name,
1575                           ch->active_scmnd_cnt);
1576
1577                 spin_lock_irq(&ch->spinlock);
1578                 list_for_each_entry_safe(ioctx, ioctx_tmp,
1579                                          &ch->active_scmnd_list, scmnd_list) {
1580                         spin_unlock_irq(&ch->spinlock);
1581
1582                         if (ioctx->scmnd)
1583                                 srpt_abort_scst_cmd(ch->sport->sdev,
1584                                                     ioctx->scmnd, true);
1585
1586                         spin_lock_irq(&ch->spinlock);
1587                 }
1588                 WARN_ON(!list_empty(&ch->active_scmnd_list));
1589                 WARN_ON(ch->active_scmnd_cnt != 0);
1590                 spin_unlock_irq(&ch->spinlock);
1591
1592                 scst_unregister_session(ch->scst_sess, 0, NULL);
1593                 ch->scst_sess = NULL;
1594         }
1595
1596         kfree(ch);
1597
1598         TRACE_EXIT();
1599 }
1600
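/**
 * Process an InfiniBand CM REQ whose private data contains an SRP_LOGIN_REQ
 * information unit: validate the IT IU length and the target port ID, apply
 * the SRP_MULTICHAN_SINGLE policy by disconnecting existing channels from the
 * same initiator, allocate the channel and its IB resources, register an SCST
 * session, and answer with either an SRP_LOGIN_RSP (ib_send_cm_rep()) or an
 * SRP_LOGIN_REJ (ib_send_cm_rej()).
 */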
1601 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
1602                             struct ib_cm_req_event_param *param,
1603                             void *private_data)
1604 {
1605         struct srpt_device *sdev = cm_id->context;
1606         struct srp_login_req *req;
1607         struct srp_login_rsp *rsp;
1608         struct srp_login_rej *rej;
1609         struct ib_cm_rep_param *rep_param;
1610         struct srpt_rdma_ch *ch, *tmp_ch;
1611         u32 it_iu_len;
1612         u64 ioc_guid;
1613         int ret = 0;
1614
1615         if (!sdev || !private_data)
1616                 return -EINVAL;
1617
1618         rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
1619         rej = kzalloc(sizeof *rej, GFP_KERNEL);
1620         rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
1621
1622         if (!rsp || !rej || !rep_param) {
1623                 ret = -ENOMEM;
1624                 goto out;
1625         }
1626
1627         req = (struct srp_login_req *)private_data;
1628
1629         it_iu_len = be32_to_cpu(req->req_it_iu_len);
1630
1631         TRACE_DBG("Host login i_port_id=0x%llx:0x%llx t_port_id=0x%llx:0x%llx"
1632             " it_iu_len=%d",
1633             (unsigned long long)be64_to_cpu(*(u64 *)&req->initiator_port_id[0]),
1634             (unsigned long long)be64_to_cpu(*(u64 *)&req->initiator_port_id[8]),
1635             (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[0]),
1636             (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[8]),
1637             it_iu_len);
1638
1639         if (it_iu_len > MAX_MESSAGE_SIZE || it_iu_len < 64) {
1640                 rej->reason =
1641                     cpu_to_be32(SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
1642                 ret = -EINVAL;
1643                 TRACE_DBG("Reject invalid it_iu_len=%d", it_iu_len);
1644                 goto reject;
1645         }
1646
1647         if ((req->req_flags & 0x3) == SRP_MULTICHAN_SINGLE) {
1648                 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
1649
1650                 spin_lock_irq(&sdev->spinlock);
1651
1652                 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
1653                         if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
1654                             && !memcmp(ch->t_port_id, req->target_port_id, 16)
1655                             && param->port == ch->sport->port
1656                             && param->listen_id == ch->sport->sdev->cm_id
1657                             && ch->cm_id) {
1658                                 enum rdma_ch_state prev_state;
1659
1660                                 /* found an existing channel */
1661                                 TRACE_DBG("Found existing channel name= %s"
1662                                           " cm_id= %p state= %d",
1663                                           ch->sess_name, ch->cm_id, ch->state);
1664
1665                                 prev_state = ch->state;
1666                                 if (ch->state == RDMA_CHANNEL_LIVE)
1667                                         ch->state = RDMA_CHANNEL_DISCONNECTING;
1668                                 else if (ch->state == RDMA_CHANNEL_CONNECTING)
1669                                         list_del(&ch->list);
1670
1671                                 spin_unlock_irq(&sdev->spinlock);
1672
1673                                 rsp->rsp_flags =
1674                                         SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
1675
1676                                 if (prev_state == RDMA_CHANNEL_LIVE)
1677                                         ib_send_cm_dreq(ch->cm_id, NULL, 0);
1678                                 else if (prev_state ==
1679                                          RDMA_CHANNEL_CONNECTING) {
1680                                         ib_send_cm_rej(ch->cm_id,
1681                                                        IB_CM_REJ_NO_RESOURCES,
1682                                                        NULL, 0, NULL, 0);
1683                                         srpt_release_channel(ch, 1);
1684                                 }
1685
1686                                 spin_lock_irq(&sdev->spinlock);
1687                         }
1688                 }
1689
1690                 spin_unlock_irq(&sdev->spinlock);
1691
1692         } else
1693                 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
1694
1695         ioc_guid = srpt_get_ioc_guid(sdev->device);
1696
1697         if (((u64) (*(u64 *) req->target_port_id) != cpu_to_be64(ioc_guid)) ||
1698             ((u64) (*(u64 *) (req->target_port_id + 8)) !=
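        /*
         * The 128-bit target port ID advertised by this target consists of the
         * IOC GUID in both its upper and lower 64 bits (see show_login_info()),
         * so reject login requests addressed to any other target port ID.
         */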
1699              cpu_to_be64(ioc_guid))) {
1700                 rej->reason =
1701                     cpu_to_be32(SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
1702                 ret = -ENOMEM;
1703                 TRACE_DBG("%s", "Reject invalid target_port_id");
1704                 goto reject;
1705         }
1706
1707         ch = kzalloc(sizeof *ch, GFP_KERNEL);
1708         if (!ch) {
1709                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1710                 TRACE_DBG("%s", "Reject failed allocate rdma_ch");
1711                 ret = -ENOMEM;
1712                 goto reject;
1713         }
1714
1715         spin_lock_init(&ch->spinlock);
1716         memcpy(ch->i_port_id, req->initiator_port_id, 16);
1717         memcpy(ch->t_port_id, req->target_port_id, 16);
1718         ch->sport = &sdev->port[param->port - 1];
1719         ch->cm_id = cm_id;
1720         ch->state = RDMA_CHANNEL_CONNECTING;
1721         INIT_LIST_HEAD(&ch->cmd_wait_list);
1722         INIT_LIST_HEAD(&ch->active_scmnd_list);
1723
1724         ret = srpt_create_ch_ib(ch);
1725         if (ret) {
1726                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1727                 TRACE_DBG("%s", "Reject failed to create rdma_ch");
1728                 goto free_ch;
1729         }
1730
1731         ret = srpt_ch_qp_rtr(ch, ch->qp);
1732         if (ret) {
1733                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1734                 TRACE_DBG("Reject failed qp to rtr/rts ret=%d", ret);
1735                 goto destroy_ib;
1736         }
1737
1738         snprintf(ch->sess_name, sizeof(ch->sess_name),
1739                  "0x%016llx%016llx",
1740                  (unsigned long long)be64_to_cpu(*(u64 *)ch->i_port_id),
1741                  (unsigned long long)be64_to_cpu(*(u64 *)(ch->i_port_id + 8)));
1742
1743         TRACE_DBG("registering session %s", ch->sess_name);
1744
1745         BUG_ON(!sdev->scst_tgt);
1746         ch->scst_sess = scst_register_session(sdev->scst_tgt, 0, ch->sess_name,
1747                                   NULL, NULL);
1748         if (!ch->scst_sess) {
1749                 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1750                 TRACE_DBG("%s", "Failed to create scst sess");
1751                 goto destroy_ib;
1752         }
1753
1754         TRACE_DBG("Establish connection sess=%p name=%s cm_id=%p",
1755                   ch->scst_sess, ch->sess_name, ch->cm_id);
1756
1757         scst_sess_set_tgt_priv(ch->scst_sess, ch);
1758
1759         /* create srp_login_response */
1760         rsp->opcode = SRP_LOGIN_RSP;
1761         rsp->tag = req->tag;
1762         rsp->max_it_iu_len = req->req_it_iu_len;
1763         rsp->max_ti_iu_len = req->req_it_iu_len;
1764         rsp->buf_fmt =
1765             cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1766         rsp->req_lim_delta = cpu_to_be32(SRPT_RQ_SIZE);
1767         atomic_set(&ch->req_lim_delta, 0);
1768
1769         /* create cm reply */
1770         rep_param->qp_num = ch->qp->qp_num;
1771         rep_param->private_data = (void *)rsp;
1772         rep_param->private_data_len = sizeof *rsp;
1773         rep_param->rnr_retry_count = 7;
1774         rep_param->flow_control = 1;
1775         rep_param->failover_accepted = 0;
1776         rep_param->srq = 1;
1777         rep_param->responder_resources = 4;
1778         rep_param->initiator_depth = 4;
1779
1780         ret = ib_send_cm_rep(cm_id, rep_param);
1781         if (ret)
1782                 goto release_channel;
1783
1784         spin_lock_irq(&sdev->spinlock);
1785         list_add_tail(&ch->list, &sdev->rch_list);
1786         spin_unlock_irq(&sdev->spinlock);
1787
1788         goto out;
1789
1790 release_channel:
1791         scst_unregister_session(ch->scst_sess, 0, NULL);
1792         ch->scst_sess = NULL;
1793
1794 destroy_ib:
1795         ib_destroy_qp(ch->qp);
1796         ib_destroy_cq(ch->cq);
1797
1798 free_ch:
1799         kfree(ch);
1800
1801 reject:
1802         rej->opcode = SRP_LOGIN_REJ;
1803         rej->tag = req->tag;
1804         rej->buf_fmt =
1805             cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1806
1807         ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1808                              (void *)rej, sizeof *rej);
1809
1810 out:
1811         kfree(rep_param);
1812         kfree(rsp);
1813         kfree(rej);
1814
1815         return ret;
1816 }
1817
1818 /**
1819  * Look up and release the channel that corresponds to the specified cm_id.
1820  *
1821  * Note: this function does not destroy the cm_id itself; it is up to the
1822  * caller of srpt_cm_handler() to do that when a non-zero value is returned.
1823  */
1824 static void srpt_find_and_release_channel(struct ib_cm_id *cm_id)
1825 {
1826         struct srpt_rdma_ch *ch;
1827
1828         ch = srpt_find_channel(cm_id, true);
1829         if (ch)
1830                 srpt_release_channel(ch, 0);
1831 }
1832
1833 static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
1834 {
1835         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1836         srpt_find_and_release_channel(cm_id);
1837 }
1838
1839 /**
1840  * Process an IB_CM_RTU_RECEIVED or IB_CM_USER_ESTABLISHED event.
1841  *
1842  * An IB_CM_RTU_RECEIVED message indicates that the connection is established
1843  * and that the recipient may begin transmitting (RTU = ready to use).
1844  */
1845 static int srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
1846 {
1847         struct srpt_rdma_ch *ch;
1848         int ret;
1849
1850         ch = srpt_find_channel(cm_id, false);
1851         if (!ch)
1852                 return -EINVAL;
1853
1854         if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_CONNECTING,
1855                                             RDMA_CHANNEL_LIVE)) {
1856                 struct srpt_ioctx *ioctx, *ioctx_tmp;
1857
1858                 ret = srpt_ch_qp_rts(ch, ch->qp);
1859
1860                 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
1861                                          wait_list) {
1862                         list_del(&ioctx->wait_list);
1863                         srpt_handle_new_iu(ch, ioctx);
1864                 }
1865                 if (ret && srpt_test_and_set_channel_state(ch,
1866                                         RDMA_CHANNEL_LIVE,
1867                                         RDMA_CHANNEL_DISCONNECTING)) {
1868                         TRACE_DBG("cm_id=%p sess_name=%s state=%d",
1869                                   cm_id, ch->sess_name, ch->state);
1870                         ib_send_cm_dreq(ch->cm_id, NULL, 0);
1871                 }
1872         } else if (ch->state == RDMA_CHANNEL_DISCONNECTING) {
1873                 TRACE_DBG("cm_id=%p sess_name=%s state=%d",
1874                           cm_id, ch->sess_name, ch->state);
1875                 ib_send_cm_dreq(ch->cm_id, NULL, 0);
1876                 ret = -EAGAIN;
1877         } else
1878                 ret = 0;
1879
1880         return ret;
1881 }
1882
1883 static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
1884 {
1885         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1886         srpt_find_and_release_channel(cm_id);
1887 }
1888
1889 static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
1890 {
1891         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1892         srpt_find_and_release_channel(cm_id);
1893 }
1894
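/**
 * Process a received disconnection request (DREQ): acknowledge it with a DREP
 * while the channel is still connecting or live; once the channel is already
 * disconnecting, no further reply is sent.
 */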
1895 static int srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
1896 {
1897         struct srpt_rdma_ch *ch;
1898
1899         ch = srpt_find_channel(cm_id, false);
1900         if (!ch)
1901                 return -EINVAL;
1902
1903         TRACE_DBG("%s: cm_id= %p ch->state= %d",
1904                  __func__, cm_id, ch->state);
1905
1906         switch (ch->state) {
1907         case RDMA_CHANNEL_LIVE:
1908         case RDMA_CHANNEL_CONNECTING:
1909                 ib_send_cm_drep(ch->cm_id, NULL, 0);
1910                 break;
1911         case RDMA_CHANNEL_DISCONNECTING:
1912         default:
1913                 break;
1914         }
1915
1916         return 0;
1917 }
1918
1919 static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
1920 {
1921         TRACE_DBG("%s: cm_id=%p", __func__, cm_id);
1922         srpt_find_and_release_channel(cm_id);
1923 }
1924
1925 /**
1926  * IB connection manager callback function.
1927  *
1928  * A non-zero return value will make the caller destroy the CM ID.
1929  *
1930  * Note: srpt_add_one passes a struct srpt_device* as the third argument to
1931  * the ib_create_cm_id() call.
1932  */
1933 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1934 {
1935         int ret = 0;
1936
1937         switch (event->event) {
1938         case IB_CM_REQ_RECEIVED:
1939                 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
1940                                        event->private_data);
1941                 break;
1942         case IB_CM_REJ_RECEIVED:
1943                 srpt_cm_rej_recv(cm_id);
1944                 ret = -EINVAL;
1945                 break;
1946         case IB_CM_RTU_RECEIVED:
1947         case IB_CM_USER_ESTABLISHED:
1948                 ret = srpt_cm_rtu_recv(cm_id);
1949                 break;
1950         case IB_CM_DREQ_RECEIVED:
1951                 ret = srpt_cm_dreq_recv(cm_id);
1952                 break;
1953         case IB_CM_DREP_RECEIVED:
1954                 srpt_cm_drep_recv(cm_id);
1955                 ret = -EINVAL;
1956                 break;
1957         case IB_CM_TIMEWAIT_EXIT:
1958                 srpt_cm_timewait_exit(cm_id);
1959                 ret = -EINVAL;
1960                 break;
1961         case IB_CM_REP_ERROR:
1962                 srpt_cm_rep_error(cm_id);
1963                 ret = -EINVAL;
1964                 break;
1965         default:
1966                 break;
1967         }
1968
1969         return ret;
1970 }
1971
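/**
 * Map the scatter/gather list of the SCST command 'scmnd' onto the ib_sge
 * arrays of the per-command rdma_iu structures. This is done in two passes:
 * the first pass walks the remote (SRP direct) buffer descriptors, counts the
 * number of ib_sge entries needed per RDMA work request and allocates the
 * corresponding arrays; the second pass fills in the DMA addresses, lengths
 * and local keys.
 */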
1972 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1973                                  struct srpt_ioctx *ioctx,
1974                                  struct scst_cmd *scmnd)
1975 {
1976         struct scatterlist *scat;
1977         scst_data_direction dir;
1978         struct rdma_iu *riu;
1979         struct srp_direct_buf *db;
1980         dma_addr_t dma_addr;
1981         struct ib_sge *sge;
1982         u64 raddr;
1983         u32 rsize;
1984         u32 tsize;
1985         u32 dma_len;
1986         int count, nrdma;
1987         int i, j, k;
1988
1989         scat = scst_cmd_get_sg(scmnd);
1990         dir = scst_cmd_get_data_direction(scmnd);
1991         count = dma_map_sg(ch->sport->sdev->device->dma_device, scat,
1992                            scst_cmd_get_sg_cnt(scmnd),
1993                            scst_to_tgt_dma_dir(dir));
1994         if (unlikely(!count))
1995                 return -EBUSY;
1996
1997         if (ioctx->rdma_ius && ioctx->n_rdma_ius)
1998                 nrdma = ioctx->n_rdma_ius;
1999         else {
2000                 nrdma = count / SRPT_DEF_SG_PER_WQE + ioctx->n_rbuf;
2001
2002                 ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu,
2003                                           scst_cmd_atomic(scmnd)
2004                                           ? GFP_ATOMIC : GFP_KERNEL);
2005                 if (!ioctx->rdma_ius) {
2006                         dma_unmap_sg(ch->sport->sdev->device->dma_device,
2007                                      scat, scst_cmd_get_sg_cnt(scmnd),
2008                                      scst_to_tgt_dma_dir(dir));
2009                         return -ENOMEM;
2010                 }
2011
2012                 ioctx->n_rdma_ius = nrdma;
2013         }
2014
2015         db = ioctx->rbufs;
2016         tsize = (dir == SCST_DATA_READ) ?
2017                 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
2018         dma_len = sg_dma_len(&scat[0]);
2019         riu = ioctx->rdma_ius;
2020
2021         /*
2022          * For each remote descriptor, calculate the number of ib_sge
2023          * entries needed. If an RDMA operation needs at most
2024          * SRPT_DEF_SG_PER_WQE ib_sge entries, a single rdma_iu (one RDMA
2025          * work request) per remote descriptor is sufficient; otherwise
2026          * additional rdma_iu entries are allocated to carry the extra
2027          * ib_sge entries in further RDMA work requests.
2028          */
2029         for (i = 0, j = 0;
2030              j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
2031                 rsize = be32_to_cpu(db->len);
2032                 raddr = be64_to_cpu(db->va);
2033                 riu->raddr = raddr;
2034                 riu->rkey = be32_to_cpu(db->key);
2035                 riu->sge_cnt = 0;
2036
2037                 /* Calculate how many ib_sge entries this remote buffer requires. */
2038                 while (rsize > 0 && tsize > 0) {
2039
2040                         if (rsize >= dma_len) {
2041                                 tsize -= dma_len;
2042                                 rsize -= dma_len;
2043                                 raddr += dma_len;
2044
2045                                 if (tsize > 0) {
2046                                         ++j;
2047                                         if (j < count)
2048                                                 dma_len = sg_dma_len(&scat[j]);
2049                                 }
2050                         } else {
2051                                 tsize -= rsize;
2052                                 dma_len -= rsize;
2053                                 rsize = 0;
2054                         }
2055
2056                         ++riu->sge_cnt;
2057
2058                         if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
2059                                 riu->sge =
2060                                     kmalloc(riu->sge_cnt * sizeof *riu->sge,
2061                                             scst_cmd_atomic(scmnd)
2062                                             ? GFP_ATOMIC : GFP_KERNEL);
2063                                 if (!riu->sge)
2064                                         goto free_mem;
2065
2066                                 ++ioctx->n_rdma;
2067                                 ++riu;
2068                                 riu->sge_cnt = 0;
2069                                 riu->raddr = raddr;
2070                                 riu->rkey = be32_to_cpu(db->key);
2071                         }
2072                 }
2073
2074                 riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
2075                                    scst_cmd_atomic(scmnd)
2076                                    ? GFP_ATOMIC : GFP_KERNEL);
2077
2078                 if (!riu->sge)
2079                         goto free_mem;
2080
2081                 ++ioctx->n_rdma;
2082         }
2083
2084         db = ioctx->rbufs;
2085         scat = scst_cmd_get_sg(scmnd);
2086         tsize = (dir == SCST_DATA_READ) ?
2087                 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
2088         riu = ioctx->rdma_ius;
2089         dma_len = sg_dma_len(&scat[0]);
2090         dma_addr = sg_dma_address(&scat[0]);
2091
2092         /* The second pass maps the scatter/gather DMA addresses onto rdma_iu->ib_sge. */
2093         for (i = 0, j = 0;
2094              j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
2095                 rsize = be32_to_cpu(db->len);
2096                 sge = riu->sge;
2097                 k = 0;
2098
2099                 while (rsize > 0 && tsize > 0) {
2100                         sge->addr = dma_addr;
2101                         sge->lkey = ch->sport->sdev->mr->lkey;
2102
2103                         if (rsize >= dma_len) {
2104                                 sge->length =
2105                                         (tsize < dma_len) ? tsize : dma_len;
2106                                 tsize -= dma_len;
2107                                 rsize -= dma_len;
2108
2109                                 if (tsize > 0) {
2110                                         ++j;
2111                                         if (j < count) {
2112                                                 dma_len = sg_dma_len(&scat[j]);
2113                                                 dma_addr =
2114                                                     sg_dma_address(&scat[j]);
2115                                         }
2116                                 }
2117                         } else {
2118                                 sge->length = (tsize < rsize) ? tsize : rsize;
2119                                 tsize -= rsize;
2120                                 dma_len -= rsize;
2121                                 dma_addr += rsize;
2122                                 rsize = 0;
2123                         }
2124
2125                         ++k;
2126                         if (k == riu->sge_cnt && rsize > 0) {
2127                                 ++riu;
2128                                 sge = riu->sge;
2129                                 k = 0;
2130                         } else if (rsize > 0)
2131                                 ++sge;
2132                 }
2133         }
2134
2135         return 0;
2136
2137 free_mem:
2138         while (ioctx->n_rdma)
2139                 kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
2140
2141         kfree(ioctx->rdma_ius);
2142
2143         dma_unmap_sg(ch->sport->sdev->device->dma_device,
2144                      scat, scst_cmd_get_sg_cnt(scmnd),
2145                      scst_to_tgt_dma_dir(dir));
2146
2147         return -ENOMEM;
2148 }
2149
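/**
 * Post one RDMA work request per rdma_iu on the channel's queue pair. For a
 * SCST_DATA_READ command the data flows from target to initiator
 * (IB_WR_RDMA_WRITE); for a SCST_DATA_WRITE command the data is fetched from
 * the initiator (IB_WR_RDMA_READ), and only the last work request of such a
 * transfer is posted as signaled.
 */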
2150 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
2151                               scst_data_direction dir)
2152 {
2153         struct ib_send_wr wr;
2154         struct ib_send_wr *bad_wr;
2155         struct rdma_iu *riu;
2156         int i;
2157         int ret = 0;
2158
2159         riu = ioctx->rdma_ius;
2160         memset(&wr, 0, sizeof wr);
2161
2162         for (i = 0; i < ioctx->n_rdma; ++i, ++riu) {
2163                 wr.opcode = (dir == SCST_DATA_READ) ?
2164                     IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
2165                 wr.next = NULL;
2166                 wr.wr_id = ioctx->index;
2167                 wr.wr.rdma.remote_addr = riu->raddr;
2168                 wr.wr.rdma.rkey = riu->rkey;
2169                 wr.num_sge = riu->sge_cnt;
2170                 wr.sg_list = riu->sge;
2171
2172                 /* Request a completion only for the last RDMA WR of a write-direction transfer. */
2173                 if (i == (ioctx->n_rdma - 1) && dir == SCST_DATA_WRITE)
2174                         wr.send_flags = IB_SEND_SIGNALED;
2175
2176                 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2177                 if (ret)
2178                         break;
2179         }
2180
2181         return ret;
2182 }
2183
2184 /*
2185  * Start the RDMA data transfer for the command 'scmnd'. Must not block.
2186  */
2187 static int srpt_xfer_data(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
2188                           struct scst_cmd *scmnd)
2189 {
2190         int ret;
2191
2192         ret = srpt_map_sg_to_ib_sge(ch, ioctx, scmnd);
2193         if (ret) {
2194                 printk(KERN_ERR PFX "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2195                 ret = SCST_TGT_RES_QUEUE_FULL;
2196                 goto out;
2197         }
2198
2199         ret = srpt_perform_rdmas(ch, ioctx, scst_cmd_get_data_direction(scmnd));
2200         if (ret) {
2201                 printk(KERN_ERR PFX "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2202                 if (ret == -EAGAIN || ret == -ENOMEM)
2203                         ret = SCST_TGT_RES_QUEUE_FULL;
2204                 else
2205                         ret = SCST_TGT_RES_FATAL_ERROR;
2206                 goto out;
2207         }
2208
2209         ret = SCST_TGT_RES_SUCCESS;
2210
2211 out:
2212         return ret;
2213 }
2214
2215 /*
2216  * Called by the SCST core to inform ib_srpt that data reception should start.
2217  * Must not block.
2218  */
2219 static int srpt_rdy_to_xfer(struct scst_cmd *scmnd)
2220 {
2221         struct srpt_rdma_ch *ch;
2222         struct srpt_ioctx *ioctx;
2223
2224         ioctx = scst_cmd_get_tgt_priv(scmnd);
2225         BUG_ON(!ioctx);
2226
2227         ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2228         BUG_ON(!ch);
2229
2230         if (ch->state == RDMA_CHANNEL_DISCONNECTING)
2231                 return SCST_TGT_RES_FATAL_ERROR;
2232         else if (ch->state == RDMA_CHANNEL_CONNECTING)
2233                 return SCST_TGT_RES_QUEUE_FULL;
2234
2235         ioctx->state = SRPT_STATE_NEED_DATA;
2236
2237         return srpt_xfer_data(ch, ioctx, scmnd);
2238 }
2239
2240 /*
2241  * Called by the SCST core. Transmits the response buffer and status held in
2242  * 'scmnd'. Must not block.
2243  */
2244 static int srpt_xmit_response(struct scst_cmd *scmnd)
2245 {
2246         struct srpt_rdma_ch *ch;
2247         struct srpt_ioctx *ioctx;
2248         struct srp_rsp *srp_rsp;
2249         u64 tag;
2250         int ret = SCST_TGT_RES_SUCCESS;
2251         int dir;
2252         int status;
2253
2254         ioctx = scst_cmd_get_tgt_priv(scmnd);
2255         BUG_ON(!ioctx);
2256
2257         ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2258         BUG_ON(!ch);
2259
2260         tag = scst_cmd_get_tag(scmnd);
2261
2262         if (ch->state != RDMA_CHANNEL_LIVE) {
2263                 printk(KERN_ERR PFX
2264                        "%s: tag= %lld channel in bad state %d\n",
2265                        __func__, (unsigned long long)tag, ch->state);
2266
2267                 if (ch->state == RDMA_CHANNEL_DISCONNECTING)
2268                         ret = SCST_TGT_RES_FATAL_ERROR;
2269                 else if (ch->state == RDMA_CHANNEL_CONNECTING)
2270                         ret = SCST_TGT_RES_QUEUE_FULL;
2271
2272                 if (unlikely(scst_cmd_aborted(scmnd)))
2273                         goto out_aborted;
2274
2275                 goto out;
2276         }
2277
2278         dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
2279                                 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
2280
2281         srp_rsp = ioctx->buf;
2282
2283         if (unlikely(scst_cmd_aborted(scmnd))) {
2284                 printk(KERN_ERR PFX
2285                        "%s: tag= %lld has already been aborted\n",
2286                        __func__, (unsigned long long)tag);
2287                 goto out_aborted;
2288         }
2289
2290         dir = scst_cmd_get_data_direction(scmnd);
2291         status = scst_cmd_get_status(scmnd) & 0xff;
2292
2293         srpt_build_cmd_rsp(ch, ioctx, NO_SENSE, NO_ADD_SENSE, tag);
2294
2295         if (SCST_SENSE_VALID(scst_cmd_get_sense_buffer(scmnd))) {
2296                 srp_rsp->sense_data_len = scst_cmd_get_sense_buffer_len(scmnd);
2297                 if (srp_rsp->sense_data_len >
2298                     (MAX_MESSAGE_SIZE - sizeof *srp_rsp))
2299                         srp_rsp->sense_data_len =
2300                             MAX_MESSAGE_SIZE - sizeof *srp_rsp;
2301
2302                 memcpy((u8 *) (srp_rsp + 1), scst_cmd_get_sense_buffer(scmnd),
2303                        srp_rsp->sense_data_len);
2304
2305                 srp_rsp->sense_data_len = cpu_to_be32(srp_rsp->sense_data_len);
2306                 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
2307
2308                 if (!status)
2309                         status = SAM_STAT_CHECK_CONDITION;
2310         }
2311
2312         srp_rsp->status = status;
2313
2314         /* transfer read data if any */
2315         if (dir == SCST_DATA_READ && scst_cmd_get_resp_data_len(scmnd)) {
2316                 ret = srpt_xfer_data(ch, ioctx, scmnd);
2317                 if (ret != SCST_TGT_RES_SUCCESS) {
2318                         printk(KERN_ERR PFX
2319                                "%s: tag= %lld xfer_data failed\n",
2320                                __func__, (unsigned long long)tag);
2321                         goto out;
2322                 }
2323         }
2324
2325         ioctx->state = SRPT_STATE_PROCESSED;
2326
2327         if (srpt_post_send(ch, ioctx,
2328                            sizeof *srp_rsp +
2329                            be32_to_cpu(srp_rsp->sense_data_len))) {
2330                 printk(KERN_ERR PFX "%s: ch->state= %d tag= %lld\n",
2331                        __func__, ch->state,
2332                        (unsigned long long)tag);
2333                 ret = SCST_TGT_RES_FATAL_ERROR;
2334         }
2335
2336 out:
2337         return ret;
2338
2339 out_aborted:
2340         ret = SCST_TGT_RES_SUCCESS;
2341         scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_ABORTED);
2342         ioctx->state = SRPT_STATE_ABORTED;
2343         scst_tgt_cmd_done(scmnd, SCST_CONTEXT_SAME);
2344         goto out;
2345 }
2346
2347 /*
2348  * Called by the SCST core to inform ib_srpt that a received task management
2349  * function has been completed. Must not block.
2350  */
2351 static void srpt_tsk_mgmt_done(struct scst_mgmt_cmd *mcmnd)
2352 {
2353         struct srpt_rdma_ch *ch;
2354         struct srpt_mgmt_ioctx *mgmt_ioctx;
2355         struct srpt_ioctx *ioctx;
2356         int rsp_len;
2357
2358         mgmt_ioctx = scst_mgmt_cmd_get_tgt_priv(mcmnd);
2359         BUG_ON(!mgmt_ioctx);
2360
2361         ch = mgmt_ioctx->ch;
2362         BUG_ON(!ch);
2363
2364         ioctx = mgmt_ioctx->ioctx;
2365         BUG_ON(!ioctx);
2366
2367         TRACE_DBG("%s: tsk_mgmt_done for tag= %lld status=%d\n",
2368                   __func__, (unsigned long long)mgmt_ioctx->tag,
2369                   scst_mgmt_cmd_get_status(mcmnd));
2370
2371         rsp_len = srpt_build_tskmgmt_rsp(ch, ioctx,
2372                                          (scst_mgmt_cmd_get_status(mcmnd) ==
2373                                           SCST_MGMT_STATUS_SUCCESS) ?
2374                                          SRP_TSK_MGMT_SUCCESS :
2375                                          SRP_TSK_MGMT_FAILED,
2376                                          mgmt_ioctx->tag);
2377         srpt_post_send(ch, ioctx, rsp_len);
2378
2379         scst_mgmt_cmd_set_tgt_priv(mcmnd, NULL);
2380
2381         kfree(mgmt_ioctx);
2382 }
2383
2384 /*
2385  * Called by the SCST core to inform ib_srpt that the command 'scmnd' is about
2386  * to be freed. May be called in IRQ context.
2387  */
2388 static void srpt_on_free_cmd(struct scst_cmd *scmnd)
2389 {
2390         struct srpt_rdma_ch *ch;
2391         struct srpt_ioctx *ioctx;
2392
2393         ioctx = scst_cmd_get_tgt_priv(scmnd);
2394         BUG_ON(!ioctx);
2395
2396         ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2397         BUG_ON(!ch);
2398
2399         spin_lock_irq(&ch->spinlock);
2400         list_del(&ioctx->scmnd_list);
2401         ch->active_scmnd_cnt--;
2402         spin_unlock_irq(&ch->spinlock);
2403
2404         srpt_reset_ioctx(ch, ioctx);
2405         scst_cmd_set_tgt_priv(scmnd, NULL);
2406 }
2407
2408 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
2409 /* A vanilla 2.6.19 or older kernel without backported OFED kernel headers. */
2410 static void srpt_refresh_port_work(void *ctx)
2411 #else
2412 static void srpt_refresh_port_work(struct work_struct *work)
2413 #endif
2414 {
2415 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
2416         struct srpt_port *sport = (struct srpt_port *)ctx;
2417 #else
2418         struct srpt_port *sport = container_of(work, struct srpt_port, work);
2419 #endif
2420
2421         srpt_refresh_port(sport);
2422 }
2423
2424 /*
2425  * Called by the SCST core to detect target adapters. Returns the number of
2426  * detected target adapters.
2427  */
2428 static int srpt_detect(struct scst_tgt_template *tp)
2429 {
2430         int device_count;
2431
2432         TRACE_ENTRY();
2433
2434         device_count = atomic_read(&srpt_device_count);
2435
2436         TRACE_EXIT_RES(device_count);
2437
2438         return device_count;
2439 }
2440
2441 /*
2442  * Callback function called by the SCST core from scst_unregister() to free up
2443  * the resources associated with device scst_tgt.
2444  */
2445 static int srpt_release(struct scst_tgt *scst_tgt)
2446 {
2447         struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
2448         struct srpt_rdma_ch *ch, *tmp_ch;
2449
2450         TRACE_ENTRY();
2451
2452         BUG_ON(!scst_tgt);
2453 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
2454         WARN_ON(!sdev);
2455         if (!sdev)
2456                 return -ENODEV;
2457 #else
2458         if (WARN_ON(!sdev))
2459                 return -ENODEV;
2460 #endif
2461
2462         srpt_unregister_procfs_entry(scst_tgt->tgtt);
2463
2464         spin_lock_irq(&sdev->spinlock);
2465         list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2466                 list_del(&ch->list);
2467                 spin_unlock_irq(&sdev->spinlock);
2468                 srpt_release_channel(ch, 1);
2469                 spin_lock_irq(&sdev->spinlock);
2470         }
2471         spin_unlock_irq(&sdev->spinlock);
2472
2473         srpt_unregister_mad_agent(sdev);
2474
2475         scst_tgt_set_tgt_priv(scst_tgt, NULL);
2476
2477         TRACE_EXIT();
2478
2479         return 0;
2480 }
2481
2482 /*
2483  * Entry point for ib_srpt's kernel thread. This kernel thread is only created
2484  * when the module parameter 'thread' is not zero (the default is zero).
2485  * This thread processes the ioctx list srpt_thread.thread_ioctx_list.
2486  *
2487  * @pre thread != 0
2488  */
2489 static int srpt_ioctx_thread(void *arg)
2490 {
2491         struct srpt_ioctx *ioctx;
2492
2493         /* Hibernation / freezing of the SRPT kernel thread is not supported. */
2494         current->flags |= PF_NOFREEZE;
2495
2496         spin_lock_irq(&srpt_thread.thread_lock);
2497         while (!kthread_should_stop()) {
2498                 wait_queue_t wait;
2499                 init_waitqueue_entry(&wait, current);
2500
2501                 if (!srpt_test_ioctx_list()) {
2502                         add_wait_queue_exclusive(&ioctx_list_waitQ, &wait);
2503
2504                         for (;;) {
2505                                 set_current_state(TASK_INTERRUPTIBLE);
2506                                 if (srpt_test_ioctx_list())
2507                                         break;
2508                                 spin_unlock_irq(&srpt_thread.thread_lock);
2509                                 schedule();
2510                                 spin_lock_irq(&srpt_thread.thread_lock);
2511                         }
2512                         set_current_state(TASK_RUNNING);
2513                         remove_wait_queue(&ioctx_list_waitQ, &wait);
2514                 }
2515
2516                 while (!list_empty(&srpt_thread.thread_ioctx_list)) {
2517                         ioctx = list_entry(srpt_thread.thread_ioctx_list.next,
2518                                            struct srpt_ioctx, comp_list);
2519
2520                         list_del(&ioctx->comp_list);
2521
2522                         spin_unlock_irq(&srpt_thread.thread_lock);
2523                         switch (ioctx->op) {
2524                         case IB_WC_SEND:
2525                                 srpt_handle_send_comp(ioctx->ch, ioctx,
2526                                         SCST_CONTEXT_DIRECT);
2527                                 break;
2528                         case IB_WC_RDMA_WRITE:
2529                         case IB_WC_RDMA_READ:
2530                                 srpt_handle_rdma_comp(ioctx->ch, ioctx);
2531                                 break;
2532                         case IB_WC_RECV:
2533                                 srpt_handle_new_iu(ioctx->ch, ioctx);
2534                                 break;
2535                         default:
2536                                 break;
2537                         }
2538                         spin_lock_irq(&srpt_thread.thread_lock);
2539                 }
2540         }
2541         spin_unlock_irq(&srpt_thread.thread_lock);
2542
2543         return 0;
2544 }
2545
2546 /* SCST target template for the SRP target implementation. */
2547 static struct scst_tgt_template srpt_template = {
2548         .name = DRV_NAME,
2549         .sg_tablesize = SRPT_DEF_SG_TABLESIZE,
2550         .xmit_response_atomic = 1,
2551         .rdy_to_xfer_atomic = 1,
2552         .no_proc_entry = 0,
2553         .detect = srpt_detect,
2554         .release = srpt_release,
2555         .xmit_response = srpt_xmit_response,
2556         .rdy_to_xfer = srpt_rdy_to_xfer,
2557         .on_free_cmd = srpt_on_free_cmd,
2558         .task_mgmt_fn_done = srpt_tsk_mgmt_done
2559 };
2560
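/*
 * Note: setting xmit_response_atomic and rdy_to_xfer_atomic tells the SCST
 * core that srpt_xmit_response() and srpt_rdy_to_xfer() may be invoked in
 * atomic context, which is why those callbacks must not block (see the
 * comments above their definitions).
 */
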
2561 /*
2562  * The callback function srpt_release_class_dev() is called whenever a
2563  * device is removed from the /sys/class/infiniband_srpt device class.
2564  */
2565 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2566 static void srpt_release_class_dev(struct class_device *class_dev)
2567 #else
2568 static void srpt_release_class_dev(struct device *dev)
2569 #endif
2570 {
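        /*
         * Nothing to do here: the srpt_device that embeds this device is
         * freed by srpt_remove_one().
         */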
2571 }
2572
2573 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2574 static int srpt_trace_level_show(struct seq_file *seq, void *v)
2575 {
2576         return scst_proc_log_entry_read(seq, trace_flag, NULL);
2577 }
2578
2579 static ssize_t srpt_proc_trace_level_write(struct file *file,
2580         const char __user *buf, size_t length, loff_t *off)
2581 {
2582         return scst_proc_log_entry_write(file, buf, length, &trace_flag,
2583                 DEFAULT_SRPT_TRACE_FLAGS, NULL);
2584 }
2585
2586 static struct scst_proc_data srpt_log_proc_data = {
2587         SCST_DEF_RW_SEQ_OP(srpt_proc_trace_level_write)
2588         .show = srpt_trace_level_show,
2589 };
2590 #endif
2591
2592 static struct class_attribute srpt_class_attrs[] = {
2593         __ATTR_NULL,
2594 };
2595
2596 static struct class srpt_class = {
2597         .name = "infiniband_srpt",
2598 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2599         .release = srpt_release_class_dev,
2600 #else
2601         .dev_release = srpt_release_class_dev,
2602 #endif
2603         .class_attrs = srpt_class_attrs,
2604 };
2605
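/*
 * Emit, for each port of the HCA, one line with the parameters an SRP
 * initiator needs in order to log in to this target (tid_ext, ioc_guid, pkey,
 * dgid and service_id). Reading this attribute is an alternative to
 * discovering the target with a tool such as ibsrpdm or srp_daemon; note that
 * an initiator's login interface may expect slightly different key names
 * (e.g. id_ext instead of tid_ext), so the line may need minor editing.
 */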
2606 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2607 static ssize_t show_login_info(struct class_device *class_dev, char *buf)
2608 #else
2609 static ssize_t show_login_info(struct device *dev,
2610                                struct device_attribute *attr, char *buf)
2611 #endif
2612 {
2613         struct srpt_device *sdev =
2614 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2615                 container_of(class_dev, struct srpt_device, class_dev);
2616 #else
2617                 container_of(dev, struct srpt_device, dev);
2618 #endif
2619         struct srpt_port *sport;
2620         u64 ioc_guid;
2621         int i;
2622         int len = 0;
2623
2624         for (i = 0; i < sdev->device->phys_port_cnt; i++) {
2625                 sport = &sdev->port[i];
2626
2627                 ioc_guid = srpt_get_ioc_guid(sdev->device);
2628                 len += sprintf(buf + len,
2629                                "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
2630                                "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
2631                                "service_id=%016llx\n",
2632                                (unsigned long long) ioc_guid,
2633                                (unsigned long long) ioc_guid,
2634                                be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
2635                                be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
2636                                be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
2637                                be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
2638                                be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
2639                                be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
2640                                be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
2641                                be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
2642                                (unsigned long long) ioc_guid);
2643         }
2644
2645         return len;
2646 }
2647
2648 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2649 static CLASS_DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
2650 #else
2651 static DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
2652 #endif
2653
2654 /*
2655  * Callback function called by the InfiniBand core when an InfiniBand device
2656  * has been added, and also once for each already registered InfiniBand
2657  * device during the ib_register_client() call.
2658  */
2659 static void srpt_add_one(struct ib_device *device)
2660 {
2661         struct srpt_device *sdev;
2662         struct srpt_port *sport;
2663         struct ib_srq_init_attr srq_attr;
2664         int i;
2665
2666         TRACE_ENTRY();
2667
2668         sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
2669         if (!sdev)
2670                 return;
2671
2672         sdev->device = device;
2673
2674 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2675         sdev->class_dev.class = &srpt_class;
2676         sdev->class_dev.dev = device->dma_device;
2677         snprintf(sdev->class_dev.class_id, BUS_ID_SIZE,
2678                  "srpt-%s", device->name);
2679 #else
2680         sdev->dev.class = &srpt_class;
2681         sdev->dev.parent = device->dma_device;
2682 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
2683         snprintf(sdev->dev.bus_id, BUS_ID_SIZE, "srpt-%s", device->name);
2684 #else
2685         snprintf(sdev->init_name, sizeof(sdev->init_name),
2686                  "srpt-%s", device->name);
2687         sdev->dev.init_name = sdev->init_name;
2688 #endif
2689 #endif
2690
2691 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2692         if (class_device_register(&sdev->class_dev))
2693                 goto free_dev;
2694         if (class_device_create_file(&sdev->class_dev,
2695                                      &class_device_attr_login_info))
2696                 goto err_dev;
2697 #else
2698         if (device_register(&sdev->dev))
2699                 goto free_dev;
2700         if (device_create_file(&sdev->dev, &dev_attr_login_info))
2701                 goto err_dev;
2702 #endif
2703
2704         if (ib_query_device(device, &sdev->dev_attr))
2705                 goto err_dev;
2706
2707         sdev->pd = ib_alloc_pd(device);
2708         if (IS_ERR(sdev->pd))
2709                 goto err_dev;
2710
2711         sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
2712         if (IS_ERR(sdev->mr))
2713                 goto err_pd;
2714
2715         srq_attr.event_handler = srpt_srq_event;
2716         srq_attr.srq_context = (void *)sdev;
2717         srq_attr.attr.max_wr = min(SRPT_SRQ_SIZE, sdev->dev_attr.max_srq_wr);
2718         srq_attr.attr.max_sge = 1;
2719         srq_attr.attr.srq_limit = 0;
2720
2721         sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
2722         if (IS_ERR(sdev->srq))
2723                 goto err_mr;
2724
2725         TRACE_DBG("%s: create SRQ #wr= %d max_allow=%d dev= %s",
2726                __func__, srq_attr.attr.max_wr,
2727               sdev->dev_attr.max_srq_wr, device->name);
2728
2729         if (!global_ioc_guid)
2730                 global_ioc_guid = be64_to_cpu(device->node_guid);
2731
2732         sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
2733         if (IS_ERR(sdev->cm_id))
2734                 goto err_srq;
2735
2736         /* print out target login information */
2737         TRACE_DBG("Target login info: id_ext=%016llx,"
2738                   "ioc_guid=%016llx,pkey=ffff,service_id=%016llx",
2739                   (unsigned long long) srpt_get_ioc_guid(sdev->device),
2740                   (unsigned long long) srpt_get_ioc_guid(sdev->device),
2741                   (unsigned long long) srpt_get_ioc_guid(sdev->device));
2742
2743         /*
2744          * We do not have a consistent service_id (i.e. also the id_ext of the
2745          * target_id) to identify this target. We currently use the GUID of the
2746          * first HCA in the system as the service_id; therefore the target_id
2747          * will change if this HCA goes bad and is replaced by a different HCA.
2748          */
2749         if (ib_cm_listen(sdev->cm_id, cpu_to_be64(global_ioc_guid), 0, NULL))
2750                 goto err_cm;
2751
2752         INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
2753                               srpt_event_handler);
2754         if (ib_register_event_handler(&sdev->event_handler))
2755                 goto err_cm;
2756
2757         if (srpt_alloc_ioctx_ring(sdev))
2758                 goto err_event;
2759
2760         INIT_LIST_HEAD(&sdev->rch_list);
2761         spin_lock_init(&sdev->spinlock);
2762
2763         for (i = 0; i < SRPT_SRQ_SIZE; ++i)
2764                 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
2765
2766         ib_set_client_data(device, &srpt_client, sdev);
2767
2768         sdev->scst_tgt = scst_register(&srpt_template, NULL);
2769         if (!sdev->scst_tgt) {
2770                 printk(KERN_ERR PFX "SCST registration failed for %s.\n",
2771                         sdev->device->name);
2772                 goto err_ring;
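        /* An RNR retry count of 7 means "retry indefinitely" in InfiniBand. */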
2773         }
2774
2775         scst_tgt_set_tgt_priv(sdev->scst_tgt, sdev);
2776
2777         for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
2778                 sport = &sdev->port[i - 1];
2779                 sport->sdev = sdev;
2780                 sport->port = i;
2781 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
2782                 /*
2783                  * A vanilla 2.6.19 or older kernel without backported OFED
2784                  * kernel headers.
2785                  */
2786                 INIT_WORK(&sport->work, srpt_refresh_port_work, sport);
2787 #else
2788                 INIT_WORK(&sport->work, srpt_refresh_port_work);
2789 #endif
2790                 if (srpt_refresh_port(sport)) {
2791                         printk(KERN_ERR PFX "MAD registration failed"
2792                                " for %s-%d.\n", sdev->device->name, i);
2793                         goto err_refresh_port;
2794                 }
2795         }
2796
2797         atomic_inc(&srpt_device_count);
2798
2799         TRACE_EXIT();
2800
2801         return;
2802
2803 err_refresh_port:
2804         scst_unregister(sdev->scst_tgt);
2805 err_ring:
2806         ib_set_client_data(device, &srpt_client, NULL);
2807         srpt_free_ioctx_ring(sdev);
2808 err_event:
2809         ib_unregister_event_handler(&sdev->event_handler);
2810 err_cm:
2811         ib_destroy_cm_id(sdev->cm_id);
2812 err_srq:
2813         ib_destroy_srq(sdev->srq);
2814 err_mr:
2815         ib_dereg_mr(sdev->mr);
2816 err_pd:
2817         ib_dealloc_pd(sdev->pd);
2818 err_dev:
2819 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2820         class_device_unregister(&sdev->class_dev);
2821 #else
2822         device_unregister(&sdev->dev);
2823 #endif
2824 free_dev:
2825         kfree(sdev);
2826
2827         TRACE_EXIT();
2828 }
2829
2830 /*
2831  * Callback function called by the InfiniBand core when an InfiniBand device
2832  * has been removed, and also once for each still registered InfiniBand
2833  * device during the ib_unregister_client() call.
2834  */
2835 static void srpt_remove_one(struct ib_device *device)
2836 {
2837         int i;
2838         struct srpt_device *sdev;
2839
2840         TRACE_ENTRY();
2841
2842         sdev = ib_get_client_data(device, &srpt_client);
2843 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
2844         WARN_ON(!sdev);
2845         if (!sdev)
2846                 return;
2847 #else
2848         if (WARN_ON(!sdev))
2849                 return;
2850 #endif
2851
2852         /*
2853          * Cancel the work if it is queued. Wait until srpt_refresh_port_work()
2854          * has finished if it is currently running.
2855          */
2856         for (i = 0; i < sdev->device->phys_port_cnt; i++)
2857 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
2858                 cancel_work_sync(&sdev->port[i].work);
2859 #else
2860                 /*
2861                  * cancel_work_sync() was introduced in kernel 2.6.22. Older
2862                  * kernels do not have a facility to cancel scheduled work.
2863                  */
2864                 printk(KERN_ERR PFX
2865                        "your kernel does not provide cancel_work_sync().\n");
2866 #endif
2867
2868         scst_unregister(sdev->scst_tgt);
2869         sdev->scst_tgt = NULL;
2870
2871         ib_unregister_event_handler(&sdev->event_handler);
2872         ib_destroy_cm_id(sdev->cm_id);
2873         ib_destroy_srq(sdev->srq);
2874         ib_dereg_mr(sdev->mr);
2875         ib_dealloc_pd(sdev->pd);
2876 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2877         class_device_unregister(&sdev->class_dev);
2878 #else
2879         device_unregister(&sdev->dev);
2880 #endif
2881
2882         srpt_free_ioctx_ring(sdev);
2883         kfree(sdev);
2884
2885         TRACE_EXIT();
2886 }
2887
2888 /**
2889  * Create procfs entries for srpt. Currently the only procfs entry created
2890  * by this function is the "trace_level" entry.
2891  */
2892 static int srpt_register_procfs_entry(struct scst_tgt_template *tgt)
2893 {
2894         int res = 0;
2895 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2896         struct proc_dir_entry *p, *root;
2897
2898         root = scst_proc_get_tgt_root(tgt);
2899         WARN_ON(!root);
2900         if (root) {
2901                 /*
2902                  * Fill in the scst_proc_data::data pointer, which is used in
2903                  * a printk(KERN_INFO ...) statement in
2904                  * scst_proc_log_entry_write() in scst_proc.c.
2905                  */
2906                 srpt_log_proc_data.data = (char *)tgt->name;
2907                 p = scst_create_proc_entry(root, SRPT_PROC_TRACE_LEVEL_NAME,
2908                                            &srpt_log_proc_data);
2909                 if (!p)
2910                         res = -ENOMEM;
2911         } else
2912                 res = -ENOMEM;
2913
2914 #endif
2915         return res;
2916 }
2917
2918 static void srpt_unregister_procfs_entry(struct scst_tgt_template *tgt)
2919 {
2920 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2921         struct proc_dir_entry *root;
2922
2923         root = scst_proc_get_tgt_root(tgt);
2924         WARN_ON(!root);
2925         if (root)
2926                 remove_proc_entry(SRPT_PROC_TRACE_LEVEL_NAME, root);
2927 #endif
2928 }
2929
2930 /*
2931  * Module initialization.
2932  *
2933  * Note: since ib_register_client() registers callback functions, and since at
2934  * least one of these callback functions (srpt_add_one()) calls SCST functions,
2935  * the SCST target template must be registered before ib_register_client() is
2936  * called.
2937  */
2938 static int __init srpt_init_module(void)
2939 {
2940         int ret;
2941
2942         ret = class_register(&srpt_class);
2943         if (ret) {
2944                 printk(KERN_ERR PFX "couldn't register class ib_srpt\n");
2945                 goto out;
2946         }
2947
2948         ret = scst_register_target_template(&srpt_template);
2949         if (ret < 0) {
2950                 printk(KERN_ERR PFX "couldn't register with scst\n");
2951                 ret = -ENODEV;
2952                 goto out_unregister_class;
2953         }
2954
2955         ret = srpt_register_procfs_entry(&srpt_template);
2956         if (ret) {
2957                 printk(KERN_ERR PFX "couldn't register procfs entry\n");
2958                 goto out_unregister_target;
2959         }
2960
2961         ret = ib_register_client(&srpt_client);
2962         if (ret) {
2963                 printk(KERN_ERR PFX "couldn't register IB client\n");
2964                 goto out_unregister_target;
2965         }
2966
2967         if (thread) {
2968                 spin_lock_init(&srpt_thread.thread_lock);
2969                 INIT_LIST_HEAD(&srpt_thread.thread_ioctx_list);
2970                 srpt_thread.thread = kthread_run(srpt_ioctx_thread,
2971                                                  NULL, "srpt_thread");
2972                 if (IS_ERR(srpt_thread.thread)) {
2973                         srpt_thread.thread = NULL;
2974                         thread = 0;
2975                 }
2976         }
2977
2978         return 0;
2979
2980 out_unregister_target:
2981         /*
2982          * Note: the procfs entry is unregistered in srpt_release(), which is
2983          * called by scst_unregister_target_template().
2984          */
2985         scst_unregister_target_template(&srpt_template);
2986 out_unregister_class:
2987         class_unregister(&srpt_class);
2988 out:
2989         return ret;
2990 }
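
/*
 * A typical (hypothetical) way to load this driver, assuming the 'thread'
 * module parameter mentioned above is exported via module_param():
 *
 *   modprobe ib_srpt thread=1
 *
 * With thread=1, completions are processed by the "srpt_thread" kernel thread
 * instead of in interrupt context.
 */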
2991
2992 static void __exit srpt_cleanup_module(void)
2993 {
2994         TRACE_ENTRY();
2995
2996         if (srpt_thread.thread)
2997                 kthread_stop(srpt_thread.thread);
2998         ib_unregister_client(&srpt_client);
2999         scst_unregister_target_template(&srpt_template);
3000         class_unregister(&srpt_class);
3001
3002         TRACE_EXIT();
3003 }
3004
3005 module_init(srpt_init_module);
3006 module_exit(srpt_cleanup_module);