2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 * Copyright (C) 2008 Vladislav Bolkhovitin <vst@vlnb.net>
4 * Copyright (C) 2008 - 2009 Bart Van Assche <bart.vanassche@gmail.com>
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/err.h>
40 #include <linux/ctype.h>
41 #include <linux/string.h>
42 #include <linux/kthread.h>
43 #include <asm/atomic.h>
44 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
45 #include <linux/proc_fs.h>
46 #include <linux/seq_file.h>
49 #include "scst_debug.h"
51 /* Name of this kernel module. */
52 #define DRV_NAME "ib_srpt"
53 /* Prefix for printk() kernel messages. */
54 #define LOG_PFX DRV_NAME ": "
55 #define DRV_VERSION "1.0.1"
56 #define DRV_RELDATE "July 10, 2008"
57 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
58 /* Flags to be used in SCST debug tracing statements. */
59 #define DEFAULT_SRPT_TRACE_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR \
60 | TRACE_MGMT | TRACE_SPECIAL)
61 /* Name of the entry that will be created under /proc/scsi_tgt/ib_srpt. */
62 #define SRPT_PROC_TRACE_LEVEL_NAME "trace_level"
65 #define MELLANOX_SRPT_ID_STRING "SCST SRP target"
67 MODULE_AUTHOR("Vu Pham");
68 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
69 "v" DRV_VERSION " (" DRV_RELDATE ")");
70 MODULE_LICENSE("Dual BSD/GPL");
73 /* Protects thread_ioctx_list. */
74 spinlock_t thread_lock;
75 /* I/O contexts to be processed by the kernel thread. */
76 struct list_head thread_ioctx_list;
77 /* SRPT kernel thread. */
78 struct task_struct *thread;
85 static u64 srpt_service_guid;
86 /* List of srpt_device structures. */
87 static atomic_t srpt_device_count;
88 static int use_port_guid_in_session_name;
90 static struct srpt_thread srpt_thread;
91 static DECLARE_WAIT_QUEUE_HEAD(ioctx_list_waitQ);
92 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
93 static unsigned long trace_flag = DEFAULT_SRPT_TRACE_FLAGS;
94 module_param(trace_flag, long, 0644);
95 MODULE_PARM_DESC(trace_flag,
96 "Trace flags for the ib_srpt kernel module.");
99 module_param(thread, int, 0444);
100 MODULE_PARM_DESC(thread,
101 "Executing ioctx in thread context. Default 0, i.e. soft IRQ, "
104 module_param(use_port_guid_in_session_name, bool, 0444);
105 MODULE_PARM_DESC(use_port_guid_in_session_name,
106 "Use target port ID in the SCST session name such that"
107 " redundant paths between multiport systems can be masked.");
109 static void srpt_add_one(struct ib_device *device);
110 static void srpt_remove_one(struct ib_device *device);
111 static void srpt_unregister_mad_agent(struct srpt_device *sdev);
112 static void srpt_unregister_procfs_entry(struct scst_tgt_template *tgt);
114 static struct ib_client srpt_client = {
117 .remove = srpt_remove_one
121 * Atomically test and set the channel state.
123 * @old: channel state to compare with.
124 * @new: state to change the channel state to if the current state matches the
127 * Returns true if the channel state matched old upon entry of this function,
128 * and false otherwise.
130 static bool srpt_test_and_set_channel_state(struct srpt_rdma_ch *ch,
131 enum rdma_ch_state old,
132 enum rdma_ch_state new)
135 enum rdma_ch_state cur;
137 spin_lock_irqsave(&ch->spinlock, flags);
141 spin_unlock_irqrestore(&ch->spinlock, flags);
147 * Callback function called by the InfiniBand core when an asynchronous IB
148 * event occurs. This callback may occur in interrupt context. See also
149 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
150 * Architecture Specification.
152 static void srpt_event_handler(struct ib_event_handler *handler,
153 struct ib_event *event)
155 struct srpt_device *sdev;
156 struct srpt_port *sport;
158 sdev = ib_get_client_data(event->device, &srpt_client);
159 if (!sdev || sdev->device != event->device)
162 TRACE_DBG("ASYNC event= %d on device= %s",
163 event->event, sdev->device->name);
165 switch (event->event) {
166 case IB_EVENT_PORT_ERR:
167 if (event->element.port_num <= sdev->device->phys_port_cnt) {
168 sport = &sdev->port[event->element.port_num - 1];
173 case IB_EVENT_PORT_ACTIVE:
174 case IB_EVENT_LID_CHANGE:
175 case IB_EVENT_PKEY_CHANGE:
176 case IB_EVENT_SM_CHANGE:
177 case IB_EVENT_CLIENT_REREGISTER:
179 * Refresh port data asynchronously. Note: it is safe to call
180 * schedule_work() even if &sport->work is already on the
181 * global workqueue because schedule_work() tests for the
182 * work_pending() condition before adding &sport->work to the
185 if (event->element.port_num <= sdev->device->phys_port_cnt) {
186 sport = &sdev->port[event->element.port_num - 1];
187 if (!sport->lid && !sport->sm_lid)
188 schedule_work(&sport->work);
198 * Callback function called by the InfiniBand core for SRQ (shared receive
201 static void srpt_srq_event(struct ib_event *event, void *ctx)
203 TRACE_DBG("SRQ event %d", event->event);
207 * Callback function called by the InfiniBand core for QP (queue pair) events.
209 static void srpt_qp_event(struct ib_event *event, void *ctx)
211 struct srpt_rdma_ch *ch = ctx;
213 TRACE_DBG("QP event %d on cm_id=%p sess_name=%s state=%d",
214 event->event, ch->cm_id, ch->sess_name, ch->state);
216 switch (event->event) {
217 case IB_EVENT_COMM_EST:
218 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) || defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
219 ib_cm_notify(ch->cm_id, event->event);
221 /* Vanilla 2.6.19 kernel (or before) without OFED. */
222 PRINT_ERROR("%s", "don't know how to perform ib_cm_notify() on a"
223 " vanilla 2.6.18 kernel.");
226 case IB_EVENT_QP_LAST_WQE_REACHED:
227 if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_LIVE,
228 RDMA_CHANNEL_DISCONNECTING)) {
229 PRINT_INFO("disconnected session %s.", ch->sess_name);
230 ib_send_cm_dreq(ch->cm_id, NULL, 0);
239 * Helper function for filling in an InfiniBand IOUnitInfo structure. Copies
240 * the lowest four bits of value in element slot of the array of four bit
241 * elements called c_list (controller list). The index slot is one-based.
243 * @pre 1 <= slot && 0 <= value && value < 16
245 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
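/* Each byte of c_list packs two 4-bit controller entries, so the parity of 'slot' selects which nibble is written. */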
252 tmp = c_list[id] & 0xf;
253 c_list[id] = (value << 4) | tmp;
255 tmp = c_list[id] & 0xf0;
256 c_list[id] = (value & 0xf) | tmp;
261 * Write InfiniBand ClassPortInfo to mad. See also section 16.3.3.1
262 * ClassPortInfo in the InfiniBand Architecture Specification.
264 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
266 struct ib_class_port_info *cif;
268 cif = (struct ib_class_port_info *)mad->data;
269 memset(cif, 0, sizeof *cif);
270 cif->base_version = 1;
271 cif->class_version = 1;
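/* A RespTimeValue of 20 corresponds to a response time limit of 4.096 us * 2^20, roughly 4.3 seconds. */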
272 cif->resp_time_value = 20;
274 mad->mad_hdr.status = 0;
278 * Write IOUnitInfo to mad. See also section 16.3.3.3 IOUnitInfo in the
279 * InfiniBand Architecture Specification. See also section B.7,
280 * table B.6 in the T10 SRP r16a document.
282 static void srpt_get_iou(struct ib_dm_mad *mad)
284 struct ib_dm_iou_info *ioui;
288 ioui = (struct ib_dm_iou_info *)mad->data;
290 ioui->max_controllers = 16;
292 /* set present for slot 1 and empty for the rest */
293 srpt_set_ioc(ioui->controller_list, 1, 1);
294 for (i = 1, slot = 2; i < 16; i++, slot++)
295 srpt_set_ioc(ioui->controller_list, slot, 0);
297 mad->mad_hdr.status = 0;
301 * Write IOControllerprofile to mad for I/O controller (sdev, slot). See also
302 * section 16.3.3.4 IOControllerProfile in the InfiniBand Architecture
303 * Specification. See also section B.7, table B.7 in the T10 SRP r16a
306 static void srpt_get_ioc(struct srpt_device *sdev, u32 slot,
307 struct ib_dm_mad *mad)
309 struct ib_dm_ioc_profile *iocp;
311 iocp = (struct ib_dm_ioc_profile *)mad->data;
313 if (!slot || slot > 16) {
314 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
319 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
323 memset(iocp, 0, sizeof *iocp);
324 strcpy(iocp->id_string, MELLANOX_SRPT_ID_STRING);
325 iocp->guid = cpu_to_be64(srpt_service_guid);
326 iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
327 iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
328 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
329 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
330 iocp->subsys_device_id = 0x0;
331 iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
332 iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
333 iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
334 iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
335 iocp->send_queue_depth = cpu_to_be16(SRPT_SRQ_SIZE);
336 iocp->rdma_read_depth = 4;
337 iocp->send_size = cpu_to_be32(MAX_MESSAGE_SIZE);
338 iocp->rdma_size = cpu_to_be32(MAX_RDMA_SIZE);
339 iocp->num_svc_entries = 1;
340 iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
341 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
343 mad->mad_hdr.status = 0;
347 * Device management: write ServiceEntries to mad for the given slot. See also
348 * section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
349 * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
351 static void srpt_get_svc_entries(u64 ioc_guid,
352 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
354 struct ib_dm_svc_entries *svc_entries;
358 if (!slot || slot > 16) {
359 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
363 if (slot > 2 || lo > hi || hi > 1) {
364 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
368 svc_entries = (struct ib_dm_svc_entries *)mad->data;
369 memset(svc_entries, 0, sizeof *svc_entries);
370 svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
371 snprintf(svc_entries->service_entries[0].name,
372 sizeof(svc_entries->service_entries[0].name),
374 SRP_SERVICE_NAME_PREFIX,
375 (unsigned long long)ioc_guid);
377 mad->mad_hdr.status = 0;
381 * Actual processing of a MAD *rq_mad received through source port *sp
382 * (MAD = InfiniBand management datagram). The response to be sent back is
383 * written to *rsp_mad.
385 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
386 struct ib_dm_mad *rsp_mad)
392 attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
394 case DM_ATTR_CLASS_PORT_INFO:
395 srpt_get_class_port_info(rsp_mad);
397 case DM_ATTR_IOU_INFO:
398 srpt_get_iou(rsp_mad);
400 case DM_ATTR_IOC_PROFILE:
401 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
402 srpt_get_ioc(sp->sdev, slot, rsp_mad);
404 case DM_ATTR_SVC_ENTRIES:
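/* attr_mod encodes the IOC slot in bits 31:16 and the requested range of service entries in bits 15:8 (hi) and 7:0 (lo). */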
405 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
406 hi = (u8) ((slot >> 8) & 0xff);
407 lo = (u8) (slot & 0xff);
408 slot = (u16) ((slot >> 16) & 0xffff);
409 srpt_get_svc_entries(srpt_service_guid,
410 slot, hi, lo, rsp_mad);
413 rsp_mad->mad_hdr.status =
414 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
420 * Callback function that is called by the InfiniBand core after transmission of
421 * a MAD. (MAD = management datagram; AH = address handle.)
423 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
424 struct ib_mad_send_wc *mad_wc)
426 ib_destroy_ah(mad_wc->send_buf->ah);
427 ib_free_send_mad(mad_wc->send_buf);
431 * Callback function that is called by the InfiniBand core after reception of
432 * a MAD (management datagram).
434 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
435 struct ib_mad_recv_wc *mad_wc)
437 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
439 struct ib_mad_send_buf *rsp;
440 struct ib_dm_mad *dm_mad;
442 if (!mad_wc || !mad_wc->recv_buf.mad)
445 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
446 mad_wc->recv_buf.grh, mad_agent->port_num);
450 BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
452 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
453 mad_wc->wc->pkey_index, 0,
454 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
462 memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
463 dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
464 dm_mad->mad_hdr.status = 0;
466 switch (mad_wc->recv_buf.mad->mad_hdr.method) {
467 case IB_MGMT_METHOD_GET:
468 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
470 case IB_MGMT_METHOD_SET:
471 dm_mad->mad_hdr.status =
472 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
475 dm_mad->mad_hdr.status =
476 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
480 if (!ib_post_send_mad(rsp, NULL)) {
481 ib_free_recv_mad(mad_wc);
482 /* will destroy_ah & free_send_mad in send completion */
486 ib_free_send_mad(rsp);
491 ib_free_recv_mad(mad_wc);
495 * Enable InfiniBand management datagram processing, update the cached sm_lid,
496 * lid and gid values, and register a callback function for processing MADs
497 * on the specified port. It is safe to call this function more than once for
500 static int srpt_refresh_port(struct srpt_port *sport)
502 struct ib_mad_reg_req reg_req;
503 struct ib_port_modify port_modify;
504 struct ib_port_attr port_attr;
507 memset(&port_modify, 0, sizeof port_modify);
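/* Advertise the DeviceManagement capability bit so that initiators can discover this target through DM MADs. */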
508 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
509 port_modify.clr_port_cap_mask = 0;
511 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
515 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
519 sport->sm_lid = port_attr.sm_lid;
520 sport->lid = port_attr.lid;
522 ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
526 if (!sport->mad_agent) {
527 memset(&reg_req, 0, sizeof reg_req);
528 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
529 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
530 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
531 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
533 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
537 srpt_mad_send_handler,
538 srpt_mad_recv_handler,
540 if (IS_ERR(sport->mad_agent)) {
541 ret = PTR_ERR(sport->mad_agent);
542 sport->mad_agent = NULL;
551 port_modify.set_port_cap_mask = 0;
552 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
553 ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
561 * Unregister the callback function for processing MADs and disable MAD
562 * processing for all ports of the specified device. It is safe to call this
563 * function more than once for the same device.
565 static void srpt_unregister_mad_agent(struct srpt_device *sdev)
567 struct ib_port_modify port_modify = {
568 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
570 struct srpt_port *sport;
573 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
574 sport = &sdev->port[i - 1];
575 WARN_ON(sport->port != i);
576 if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
577 PRINT_ERROR("%s", "disabling MAD processing failed.");
578 if (sport->mad_agent) {
579 ib_unregister_mad_agent(sport->mad_agent);
580 sport->mad_agent = NULL;
586 * Allocate and initialize an SRPT I/O context structure.
588 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev)
590 struct srpt_ioctx *ioctx;
592 ioctx = kmalloc(sizeof *ioctx, GFP_KERNEL);
596 ioctx->buf = kzalloc(MAX_MESSAGE_SIZE, GFP_KERNEL);
600 ioctx->dma = dma_map_single(sdev->device->dma_device, ioctx->buf,
601 MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
602 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
603 if (dma_mapping_error(sdev->device->dma_device, ioctx->dma))
605 if (dma_mapping_error(ioctx->dma))
620 * Deallocate an SRPT I/O context structure.
622 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
627 dma_unmap_single(sdev->device->dma_device, ioctx->dma,
628 MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
634 * Associate a ring of SRPT I/O context structures with the specified device.
636 static int srpt_alloc_ioctx_ring(struct srpt_device *sdev)
640 for (i = 0; i < SRPT_SRQ_SIZE; ++i) {
641 sdev->ioctx_ring[i] = srpt_alloc_ioctx(sdev);
643 if (!sdev->ioctx_ring[i])
646 sdev->ioctx_ring[i]->index = i;
653 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
654 sdev->ioctx_ring[i] = NULL;
659 /* Free the ring of SRPT I/O context structures. */
660 static void srpt_free_ioctx_ring(struct srpt_device *sdev)
664 for (i = 0; i < SRPT_SRQ_SIZE; ++i) {
665 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
666 sdev->ioctx_ring[i] = NULL;
671 * Post a receive request on the shared receive queue (SRQ) of InfiniBand device 'sdev'.
673 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
676 struct ib_recv_wr wr, *bad_wr;
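/* Tag receive WRs with SRPT_OP_RECV so their completions can be distinguished from send completions. */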
678 wr.wr_id = ioctx->index | SRPT_OP_RECV;
680 list.addr = ioctx->dma;
681 list.length = MAX_MESSAGE_SIZE;
682 list.lkey = sdev->mr->lkey;
688 return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
692 * Post an IB send request.
693 * @ch: RDMA channel to post the send request on.
694 * @ioctx: I/O context of the send request.
695 * @len: length of the request to be sent in bytes.
697 * Returns zero upon success and a non-zero value upon failure.
699 static int srpt_post_send(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
703 struct ib_send_wr wr, *bad_wr;
704 struct srpt_device *sdev = ch->sport->sdev;
706 dma_sync_single_for_device(sdev->device->dma_device, ioctx->dma,
707 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
709 list.addr = ioctx->dma;
711 list.lkey = sdev->mr->lkey;
714 wr.wr_id = ioctx->index;
717 wr.opcode = IB_WR_SEND;
718 wr.send_flags = IB_SEND_SIGNALED;
720 return ib_post_send(ch->qp, &wr, &bad_wr);
723 static int srpt_get_desc_tbl(struct srpt_ioctx *ioctx, struct srp_cmd *srp_cmd,
726 struct srp_indirect_buf *idb;
727 struct srp_direct_buf *db;
730 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
731 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
733 ioctx->rbufs = &ioctx->single_rbuf;
735 db = (void *)srp_cmd->add_data;
736 memcpy(ioctx->rbufs, db, sizeof *db);
737 ioctx->data_len = be32_to_cpu(db->len);
739 idb = (void *)srp_cmd->add_data;
741 ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
744 (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
750 if (ioctx->n_rbuf == 1)
751 ioctx->rbufs = &ioctx->single_rbuf;
754 kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
761 memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
762 ioctx->data_len = be32_to_cpu(idb->len);
769 * Modify the attributes of queue pair 'qp': allow local write, remote read,
770 * and remote write. Also transition 'qp' to state IB_QPS_INIT.
772 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
774 struct ib_qp_attr *attr;
777 attr = kzalloc(sizeof *attr, GFP_KERNEL);
781 attr->qp_state = IB_QPS_INIT;
782 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
783 IB_ACCESS_REMOTE_WRITE;
784 attr->port_num = ch->sport->port;
785 attr->pkey_index = 0;
787 ret = ib_modify_qp(qp, attr,
788 IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
796 * Change the state of a channel to 'ready to receive' (RTR).
797 * @ch: channel of the queue pair.
798 * @qp: queue pair to change the state of.
800 * Returns zero upon success and a negative value upon failure.
802 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
803 * If this structure ever becomes larger, it might be necessary to allocate
804 * it dynamically instead of on the stack.
806 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
808 struct ib_qp_attr qp_attr;
812 qp_attr.qp_state = IB_QPS_RTR;
813 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
817 qp_attr.max_dest_rd_atomic = 4;
819 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
826 * Change the state of a channel to 'ready to send' (RTS).
827 * @ch: channel of the queue pair.
828 * @qp: queue pair to change the state of.
830 * Returns zero upon success and a negative value upon failure.
832 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
833 * If this structure ever becomes larger, it might be necessary to allocate
834 * it dynamically instead of on the stack.
836 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
838 struct ib_qp_attr qp_attr;
842 qp_attr.qp_state = IB_QPS_RTS;
843 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
847 qp_attr.max_rd_atomic = 4;
849 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
855 static void srpt_reset_ioctx(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx)
859 if (ioctx->n_rdma_ius > 0 && ioctx->rdma_ius) {
860 struct rdma_iu *riu = ioctx->rdma_ius;
862 for (i = 0; i < ioctx->n_rdma_ius; ++i, ++riu)
864 kfree(ioctx->rdma_ius);
867 if (ioctx->n_rbuf > 1)
870 if (srpt_post_recv(ch->sport->sdev, ioctx))
871 PRINT_ERROR("%s", "SRQ post_recv failed - this is serious.");
872 /* We should queue it back to the free_ioctx queue. */
874 atomic_inc(&ch->req_lim_delta);
877 static void srpt_abort_scst_cmd(struct srpt_device *sdev,
878 struct scst_cmd *scmnd,
881 struct srpt_ioctx *ioctx;
882 scst_data_direction dir;
883 enum srpt_command_state orig_ioctx_state;
885 ioctx = scst_cmd_get_tgt_priv(scmnd);
887 dir = scst_cmd_get_data_direction(scmnd);
888 if (dir != SCST_DATA_NONE) {
889 dma_unmap_sg(sdev->device->dma_device,
890 scst_cmd_get_sg(scmnd),
891 scst_cmd_get_sg_cnt(scmnd),
892 scst_to_tgt_dma_dir(dir));
894 #ifdef CONFIG_SCST_EXTRACHECKS
895 switch (scmnd->state) {
896 case SCST_CMD_STATE_DATA_WAIT:
897 WARN_ON(ioctx->state != SRPT_STATE_NEED_DATA);
899 case SCST_CMD_STATE_XMIT_WAIT:
900 WARN_ON(ioctx->state != SRPT_STATE_PROCESSED);
903 WARN_ON(ioctx->state == SRPT_STATE_NEED_DATA ||
904 ioctx->state == SRPT_STATE_PROCESSED);
909 orig_ioctx_state = ioctx->state;
910 ioctx->state = SRPT_STATE_ABORTED;
912 if (orig_ioctx_state == SRPT_STATE_NEED_DATA) {
913 WARN_ON(scst_cmd_get_data_direction(ioctx->scmnd)
916 tell_initiator ? SCST_RX_STATUS_ERROR
917 : SCST_RX_STATUS_ERROR_FATAL,
918 SCST_CONTEXT_THREAD);
920 } else if (orig_ioctx_state == SRPT_STATE_PROCESSED)
923 WARN_ON("unexpected cmd state");
925 scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_FAILED);
926 WARN_ON(scmnd->state != SCST_CMD_STATE_XMIT_WAIT);
927 scst_tgt_cmd_done(scmnd, scst_estimate_context());
932 static void srpt_handle_err_comp(struct srpt_rdma_ch *ch, struct ib_wc *wc)
934 struct srpt_ioctx *ioctx;
935 struct srpt_device *sdev = ch->sport->sdev;
937 if (wc->wr_id & SRPT_OP_RECV) {
938 ioctx = sdev->ioctx_ring[wc->wr_id & ~SRPT_OP_RECV];
939 PRINT_ERROR("%s", "This is serious - SRQ is in bad state.");
941 ioctx = sdev->ioctx_ring[wc->wr_id];
944 srpt_abort_scst_cmd(sdev, ioctx->scmnd, true);
946 srpt_reset_ioctx(ch, ioctx);
950 static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
951 struct srpt_ioctx *ioctx,
952 enum scst_exec_context context)
955 scst_data_direction dir =
956 scst_cmd_get_data_direction(ioctx->scmnd);
958 if (dir != SCST_DATA_NONE)
959 dma_unmap_sg(ch->sport->sdev->device->dma_device,
960 scst_cmd_get_sg(ioctx->scmnd),
961 scst_cmd_get_sg_cnt(ioctx->scmnd),
962 scst_to_tgt_dma_dir(dir));
964 WARN_ON(ioctx->scmnd->state != SCST_CMD_STATE_XMIT_WAIT);
965 scst_tgt_cmd_done(ioctx->scmnd, context);
967 srpt_reset_ioctx(ch, ioctx);
970 /** Process an RDMA completion notification. */
971 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
972 struct srpt_ioctx *ioctx)
975 WARN_ON("ERROR: ioctx->scmnd == NULL");
976 srpt_reset_ioctx(ch, ioctx);
981 * If an RDMA completion notification has been received for a write
982 * command, tell SCST that processing can continue by calling
985 if (ioctx->state == SRPT_STATE_NEED_DATA) {
986 WARN_ON(scst_cmd_get_data_direction(ioctx->scmnd)
988 ioctx->state = SRPT_STATE_DATA_IN;
989 scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_SUCCESS,
990 scst_estimate_context());
995 * Build an SRP_RSP response.
996 * @ch: RDMA channel through which the request has been received.
997 * @ioctx: I/O context in which the SRP_RSP response will be built.
998 * @s_key: sense key that will be stored in the response.
999 * @s_code: value that will be stored in the asc_ascq field of the sense data.
1000 * @tag: tag of the request for which this response is being generated.
1002 * Returns the size in bytes of the SRP_RSP response.
1004 * An SRP_RSP response contains a SCSI status or service response. See also
1005 * section 6.9 in the T10 SRP r16a document for the format of an SRP_RSP
1006 * response. See also SPC-2 for more information about sense data.
1008 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1009 struct srpt_ioctx *ioctx, u8 s_key, u8 s_code,
1012 struct srp_rsp *srp_rsp;
1013 struct sense_data *sense;
1015 int sense_data_len = 0;
1017 srp_rsp = ioctx->buf;
1018 memset(srp_rsp, 0, sizeof *srp_rsp);
1020 limit_delta = atomic_read(&ch->req_lim_delta);
1021 atomic_sub(limit_delta, &ch->req_lim_delta);
1023 srp_rsp->opcode = SRP_RSP;
1024 srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
1027 if (s_key != NO_SENSE) {
1028 sense_data_len = sizeof *sense + (sizeof *sense % 4);
1029 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1030 srp_rsp->status = SAM_STAT_CHECK_CONDITION;
1031 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1033 sense = (struct sense_data *)(srp_rsp + 1);
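/* Error code 0x70: current, fixed-format sense data (see SPC). */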
1034 sense->err_code = 0x70;
1036 sense->asc_ascq = s_code;
1039 return sizeof(*srp_rsp) + sense_data_len;
1043 * Build a task management response, which is a specific SRP_RSP response.
1044 * @ch: RDMA channel through which the request has been received.
1045 * @ioctx: I/O context in which the SRP_RSP response will be built.
1046 * @rsp_code: RSP_CODE that will be stored in the response.
1047 * @tag: tag of the request for which this response is being generated.
1049 * Returns the size in bytes of the SRP_RSP response.
1051 * An SRP_RSP response contains a SCSI status or service response. See also
1052 * section 6.9 in the T10 SRP r16a document for the format of an SRP_RSP
1055 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1056 struct srpt_ioctx *ioctx, u8 rsp_code,
1059 struct srp_rsp *srp_rsp;
1061 int resp_data_len = 0;
1063 dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
1064 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
1066 srp_rsp = ioctx->buf;
1067 memset(srp_rsp, 0, sizeof *srp_rsp);
1069 limit_delta = atomic_read(&ch->req_lim_delta);
1070 atomic_sub(limit_delta, &ch->req_lim_delta);
1072 srp_rsp->opcode = SRP_RSP;
1073 srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
1076 if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
1078 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1079 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1080 srp_rsp->data[3] = rsp_code;
1083 return sizeof(*srp_rsp) + resp_data_len;
1089 static int srpt_handle_cmd(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx)
1091 struct scst_cmd *scmnd;
1092 struct srp_cmd *srp_cmd;
1093 struct srp_rsp *srp_rsp;
1094 scst_data_direction dir;
1095 int indirect_desc = 0;
1097 unsigned long flags;
1099 srp_cmd = ioctx->buf;
1100 srp_rsp = ioctx->buf;
1102 dir = SCST_DATA_NONE;
1103 if (srp_cmd->buf_fmt) {
1104 ret = srpt_get_desc_tbl(ioctx, srp_cmd, &indirect_desc);
1106 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1107 NO_ADD_SENSE, srp_cmd->tag);
1108 srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1112 if (indirect_desc) {
1113 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1114 NO_ADD_SENSE, srp_cmd->tag);
1115 srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1120 * The lower four bits of the buffer format field contain the
1121 * DATA-IN buffer descriptor format, and the highest four bits
1122 * contain the DATA-OUT buffer descriptor format.
1124 if (srp_cmd->buf_fmt & 0xf)
1125 /* DATA-IN: transfer data from target to initiator. */
1126 dir = SCST_DATA_READ;
1127 else if (srp_cmd->buf_fmt >> 4)
1128 /* DATA-OUT: transfer data from initiator to target. */
1129 dir = SCST_DATA_WRITE;
1132 scmnd = scst_rx_cmd(ch->scst_sess, (u8 *) &srp_cmd->lun,
1133 sizeof srp_cmd->lun, srp_cmd->cdb, 16,
1134 thread ? SCST_NON_ATOMIC : SCST_ATOMIC);
1136 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
1137 NO_ADD_SENSE, srp_cmd->tag);
1138 srp_rsp->status = SAM_STAT_TASK_SET_FULL;
1142 ioctx->scmnd = scmnd;
1144 switch (srp_cmd->task_attr) {
1145 case SRP_CMD_HEAD_OF_Q:
1146 scmnd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1148 case SRP_CMD_ORDERED_Q:
1149 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
1151 case SRP_CMD_SIMPLE_Q:
1152 scmnd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1155 scmnd->queue_type = SCST_CMD_QUEUE_ACA;
1158 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
1162 scst_cmd_set_tag(scmnd, srp_cmd->tag);
1163 scst_cmd_set_tgt_priv(scmnd, ioctx);
1164 scst_cmd_set_expected(scmnd, dir, ioctx->data_len);
1166 spin_lock_irqsave(&ch->spinlock, flags);
1167 list_add_tail(&ioctx->scmnd_list, &ch->active_scmnd_list);
1168 ch->active_scmnd_cnt++;
1169 spin_unlock_irqrestore(&ch->spinlock, flags);
1171 scst_cmd_init_done(scmnd, scst_estimate_context());
1176 WARN_ON(srp_rsp->opcode != SRP_RSP);
1182 * Process an SRP_TSK_MGMT request.
1184 * Returns 0 upon success and -1 upon failure.
1186 * Each task management function is performed by calling one of the
1187 * scst_rx_mgmt_fn*() functions. These functions will either report failure
1188 * or process the task management function asynchronously. The function
1189 * srpt_tsk_mgmt_done() will be called by the SCST core upon completion of the
1190 * task management function. When srpt_handle_tsk_mgmt() reports failure
1191 * (i.e. returns -1) a response will have been built in ioctx->buf. This
1192 * information unit has to be sent back by the caller.
1194 * For more information about SRP_TSK_MGMT information units, see also section
1195 * 6.7 in the T10 SRP r16a document.
1197 static int srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1198 struct srpt_ioctx *ioctx)
1200 struct srp_tsk_mgmt *srp_tsk;
1201 struct srpt_mgmt_ioctx *mgmt_ioctx;
1204 srp_tsk = ioctx->buf;
1206 TRACE_DBG("recv_tsk_mgmt= %d for task_tag= %lld"
1207 " using tag= %lld cm_id= %p sess= %p",
1208 srp_tsk->tsk_mgmt_func,
1209 (unsigned long long) srp_tsk->task_tag,
1210 (unsigned long long) srp_tsk->tag,
1211 ch->cm_id, ch->scst_sess);
1213 mgmt_ioctx = kmalloc(sizeof *mgmt_ioctx, GFP_ATOMIC);
1215 srpt_build_tskmgmt_rsp(ch, ioctx, SRP_TSK_MGMT_FAILED,
1220 mgmt_ioctx->ioctx = ioctx;
1221 mgmt_ioctx->ch = ch;
1222 mgmt_ioctx->tag = srp_tsk->tag;
1224 switch (srp_tsk->tsk_mgmt_func) {
1225 case SRP_TSK_ABORT_TASK:
1226 TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK");
1227 ret = scst_rx_mgmt_fn_tag(ch->scst_sess,
1231 SCST_NON_ATOMIC : SCST_ATOMIC,
1234 case SRP_TSK_ABORT_TASK_SET:
1235 TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK_SET");
1236 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1237 SCST_ABORT_TASK_SET,
1238 (u8 *) &srp_tsk->lun,
1239 sizeof srp_tsk->lun,
1241 SCST_NON_ATOMIC : SCST_ATOMIC,
1244 case SRP_TSK_CLEAR_TASK_SET:
1245 TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_TASK_SET");
1246 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1247 SCST_CLEAR_TASK_SET,
1248 (u8 *) &srp_tsk->lun,
1249 sizeof srp_tsk->lun,
1251 SCST_NON_ATOMIC : SCST_ATOMIC,
1254 case SRP_TSK_LUN_RESET:
1255 TRACE_DBG("%s", "Processing SRP_TSK_LUN_RESET");
1256 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1258 (u8 *) &srp_tsk->lun,
1259 sizeof srp_tsk->lun,
1261 SCST_NON_ATOMIC : SCST_ATOMIC,
1264 case SRP_TSK_CLEAR_ACA:
1265 TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_ACA");
1266 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
1268 (u8 *) &srp_tsk->lun,
1269 sizeof srp_tsk->lun,
1271 SCST_NON_ATOMIC : SCST_ATOMIC,
1275 TRACE_DBG("%s", "Unsupported task management function.");
1276 srpt_build_tskmgmt_rsp(ch, ioctx,
1277 SRP_TSK_MGMT_FUNC_NOT_SUPP,
1283 TRACE_DBG("%s", "Processing task management function failed.");
1284 srpt_build_tskmgmt_rsp(ch, ioctx, SRP_TSK_MGMT_FAILED,
1289 WARN_ON(srp_tsk->opcode == SRP_RSP);
1294 WARN_ON(srp_tsk->opcode != SRP_RSP);
1301 * Process a receive completion event.
1302 * @ch: RDMA channel for which the completion event has been received.
1303 * @ioctx: SRPT I/O context for which the completion event has been received.
1305 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1306 struct srpt_ioctx *ioctx)
1308 struct srp_cmd *srp_cmd;
1309 struct srp_rsp *srp_rsp;
1310 unsigned long flags;
1313 spin_lock_irqsave(&ch->spinlock, flags);
1314 if (ch->state != RDMA_CHANNEL_LIVE) {
1315 if (ch->state == RDMA_CHANNEL_CONNECTING) {
1316 list_add_tail(&ioctx->wait_list, &ch->cmd_wait_list);
1317 spin_unlock_irqrestore(&ch->spinlock, flags);
1320 spin_unlock_irqrestore(&ch->spinlock, flags);
1321 srpt_reset_ioctx(ch, ioctx);
1325 spin_unlock_irqrestore(&ch->spinlock, flags);
1327 dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
1328 MAX_MESSAGE_SIZE, DMA_FROM_DEVICE);
1330 ioctx->data_len = 0;
1332 ioctx->rbufs = NULL;
1334 ioctx->n_rdma_ius = 0;
1335 ioctx->rdma_ius = NULL;
1336 ioctx->scmnd = NULL;
1337 ioctx->state = SRPT_STATE_NEW;
1339 srp_cmd = ioctx->buf;
1340 srp_rsp = ioctx->buf;
1342 switch (srp_cmd->opcode) {
1344 if (srpt_handle_cmd(ch, ioctx) < 0)
1349 if (srpt_handle_tsk_mgmt(ch, ioctx) < 0)
1356 srpt_build_cmd_rsp(ch, ioctx, ILLEGAL_REQUEST, INVALID_CDB,
1361 dma_sync_single_for_device(ch->sport->sdev->device->dma_device,
1362 ioctx->dma, MAX_MESSAGE_SIZE,
1368 WARN_ON(srp_rsp->opcode != SRP_RSP);
1369 len = (sizeof *srp_rsp) + be32_to_cpu(srp_rsp->sense_data_len);
1371 if (ch->state != RDMA_CHANNEL_LIVE) {
1372 /* Give up if another thread modified the channel state. */
1373 PRINT_ERROR("%s: channel is in state %d", __func__, ch->state);
1374 srpt_reset_ioctx(ch, ioctx);
1375 } else if (srpt_post_send(ch, ioctx, len)) {
1376 PRINT_ERROR("%s: sending SRP_RSP response failed", __func__);
1377 srpt_reset_ioctx(ch, ioctx);
1382 * Returns true if the ioctx list is non-empty or if the ib_srpt kernel thread
1386 static inline int srpt_test_ioctx_list(void)
1388 int res = (!list_empty(&srpt_thread.thread_ioctx_list) ||
1389 unlikely(kthread_should_stop()));
1394 * Add 'ioctx' to the tail of the ioctx list and wake up the kernel thread.
1398 static inline void srpt_schedule_thread(struct srpt_ioctx *ioctx)
1400 unsigned long flags;
1402 spin_lock_irqsave(&srpt_thread.thread_lock, flags);
1403 list_add_tail(&ioctx->comp_list, &srpt_thread.thread_ioctx_list);
1404 spin_unlock_irqrestore(&srpt_thread.thread_lock, flags);
1405 wake_up(&ioctx_list_waitQ);
1409 * InfiniBand completion queue callback function.
1410 * @cq: completion queue.
1411 * @ctx: completion queue context, which was passed as the fourth argument of
1412 * the function ib_create_cq().
1414 static void srpt_completion(struct ib_cq *cq, void *ctx)
1416 struct srpt_rdma_ch *ch = ctx;
1417 struct srpt_device *sdev = ch->sport->sdev;
1419 struct srpt_ioctx *ioctx;
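/* Re-arm completion notifications, then drain all completions that are currently available. */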
1421 ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1422 while (ib_poll_cq(ch->cq, 1, &wc) > 0) {
1424 PRINT_ERROR("failed %s status= %d",
1425 wc.wr_id & SRPT_OP_RECV ? "receive" : "send",
1427 srpt_handle_err_comp(ch, &wc);
1431 if (wc.wr_id & SRPT_OP_RECV) {
1432 ioctx = sdev->ioctx_ring[wc.wr_id & ~SRPT_OP_RECV];
1435 ioctx->op = IB_WC_RECV;
1436 srpt_schedule_thread(ioctx);
1438 srpt_handle_new_iu(ch, ioctx);
1441 ioctx = sdev->ioctx_ring[wc.wr_id];
1445 ioctx->op = wc.opcode;
1446 srpt_schedule_thread(ioctx);
1448 switch (wc.opcode) {
1450 srpt_handle_send_comp(ch, ioctx,
1451 scst_estimate_context());
1453 case IB_WC_RDMA_WRITE:
1454 case IB_WC_RDMA_READ:
1455 srpt_handle_rdma_comp(ch, ioctx);
1465 * Create a completion queue and a queue pair on the specified device.
1467 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1469 struct ib_qp_init_attr *qp_init;
1470 struct srpt_device *sdev = ch->sport->sdev;
1474 qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
1478 /* Create a completion queue (CQ). */
1480 cqe = SRPT_RQ_SIZE + SRPT_SQ_SIZE - 1;
1481 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(RHEL_RELEASE_CODE)
1482 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe);
1484 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe, 0);
1486 if (IS_ERR(ch->cq)) {
1487 ret = PTR_ERR(ch->cq);
1488 PRINT_ERROR("failed to create_cq cqe= %d ret= %d", cqe, ret);
1492 /* Request completion notification. */
1494 ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1496 /* Create a queue pair (QP). */
1498 qp_init->qp_context = (void *)ch;
1499 qp_init->event_handler = srpt_qp_event;
1500 qp_init->send_cq = ch->cq;
1501 qp_init->recv_cq = ch->cq;
1502 qp_init->srq = sdev->srq;
1503 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1504 qp_init->qp_type = IB_QPT_RC;
1505 qp_init->cap.max_send_wr = SRPT_SQ_SIZE;
1506 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
1508 ch->qp = ib_create_qp(sdev->pd, qp_init);
1509 if (IS_ERR(ch->qp)) {
1510 ret = PTR_ERR(ch->qp);
1511 ib_destroy_cq(ch->cq);
1512 PRINT_ERROR("failed to create_qp ret= %d", ret);
1516 TRACE_DBG("%s: max_cqe= %d max_sge= %d cm_id= %p",
1517 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1520 /* Modify the attributes and the state of queue pair ch->qp. */
1522 ret = srpt_init_ch_qp(ch, ch->qp);
1524 ib_destroy_qp(ch->qp);
1525 ib_destroy_cq(ch->cq);
1529 atomic_set(&ch->req_lim_delta, SRPT_RQ_SIZE);
1536 * Look up the RDMA channel that corresponds to the specified cm_id.
1538 * Return NULL if no matching RDMA channel has been found.
1540 static struct srpt_rdma_ch *srpt_find_channel(struct ib_cm_id *cm_id, bool del)
1542 struct srpt_device *sdev = cm_id->context;
1543 struct srpt_rdma_ch *ch;
1545 spin_lock_irq(&sdev->spinlock);
1546 list_for_each_entry(ch, &sdev->rch_list, list) {
1547 if (ch->cm_id == cm_id) {
1549 list_del(&ch->list);
1550 spin_unlock_irq(&sdev->spinlock);
1555 spin_unlock_irq(&sdev->spinlock);
1561 * Release all resources associated with the specified RDMA channel.
1563 * Note: the caller must have removed the channel from the channel list
1564 * before calling this function.
1566 static void srpt_release_channel(struct srpt_rdma_ch *ch, int destroy_cmid)
1570 WARN_ON(srpt_find_channel(ch->cm_id, false) == ch);
1572 if (ch->cm_id && destroy_cmid) {
1573 TRACE_DBG("%s: destroy cm_id= %p", __func__, ch->cm_id);
1574 ib_destroy_cm_id(ch->cm_id);
1578 ib_destroy_qp(ch->qp);
1579 ib_destroy_cq(ch->cq);
1581 if (ch->scst_sess) {
1582 struct srpt_ioctx *ioctx, *ioctx_tmp;
1584 if (ch->active_scmnd_cnt)
1585 PRINT_INFO("Releasing session %s which still has %d"
1587 ch->sess_name, ch->active_scmnd_cnt);
1589 PRINT_INFO("Releasing session %s", ch->sess_name);
1591 spin_lock_irq(&ch->spinlock);
1592 list_for_each_entry_safe(ioctx, ioctx_tmp,
1593 &ch->active_scmnd_list, scmnd_list) {
1594 spin_unlock_irq(&ch->spinlock);
1597 srpt_abort_scst_cmd(ch->sport->sdev,
1598 ioctx->scmnd, true);
1600 spin_lock_irq(&ch->spinlock);
1602 WARN_ON(!list_empty(&ch->active_scmnd_list));
1603 WARN_ON(ch->active_scmnd_cnt != 0);
1604 spin_unlock_irq(&ch->spinlock);
1606 scst_unregister_session(ch->scst_sess, 0, NULL);
1607 ch->scst_sess = NULL;
1615 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
1616 struct ib_cm_req_event_param *param,
1619 struct srpt_device *sdev = cm_id->context;
1620 struct srp_login_req *req;
1621 struct srp_login_rsp *rsp;
1622 struct srp_login_rej *rej;
1623 struct ib_cm_rep_param *rep_param;
1624 struct srpt_rdma_ch *ch, *tmp_ch;
1628 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1629 WARN_ON(!sdev || !private_data);
1630 if (!sdev || !private_data)
1633 if (WARN_ON(!sdev || !private_data))
1637 req = (struct srp_login_req *)private_data;
1639 it_iu_len = be32_to_cpu(req->req_it_iu_len);
1641 PRINT_INFO("Received SRP_LOGIN_REQ with"
1642 " i_port_id 0x%llx:0x%llx, t_port_id 0x%llx:0x%llx and length %d"
1643 " on port %d (guid=0x%llx:0x%llx)",
1644 (unsigned long long)be64_to_cpu(*(u64 *)&req->initiator_port_id[0]),
1645 (unsigned long long)be64_to_cpu(*(u64 *)&req->initiator_port_id[8]),
1646 (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[0]),
1647 (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[8]),
1650 (unsigned long long)be64_to_cpu(*(u64 *)
1651 &sdev->port[param->port - 1].gid.raw[0]),
1652 (unsigned long long)be64_to_cpu(*(u64 *)
1653 &sdev->port[param->port - 1].gid.raw[8]));
1655 rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
1656 rej = kzalloc(sizeof *rej, GFP_KERNEL);
1657 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
1659 if (!rsp || !rej || !rep_param) {
1664 if (it_iu_len > MAX_MESSAGE_SIZE || it_iu_len < 64) {
1666 cpu_to_be32(SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
1668 PRINT_ERROR("rejected SRP_LOGIN_REQ because its"
1669 " length (%d bytes) is invalid", it_iu_len);
1673 if ((req->req_flags & 0x3) == SRP_MULTICHAN_SINGLE) {
1674 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
1676 spin_lock_irq(&sdev->spinlock);
1678 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
1679 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
1680 && !memcmp(ch->t_port_id, req->target_port_id, 16)
1681 && param->port == ch->sport->port
1682 && param->listen_id == ch->sport->sdev->cm_id
1684 enum rdma_ch_state prev_state;
1686 /* found an existing channel */
1687 TRACE_DBG("Found existing channel name= %s"
1688 " cm_id= %p state= %d",
1689 ch->sess_name, ch->cm_id, ch->state);
1691 prev_state = ch->state;
1692 if (ch->state == RDMA_CHANNEL_LIVE)
1693 ch->state = RDMA_CHANNEL_DISCONNECTING;
1694 else if (ch->state == RDMA_CHANNEL_CONNECTING)
1695 list_del(&ch->list);
1697 spin_unlock_irq(&sdev->spinlock);
1700 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
1702 if (prev_state == RDMA_CHANNEL_LIVE) {
1703 ib_send_cm_dreq(ch->cm_id, NULL, 0);
1704 PRINT_INFO("disconnected"
1705 " session %s because a new"
1706 " SRP_LOGIN_REQ has been received.",
1708 } else if (prev_state ==
1709 RDMA_CHANNEL_CONNECTING) {
1710 PRINT_ERROR("%s", "rejected"
1711 " SRP_LOGIN_REQ because another login"
1712 " request is being processed.");
1713 ib_send_cm_rej(ch->cm_id,
1714 IB_CM_REJ_NO_RESOURCES,
1716 srpt_release_channel(ch, 1);
1719 spin_lock_irq(&sdev->spinlock);
1723 spin_unlock_irq(&sdev->spinlock);
1726 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
1728 if (((u64) (*(u64 *) req->target_port_id) !=
1729 cpu_to_be64(srpt_service_guid)) ||
1730 ((u64) (*(u64 *) (req->target_port_id + 8)) !=
1731 cpu_to_be64(srpt_service_guid))) {
1733 cpu_to_be32(SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
1735 PRINT_ERROR("%s", "rejected SRP_LOGIN_REQ because it"
1736 " has an invalid target port identifier.");
1740 ch = kzalloc(sizeof *ch, GFP_KERNEL);
1742 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1744 "rejected SRP_LOGIN_REQ because out of memory.");
1749 spin_lock_init(&ch->spinlock);
1750 memcpy(ch->i_port_id, req->initiator_port_id, 16);
1751 memcpy(ch->t_port_id, req->target_port_id, 16);
1752 ch->sport = &sdev->port[param->port - 1];
1754 ch->state = RDMA_CHANNEL_CONNECTING;
1755 INIT_LIST_HEAD(&ch->cmd_wait_list);
1756 INIT_LIST_HEAD(&ch->active_scmnd_list);
1758 ret = srpt_create_ch_ib(ch);
1760 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1761 PRINT_ERROR("%s", "rejected SRP_LOGIN_REQ because creating"
1762 " a new RDMA channel failed.");
1766 ret = srpt_ch_qp_rtr(ch, ch->qp);
1768 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1769 PRINT_ERROR("rejected SRP_LOGIN_REQ because enabling"
1770 " RTR failed (error code = %d)", ret);
1774 if (use_port_guid_in_session_name) {
1776 * If the kernel module parameter use_port_guid_in_session_name
1777 * has been specified, use a combination of the target port
1778 * GUID and the initiator port ID as the session name. This
1779 * was the original behavior of the SRP target implementation
1780 * (i.e. before the SRPT was included in OFED 1.3).
1782 snprintf(ch->sess_name, sizeof(ch->sess_name),
1784 (unsigned long long)be64_to_cpu(*(u64 *)
1785 &sdev->port[param->port - 1].gid.raw[8]),
1786 (unsigned long long)be64_to_cpu(*(u64 *)
1787 (ch->i_port_id + 8)));
1790 * Default behavior: use the initiator port identifier as the
1793 snprintf(ch->sess_name, sizeof(ch->sess_name),
1795 (unsigned long long)be64_to_cpu(*(u64 *)ch->i_port_id),
1796 (unsigned long long)be64_to_cpu(*(u64 *)
1797 (ch->i_port_id + 8)));
1800 TRACE_DBG("registering session %s", ch->sess_name);
1802 BUG_ON(!sdev->scst_tgt);
1803 ch->scst_sess = scst_register_session(sdev->scst_tgt, 0, ch->sess_name,
1805 if (!ch->scst_sess) {
1806 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1807 TRACE_DBG("%s", "Failed to create scst sess");
1811 TRACE_DBG("Establish connection sess=%p name=%s cm_id=%p",
1812 ch->scst_sess, ch->sess_name, ch->cm_id);
1814 scst_sess_set_tgt_priv(ch->scst_sess, ch);
1816 /* create srp_login_response */
1817 rsp->opcode = SRP_LOGIN_RSP;
1818 rsp->tag = req->tag;
1819 rsp->max_it_iu_len = req->req_it_iu_len;
1820 rsp->max_ti_iu_len = req->req_it_iu_len;
1822 cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1823 rsp->req_lim_delta = cpu_to_be32(SRPT_RQ_SIZE);
1824 atomic_set(&ch->req_lim_delta, 0);
1826 /* create cm reply */
1827 rep_param->qp_num = ch->qp->qp_num;
1828 rep_param->private_data = (void *)rsp;
1829 rep_param->private_data_len = sizeof *rsp;
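/* An RNR retry count of 7 means that RNR NAKs are retried indefinitely. */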
1830 rep_param->rnr_retry_count = 7;
1831 rep_param->flow_control = 1;
1832 rep_param->failover_accepted = 0;
1834 rep_param->responder_resources = 4;
1835 rep_param->initiator_depth = 4;
1837 ret = ib_send_cm_rep(cm_id, rep_param);
1839 PRINT_ERROR("sending SRP_LOGIN_REQ response failed"
1840 " (error code = %d)", ret);
1841 goto release_channel;
1844 spin_lock_irq(&sdev->spinlock);
1845 list_add_tail(&ch->list, &sdev->rch_list);
1846 spin_unlock_irq(&sdev->spinlock);
1851 scst_unregister_session(ch->scst_sess, 0, NULL);
1852 ch->scst_sess = NULL;
1855 ib_destroy_qp(ch->qp);
1856 ib_destroy_cq(ch->cq);
1862 rej->opcode = SRP_LOGIN_REJ;
1863 rej->tag = req->tag;
1865 cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1867 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1868 (void *)rej, sizeof *rej);
1879 * Release the channel with the specified cm_id.
1881 * Returns one to indicate that the caller of srpt_cm_handler() should destroy
1884 static void srpt_find_and_release_channel(struct ib_cm_id *cm_id)
1886 struct srpt_rdma_ch *ch;
1888 ch = srpt_find_channel(cm_id, true);
1890 srpt_release_channel(ch, 0);
1893 static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
1895 PRINT_INFO("%s", "Received InfiniBand REJ packet.");
1896 srpt_find_and_release_channel(cm_id);
1900 * Process an IB_CM_RTU_RECEIVED or IB_CM_USER_ESTABLISHED event.
1902 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
1903 * and that the recipient may begin transmitting (RTU = ready to use).
1905 static int srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
1907 struct srpt_rdma_ch *ch;
1910 ch = srpt_find_channel(cm_id, false);
1914 if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_CONNECTING,
1915 RDMA_CHANNEL_LIVE)) {
1916 struct srpt_ioctx *ioctx, *ioctx_tmp;
1918 ret = srpt_ch_qp_rts(ch, ch->qp);
1920 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
1922 list_del(&ioctx->wait_list);
1923 srpt_handle_new_iu(ch, ioctx);
1925 if (ret && srpt_test_and_set_channel_state(ch,
1927 RDMA_CHANNEL_DISCONNECTING)) {
1928 TRACE_DBG("cm_id=%p sess_name=%s state=%d",
1929 cm_id, ch->sess_name, ch->state);
1930 ib_send_cm_dreq(ch->cm_id, NULL, 0);
1932 } else if (ch->state == RDMA_CHANNEL_DISCONNECTING) {
1933 TRACE_DBG("cm_id=%p sess_name=%s state=%d",
1934 cm_id, ch->sess_name, ch->state);
1935 ib_send_cm_dreq(ch->cm_id, NULL, 0);
1943 static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
1945 PRINT_INFO("%s", "Received InfiniBand TimeWait exit.");
1946 srpt_find_and_release_channel(cm_id);
1949 static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
1951 PRINT_INFO("%s", "Received InfiniBand REP error.");
1952 srpt_find_and_release_channel(cm_id);
1955 static int srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
1957 struct srpt_rdma_ch *ch;
1959 ch = srpt_find_channel(cm_id, false);
1963 TRACE_DBG("%s: cm_id= %p ch->state= %d",
1964 __func__, cm_id, ch->state);
1966 switch (ch->state) {
1967 case RDMA_CHANNEL_LIVE:
1968 case RDMA_CHANNEL_CONNECTING:
1969 ib_send_cm_drep(ch->cm_id, NULL, 0);
1970 PRINT_INFO("Received DREQ and sent DREP for session %s.",
1973 case RDMA_CHANNEL_DISCONNECTING:
1981 static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
1983 PRINT_INFO("%s", "Received InfiniBand DREP message.");
1984 srpt_find_and_release_channel(cm_id);
1988 * IB connection manager callback function.
1990 * A non-zero return value will make the caller destroy the CM ID.
1992 * Note: srpt_add_one passes a struct srpt_device* as the third argument to
1993 * the ib_create_cm_id() call.
1995 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1999 switch (event->event) {
2000 case IB_CM_REQ_RECEIVED:
2001 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2002 event->private_data);
2004 case IB_CM_REJ_RECEIVED:
2005 srpt_cm_rej_recv(cm_id);
2008 case IB_CM_RTU_RECEIVED:
2009 case IB_CM_USER_ESTABLISHED:
2010 ret = srpt_cm_rtu_recv(cm_id);
2012 case IB_CM_DREQ_RECEIVED:
2013 ret = srpt_cm_dreq_recv(cm_id);
2015 case IB_CM_DREP_RECEIVED:
2016 srpt_cm_drep_recv(cm_id);
2019 case IB_CM_TIMEWAIT_EXIT:
2020 srpt_cm_timewait_exit(cm_id);
2023 case IB_CM_REP_ERROR:
2024 srpt_cm_rep_error(cm_id);
2034 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
2035 struct srpt_ioctx *ioctx,
2036 struct scst_cmd *scmnd)
2038 struct scatterlist *scat;
2039 scst_data_direction dir;
2040 struct rdma_iu *riu;
2041 struct srp_direct_buf *db;
2042 dma_addr_t dma_addr;
2051 scat = scst_cmd_get_sg(scmnd);
2052 dir = scst_cmd_get_data_direction(scmnd);
2053 count = dma_map_sg(ch->sport->sdev->device->dma_device, scat,
2054 scst_cmd_get_sg_cnt(scmnd),
2055 scst_to_tgt_dma_dir(dir));
2056 if (unlikely(!count))
2059 if (ioctx->rdma_ius && ioctx->n_rdma_ius)
2060 nrdma = ioctx->n_rdma_ius;
2062 nrdma = count / SRPT_DEF_SG_PER_WQE + ioctx->n_rbuf;
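/* Worst-case estimate: one rdma_iu per remote buffer plus one extra per SRPT_DEF_SG_PER_WQE mapped SG entries. */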
2064 ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu,
2065 scst_cmd_atomic(scmnd)
2066 ? GFP_ATOMIC : GFP_KERNEL);
2067 if (!ioctx->rdma_ius) {
2068 dma_unmap_sg(ch->sport->sdev->device->dma_device,
2069 scat, scst_cmd_get_sg_cnt(scmnd),
2070 scst_to_tgt_dma_dir(dir));
2074 ioctx->n_rdma_ius = nrdma;
2078 tsize = (dir == SCST_DATA_READ) ?
2079 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
2080 dma_len = sg_dma_len(&scat[0]);
2081 riu = ioctx->rdma_ius;
2084 * For each remote descriptor, calculate the number of ib_sge entries
2085 * needed. If at most SRPT_DEF_SG_PER_WQE entries are needed per RDMA
2086 * operation, one rdma_iu (one RDMA work request) per remote descriptor
2088 * suffices; otherwise extra rdma_iu entries are allocated for the extra ib_sge entries.
2092 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
2093 rsize = be32_to_cpu(db->len);
2094 raddr = be64_to_cpu(db->va);
2096 riu->rkey = be32_to_cpu(db->key);
2099 /* calculate how many sge required for this remote_buf */
2100 while (rsize > 0 && tsize > 0) {
2102 if (rsize >= dma_len) {
2110 dma_len = sg_dma_len(&scat[j]);
2120 if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
2122 kmalloc(riu->sge_cnt * sizeof *riu->sge,
2123 scst_cmd_atomic(scmnd)
2124 ? GFP_ATOMIC : GFP_KERNEL);
2132 riu->rkey = be32_to_cpu(db->key);
2136 riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
2137 scst_cmd_atomic(scmnd)
2138 ? GFP_ATOMIC : GFP_KERNEL);
2147 scat = scst_cmd_get_sg(scmnd);
2148 tsize = (dir == SCST_DATA_READ) ?
2149 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
2150 riu = ioctx->rdma_ius;
2151 dma_len = sg_dma_len(&scat[0]);
2152 dma_addr = sg_dma_address(&scat[0]);
2154 /* This second loop maps the SG addresses onto rdma_iu->ib_sge entries. */
2156 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
2157 rsize = be32_to_cpu(db->len);
2161 while (rsize > 0 && tsize > 0) {
2162 sge->addr = dma_addr;
2163 sge->lkey = ch->sport->sdev->mr->lkey;
2165 if (rsize >= dma_len) {
2167 (tsize < dma_len) ? tsize : dma_len;
2174 dma_len = sg_dma_len(&scat[j]);
2176 sg_dma_address(&scat[j]);
2180 sge->length = (tsize < rsize) ? tsize : rsize;
2188 if (k == riu->sge_cnt && rsize > 0) {
2192 } else if (rsize > 0)
2200 while (ioctx->n_rdma)
2201 kfree(ioctx->rdma_ius[ioctx->n_rdma--].sge);
2203 kfree(ioctx->rdma_ius);
2205 dma_unmap_sg(ch->sport->sdev->device->dma_device,
2206 scat, scst_cmd_get_sg_cnt(scmnd),
2207 scst_to_tgt_dma_dir(dir));
2212 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
2213 scst_data_direction dir)
2215 struct ib_send_wr wr;
2216 struct ib_send_wr *bad_wr;
2217 struct rdma_iu *riu;
2221 riu = ioctx->rdma_ius;
2222 memset(&wr, 0, sizeof wr);
2224 for (i = 0; i < ioctx->n_rdma; ++i, ++riu) {
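/* A data-in transfer (SCST_DATA_READ) is performed with RDMA WRITEs to the initiator; a data-out transfer uses RDMA READs from the initiator's buffers. */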
2225 wr.opcode = (dir == SCST_DATA_READ) ?
2226 IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
2228 wr.wr_id = ioctx->index;
2229 wr.wr.rdma.remote_addr = riu->raddr;
2230 wr.wr.rdma.rkey = riu->rkey;
2231 wr.num_sge = riu->sge_cnt;
2232 wr.sg_list = riu->sge;
2234 /* Request a completion event only for the last RDMA work request, and only for data-out (RDMA READ) transfers. */
2235 if (i == (ioctx->n_rdma - 1) && dir == SCST_DATA_WRITE)
2236 wr.send_flags = IB_SEND_SIGNALED;
2238 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2247 * Start data transfer between initiator and target. Must not block.
2249 static int srpt_xfer_data(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
2250 struct scst_cmd *scmnd)
2254 ret = srpt_map_sg_to_ib_sge(ch, ioctx, scmnd);
2256 PRINT_ERROR("%s[%d] ret=%d", __func__, __LINE__, ret);
2257 ret = SCST_TGT_RES_QUEUE_FULL;
2261 ret = srpt_perform_rdmas(ch, ioctx, scst_cmd_get_data_direction(scmnd));
2263 PRINT_ERROR("%s[%d] ret=%d", __func__, __LINE__, ret);
2264 if (ret == -EAGAIN || ret == -ENOMEM)
2265 ret = SCST_TGT_RES_QUEUE_FULL;
2267 ret = SCST_TGT_RES_FATAL_ERROR;
2271 ret = SCST_TGT_RES_SUCCESS;
2278 * Called by the SCST core to inform ib_srpt that data reception from the
2279 * initiator should start (SCST_DATA_WRITE). Must not block.
2281 static int srpt_rdy_to_xfer(struct scst_cmd *scmnd)
2283 struct srpt_rdma_ch *ch;
2284 struct srpt_ioctx *ioctx;
2286 ioctx = scst_cmd_get_tgt_priv(scmnd);
2289 ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2292 if (ch->state == RDMA_CHANNEL_DISCONNECTING)
2293 return SCST_TGT_RES_FATAL_ERROR;
2294 else if (ch->state == RDMA_CHANNEL_CONNECTING)
2295 return SCST_TGT_RES_QUEUE_FULL;
2297 ioctx->state = SRPT_STATE_NEED_DATA;
2299 return srpt_xfer_data(ch, ioctx, scmnd);
2303 * Called by the SCST core. Transmits the response buffer and status held in
2304 * 'scmnd'. Must not block.
2306 static int srpt_xmit_response(struct scst_cmd *scmnd)
2308 struct srpt_rdma_ch *ch;
2309 struct srpt_ioctx *ioctx;
2310 struct srp_rsp *srp_rsp;
2312 int ret = SCST_TGT_RES_SUCCESS;
2316 ioctx = scst_cmd_get_tgt_priv(scmnd);
2319 ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
2322 tag = scst_cmd_get_tag(scmnd);
2324 ioctx->state = SRPT_STATE_PROCESSED;
2326 if (ch->state != RDMA_CHANNEL_LIVE) {
2327 PRINT_ERROR("%s: tag= %lld channel in bad state %d",
2328 __func__, (unsigned long long)tag, ch->state);
2330 if (ch->state == RDMA_CHANNEL_DISCONNECTING)
2331 ret = SCST_TGT_RES_FATAL_ERROR;
2332 else if (ch->state == RDMA_CHANNEL_CONNECTING)
2333 ret = SCST_TGT_RES_QUEUE_FULL;
2335 if (unlikely(scst_cmd_aborted(scmnd)))
2341 dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
2342 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
2344 srp_rsp = ioctx->buf;
2346 if (unlikely(scst_cmd_aborted(scmnd))) {
2347 PRINT_ERROR("%s: tag= %lld has already been aborted",
2348 __func__, (unsigned long long)tag);
2352 dir = scst_cmd_get_data_direction(scmnd);
2353 status = scst_cmd_get_status(scmnd) & 0xff;
2355 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE, NO_ADD_SENSE, tag);
2357 if (SCST_SENSE_VALID(scst_cmd_get_sense_buffer(scmnd))) {
2358 srp_rsp->sense_data_len = scst_cmd_get_sense_buffer_len(scmnd);
2359 if (srp_rsp->sense_data_len >
2360 (MAX_MESSAGE_SIZE - sizeof *srp_rsp))
2361 srp_rsp->sense_data_len =
2362 MAX_MESSAGE_SIZE - sizeof *srp_rsp;
2364 memcpy((u8 *) (srp_rsp + 1), scst_cmd_get_sense_buffer(scmnd),
2365 srp_rsp->sense_data_len);
2367 srp_rsp->sense_data_len = cpu_to_be32(srp_rsp->sense_data_len);
2368 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
2371 status = SAM_STAT_CHECK_CONDITION;
2374 srp_rsp->status = status;
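
	/*
	 * Both the RDMA write work requests for the data-in buffer (posted by
	 * srpt_xfer_data() below) and the SRP_RSP send are posted to the same
	 * queue pair, so the initiator observes the data before the response
	 * that reports its status.
	 */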
	/* For read commands, transfer the data to the initiator. */
	if (dir == SCST_DATA_READ && scst_cmd_get_resp_data_len(scmnd)) {
		ret = srpt_xfer_data(ch, ioctx, scmnd);
		if (ret != SCST_TGT_RES_SUCCESS) {
			PRINT_ERROR("%s: tag= %lld xfer_data failed",
				    __func__, (unsigned long long)tag);
			goto out;
		}
	}

	if (srpt_post_send(ch, ioctx,
			   sizeof *srp_rsp +
			   be32_to_cpu(srp_rsp->sense_data_len))) {
		PRINT_ERROR("%s: ch->state= %d tag= %lld",
			    __func__, ch->state,
			    (unsigned long long)tag);
		ret = SCST_TGT_RES_FATAL_ERROR;
	}

out:
	return ret;

out_aborted:
	ret = SCST_TGT_RES_SUCCESS;
	scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_ABORTED);
	ioctx->state = SRPT_STATE_ABORTED;
	WARN_ON(scmnd->state != SCST_CMD_STATE_XMIT_WAIT);
	scst_tgt_cmd_done(scmnd, SCST_CONTEXT_SAME);
	goto out;
}

/*
 * Called by the SCST core to inform ib_srpt that a received task management
 * function has been completed. Must not block.
 */
static void srpt_tsk_mgmt_done(struct scst_mgmt_cmd *mcmnd)
{
	struct srpt_rdma_ch *ch;
	struct srpt_mgmt_ioctx *mgmt_ioctx;
	struct srpt_ioctx *ioctx;
	int rsp_len;

	mgmt_ioctx = scst_mgmt_cmd_get_tgt_priv(mcmnd);
	BUG_ON(!mgmt_ioctx);

	ch = mgmt_ioctx->ch;
	BUG_ON(!ch);

	ioctx = mgmt_ioctx->ioctx;
	BUG_ON(!ioctx);

	TRACE_DBG("%s: tsk_mgmt_done for tag= %lld status=%d",
		  __func__, (unsigned long long)mgmt_ioctx->tag,
		  scst_mgmt_cmd_get_status(mcmnd));

	rsp_len = srpt_build_tskmgmt_rsp(ch, ioctx,
					 (scst_mgmt_cmd_get_status(mcmnd) ==
					  SCST_MGMT_STATUS_SUCCESS) ?
					 SRP_TSK_MGMT_SUCCESS :
					 SRP_TSK_MGMT_FAILED,
					 mgmt_ioctx->tag);
	srpt_post_send(ch, ioctx, rsp_len);

	scst_mgmt_cmd_set_tgt_priv(mcmnd, NULL);

	kfree(mgmt_ioctx);
}

/*
 * Called by the SCST core to inform ib_srpt that the command 'scmnd' is about
 * to be freed. May be called in IRQ context.
 */
static void srpt_on_free_cmd(struct scst_cmd *scmnd)
{
	struct srpt_rdma_ch *ch;
	struct srpt_ioctx *ioctx;

	ioctx = scst_cmd_get_tgt_priv(scmnd);
	BUG_ON(!ioctx);

	ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
	BUG_ON(!ch);

	spin_lock_irq(&ch->spinlock);
	list_del(&ioctx->scmnd_list);
	ch->active_scmnd_cnt--;
	spin_unlock_irq(&ch->spinlock);

	srpt_reset_ioctx(ch, ioctx);
	scst_cmd_set_tgt_priv(scmnd, NULL);
}
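
/*
 * Note: kernel 2.6.20 changed the workqueue API. Before 2.6.20, INIT_WORK()
 * took an extra argument that was passed to the work function as a 'void *';
 * from 2.6.20 on the work function receives a 'struct work_struct *' and uses
 * container_of() to recover its context. The #if blocks below and the matching
 * INIT_WORK() call in srpt_add_one() select the appropriate variant.
 */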
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
/* A vanilla 2.6.19 or older kernel without backported OFED kernel headers. */
static void srpt_refresh_port_work(void *ctx)
#else
static void srpt_refresh_port_work(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
	struct srpt_port *sport = (struct srpt_port *)ctx;
#else
	struct srpt_port *sport = container_of(work, struct srpt_port, work);
#endif

	srpt_refresh_port(sport);
}

/*
 * Called by the SCST core to detect target adapters. Returns the number of
 * detected target adapters.
 */
static int srpt_detect(struct scst_tgt_template *tp)
{
	int device_count;

	device_count = atomic_read(&srpt_device_count);
	TRACE_EXIT_RES(device_count);
	return device_count;
}

/*
 * Callback function called by the SCST core from scst_unregister() to free up
 * the resources associated with device scst_tgt.
 */
static int srpt_release(struct scst_tgt *scst_tgt)
{
	struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
	struct srpt_rdma_ch *ch, *tmp_ch;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
	WARN_ON(!sdev);
	if (!sdev)
		return -ENODEV;
#else
	if (WARN_ON(!sdev))
		return -ENODEV;
#endif
	srpt_unregister_procfs_entry(scst_tgt->tgtt);

	spin_lock_irq(&sdev->spinlock);
	list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
		list_del(&ch->list);
		spin_unlock_irq(&sdev->spinlock);
		srpt_release_channel(ch, 1);
		spin_lock_irq(&sdev->spinlock);
	}
	spin_unlock_irq(&sdev->spinlock);

	srpt_unregister_mad_agent(sdev);
	scst_tgt_set_tgt_priv(scst_tgt, NULL);
	return 0;
}

/*
 * Entry point for ib_srpt's kernel thread. This kernel thread is only created
 * when the module parameter 'thread' is not zero (the default is zero).
 * This thread processes the ioctx list srpt_thread.thread_ioctx_list.
 */
static int srpt_ioctx_thread(void *arg)
{
	struct srpt_ioctx *ioctx;

	/* Hibernation / freezing of the SRPT kernel thread is not supported. */
	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&srpt_thread.thread_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!srpt_test_ioctx_list()) {
			add_wait_queue_exclusive(&ioctx_list_waitQ, &wait);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (srpt_test_ioctx_list())
					break;
				spin_unlock_irq(&srpt_thread.thread_lock);
				schedule();
				spin_lock_irq(&srpt_thread.thread_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&ioctx_list_waitQ, &wait);
		}

		while (!list_empty(&srpt_thread.thread_ioctx_list)) {
			ioctx = list_entry(srpt_thread.thread_ioctx_list.next,
					   struct srpt_ioctx, comp_list);

			list_del(&ioctx->comp_list);

			spin_unlock_irq(&srpt_thread.thread_lock);
			switch (ioctx->op) {
			case IB_WC_SEND:
				srpt_handle_send_comp(ioctx->ch, ioctx,
					SCST_CONTEXT_DIRECT);
				break;
			case IB_WC_RDMA_WRITE:
			case IB_WC_RDMA_READ:
				srpt_handle_rdma_comp(ioctx->ch, ioctx);
				break;
			case IB_WC_RECV:
				srpt_handle_new_iu(ioctx->ch, ioctx);
				break;
			default:
				break;
			}
			spin_lock_irq(&srpt_thread.thread_lock);
		}
	}
	spin_unlock_irq(&srpt_thread.thread_lock);

	return 0;
}

/* SCST target template for the SRP target implementation. */
static struct scst_tgt_template srpt_template = {
	.name = DRV_NAME,
	.sg_tablesize = SRPT_DEF_SG_TABLESIZE,
	.xmit_response_atomic = 1,
	.rdy_to_xfer_atomic = 1,
	.detect = srpt_detect,
	.release = srpt_release,
	.xmit_response = srpt_xmit_response,
	.rdy_to_xfer = srpt_rdy_to_xfer,
	.on_free_cmd = srpt_on_free_cmd,
	.task_mgmt_fn_done = srpt_tsk_mgmt_done
};
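
/*
 * The xmit_response_atomic and rdy_to_xfer_atomic flags above tell the SCST
 * core that srpt_xmit_response() and srpt_rdy_to_xfer() may be called from
 * atomic context, which is why those callbacks must not block.
 */
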
/*
 * The callback function srpt_release_class_dev() is called whenever a
 * device is removed from the /sys/class/infiniband_srpt device class.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static void srpt_release_class_dev(struct class_device *class_dev)
#else
static void srpt_release_class_dev(struct device *dev)
#endif
{
}

#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
static int srpt_trace_level_show(struct seq_file *seq, void *v)
{
	return scst_proc_log_entry_read(seq, trace_flag, NULL);
}

static ssize_t srpt_proc_trace_level_write(struct file *file,
	const char __user *buf, size_t length, loff_t *off)
{
	return scst_proc_log_entry_write(file, buf, length, &trace_flag,
		DEFAULT_SRPT_TRACE_FLAGS, NULL);
}

static struct scst_proc_data srpt_log_proc_data = {
	SCST_DEF_RW_SEQ_OP(srpt_proc_trace_level_write)
	.show = srpt_trace_level_show,
};
#endif /* CONFIG_SCST_DEBUG || CONFIG_SCST_TRACING */

static struct class_attribute srpt_class_attrs[] = {
	__ATTR_NULL,
};

static struct class srpt_class = {
	.name = "infiniband_srpt",
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	.release = srpt_release_class_dev,
#else
	.dev_release = srpt_release_class_dev,
#endif
	.class_attrs = srpt_class_attrs,
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static ssize_t show_login_info(struct class_device *class_dev, char *buf)
#else
static ssize_t show_login_info(struct device *dev,
			       struct device_attribute *attr, char *buf)
#endif
{
	struct srpt_device *sdev =
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
		container_of(class_dev, struct srpt_device, class_dev);
#else
		container_of(dev, struct srpt_device, dev);
#endif
	struct srpt_port *sport;
	int i;
	int len = 0;

	for (i = 0; i < sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i];

		len += sprintf(buf + len,
			       "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
			       "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
			       "service_id=%016llx\n",
			       (unsigned long long) srpt_service_guid,
			       (unsigned long long) srpt_service_guid,
			       be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
			       be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
			       be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
			       be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
			       be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
			       be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
			       be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
			       be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
			       (unsigned long long) srpt_service_guid);
	}

	return len;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static CLASS_DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
#else
static DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
#endif
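
/*
 * The login_info attribute exposes, for each port, the parameter string an
 * SRP initiator needs in order to log in to this target. A purely
 * illustrative way to read it (the device name depends on the installed HCA):
 *
 *   cat /sys/class/infiniband_srpt/srpt-mlx4_0/login_info
 *
 * The ioc_guid, dgid, pkey and service_id values it reports are the ones an
 * initiator (e.g. the ib_srp driver's add_target interface) must specify when
 * connecting to this target.
 */
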
/*
 * Callback function called by the InfiniBand core when either an InfiniBand
 * device has been added or during the ib_register_client() call for each
 * registered InfiniBand device.
 */
static void srpt_add_one(struct ib_device *device)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	struct ib_srq_init_attr srq_attr;
	int i;

	sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
	if (!sdev)
		return;

	sdev->device = device;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	sdev->class_dev.class = &srpt_class;
	sdev->class_dev.dev = device->dma_device;
	snprintf(sdev->class_dev.class_id, BUS_ID_SIZE,
		 "srpt-%s", device->name);
#else
	sdev->dev.class = &srpt_class;
	sdev->dev.parent = device->dma_device;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	snprintf(sdev->dev.bus_id, BUS_ID_SIZE, "srpt-%s", device->name);
#else
	snprintf(sdev->init_name, sizeof(sdev->init_name),
		 "srpt-%s", device->name);
	sdev->dev.init_name = sdev->init_name;
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	if (class_device_register(&sdev->class_dev))
		goto free_dev;
	if (class_device_create_file(&sdev->class_dev,
				     &class_device_attr_login_info))
		goto err_dev;
#else
	if (device_register(&sdev->dev))
		goto free_dev;
	if (device_create_file(&sdev->dev, &dev_attr_login_info))
		goto err_dev;
#endif
	if (ib_query_device(device, &sdev->dev_attr))
		goto err_dev;

	sdev->pd = ib_alloc_pd(device);
	if (IS_ERR(sdev->pd))
		goto err_dev;

	sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(sdev->mr))
		goto err_pd;

	srq_attr.event_handler = srpt_srq_event;
	srq_attr.srq_context = (void *)sdev;
	srq_attr.attr.max_wr = min(SRPT_SRQ_SIZE, sdev->dev_attr.max_srq_wr);
	srq_attr.attr.max_sge = 1;
	srq_attr.attr.srq_limit = 0;

	sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
	if (IS_ERR(sdev->srq))
		goto err_mr;

	TRACE_DBG("%s: create SRQ #wr= %d max_allow=%d dev= %s",
		  __func__, srq_attr.attr.max_wr,
		  sdev->dev_attr.max_srq_wr, device->name);

	if (!srpt_service_guid)
		srpt_service_guid = be64_to_cpu(device->node_guid);

	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
	if (IS_ERR(sdev->cm_id))
		goto err_srq;

	/* Print out the target login information. */
	TRACE_DBG("Target login info: id_ext=%016llx,"
		  "ioc_guid=%016llx,pkey=ffff,service_id=%016llx",
		  (unsigned long long) srpt_service_guid,
		  (unsigned long long) srpt_service_guid,
		  (unsigned long long) srpt_service_guid);

	/*
	 * There is no consistent service_id (i.e. also id_ext of target_id)
	 * to identify this target. The GUID of the first HCA in the system is
	 * currently used as service_id; as a consequence the target_id
	 * changes if that HCA fails and is replaced by a different HCA.
	 */
	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
		goto err_cm;
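
	/*
	 * As a consequence, the service_id an initiator specifies in its login
	 * request must equal srpt_service_guid, which is also the value
	 * reported in the service_id field of the login_info sysfs attribute.
	 */
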
	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
	if (ib_register_event_handler(&sdev->event_handler))
		goto err_cm;

	if (srpt_alloc_ioctx_ring(sdev))
		goto err_event;

	INIT_LIST_HEAD(&sdev->rch_list);
	spin_lock_init(&sdev->spinlock);

	for (i = 0; i < SRPT_SRQ_SIZE; ++i)
		srpt_post_recv(sdev, sdev->ioctx_ring[i]);

	ib_set_client_data(device, &srpt_client, sdev);

	sdev->scst_tgt = scst_register(&srpt_template, NULL);
	if (!sdev->scst_tgt) {
		PRINT_ERROR("SCST registration failed for %s.",
			    sdev->device->name);
		goto err_ring;
	}

	scst_tgt_set_tgt_priv(sdev->scst_tgt, sdev);

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		sport->sdev = sdev;
		sport->port = i;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) && ! defined(BACKPORT_LINUX_WORKQUEUE_TO_2_6_19)
		/*
		 * A vanilla 2.6.19 or older kernel without backported OFED
		 * kernel headers.
		 */
		INIT_WORK(&sport->work, srpt_refresh_port_work, sport);
#else
		INIT_WORK(&sport->work, srpt_refresh_port_work);
#endif
		if (srpt_refresh_port(sport)) {
			PRINT_ERROR("MAD registration failed for %s-%d.",
				    sdev->device->name, i);
			goto err_refresh_port;
		}
	}

	atomic_inc(&srpt_device_count);
	return;

err_refresh_port:
	scst_unregister(sdev->scst_tgt);
err_ring:
	ib_set_client_data(device, &srpt_client, NULL);
	srpt_free_ioctx_ring(sdev);
err_event:
	ib_unregister_event_handler(&sdev->event_handler);
err_cm:
	ib_destroy_cm_id(sdev->cm_id);
err_srq:
	ib_destroy_srq(sdev->srq);
err_mr:
	ib_dereg_mr(sdev->mr);
err_pd:
	ib_dealloc_pd(sdev->pd);
err_dev:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	class_device_unregister(&sdev->class_dev);
#else
	device_unregister(&sdev->dev);
#endif
free_dev:
	kfree(sdev);
}

/*
 * Callback function called by the InfiniBand core when either an InfiniBand
 * device has been removed or during the ib_unregister_client() call for each
 * registered InfiniBand device.
 */
static void srpt_remove_one(struct ib_device *device)
{
	int i;
	struct srpt_device *sdev;

	sdev = ib_get_client_data(device, &srpt_client);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
	WARN_ON(!sdev);
	if (!sdev)
		return;
#else
	if (WARN_ON(!sdev))
		return;
#endif

	/*
	 * Cancel the work if it is queued. Wait until srpt_refresh_port_work()
	 * has finished if it is running.
	 */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
		cancel_work_sync(&sdev->port[i].work);
#else
		/*
		 * cancel_work_sync() was introduced in kernel 2.6.22. Older
		 * kernels do not have a facility to cancel scheduled work.
		 */
		PRINT_ERROR("%s",
			    "your kernel does not provide cancel_work_sync().");
#endif

	scst_unregister(sdev->scst_tgt);
	sdev->scst_tgt = NULL;

	ib_unregister_event_handler(&sdev->event_handler);
	ib_destroy_cm_id(sdev->cm_id);
	ib_destroy_srq(sdev->srq);
	ib_dereg_mr(sdev->mr);
	ib_dealloc_pd(sdev->pd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	class_device_unregister(&sdev->class_dev);
#else
	device_unregister(&sdev->dev);
#endif
	srpt_free_ioctx_ring(sdev);
	kfree(sdev);
}

/*
 * Create procfs entries for srpt. Currently the only procfs entry created
 * by this function is the "trace_level" entry.
 */
static int srpt_register_procfs_entry(struct scst_tgt_template *tgt)
{
	int res = 0;
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
	struct proc_dir_entry *p, *root;

	root = scst_proc_get_tgt_root(tgt);
	if (root) {
		/* Fill in the scst_proc_data::data pointer, which is used in a
		 * printk(KERN_INFO ...) statement in scst_proc_log_entry_write()
		 * in scst_proc.c. */
		srpt_log_proc_data.data = (char *)tgt->name;
		p = scst_create_proc_entry(root, SRPT_PROC_TRACE_LEVEL_NAME,
					   &srpt_log_proc_data);
		if (!p)
			res = -ENOMEM;
	} else
		res = -ENOMEM;
#endif
	return res;
}

static void srpt_unregister_procfs_entry(struct scst_tgt_template *tgt)
{
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
	struct proc_dir_entry *root;

	root = scst_proc_get_tgt_root(tgt);
	if (root)
		remove_proc_entry(SRPT_PROC_TRACE_LEVEL_NAME, root);
#endif
}

/*
 * Module initialization.
 *
 * Note: since ib_register_client() registers callback functions, and since at
 * least one of these callback functions (srpt_add_one()) calls SCST functions,
 * the SCST target template must be registered before ib_register_client() is
 * called.
 */
static int __init srpt_init_module(void)
{
	int ret;

	ret = class_register(&srpt_class);
	if (ret) {
		PRINT_ERROR("%s", "couldn't register class ib_srpt");
		goto out;
	}
	ret = scst_register_target_template(&srpt_template);
	if (ret < 0) {
		PRINT_ERROR("%s", "couldn't register with scst");
		ret = -ENODEV;
		goto out_unregister_class;
	}
	ret = srpt_register_procfs_entry(&srpt_template);
	if (ret) {
		PRINT_ERROR("%s", "couldn't register procfs entry");
		goto out_unregister_target;
	}
	ret = ib_register_client(&srpt_client);
	if (ret) {
		PRINT_ERROR("%s", "couldn't register IB client");
		goto out_unregister_target;
	}

	if (thread) {
		spin_lock_init(&srpt_thread.thread_lock);
		INIT_LIST_HEAD(&srpt_thread.thread_ioctx_list);
		srpt_thread.thread = kthread_run(srpt_ioctx_thread,
						 NULL, "srpt_thread");
		if (IS_ERR(srpt_thread.thread)) {
			srpt_thread.thread = NULL;
			thread = 0;
		}
	}

	return 0;

out_unregister_target:
	/*
	 * Note: the procfs entry is unregistered in srpt_release(), which is
	 * called by scst_unregister_target_template().
	 */
	scst_unregister_target_template(&srpt_template);
out_unregister_class:
	class_unregister(&srpt_class);
out:
	return ret;
}
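
/*
 * Module cleanup: undoes the work of srpt_init_module() in reverse order,
 * i.e. it stops the optional kernel thread, unregisters the IB client (which
 * triggers srpt_remove_one() for every registered HCA), unregisters the SCST
 * target template and finally unregisters the device class.
 */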
static void __exit srpt_cleanup_module(void)
{
	if (srpt_thread.thread)
		kthread_stop(srpt_thread.thread);
	ib_unregister_client(&srpt_client);
	scst_unregister_target_template(&srpt_template);
	class_unregister(&srpt_class);
}

module_init(srpt_init_module);
module_exit(srpt_cleanup_module);