/*
 * Copyright (c) 2006-2008 Mellanox Technology Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/err.h>
38 #include <linux/string.h>
39 #include <linux/kthread.h>
41 #include <asm/atomic.h>
45 #define DRV_NAME "ib_srpt"
46 #define PFX DRV_NAME ": "
47 #define DRV_VERSION "1.0"
48 #define DRV_RELDATE "July 10, 2008"
50 #define MELLANOX_SRPT_ID_STRING "Mellanox OFED SRP target"
52 MODULE_AUTHOR("Vu Pham");
53 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
54 "v" DRV_VERSION " (" DRV_RELDATE ")");
55 MODULE_LICENSE("Dual BSD/GPL");
struct srpt_thread {
	spinlock_t thread_lock;
	struct list_head thread_ioctx_list;
	struct task_struct *thread;
};
63 static u64 mellanox_ioc_guid;
64 static struct list_head srpt_devices;
65 static int thread = 1;
66 static struct srpt_thread srpt_thread;
67 static DECLARE_WAIT_QUEUE_HEAD(ioctx_list_waitQ);
69 module_param(thread, int, 0444);
70 MODULE_PARM_DESC(thread,
71 "Executing ioctx in thread context. Default thread = 1");
73 static void srpt_add_one(struct ib_device *device);
74 static void srpt_remove_one(struct ib_device *device);
75 static int srpt_disconnect_channel(struct srpt_rdma_ch *ch, int dreq);
static struct ib_client srpt_client = {
	.name = DRV_NAME,
	.add = srpt_add_one,
	.remove = srpt_remove_one
};
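/*
 * Asynchronous IB event handler. Logs the event and, when a port becomes
 * active or its LID/P_Key/SM changes, schedules the port work item so that
 * the port attributes and MAD registration get refreshed.
 */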
83 static void srpt_event_handler(struct ib_event_handler *handler,
84 struct ib_event *event)
86 struct srpt_device *sdev =
87 ib_get_client_data(event->device, &srpt_client);
88 struct srpt_port *sport;
90 if (!sdev || sdev->device != event->device)
93 printk(KERN_WARNING PFX "ASYNC event= %d on device= %s\n",
94 event->event, sdev->device->name);
96 switch (event->event) {
97 case IB_EVENT_PORT_ERR:
98 if (event->element.port_num <= sdev->device->phys_port_cnt) {
99 sport = &sdev->port[event->element.port_num - 1];
104 case IB_EVENT_PORT_ACTIVE:
105 case IB_EVENT_LID_CHANGE:
106 case IB_EVENT_PKEY_CHANGE:
107 case IB_EVENT_SM_CHANGE:
108 case IB_EVENT_CLIENT_REREGISTER:
109 if (event->element.port_num <= sdev->device->phys_port_cnt) {
110 sport = &sdev->port[event->element.port_num - 1];
111 if (!sport->lid && !sport->sm_lid)
112 schedule_work(&sport->work);
121 static void srpt_srq_event(struct ib_event *event, void *ctx)
123 printk(KERN_WARNING PFX "SRQ event %d\n", event->event);
126 static void srpt_qp_event(struct ib_event *event, void *ctx)
128 struct srpt_rdma_ch *ch = ctx;
130 printk(KERN_WARNING PFX
131 "QP event %d on cm_id=%p sess_name=%s state=%d\n",
132 event->event, ch->cm_id, ch->sess_name, ch->state);
134 switch (event->event) {
135 case IB_EVENT_COMM_EST:
136 ib_cm_notify(ch->cm_id, event->event);
138 case IB_EVENT_QP_LAST_WQE_REACHED:
139 if (ch->state == RDMA_CHANNEL_LIVE) {
140 printk(KERN_WARNING PFX
141 "Schedule CM_DISCONNECT_WORK\n");
142 srpt_disconnect_channel(ch, 1);
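/*
 * Pack a 4-bit "controller present" value for the given slot into the
 * controller_list of a device-management IOUnitInfo reply (two slots are
 * packed per byte).
 */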
150 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
157 tmp = c_list[id] & 0xf;
158 c_list[id] = (value << 4) | tmp;
160 tmp = c_list[id] & 0xf0;
161 c_list[id] = (value & 0xf) | tmp;
165 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
167 struct ib_class_port_info *cif;
169 cif = (struct ib_class_port_info *)mad->data;
170 memset(cif, 0, sizeof *cif);
171 cif->base_version = 1;
172 cif->class_version = 1;
173 cif->resp_time_value = 20;
175 mad->mad_hdr.status = 0;
178 static void srpt_get_iou(struct ib_dm_mad *mad)
180 struct ib_dm_iou_info *ioui;
184 ioui = (struct ib_dm_iou_info *)mad->data;
186 ioui->max_controllers = 16;
188 /* set present for slot 1 and empty for the rest */
189 srpt_set_ioc(ioui->controller_list, 1, 1);
190 for (i = 1, slot = 2; i < 16; i++, slot++)
191 srpt_set_ioc(ioui->controller_list, slot, 0);
193 mad->mad_hdr.status = 0;
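/* Build the IOControllerProfile attribute for the requested controller slot. */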
196 static void srpt_get_ioc(struct srpt_device *sdev, u32 slot,
197 struct ib_dm_mad *mad)
199 struct ib_dm_ioc_profile *iocp;
201 iocp = (struct ib_dm_ioc_profile *)mad->data;
203 if (!slot || slot > 16) {
204 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
209 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
213 memset(iocp, 0, sizeof *iocp);
214 strcpy(iocp->id_string, MELLANOX_SRPT_ID_STRING);
215 iocp->guid = cpu_to_be64(mellanox_ioc_guid);
216 iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
217 iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
218 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
219 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
220 iocp->subsys_device_id = 0x0;
221 iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
222 iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
223 iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
224 iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
225 iocp->send_queue_depth = cpu_to_be16(SRPT_SRQ_SIZE);
226 iocp->rdma_read_depth = 4;
227 iocp->send_size = cpu_to_be32(MAX_MESSAGE_SIZE);
228 iocp->rdma_size = cpu_to_be32(MAX_RDMA_SIZE);
229 iocp->num_svc_entries = 1;
230 iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
231 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
233 mad->mad_hdr.status = 0;
236 static void srpt_get_svc_entries(u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
238 struct ib_dm_svc_entries *svc_entries;
240 if (!slot || slot > 16) {
241 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
245 if (slot > 2 || lo > hi || hi > 1) {
246 mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
250 svc_entries = (struct ib_dm_svc_entries *)mad->data;
251 memset(svc_entries, 0, sizeof *svc_entries);
252 svc_entries->service_entries[0].id = cpu_to_be64(mellanox_ioc_guid);
253 sprintf(svc_entries->service_entries[0].name, "%s%016llx",
254 SRP_SERVICE_NAME_PREFIX, (unsigned long long)mellanox_ioc_guid);
256 mad->mad_hdr.status = 0;
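/* Dispatch a device-management GET request to the matching attribute handler. */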
259 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
260 struct ib_dm_mad *rsp_mad)
266 attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
268 case DM_ATTR_CLASS_PORT_INFO:
269 srpt_get_class_port_info(rsp_mad);
271 case DM_ATTR_IOU_INFO:
272 srpt_get_iou(rsp_mad);
274 case DM_ATTR_IOC_PROFILE:
275 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
276 srpt_get_ioc(sp->sdev, slot, rsp_mad);
278 case DM_ATTR_SVC_ENTRIES:
279 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
280 hi = (u8) ((slot >> 8) & 0xff);
281 lo = (u8) (slot & 0xff);
282 slot = (u16) ((slot >> 16) & 0xffff);
283 srpt_get_svc_entries(slot, hi, lo, rsp_mad);
286 rsp_mad->mad_hdr.status =
287 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
292 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
293 struct ib_mad_send_wc *mad_wc)
295 ib_destroy_ah(mad_wc->send_buf->ah);
296 ib_free_send_mad(mad_wc->send_buf);
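/*
 * Receive handler for the device-management MAD agent: build a GetResp MAD
 * carrying the requested attribute and post it back to the sender.
 */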
299 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
300 struct ib_mad_recv_wc *mad_wc)
302 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
304 struct ib_mad_send_buf *rsp;
305 struct ib_dm_mad *dm_mad;
307 if (!mad_wc || !mad_wc->recv_buf.mad)
310 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
311 mad_wc->recv_buf.grh, mad_agent->port_num);
315 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
316 mad_wc->wc->pkey_index, 0,
317 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
325 memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
326 dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
327 dm_mad->mad_hdr.status = 0;
329 switch (mad_wc->recv_buf.mad->mad_hdr.method) {
330 case IB_MGMT_METHOD_GET:
331 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
333 case IB_MGMT_METHOD_SET:
334 dm_mad->mad_hdr.status =
335 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
338 dm_mad->mad_hdr.status =
339 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
343 if (!ib_post_send_mad(rsp, NULL)) {
344 ib_free_recv_mad(mad_wc);
345 /* will destroy_ah & free_send_mad in send completion */
349 ib_free_send_mad(rsp);
354 ib_free_recv_mad(mad_wc);
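/*
 * Enable device management on one port: advertise the device-management
 * capability bit, cache the LID, SM LID and GID, and register a MAD agent
 * for the device-management class if none is registered yet.
 */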
357 static int srpt_refresh_port(struct srpt_port *sport)
359 struct ib_mad_reg_req reg_req;
360 struct ib_port_modify port_modify;
361 struct ib_port_attr port_attr;
364 memset(&port_modify, 0, sizeof port_modify);
365 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
366 port_modify.clr_port_cap_mask = 0;
368 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
372 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
376 sport->sm_lid = port_attr.sm_lid;
377 sport->lid = port_attr.lid;
379 ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
383 if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof reg_req);
385 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
386 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
387 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
388 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
390 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
394 srpt_mad_send_handler,
395 srpt_mad_recv_handler,
397 if (IS_ERR(sport->mad_agent))
405 port_modify.set_port_cap_mask = 0;
406 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
407 ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
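/*
 * Allocate a single I/O context with a MAX_MESSAGE_SIZE buffer mapped for
 * bidirectional DMA.
 */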
414 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev)
416 struct srpt_ioctx *ioctx;
418 ioctx = kmalloc(sizeof *ioctx, GFP_KERNEL);
422 ioctx->buf = kzalloc(MAX_MESSAGE_SIZE, GFP_KERNEL);
426 ioctx->dma = dma_map_single(sdev->device->dma_device, ioctx->buf,
427 MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	if (dma_mapping_error(sdev->device->dma_device, ioctx->dma))
#else
	if (dma_mapping_error(ioctx->dma))
#endif
445 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
450 dma_unmap_single(sdev->device->dma_device, ioctx->dma,
451 MAX_MESSAGE_SIZE, DMA_BIDIRECTIONAL);
456 static int srpt_alloc_ioctx_ring(struct srpt_device *sdev)
460 for (i = 0; i < SRPT_SRQ_SIZE; ++i) {
461 sdev->ioctx_ring[i] = srpt_alloc_ioctx(sdev);
463 if (!sdev->ioctx_ring[i])
466 sdev->ioctx_ring[i]->index = i;
473 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
474 sdev->ioctx_ring[i] = NULL;
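/* Post an I/O context as a receive buffer on the device-wide SRQ. */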
479 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_ioctx *ioctx)
482 struct ib_recv_wr wr, *bad_wr;
484 wr.wr_id = ioctx->index | SRPT_OP_RECV;
486 list.addr = ioctx->dma;
487 list.length = MAX_MESSAGE_SIZE;
488 list.lkey = sdev->mr->lkey;
494 return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
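/*
 * Post a send work request for an I/O context on the channel's QP; the buffer
 * is synced for the device before being handed to the HCA.
 */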
497 static int srpt_post_send(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
501 struct ib_send_wr wr, *bad_wr;
502 struct srpt_device *sdev = ch->sport->sdev;
504 dma_sync_single_for_device(sdev->device->dma_device, ioctx->dma,
505 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
507 list.addr = ioctx->dma;
509 list.lkey = sdev->mr->lkey;
512 wr.wr_id = ioctx->index;
515 wr.opcode = IB_WR_SEND;
516 wr.send_flags = IB_SEND_SIGNALED;
518 return ib_post_send(ch->qp, &wr, &bad_wr);
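/*
 * Parse the data buffer descriptors of an SRP_CMD IU: copy either the direct
 * descriptor or the indirect descriptor table into the I/O context and record
 * the total data length.
 */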
521 static int srpt_get_desc_tbl(struct srpt_ioctx *ioctx, struct srp_cmd *srp_cmd,
524 struct srp_indirect_buf *idb;
525 struct srp_direct_buf *db;
528 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
529 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
531 ioctx->rbufs = &ioctx->single_rbuf;
533 db = (void *)srp_cmd->add_data;
534 memcpy(ioctx->rbufs, db, sizeof *db);
535 ioctx->data_len = be32_to_cpu(db->len);
537 idb = (void *)srp_cmd->add_data;
539 ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
542 (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
548 if (ioctx->n_rbuf == 1)
549 ioctx->rbufs = &ioctx->single_rbuf;
552 kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
559 memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
560 ioctx->data_len = be32_to_cpu(idb->len);
566 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
568 struct ib_qp_attr *attr;
571 attr = kzalloc(sizeof *attr, GFP_KERNEL);
575 attr->qp_state = IB_QPS_INIT;
576 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
577 IB_ACCESS_REMOTE_WRITE;
578 attr->port_num = ch->sport->port;
579 attr->pkey_index = 0;
581 ret = ib_modify_qp(qp, attr,
582 IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
589 static int srpt_ch_qp_rtr_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp,
590 enum ib_qp_state qp_state)
592 struct ib_qp_attr *qp_attr;
596 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
600 qp_attr->qp_state = qp_state;
601 ret = ib_cm_init_qp_attr(ch->cm_id, qp_attr, &attr_mask);
605 if (qp_state == IB_QPS_RTR)
606 qp_attr->max_dest_rd_atomic = 4;
608 qp_attr->max_rd_atomic = 4;
610 ret = ib_modify_qp(qp, qp_attr, attr_mask);
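/*
 * Recycle an I/O context: release its RDMA descriptor state, repost it on the
 * SRQ and credit the channel's request limit delta.
 */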
617 static void srpt_reset_ioctx(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx)
621 if (ioctx->n_rdma_ius > 0 && ioctx->rdma_ius) {
622 struct rdma_iu *riu = ioctx->rdma_ius;
624 for (i = 0; i < ioctx->n_rdma_ius; ++i, ++riu)
626 kfree(ioctx->rdma_ius);
629 if (ioctx->n_rbuf > 1)
632 if (srpt_post_recv(ch->sport->sdev, ioctx))
633 printk(KERN_ERR PFX "SRQ post_recv failed - this is serious\n");
	/* We should queue it back onto a free ioctx list in that case. */
636 atomic_inc(&ch->req_lim_delta);
639 static void srpt_handle_err_comp(struct srpt_rdma_ch *ch, struct ib_wc *wc)
641 struct srpt_ioctx *ioctx;
642 struct srpt_device *sdev = ch->sport->sdev;
643 scst_data_direction dir = SCST_DATA_NONE;
645 if (wc->wr_id & SRPT_OP_RECV) {
646 ioctx = sdev->ioctx_ring[wc->wr_id & ~SRPT_OP_RECV];
647 printk(KERN_ERR PFX "This is serious - SRQ is in bad state\n");
649 ioctx = sdev->ioctx_ring[wc->wr_id];
652 struct scst_cmd *scmnd = ioctx->scmnd;
654 dir = scst_cmd_get_data_direction(scmnd);
656 if (dir == SCST_DATA_NONE)
657 scst_tgt_cmd_done(scmnd,
658 scst_estimate_context());
660 dma_unmap_sg(sdev->device->dma_device,
661 scst_cmd_get_sg(scmnd),
662 scst_cmd_get_sg_cnt(scmnd),
663 scst_to_tgt_dma_dir(dir));
665 if (scmnd->data_buf_tgt_alloc &&
666 scmnd->data_buf_alloced) {
672 if (scmnd->state == SCST_CMD_STATE_DATA_WAIT)
674 SCST_RX_STATUS_ERROR,
675 SCST_CONTEXT_THREAD);
676 else if (scmnd->state ==
677 SCST_CMD_STATE_XMIT_WAIT)
678 scst_tgt_cmd_done(scmnd,
679 scst_estimate_context());
682 srpt_reset_ioctx(ch, ioctx);
686 static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
687 struct srpt_ioctx *ioctx,
688 enum scst_exec_context context)
691 scst_data_direction dir =
692 scst_cmd_get_data_direction(ioctx->scmnd);
694 if (dir != SCST_DATA_NONE)
695 dma_unmap_sg(ch->sport->sdev->device->dma_device,
696 scst_cmd_get_sg(ioctx->scmnd),
697 scst_cmd_get_sg_cnt(ioctx->scmnd),
698 scst_to_tgt_dma_dir(dir));
700 if (ioctx->scmnd->data_buf_tgt_alloc &&
701 ioctx->scmnd->data_buf_alloced) {
702 kfree(ioctx->scmnd->sg);
703 ioctx->scmnd->sg = NULL;
704 ioctx->scmnd->sg_cnt = 0;
707 scst_tgt_cmd_done(ioctx->scmnd, context);
709 srpt_reset_ioctx(ch, ioctx);
712 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
713 struct srpt_ioctx *ioctx)
716 srpt_reset_ioctx(ch, ioctx);
720 if (scst_cmd_get_data_direction(ioctx->scmnd) == SCST_DATA_WRITE)
721 scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_SUCCESS,
722 SCST_CONTEXT_THREAD);
725 static void srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
726 struct srpt_ioctx *ioctx, u8 s_key, u8 s_code,
729 struct srp_rsp *srp_rsp;
730 struct sense_data *sense;
733 srp_rsp = ioctx->buf;
734 memset(srp_rsp, 0, sizeof *srp_rsp);
736 limit_delta = atomic_read(&ch->req_lim_delta);
737 atomic_sub(limit_delta, &ch->req_lim_delta);
739 srp_rsp->opcode = SRP_RSP;
740 srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
743 if (s_key != NO_SENSE) {
744 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
745 srp_rsp->status = SAM_STAT_CHECK_CONDITION;
746 srp_rsp->sense_data_len =
747 cpu_to_be32(sizeof *sense + (sizeof *sense % 4));
749 sense = (struct sense_data *)(srp_rsp + 1);
750 sense->err_code = 0x70;
752 sense->asc_ascq = s_code;
756 static void srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
757 struct srpt_ioctx *ioctx, u8 rsp_code,
760 struct srp_rsp *srp_rsp;
763 dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
764 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
766 srp_rsp = ioctx->buf;
767 memset(srp_rsp, 0, sizeof *srp_rsp);
769 limit_delta = atomic_read(&ch->req_lim_delta);
770 atomic_sub(limit_delta, &ch->req_lim_delta);
772 srp_rsp->opcode = SRP_RSP;
773 srp_rsp->req_lim_delta = cpu_to_be32(limit_delta);
776 if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
777 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
778 srp_rsp->resp_data_len = cpu_to_be32(4);
779 srp_rsp->data[3] = rsp_code;
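/*
 * Process a newly received SRP information unit. While the channel is still
 * connecting the IU is parked on cmd_wait_list; command IUs are handed to
 * SCST via scst_rx_cmd()/scst_cmd_init_done(), task management IUs are mapped
 * onto the scst_rx_mgmt_fn_*() calls, and anything else is answered with a
 * CHECK CONDITION response.
 */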
783 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
784 struct srpt_ioctx *ioctx)
786 struct scst_cmd *scmnd = NULL;
787 struct srp_cmd *srp_cmd = NULL;
788 struct srp_tsk_mgmt *srp_tsk = NULL;
789 struct srpt_mgmt_ioctx *mgmt_ioctx;
790 scst_data_direction dir = SCST_DATA_NONE;
791 int indirect_desc = 0;
795 if (ch->state != RDMA_CHANNEL_LIVE) {
796 if (ch->state == RDMA_CHANNEL_CONNECTING) {
797 spin_lock_irq(&ch->spinlock);
798 list_add_tail(&ioctx->wait_list, &ch->cmd_wait_list);
799 spin_unlock_irq(&ch->spinlock);
801 srpt_reset_ioctx(ch, ioctx);
806 dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
807 MAX_MESSAGE_SIZE, DMA_FROM_DEVICE);
813 ioctx->n_rdma_ius = 0;
814 ioctx->rdma_ius = NULL;
817 op = *(u8 *) ioctx->buf;
820 srp_cmd = ioctx->buf;
822 if (srp_cmd->buf_fmt) {
823 ret = srpt_get_desc_tbl(ioctx, srp_cmd, &indirect_desc);
825 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
826 NO_ADD_SENSE, srp_cmd->tag);
827 ((struct srp_rsp *)ioctx->buf)->status =
828 SAM_STAT_TASK_SET_FULL;
833 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
834 NO_ADD_SENSE, srp_cmd->tag);
835 ((struct srp_rsp *)ioctx->buf)->status =
836 SAM_STAT_TASK_SET_FULL;
840 if (srp_cmd->buf_fmt & 0xf)
841 dir = SCST_DATA_READ;
842 else if (srp_cmd->buf_fmt >> 4)
843 dir = SCST_DATA_WRITE;
845 dir = SCST_DATA_NONE;
847 dir = SCST_DATA_NONE;
849 scmnd = scst_rx_cmd(ch->scst_sess, (u8 *) &srp_cmd->lun,
850 sizeof srp_cmd->lun, srp_cmd->cdb, 16,
851 thread ? SCST_NON_ATOMIC : SCST_ATOMIC);
853 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE,
854 NO_ADD_SENSE, srp_cmd->tag);
855 ((struct srp_rsp *)ioctx->buf)->status =
856 SAM_STAT_TASK_SET_FULL;
860 ioctx->scmnd = scmnd;
862 switch (srp_cmd->task_attr) {
863 case SRP_CMD_HEAD_OF_Q:
864 scmnd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
866 case SRP_CMD_ORDERED_Q:
867 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
869 case SRP_CMD_SIMPLE_Q:
870 scmnd->queue_type = SCST_CMD_QUEUE_SIMPLE;
873 scmnd->queue_type = SCST_CMD_QUEUE_ACA;
876 scmnd->queue_type = SCST_CMD_QUEUE_ORDERED;
880 scst_cmd_set_tag(scmnd, srp_cmd->tag);
881 scst_cmd_set_tgt_priv(scmnd, ioctx);
882 scst_cmd_set_expected(scmnd, dir, ioctx->data_len);
884 spin_lock_irq(&ch->spinlock);
885 list_add_tail(&ioctx->scmnd_list, &ch->active_scmnd_list);
886 ch->active_scmnd_cnt++;
887 scst_cmd_init_done(scmnd, SCST_CONTEXT_THREAD);
888 spin_unlock_irq(&ch->spinlock);
893 srp_tsk = ioctx->buf;
895 printk(KERN_WARNING PFX
896 "recv_tsk_mgmt= %d for task_tag= %lld"
897 " using tag= %lld cm_id= %p sess= %p\n",
898 srp_tsk->tsk_mgmt_func,
899 (unsigned long long) srp_tsk->task_tag,
900 (unsigned long long) srp_tsk->tag,
901 ch->cm_id, ch->scst_sess);
903 mgmt_ioctx = kmalloc(sizeof *mgmt_ioctx, GFP_ATOMIC);
905 srpt_build_tskmgmt_rsp(ch, ioctx, SRP_TSK_MGMT_FAILED,
910 mgmt_ioctx->ioctx = ioctx;
912 mgmt_ioctx->tag = srp_tsk->tag;
914 switch (srp_tsk->tsk_mgmt_func) {
915 case SRP_TSK_ABORT_TASK:
916 ret = scst_rx_mgmt_fn_tag(ch->scst_sess,
920 SCST_NON_ATOMIC : SCST_ATOMIC,
923 case SRP_TSK_ABORT_TASK_SET:
924 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
926 (u8 *) &srp_tsk->lun,
929 SCST_NON_ATOMIC : SCST_ATOMIC,
932 case SRP_TSK_CLEAR_TASK_SET:
933 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
935 (u8 *) &srp_tsk->lun,
938 SCST_NON_ATOMIC : SCST_ATOMIC,
942 case SRP_TSK_LUN_RESET:
943 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
945 (u8 *) &srp_tsk->lun,
948 SCST_NON_ATOMIC : SCST_ATOMIC,
952 case SRP_TSK_CLEAR_ACA:
953 ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
955 (u8 *) &srp_tsk->lun,
958 SCST_NON_ATOMIC : SCST_ATOMIC,
962 srpt_build_tskmgmt_rsp(ch, ioctx,
963 SRP_TSK_MGMT_FUNC_NOT_SUPP,
972 srpt_build_cmd_rsp(ch, ioctx, ILLEGAL_REQUEST, INVALID_CDB,
973 ((struct srp_cmd *)ioctx->buf)->tag);
978 dma_sync_single_for_device(ch->sport->sdev->device->dma_device,
979 ioctx->dma, MAX_MESSAGE_SIZE,
985 if (ch->state != RDMA_CHANNEL_LIVE ||
986 srpt_post_send(ch, ioctx,
987 sizeof(struct srp_rsp) +
988 be32_to_cpu(((struct srp_rsp *)ioctx->buf)->
990 srpt_reset_ioctx(ch, ioctx);
993 static inline int srpt_test_ioctx_list(void)
995 int res = (!list_empty(&srpt_thread.thread_ioctx_list) ||
996 unlikely(kthread_should_stop()));
1000 static inline void srpt_schedule_thread(struct srpt_ioctx *ioctx)
1002 unsigned long flags;
1004 spin_lock_irqsave(&srpt_thread.thread_lock, flags);
1005 list_add_tail(&ioctx->comp_list, &srpt_thread.thread_ioctx_list);
1006 spin_unlock_irqrestore(&srpt_thread.thread_lock, flags);
1007 wake_up(&ioctx_list_waitQ);
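/*
 * Completion queue callback: drain the CQ and either handle each work
 * completion inline or, when the "thread" module parameter is set, defer it
 * to the srpt_thread worker via srpt_schedule_thread().
 */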
1010 static void srpt_completion(struct ib_cq *cq, void *ctx)
1012 struct srpt_rdma_ch *ch = ctx;
1013 struct srpt_device *sdev = ch->sport->sdev;
1015 struct srpt_ioctx *ioctx;
1017 ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1018 while (ib_poll_cq(ch->cq, 1, &wc) > 0) {
1020 printk(KERN_ERR PFX "failed %s status= %d\n",
1021 wc.wr_id & SRPT_OP_RECV ? "receive" : "send",
1023 srpt_handle_err_comp(ch, &wc);
1027 if (wc.wr_id & SRPT_OP_RECV) {
1028 ioctx = sdev->ioctx_ring[wc.wr_id & ~SRPT_OP_RECV];
1031 ioctx->op = IB_WC_RECV;
1032 srpt_schedule_thread(ioctx);
1034 srpt_handle_new_iu(ch, ioctx);
1037 ioctx = sdev->ioctx_ring[wc.wr_id];
1041 ioctx->op = wc.opcode;
1042 srpt_schedule_thread(ioctx);
1044 switch (wc.opcode) {
1046 srpt_handle_send_comp(ch, ioctx,
1047 scst_estimate_context());
1049 case IB_WC_RDMA_WRITE:
1050 case IB_WC_RDMA_READ:
1051 srpt_handle_rdma_comp(ch, ioctx);
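/*
 * Create the completion queue and RC queue pair for a new RDMA channel and
 * bring the QP to the INIT state.
 */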
1060 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1062 struct ib_qp_init_attr *qp_init;
1063 struct srpt_device *sdev = ch->sport->sdev;
1067 qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
1071 cqe = SRPT_RQ_SIZE + SRPT_SQ_SIZE - 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe);
#else
	ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, cqe, 0);
#endif
1077 if (IS_ERR(ch->cq)) {
1078 ret = PTR_ERR(ch->cq);
1079 printk(KERN_ERR PFX "failed to create_cq cqe= %d ret= %d\n",
1084 ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
1086 qp_init->qp_context = (void *)ch;
1087 qp_init->event_handler = srpt_qp_event;
1088 qp_init->send_cq = ch->cq;
1089 qp_init->recv_cq = ch->cq;
1090 qp_init->srq = sdev->srq;
1091 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1092 qp_init->qp_type = IB_QPT_RC;
1093 qp_init->cap.max_send_wr = SRPT_SQ_SIZE;
1094 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
1096 ch->qp = ib_create_qp(sdev->pd, qp_init);
1097 if (IS_ERR(ch->qp)) {
1098 ret = PTR_ERR(ch->qp);
1099 ib_destroy_cq(ch->cq);
1100 printk(KERN_ERR PFX "failed to create_qp ret= %d\n", ret);
1104 printk(KERN_DEBUG PFX "%s: max_cqe= %d max_sge= %d cm_id= %p\n",
1105 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1108 ret = srpt_init_ch_qp(ch, ch->qp);
1110 ib_destroy_qp(ch->qp);
1111 ib_destroy_cq(ch->cq);
1115 atomic_set(&ch->req_lim_delta, SRPT_RQ_SIZE);
1121 static struct srpt_rdma_ch *srpt_find_channel(struct ib_cm_id *cm_id)
1123 struct srpt_device *sdev = cm_id->context;
1124 struct srpt_rdma_ch *ch, *tmp_ch;
1126 spin_lock_irq(&sdev->spinlock);
1127 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
1128 if (ch->cm_id == cm_id) {
1129 spin_unlock_irq(&sdev->spinlock);
1134 spin_unlock_irq(&sdev->spinlock);
1139 static int srpt_release_channel(struct srpt_rdma_ch *ch, int destroy_cmid)
1141 spin_lock_irq(&ch->sport->sdev->spinlock);
1142 list_del(&ch->list);
1143 spin_unlock_irq(&ch->sport->sdev->spinlock);
1145 if (ch->cm_id && destroy_cmid) {
1146 printk(KERN_WARNING PFX
1147 "%s: destroy cm_id= %p\n", __func__, ch->cm_id);
1148 ib_destroy_cm_id(ch->cm_id);
1152 ib_destroy_qp(ch->qp);
1153 ib_destroy_cq(ch->cq);
1155 if (ch->scst_sess) {
1156 struct srpt_ioctx *ioctx, *ioctx_tmp;
1158 printk(KERN_WARNING PFX
1159 "%s: release sess= %p sess_name= %s active_cmd= %d\n",
1160 __func__, ch->scst_sess, ch->sess_name,
1161 ch->active_scmnd_cnt);
1163 list_for_each_entry_safe(ioctx, ioctx_tmp,
1164 &ch->active_scmnd_list, scmnd_list) {
1165 list_del(&ioctx->scmnd_list);
1166 ch->active_scmnd_cnt--;
1169 scst_unregister_session(ch->scst_sess, 0, NULL);
1170 ch->scst_sess = NULL;
1175 return destroy_cmid ? 0 : 1;
1178 static void srpt_register_channel_done(struct scst_session *scst_sess,
1179 void *data, int status)
1181 struct srpt_rdma_ch *ch = data;
1186 if (ch->scst_sess) {
1187 scst_unregister_session(ch->scst_sess, 0, NULL);
1188 ch->scst_sess = NULL;
1191 "%s: Failed to establish sess= %p status= %d\n",
1192 __func__, scst_sess, status);
1195 complete(&ch->scst_sess_done);
1198 static int srpt_disconnect_channel(struct srpt_rdma_ch *ch, int dreq)
1200 spin_lock_irq(&ch->spinlock);
1201 ch->state = RDMA_CHANNEL_DISCONNECTING;
1202 spin_unlock_irq(&ch->spinlock);
1205 ib_send_cm_dreq(ch->cm_id, NULL, 0);
1207 ib_send_cm_drep(ch->cm_id, NULL, 0);
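/*
 * Handle an IB CM REQ (SRP login): validate the request, optionally tear down
 * an existing channel from the same initiator, create the new channel and its
 * QP, register an SCST session and reply with a CM REP carrying the
 * SRP_LOGIN_RSP (or a REJ on failure).
 */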
1212 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
1213 struct ib_cm_req_event_param *param,
1216 struct srpt_device *sdev = cm_id->context;
1217 struct srp_login_req *req;
1218 struct srp_login_rsp *rsp;
1219 struct srp_login_rej *rej;
1220 struct ib_cm_rep_param *rep_param;
1221 struct srpt_rdma_ch *ch, *tmp_ch;
1225 if (!sdev || !private_data)
1228 rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
1229 rej = kzalloc(sizeof *rej, GFP_KERNEL);
1230 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
1232 if (!rsp || !rej || !rep_param) {
1237 req = (struct srp_login_req *)private_data;
1239 it_iu_len = be32_to_cpu(req->req_it_iu_len);
1241 printk(KERN_DEBUG PFX
1242 "Host login i_port_id=0x%llx:0x%llx t_port_id=0x%llx:0x%llx"
1244 (unsigned long long)
1245 be64_to_cpu(*(u64 *)&req->initiator_port_id[0]),
1246 (unsigned long long)
1247 be64_to_cpu(*(u64 *)&req->initiator_port_id[8]),
1248 (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[0]),
1249 (unsigned long long)be64_to_cpu(*(u64 *)&req->target_port_id[8]),
1252 if (it_iu_len > MAX_MESSAGE_SIZE || it_iu_len < 64) {
1254 cpu_to_be32(SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
1256 printk(KERN_WARNING PFX
1257 "Reject invalid it_iu_len=%d\n", it_iu_len);
1261 if ((req->req_flags & 0x3) == SRP_MULTICHAN_SINGLE) {
1262 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
1264 spin_lock_irq(&sdev->spinlock);
1266 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
1267 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
1268 && !memcmp(ch->t_port_id, req->target_port_id, 16)
1269 && param->port == ch->sport->port
1270 && param->listen_id == ch->sport->sdev->cm_id
1272 /* found an existing channel */
1273 printk(KERN_WARNING PFX
1274 "Found existing channel name= %s"
1275 " cm_id= %p state= %d\n",
1276 ch->sess_name, ch->cm_id, ch->state);
1278 spin_unlock_irq(&sdev->spinlock);
1281 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
1283 if (ch->state == RDMA_CHANNEL_LIVE)
1284 srpt_disconnect_channel(ch, 1);
1285 else if (ch->state == RDMA_CHANNEL_CONNECTING) {
1286 ib_send_cm_rej(ch->cm_id,
1287 IB_CM_REJ_NO_RESOURCES,
1289 srpt_release_channel(ch, 1);
1292 spin_lock_irq(&sdev->spinlock);
1296 spin_unlock_irq(&sdev->spinlock);
1299 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
1301 if (((u64) (*(u64 *) req->target_port_id) !=
1302 cpu_to_be64(mellanox_ioc_guid)) ||
1303 ((u64) (*(u64 *) (req->target_port_id + 8)) !=
1304 cpu_to_be64(mellanox_ioc_guid))) {
1306 cpu_to_be32(SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
1308 printk(KERN_WARNING PFX "Reject invalid target_port_id\n");
1312 ch = kzalloc(sizeof *ch, GFP_KERNEL);
1314 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1315 printk(KERN_WARNING PFX "Reject failed allocate rdma_ch\n");
1320 spin_lock_init(&ch->spinlock);
1321 memcpy(ch->i_port_id, req->initiator_port_id, 16);
1322 memcpy(ch->t_port_id, req->target_port_id, 16);
1323 ch->sport = &sdev->port[param->port - 1];
1325 ch->state = RDMA_CHANNEL_CONNECTING;
1326 INIT_LIST_HEAD(&ch->cmd_wait_list);
1327 INIT_LIST_HEAD(&ch->active_scmnd_list);
1329 ret = srpt_create_ch_ib(ch);
1331 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1332 printk(KERN_WARNING PFX "Reject failed to create rdma_ch\n");
1336 ret = srpt_ch_qp_rtr_rts(ch, ch->qp, IB_QPS_RTR);
1338 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1339 printk(KERN_WARNING PFX
1340 "Reject failed qp to rtr/rts ret=%d\n", ret);
1344 init_completion(&ch->scst_sess_done);
1345 sprintf(ch->sess_name, "0x%016llx%016llx",
1346 (unsigned long long)be64_to_cpu(*(u64 *)ch->i_port_id),
1347 (unsigned long long)be64_to_cpu(*(u64 *)(ch->i_port_id + 8)));
1349 scst_register_session(sdev->scst_tgt, 1, ch->sess_name,
1350 ch, srpt_register_channel_done);
1352 wait_for_completion(&ch->scst_sess_done);
1354 if (!ch->scst_sess) {
1355 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
1356 printk(KERN_WARNING PFX "Reject failed to create scst sess");
1360 spin_lock_irq(&sdev->spinlock);
1361 list_add_tail(&ch->list, &sdev->rch_list);
1362 spin_unlock_irq(&sdev->spinlock);
1364 printk(KERN_DEBUG PFX "Establish connection sess=%p name=%s cm_id=%p\n",
1365 ch->scst_sess, ch->sess_name, ch->cm_id);
1367 scst_sess_set_tgt_priv(ch->scst_sess, ch);
1369 /* create srp_login_response */
1370 rsp->opcode = SRP_LOGIN_RSP;
1371 rsp->tag = req->tag;
1372 rsp->max_it_iu_len = req->req_it_iu_len;
1373 rsp->max_ti_iu_len = req->req_it_iu_len;
1375 cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1376 rsp->req_lim_delta = cpu_to_be32(SRPT_RQ_SIZE);
1377 atomic_set(&ch->req_lim_delta, 0);
1379 /* create cm reply */
1380 rep_param->qp_num = ch->qp->qp_num;
1381 rep_param->private_data = (void *)rsp;
1382 rep_param->private_data_len = sizeof *rsp;
1383 rep_param->rnr_retry_count = 7;
1384 rep_param->flow_control = 1;
1385 rep_param->failover_accepted = 0;
1387 rep_param->responder_resources = 4;
1388 rep_param->initiator_depth = 4;
1390 ret = ib_send_cm_rep(cm_id, rep_param);
1392 srpt_release_channel(ch, 0);
1397 ib_destroy_qp(ch->qp);
1398 ib_destroy_cq(ch->cq);
1404 rej->opcode = SRP_LOGIN_REJ;
1405 rej->tag = req->tag;
1407 cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);
1409 ret = ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1410 (void *)rej, sizeof *rej);
1420 static int srpt_find_and_release_channel(struct ib_cm_id *cm_id)
1422 struct srpt_rdma_ch *ch;
1424 ch = srpt_find_channel(cm_id);
1428 return srpt_release_channel(ch, 0);
1431 static int srpt_cm_rej_recv(struct ib_cm_id *cm_id)
1433 printk(KERN_DEBUG PFX "%s: cm_id=%p\n", __func__, cm_id);
1434 return srpt_find_and_release_channel(cm_id);
1437 static int srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
1439 struct srpt_rdma_ch *ch;
1442 ch = srpt_find_channel(cm_id);
1446 if (ch->state == RDMA_CHANNEL_CONNECTING) {
1447 struct srpt_ioctx *ioctx, *ioctx_tmp;
1449 spin_lock_irq(&ch->spinlock);
1450 ch->state = RDMA_CHANNEL_LIVE;
1451 spin_unlock_irq(&ch->spinlock);
1452 ret = srpt_ch_qp_rtr_rts(ch, ch->qp, IB_QPS_RTS);
1454 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
1456 list_del(&ioctx->wait_list);
1457 srpt_handle_new_iu(ch, ioctx);
1459 } else if (ch->state == RDMA_CHANNEL_DISCONNECTING)
1465 printk(KERN_ERR PFX "cm_id=%p sess_name=%s state=%d\n",
1466 cm_id, ch->sess_name, ch->state);
1467 srpt_disconnect_channel(ch, 1);
1473 static int srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
1475 printk(KERN_DEBUG PFX "%s: cm_id=%p\n", __func__, cm_id);
1476 return srpt_find_and_release_channel(cm_id);
1479 static int srpt_cm_rep_error(struct ib_cm_id *cm_id)
1481 printk(KERN_DEBUG PFX "%s: cm_id=%p\n", __func__, cm_id);
1482 return srpt_find_and_release_channel(cm_id);
1485 static int srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
1487 struct srpt_rdma_ch *ch;
1490 ch = srpt_find_channel(cm_id);
1495 printk(KERN_DEBUG PFX "%s: cm_id= %p ch->state= %d\n",
1496 __func__, cm_id, ch->state);
1498 switch (ch->state) {
1499 case RDMA_CHANNEL_LIVE:
1500 case RDMA_CHANNEL_CONNECTING:
1501 ret = srpt_disconnect_channel(ch, 0);
1503 case RDMA_CHANNEL_DISCONNECTING:
1511 static int srpt_cm_drep_recv(struct ib_cm_id *cm_id)
1513 printk(KERN_DEBUG PFX "%s: cm_id=%p\n", __func__, cm_id);
1514 return srpt_find_and_release_channel(cm_id);
1517 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1521 switch (event->event) {
1522 case IB_CM_REQ_RECEIVED:
1523 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
1524 event->private_data);
1526 case IB_CM_REJ_RECEIVED:
1527 ret = srpt_cm_rej_recv(cm_id);
1529 case IB_CM_RTU_RECEIVED:
1530 case IB_CM_USER_ESTABLISHED:
1531 ret = srpt_cm_rtu_recv(cm_id);
1533 case IB_CM_DREQ_RECEIVED:
1534 ret = srpt_cm_dreq_recv(cm_id);
1536 case IB_CM_DREP_RECEIVED:
1537 ret = srpt_cm_drep_recv(cm_id);
1539 case IB_CM_TIMEWAIT_EXIT:
1540 ret = srpt_cm_timewait_exit(cm_id);
1542 case IB_CM_REP_ERROR:
1543 ret = srpt_cm_rep_error(cm_id);
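/*
 * Map the command's SCST scatter-gather list onto ib_sge entries, grouped
 * into rdma_iu units: at least one rdma_iu per remote buffer descriptor, each
 * carrying at most SRPT_DEF_SG_PER_WQE sge entries per RDMA work request.
 */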
1552 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1553 struct srpt_ioctx *ioctx,
1554 struct scst_cmd *scmnd)
1556 struct scatterlist *scat;
1557 scst_data_direction dir;
1558 struct rdma_iu *riu;
1559 struct srp_direct_buf *db;
1560 dma_addr_t dma_addr;
1569 scat = scst_cmd_get_sg(scmnd);
1570 dir = scst_cmd_get_data_direction(scmnd);
1571 count = dma_map_sg(ch->sport->sdev->device->dma_device, scat,
1572 scst_cmd_get_sg_cnt(scmnd),
1573 scst_to_tgt_dma_dir(dir));
1574 if (unlikely(!count))
1577 if (ioctx->rdma_ius && ioctx->n_rdma_ius)
1578 nrdma = ioctx->n_rdma_ius;
1580 nrdma = count / SRPT_DEF_SG_PER_WQE + ioctx->n_rbuf;
1582 ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu,
1583 scst_cmd_atomic(scmnd)
1584 ? GFP_ATOMIC : GFP_KERNEL);
1585 if (!ioctx->rdma_ius) {
1586 dma_unmap_sg(ch->sport->sdev->device->dma_device,
1587 scat, scst_cmd_get_sg_cnt(scmnd),
1588 scst_to_tgt_dma_dir(dir));
1592 ioctx->n_rdma_ius = nrdma;
1596 tsize = (dir == SCST_DATA_READ) ?
1597 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
1598 dma_len = sg_dma_len(&scat[0]);
1599 riu = ioctx->rdma_ius;
	/*
	 * For each remote descriptor, calculate the number of ib_sge entries
	 * needed. If a descriptor fits within SRPT_DEF_SG_PER_WQE sge entries
	 * it consumes a single rdma_iu (one RDMA work request); otherwise
	 * extra rdma_iu entries are allocated to carry the remaining ib_sge
	 * entries in additional RDMA work requests.
	 */
1610 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1611 rsize = be32_to_cpu(db->len);
1612 raddr = be64_to_cpu(db->va);
1614 riu->rkey = be32_to_cpu(db->key);
		/* Calculate how many sge entries this remote buffer requires. */
1618 while (rsize > 0 && tsize > 0) {
1620 if (rsize >= dma_len) {
1628 dma_len = sg_dma_len(&scat[j]);
1638 if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
1640 kmalloc(riu->sge_cnt * sizeof *riu->sge,
1641 scst_cmd_atomic(scmnd)
1642 ? GFP_ATOMIC : GFP_KERNEL);
1650 riu->rkey = be32_to_cpu(db->key);
1654 riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
1655 scst_cmd_atomic(scmnd)
1656 ? GFP_ATOMIC : GFP_KERNEL);
1665 scat = scst_cmd_get_sg(scmnd);
1666 tsize = (dir == SCST_DATA_READ) ?
1667 scst_cmd_get_resp_data_len(scmnd) : scst_cmd_get_bufflen(scmnd);
1668 riu = ioctx->rdma_ius;
1669 dma_len = sg_dma_len(&scat[0]);
1670 dma_addr = sg_dma_address(&scat[0]);
	/* This second pass actually maps the sg addresses onto the rdma_iu sge entries. */
1674 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1675 rsize = be32_to_cpu(db->len);
1679 while (rsize > 0 && tsize > 0) {
1680 sge->addr = dma_addr;
1681 sge->lkey = ch->sport->sdev->mr->lkey;
1683 if (rsize >= dma_len) {
1685 (tsize < dma_len) ? tsize : dma_len;
1692 dma_len = sg_dma_len(&scat[j]);
1694 sg_dma_address(&scat[j]);
1698 sge->length = (tsize < rsize) ? tsize : rsize;
1706 if (k == riu->sge_cnt && rsize > 0) {
1710 } else if (rsize > 0)
1718 while (ioctx->n_rdma)
1719 kfree(ioctx->rdma_ius[ioctx->n_rdma--].sge);
1721 kfree(ioctx->rdma_ius);
1723 dma_unmap_sg(ch->sport->sdev->device->dma_device,
1724 scat, scst_cmd_get_sg_cnt(scmnd),
1725 scst_to_tgt_dma_dir(dir));
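/*
 * Post the RDMA work requests described by the I/O context's rdma_iu list;
 * an RDMA write is used for read data and an RDMA read for write data.
 */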
1730 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
1731 scst_data_direction dir)
1733 struct ib_send_wr wr;
1734 struct ib_send_wr *bad_wr;
1735 struct rdma_iu *riu;
1739 riu = ioctx->rdma_ius;
1740 memset(&wr, 0, sizeof wr);
1742 for (i = 0; i < ioctx->n_rdma; ++i, ++riu) {
1743 wr.opcode = (dir == SCST_DATA_READ) ?
1744 IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
1746 wr.wr_id = ioctx->index;
1747 wr.wr.rdma.remote_addr = riu->raddr;
1748 wr.wr.rdma.rkey = riu->rkey;
1749 wr.num_sge = riu->sge_cnt;
1750 wr.sg_list = riu->sge;
		/*
		 * Request a completion event only for the last RDMA work
		 * request, and only when pulling write data from the
		 * initiator (i.e. for RDMA reads).
		 */
1753 if (i == (ioctx->n_rdma - 1) && dir == SCST_DATA_WRITE)
1754 wr.send_flags = IB_SEND_SIGNALED;
1756 ret = ib_post_send(ch->qp, &wr, &bad_wr);
1764 static int srpt_xfer_data(struct srpt_rdma_ch *ch, struct srpt_ioctx *ioctx,
1765 struct scst_cmd *scmnd)
1769 ret = srpt_map_sg_to_ib_sge(ch, ioctx, scmnd);
1771 printk(KERN_ERR PFX "%s[%d] ret=%d\n", __func__, __LINE__, ret);
1772 ret = SCST_TGT_RES_QUEUE_FULL;
1776 ret = srpt_perform_rdmas(ch, ioctx, scst_cmd_get_data_direction(scmnd));
1778 printk(KERN_ERR PFX "%s[%d] ret=%d\n", __func__, __LINE__, ret);
1779 if (ret == -EAGAIN || ret == -ENOMEM)
1780 ret = SCST_TGT_RES_QUEUE_FULL;
1782 ret = SCST_TGT_RES_FATAL_ERROR;
1786 ret = SCST_TGT_RES_SUCCESS;
1792 static int srpt_rdy_to_xfer(struct scst_cmd *scmnd)
1794 struct srpt_rdma_ch *ch;
1795 struct srpt_ioctx *ioctx;
1797 ioctx = scst_cmd_get_tgt_priv(scmnd);
1800 ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
1803 if (ch->state == RDMA_CHANNEL_DISCONNECTING)
1804 return SCST_TGT_RES_FATAL_ERROR;
1805 else if (ch->state == RDMA_CHANNEL_CONNECTING)
1806 return SCST_TGT_RES_QUEUE_FULL;
1808 return srpt_xfer_data(ch, ioctx, scmnd);
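/*
 * SCST xmit_response() callback: build the SRP_RSP (including any sense
 * data), transfer read data to the initiator over RDMA when required, and
 * post the response on the channel.
 */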
1811 static int srpt_xmit_response(struct scst_cmd *scmnd)
1813 struct srpt_rdma_ch *ch;
1814 struct srpt_ioctx *ioctx;
1815 struct srp_rsp *srp_rsp;
1817 int ret = SCST_TGT_RES_SUCCESS;
1821 ioctx = scst_cmd_get_tgt_priv(scmnd);
1824 ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
1827 tag = scst_cmd_get_tag(scmnd);
1829 if (ch->state != RDMA_CHANNEL_LIVE) {
1831 "%s: tag= %lld channel in bad state %d\n",
1832 __func__, (unsigned long long)tag, ch->state);
1834 if (ch->state == RDMA_CHANNEL_DISCONNECTING)
1835 ret = SCST_TGT_RES_FATAL_ERROR;
1836 else if (ch->state == RDMA_CHANNEL_CONNECTING)
1837 ret = SCST_TGT_RES_QUEUE_FULL;
1839 if (unlikely(scst_cmd_aborted(scmnd)))
1845 dma_sync_single_for_cpu(ch->sport->sdev->device->dma_device, ioctx->dma,
1846 MAX_MESSAGE_SIZE, DMA_TO_DEVICE);
1848 srp_rsp = ioctx->buf;
1850 if (unlikely(scst_cmd_aborted(scmnd))) {
1852 "%s: tag= %lld already get aborted\n",
1853 __func__, (unsigned long long)tag);
1857 dir = scst_cmd_get_data_direction(scmnd);
1858 status = scst_cmd_get_status(scmnd) & 0xff;
1860 srpt_build_cmd_rsp(ch, ioctx, NO_SENSE, NO_ADD_SENSE, tag);
1862 if (SCST_SENSE_VALID(scst_cmd_get_sense_buffer(scmnd))) {
1863 srp_rsp->sense_data_len = scst_cmd_get_sense_buffer_len(scmnd);
1864 if (srp_rsp->sense_data_len >
1865 (MAX_MESSAGE_SIZE - sizeof *srp_rsp))
1866 srp_rsp->sense_data_len =
1867 MAX_MESSAGE_SIZE - sizeof *srp_rsp;
1869 memcpy((u8 *) (srp_rsp + 1), scst_cmd_get_sense_buffer(scmnd),
1870 srp_rsp->sense_data_len);
1872 srp_rsp->sense_data_len = cpu_to_be32(srp_rsp->sense_data_len);
1873 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1876 status = SAM_STAT_CHECK_CONDITION;
1879 srp_rsp->status = status;
1881 /* transfer read data if any */
1882 if (dir == SCST_DATA_READ && scst_cmd_get_resp_data_len(scmnd)) {
1883 ret = srpt_xfer_data(ch, ioctx, scmnd);
1884 if (ret != SCST_TGT_RES_SUCCESS) {
1886 "%s: tag= %lld xfer_data failed\n",
1887 __func__, (unsigned long long)tag);
1892 if (srpt_post_send(ch, ioctx,
1894 be32_to_cpu(srp_rsp->sense_data_len))) {
1895 printk(KERN_ERR PFX "%s: ch->state= %d tag= %lld\n",
1896 __func__, ch->state,
1897 (unsigned long long)tag);
1898 ret = SCST_TGT_RES_FATAL_ERROR;
1905 ret = SCST_TGT_RES_SUCCESS;
1906 scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_ABORTED);
1907 scst_tgt_cmd_done(scmnd, SCST_CONTEXT_SAME);
1911 static void srpt_tsk_mgmt_done(struct scst_mgmt_cmd *mcmnd)
1913 struct srpt_rdma_ch *ch;
1914 struct srpt_mgmt_ioctx *mgmt_ioctx;
1915 struct srpt_ioctx *ioctx;
1917 mgmt_ioctx = scst_mgmt_cmd_get_tgt_priv(mcmnd);
1918 BUG_ON(!mgmt_ioctx);
1920 ch = mgmt_ioctx->ch;
1923 ioctx = mgmt_ioctx->ioctx;
1926 printk(KERN_WARNING PFX
1927 "%s: tsk_mgmt_done for tag= %lld status=%d\n",
1928 __func__, (unsigned long long)mgmt_ioctx->tag,
1929 scst_mgmt_cmd_get_status(mcmnd));
1931 srpt_build_tskmgmt_rsp(ch, ioctx,
1932 (scst_mgmt_cmd_get_status(mcmnd) ==
1933 SCST_MGMT_STATUS_SUCCESS) ?
1934 SRP_TSK_MGMT_SUCCESS : SRP_TSK_MGMT_FAILED,
1936 srpt_post_send(ch, ioctx, sizeof(struct srp_rsp) + 4);
1938 scst_mgmt_cmd_set_tgt_priv(mcmnd, NULL);
1943 static void srpt_on_free_cmd(struct scst_cmd *scmnd)
1945 struct srpt_rdma_ch *ch;
1946 struct srpt_ioctx *ioctx;
1948 ioctx = scst_cmd_get_tgt_priv(scmnd);
1951 ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
1954 spin_lock_irq(&ch->spinlock);
1955 list_del(&ioctx->scmnd_list);
1956 ch->active_scmnd_cnt--;
1957 spin_unlock_irq(&ch->spinlock);
1959 srpt_reset_ioctx(ch, ioctx);
1960 scst_cmd_set_tgt_priv(scmnd, NULL);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void srpt_refresh_port_work(void *ctx)
#else
static void srpt_refresh_port_work(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct srpt_port *sport = (struct srpt_port *)ctx;
#else
	struct srpt_port *sport = container_of(work, struct srpt_port, work);
#endif
1975 srpt_refresh_port(sport);
1978 static int srpt_detect(struct scst_tgt_template *tp)
1980 struct srpt_device *sdev;
1981 struct srpt_port *sport;
1985 list_for_each_entry(sdev, &srpt_devices, list) {
1987 sdev->scst_tgt = scst_register(tp, NULL);
1988 if (!sdev->scst_tgt)
1991 scst_tgt_set_tgt_priv(sdev->scst_tgt, sdev);
1993 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
1994 sport = &sdev->port[i - 1];
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
		INIT_WORK(&sport->work, srpt_refresh_port_work, sport);
#else
		INIT_WORK(&sport->work, srpt_refresh_port_work);
#endif
2003 if (srpt_refresh_port(sport)) {
2004 scst_unregister(sdev->scst_tgt);
2015 static int srpt_release(struct scst_tgt *scst_tgt)
2017 struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
2018 struct srpt_port *sport;
2019 struct srpt_rdma_ch *ch, *tmp_ch;
2020 struct ib_port_modify port_modify = {
2021 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP
2025 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
2026 srpt_release_channel(ch, 1);
2028 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
2029 sport = &sdev->port[i - 1];
2030 ib_modify_port(sdev->device, sport->port, 0, &port_modify);
2031 ib_unregister_mad_agent(sport->mad_agent);
2034 scst_tgt_set_tgt_priv(scst_tgt, NULL);
2036 complete(&sdev->scst_released);
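/*
 * Kernel thread that processes completions deferred by
 * srpt_schedule_thread() when the "thread" module parameter is enabled.
 */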
2041 static int srpt_ioctx_thread(void *arg)
2043 struct srpt_ioctx *ioctx;
2045 current->flags |= PF_NOFREEZE;
2047 spin_lock_irq(&srpt_thread.thread_lock);
2048 while (!kthread_should_stop()) {
2050 init_waitqueue_entry(&wait, current);
2052 if (!srpt_test_ioctx_list()) {
2053 add_wait_queue_exclusive(&ioctx_list_waitQ, &wait);
2056 set_current_state(TASK_INTERRUPTIBLE);
2057 if (srpt_test_ioctx_list())
2059 spin_unlock_irq(&srpt_thread.thread_lock);
2061 spin_lock_irq(&srpt_thread.thread_lock);
2063 set_current_state(TASK_RUNNING);
2064 remove_wait_queue(&ioctx_list_waitQ, &wait);
2067 while (!list_empty(&srpt_thread.thread_ioctx_list)) {
2068 ioctx = list_entry(srpt_thread.thread_ioctx_list.next,
2069 struct srpt_ioctx, comp_list);
2071 list_del(&ioctx->comp_list);
2073 spin_unlock_irq(&srpt_thread.thread_lock);
2074 switch (ioctx->op) {
2076 srpt_handle_send_comp(ioctx->ch, ioctx,
2077 SCST_CONTEXT_DIRECT);
2079 case IB_WC_RDMA_WRITE:
2080 case IB_WC_RDMA_READ:
2081 srpt_handle_rdma_comp(ioctx->ch, ioctx);
2084 srpt_handle_new_iu(ioctx->ch, ioctx);
2089 spin_lock_irq(&srpt_thread.thread_lock);
2092 spin_unlock_irq(&srpt_thread.thread_lock);
2097 static struct scst_tgt_template srpt_template = {
2099 .sg_tablesize = SRPT_DEF_SG_TABLESIZE,
2100 .xmit_response_atomic = 1,
2101 .rdy_to_xfer_atomic = 1,
2103 .detect = srpt_detect,
2104 .release = srpt_release,
2105 .xmit_response = srpt_xmit_response,
2106 .rdy_to_xfer = srpt_rdy_to_xfer,
2107 .on_free_cmd = srpt_on_free_cmd,
2108 .task_mgmt_fn_done = srpt_tsk_mgmt_done
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static void srpt_release_class_dev(struct class_device *class_dev)
#else
static void srpt_release_class_dev(struct device *dev)
#endif
{
}

static struct class srpt_class = {
	.name = "infiniband_srpt",
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	.release = srpt_release_class_dev
#else
	.dev_release = srpt_release_class_dev
#endif
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static ssize_t show_login_info(struct class_device *class_dev, char *buf)
#else
static ssize_t show_login_info(struct device *dev,
			       struct device_attribute *attr, char *buf)
#endif
{
	struct srpt_device *sdev =
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
		container_of(class_dev, struct srpt_device, class_dev);
#else
		container_of(dev, struct srpt_device, dev);
#endif
2141 struct srpt_port *sport;
2145 for (i = 0; i < sdev->device->phys_port_cnt; i++) {
2146 sport = &sdev->port[i];
2149 "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
2150 "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
2151 "service_id=%016llx\n",
2152 (unsigned long long) mellanox_ioc_guid,
2153 (unsigned long long) mellanox_ioc_guid,
2154 be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
2155 be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
2156 be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
2157 be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
2158 be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
2159 be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
2160 be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
2161 be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
2162 (unsigned long long) mellanox_ioc_guid);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static CLASS_DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
#else
static DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
#endif
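/*
 * IB client "add" callback: allocate per-HCA state (PD, DMA MR, SRQ, CM
 * listener, event handler, ioctx ring), register the login_info attribute and
 * start listening for SRP logins.
 */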
2175 static void srpt_add_one(struct ib_device *device)
2177 struct srpt_device *sdev;
2178 struct ib_srq_init_attr srq_attr;
2181 sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
2185 sdev->device = device;
2186 init_completion(&sdev->scst_released);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	sdev->class_dev.class = &srpt_class;
	sdev->class_dev.dev = device->dma_device;
	snprintf(sdev->class_dev.class_id, BUS_ID_SIZE,
		 "srpt-%s", device->name);
#else
	sdev->dev.class = &srpt_class;
	sdev->dev.parent = device->dma_device;
	snprintf(sdev->dev.bus_id, BUS_ID_SIZE, "srpt-%s", device->name);
#endif
2199 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
2200 if (class_device_register(&sdev->class_dev))
2202 if (class_device_create_file(&sdev->class_dev,
2203 &class_device_attr_login_info))
2206 if (device_register(&sdev->dev))
2208 if (device_create_file(&sdev->dev, &dev_attr_login_info))
2212 if (ib_query_device(device, &sdev->dev_attr))
2215 sdev->pd = ib_alloc_pd(device);
2216 if (IS_ERR(sdev->pd))
2219 sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
2220 if (IS_ERR(sdev->mr))
2223 srq_attr.event_handler = srpt_srq_event;
2224 srq_attr.srq_context = (void *)sdev;
2225 srq_attr.attr.max_wr = min(SRPT_SRQ_SIZE, sdev->dev_attr.max_srq_wr);
2226 srq_attr.attr.max_sge = 1;
2227 srq_attr.attr.srq_limit = 0;
2229 sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
2230 if (IS_ERR(sdev->srq))
2233 printk(KERN_DEBUG PFX "%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
2234 __func__, srq_attr.attr.max_wr,
2235 sdev->dev_attr.max_srq_wr, device->name);
2237 if (!mellanox_ioc_guid)
2238 mellanox_ioc_guid = be64_to_cpu(device->node_guid);
2240 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
2241 if (IS_ERR(sdev->cm_id))
2244 /* print out target login information */
2245 printk(KERN_DEBUG PFX "Target login info: id_ext=%016llx,"
2246 "ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
2247 (unsigned long long) mellanox_ioc_guid,
2248 (unsigned long long) mellanox_ioc_guid,
2249 (unsigned long long) mellanox_ioc_guid);
	/*
	 * We do not have a consistent service_id (i.e. also the id_ext of the
	 * target_id) to identify this target; we currently use the GUID of
	 * the first HCA in the system as the service_id, so the target_id
	 * will change if that HCA goes bad and is replaced by a different one.
	 */
2257 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(mellanox_ioc_guid), 0, NULL))
2260 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
2261 srpt_event_handler);
2262 if (ib_register_event_handler(&sdev->event_handler))
2265 if (srpt_alloc_ioctx_ring(sdev))
2268 INIT_LIST_HEAD(&sdev->rch_list);
2269 spin_lock_init(&sdev->spinlock);
2271 for (i = 0; i < SRPT_SRQ_SIZE; ++i)
2272 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
2274 list_add_tail(&sdev->list, &srpt_devices);
2276 ib_set_client_data(device, &srpt_client, sdev);
2281 ib_unregister_event_handler(&sdev->event_handler);
2283 ib_destroy_cm_id(sdev->cm_id);
2285 ib_destroy_srq(sdev->srq);
2287 ib_dereg_mr(sdev->mr);
2289 ib_dealloc_pd(sdev->pd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	class_device_unregister(&sdev->class_dev);
#else
	device_unregister(&sdev->dev);
#endif
2300 static void srpt_remove_one(struct ib_device *device)
2302 struct srpt_device *sdev;
2305 sdev = ib_get_client_data(device, &srpt_client);
2309 wait_for_completion(&sdev->scst_released);
2311 ib_unregister_event_handler(&sdev->event_handler);
2312 ib_destroy_cm_id(sdev->cm_id);
2313 ib_destroy_srq(sdev->srq);
2314 ib_dereg_mr(sdev->mr);
2315 ib_dealloc_pd(sdev->pd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	class_device_unregister(&sdev->class_dev);
#else
	device_unregister(&sdev->dev);
#endif
2322 for (i = 0; i < SRPT_SRQ_SIZE; ++i)
2323 srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
2325 list_del(&sdev->list);
2329 static int __init srpt_init_module(void)
2333 INIT_LIST_HEAD(&srpt_devices);
2335 ret = class_register(&srpt_class);
2337 printk(KERN_ERR PFX "couldn't register class ib_srpt\n");
2341 ret = ib_register_client(&srpt_client);
2343 printk(KERN_ERR PFX "couldn't register IB client\n");
2347 ret = scst_register_target_template(&srpt_template);
2349 printk(KERN_ERR PFX "couldn't register with scst\n");
2355 spin_lock_init(&srpt_thread.thread_lock);
2356 INIT_LIST_HEAD(&srpt_thread.thread_ioctx_list);
2357 srpt_thread.thread = kthread_run(srpt_ioctx_thread,
2358 NULL, "srpt_thread");
2359 if (IS_ERR(srpt_thread.thread)) {
2360 srpt_thread.thread = NULL;
2368 ib_unregister_client(&srpt_client);
2370 class_unregister(&srpt_class);
2374 static void __exit srpt_cleanup_module(void)
2376 if (srpt_thread.thread)
2377 kthread_stop(srpt_thread.thread);
2378 scst_unregister_target_template(&srpt_template);
2379 ib_unregister_client(&srpt_client);
2380 class_unregister(&srpt_class);
2383 module_init(srpt_init_module);
2384 module_exit(srpt_cleanup_module);