2 * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
3 * Copyright (C) 2007 - 2008 Vladislav Bolkhovitin
4 * Copyright (C) 2007 - 2008 CMS Distribution Limited
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
17 #include <linux/module.h>
18 #include <linux/hash.h>
19 #include <linux/kthread.h>
21 #include <scsi/scsi.h>
26 #ifndef NET_PAGE_CALLBACKS_DEFINED
27 #warning "Patch put_page_callback-<kernel-version>.patch not applied on your \
28 kernel. ISCSI-SCST will run in the performance degraded mode. Refer \
29 README file for details."
/* Flags for iscsi_cmnd_init_write()/iscsi_cmnds_init_write() */
#define ISCSI_INIT_WRITE_WAKE 0x1
#define ISCSI_INIT_WRITE_REMOVE_HASH 0x2

/* Name of the iscsi-scst control device */
static char ctr_name[] = "iscsi-scst-ctl";
static int iscsi_template_registered;

#if defined(DEBUG) || defined(TRACING)
unsigned long iscsi_trace_flag = ISCSI_DEFAULT_LOG_FLAGS;

/* Slab cache for struct iscsi_cmnd allocations */
static struct kmem_cache *iscsi_cmnd_cache;

/* Read (RX) side: connections with pending read work + their wait queue */
DEFINE_SPINLOCK(iscsi_rd_lock);
LIST_HEAD(iscsi_rd_list);
DECLARE_WAIT_QUEUE_HEAD(iscsi_rd_waitQ);

/* Write (TX) side: connections with pending write work + their wait queue */
DEFINE_SPINLOCK(iscsi_wr_lock);
LIST_HEAD(iscsi_wr_list);
DECLARE_WAIT_QUEUE_HEAD(iscsi_wr_waitQ);

/* Scratch sink used to read-and-discard data of rejected/reserved-tag PDUs */
static char dummy_data[1024];

/* One entry per global iSCSI kernel thread */
struct iscsi_thread_t {
	struct task_struct *thr;
	struct list_head threads_list_entry;

static LIST_HEAD(iscsi_threads_list);

/* Forward declarations */
static void cmnd_remove_hash(struct iscsi_cmnd *cmnd);
static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd);
static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess);
static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd);
68 static inline u32 cmnd_write_size(struct iscsi_cmnd *cmnd)
70 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
72 if (hdr->flags & ISCSI_CMD_WRITE)
73 return be32_to_cpu(hdr->data_length);
77 static inline u32 cmnd_read_size(struct iscsi_cmnd *cmnd)
79 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
81 if (hdr->flags & ISCSI_CMD_READ) {
82 struct iscsi_rlength_ahdr *ahdr =
83 (struct iscsi_rlength_ahdr *)cmnd->pdu.ahs;
85 if (!(hdr->flags & ISCSI_CMD_WRITE))
86 return be32_to_cpu(hdr->data_length);
87 if (ahdr && ahdr->ahstype == ISCSI_AHSTYPE_RLENGTH)
88 return be32_to_cpu(ahdr->read_length);
/*
 * Hands a preprocessed command back to SCST for execution.
 * Must not be called while the command is still waiting for WRITE data.
 */
static inline void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd)
	EXTRACHECKS_BUG_ON(cmnd->data_waiting);

	cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
	/* NOTE(review): the trailing context argument of this call is not
	 * visible in this excerpt */
	scst_restart_cmd(cmnd->scst_cmd, SCST_PREPROCESS_STATUS_SUCCESS,
/*
 * Clears the data-waiting state of a command and restarts it in SCST.
 * Must be called only from the connection's read thread.
 */
static inline void iscsi_restart_waiting_cmnd(struct iscsi_cmnd *cmnd)
	/*
	 * There is no race with conn_abort(), since all functions
	 * called from single read thread
	 */
	iscsi_extracheck_is_rd_thread(cmnd->conn);
	cmnd->data_waiting = 0;	/* data fully received; no longer waiting */

	iscsi_restart_cmnd(cmnd);
/*
 * Fails a command that is waiting for WRITE data: clears the waiting
 * state and force-releases the request (dropping queued write rsps).
 * Must be called only from the connection's read thread.
 */
static inline void iscsi_fail_waiting_cmnd(struct iscsi_cmnd *cmnd)
	TRACE_MGMT_DBG("Failing data waiting cmd %p", cmnd);

	/*
	 * There is no race with conn_abort(), since all functions
	 * called from single read thread
	 */
	iscsi_extracheck_is_rd_thread(cmnd->conn);
	cmnd->data_waiting = 0;

	req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
/*
 * Allocates and zero-initializes a new iSCSI command with an initial
 * refcount of 1 (uses __GFP_NOFAIL, so it never returns NULL). With
 * @parent == NULL the command is a request and is linked into the
 * connection's cmd_list; otherwise it is a response of @parent.
 *
 * NOTE(review): some original lines (e.g. the #else of the version
 * check) are elided in this excerpt.
 */
struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *conn, struct iscsi_cmnd *parent)
	struct iscsi_cmnd *cmnd;

	/* ToDo: __GFP_NOFAIL?? */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	/* kmem_cache_zalloc() is not available before 2.6.17 */
	cmnd = kmem_cache_alloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd = kmem_cache_zalloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);

	atomic_set(&cmnd->ref_cnt, 1);
	cmnd->scst_state = ISCSI_CMD_STATE_NEW;
	cmnd->parent_req = parent;
	init_waitqueue_head(&cmnd->scst_waitQ);

	/* Request (no parent): set up response/digest lists and publish it */
	if (parent == NULL) {
#ifdef NET_PAGE_CALLBACKS_DEFINED
		atomic_set(&cmnd->net_ref_cnt, 0);
		spin_lock_init(&cmnd->rsp_cmd_lock);
		INIT_LIST_HEAD(&cmnd->rsp_cmd_list);
		INIT_LIST_HEAD(&cmnd->rx_ddigest_cmd_list);

		spin_lock_bh(&conn->cmd_list_lock);
		list_add_tail(&cmnd->cmd_list_entry, &conn->cmd_list);
		spin_unlock_bh(&conn->cmd_list_lock);

	TRACE_DBG("conn %p, parent %p, cmnd %p", conn, parent, cmnd);
/*
 * Frees a command. Also frees the additional header (pdu.ahs).
 * The command must have dropped to refcount 0 and must not be on the
 * write/written lists anymore; violations are reported loudly.
 */
static void cmnd_free(struct iscsi_cmnd *cmnd)
	TRACE_DBG("%p", cmnd);

	if (unlikely(cmnd->tm_aborted)) {
		TRACE_MGMT_DBG("Free aborted cmd %p (scst cmd %p, state %d, "
			"parent_req %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,

	/* Catch users from cmd_list or rsp_cmd_list */
	EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) != 0);

	kfree(cmnd->pdu.ahs);

	/* Being on a list here indicates a refcounting bug elsewhere */
	if (unlikely(cmnd->on_write_list || cmnd->on_written_list)) {
		struct iscsi_scsi_cmd_hdr *req = cmnd_hdr(cmnd);

		PRINT_CRIT_ERROR("cmnd %p still on some list?, %x, %x, %x, %x, %x, %x, %x",
			cmnd, req->opcode, req->scb[0], req->flags, req->itt,
			be32_to_cpu(req->data_length),
			req->cmd_sn, be32_to_cpu(cmnd->pdu.datasize));

		if (unlikely(cmnd->parent_req)) {
			struct iscsi_scsi_cmd_hdr *preq =
					cmnd_hdr(cmnd->parent_req);
			PRINT_CRIT_ERROR("%p %x %u", preq, preq->opcode, preq->scb[0]);

	kmem_cache_free(iscsi_cmnd_cache, cmnd);
/*
 * Final teardown of a command once its refcount reaches zero: unlinks
 * it from the written list and (for requests) the connection cmd_list,
 * finishes or aborts the attached SCST command, drops the parent
 * reference for responses, frees an owned sg, and decrements the
 * session's active command count if this command incremented it.
 *
 * NOTE(review): several original lines (switch breaks, closing braces)
 * are elided in this excerpt.
 */
void cmnd_done(struct iscsi_cmnd *cmnd)
	TRACE_DBG("%p", cmnd);

	if (unlikely(cmnd->tm_aborted)) {
		TRACE_MGMT_DBG("Done aborted cmd %p (scst cmd %p, state %d, "
			"parent_req %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,

	EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);

	if (cmnd->on_written_list) {
		struct iscsi_conn *conn = cmnd->conn;
		TRACE_DBG("Deleting cmd %p from conn %p written_list", cmnd,
		spin_lock_bh(&conn->write_list_lock);
		list_del(&cmnd->write_list_entry);
		cmnd->on_written_list = 0;
		spin_unlock_bh(&conn->write_list_lock);

	/* Request path: unlink from the connection and settle SCST state */
	if (cmnd->parent_req == NULL) {
		struct iscsi_conn *conn = cmnd->conn;
		TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);

		spin_lock_bh(&conn->cmd_list_lock);
		list_del(&cmnd->cmd_list_entry);
		spin_unlock_bh(&conn->cmd_list_lock);

		EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rsp_cmd_list));
		EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rx_ddigest_cmd_list));

		/* Order between above and below code is important! */

		if (cmnd->scst_cmd) {
			switch (cmnd->scst_state) {
			case ISCSI_CMD_STATE_PROCESSED:
				TRACE_DBG("cmd %p PROCESSED", cmnd);
				scst_tgt_cmd_done(cmnd->scst_cmd);
			case ISCSI_CMD_STATE_AFTER_PREPROC:
				struct scst_cmd *scst_cmd = cmnd->scst_cmd;
				TRACE_DBG("cmd %p AFTER_PREPROC", cmnd);
				cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
				cmnd->scst_cmd = NULL;
				/* Tell SCST the preprocessing failed fatally */
				scst_restart_cmd(scst_cmd,
					SCST_PREPROCESS_STATUS_ERROR_FATAL,
					SCST_CONTEXT_THREAD);
				PRINT_CRIT_ERROR("Unexpected cmnd scst state %d",

	/* Response path: unlink from parent and drop the parent's ref */
		EXTRACHECKS_BUG_ON(cmnd->scst_cmd != NULL);
		TRACE_DBG("Deleting rsp %p from parent %p", cmnd,

		spin_lock_bh(&cmnd->parent_req->rsp_cmd_lock);
		list_del(&cmnd->rsp_cmd_list_entry);
		spin_unlock_bh(&cmnd->parent_req->rsp_cmd_lock);

		cmnd_put(cmnd->parent_req);

	/* Order between above and below code is important! */

		TRACE_DBG("%s", "own_sg");
		scst_free(cmnd->sg, cmnd->sg_cnt);

	if (cmnd->dec_active_cmnds) {
		struct iscsi_session *sess = cmnd->conn->session;
		TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
			"new value %d)", cmnd, sess,
			atomic_read(&sess->active_cmds)-1);
		atomic_dec(&sess->active_cmds);
		/* Going negative means a double decrement somewhere */
		if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
			PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
				atomic_read(&sess->active_cmds));
/*
 * The corresponding conn may also get destroyed after this function
 * returns, unless it's called from the read thread!
 *
 * It can't be called in parallel with iscsi_cmnds_init_write()!
 */
void req_cmnd_release_force(struct iscsi_cmnd *req, int flags)
	struct iscsi_cmnd *rsp, *t;
	struct iscsi_conn *conn = req->conn;
	LIST_HEAD(cmds_list);

	TRACE_MGMT_DBG("%p", req);

	sBUG_ON(req == conn->read_cmnd);

	/* Pull this request's responses off the conn write list first */
	if (flags & ISCSI_FORCE_RELEASE_WRITE) {
		spin_lock_bh(&conn->write_list_lock);
		list_for_each_entry_safe(rsp, t, &conn->write_list,
			if (rsp->parent_req != req)

			cmd_del_from_write_list(rsp);

			list_add_tail(&rsp->write_list_entry, &cmds_list);
		spin_unlock_bh(&conn->write_list_lock);

		list_for_each_entry_safe(rsp, t, &cmds_list, write_list_entry) {
			TRACE_MGMT_DBG("Putting write rsp %p", rsp);
			list_del(&rsp->write_list_entry);

	/* Force-clean up remaining responses not yet being transmitted */
	spin_lock_bh(&req->rsp_cmd_lock);
	list_for_each_entry_reverse(rsp, &req->rsp_cmd_list, rsp_cmd_list_entry) {

		if (rsp->force_cleanup_done)

		rsp->force_cleanup_done = 1;

		if (cmnd_get_check(rsp))

		spin_unlock_bh(&req->rsp_cmd_lock);

		spin_lock_bh(&conn->write_list_lock);
		r = rsp->on_write_list || rsp->write_processing_started;
		spin_unlock_bh(&conn->write_list_lock);

		/*
		 * If both on_write_list and write_processing_started not set,
		 * we can safely put() rsp.
		 */
		TRACE_MGMT_DBG("Putting rsp %p", rsp);

	spin_unlock_bh(&req->rsp_cmd_lock);

	req_cmnd_release(req);
/*
 * The corresponding conn may also get destroyed after this function
 * returns, unless it's called from the read thread!
 */
void req_cmnd_release(struct iscsi_cmnd *req)
	struct iscsi_cmnd *c, *t;

	TRACE_DBG("%p", req);

	/* A request must be released exactly once */
	sBUG_ON(req->release_called);
	req->release_called = 1;

	if (unlikely(req->tm_aborted)) {
		TRACE_MGMT_DBG("Release aborted req cmd %p (scst cmd %p, "
			"state %d)", req, req->scst_cmd, req->scst_state);

	sBUG_ON(req->parent_req != NULL);

	/* Drop any RX data-digest commands still attached to this req */
	list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
				rx_ddigest_cmd_list_entry) {
		cmd_del_from_rx_ddigest_list(c);

	cmnd_remove_hash(req);

	if (req->dec_active_cmnds) {
		struct iscsi_session *sess = req->conn->session;
		TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
			"new value %d)", req, sess,
			atomic_read(&sess->active_cmds)-1);
		atomic_dec(&sess->active_cmds);
		req->dec_active_cmnds = 0;
		/* Going negative means a double decrement somewhere */
		if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
			PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
				atomic_read(&sess->active_cmds));
/*
 * The corresponding conn may also get destroyed after this function
 * returns, unless it's called from the read thread!
 */
void rsp_cmnd_release(struct iscsi_cmnd *cmnd)
	TRACE_DBG("%p", cmnd);

	/* A response must be released exactly once */
	sBUG_ON(cmnd->release_called);
	cmnd->release_called = 1;

	/* Responses are never hashed and always have a parent request */
	sBUG_ON(cmnd->hashed);
	sBUG_ON(cmnd->parent_req == NULL);
/*
 * create a new command used as response.
 *
 * iscsi_cmnd_create_rsp_cmnd -
 * @parent: ptr to request command
 *
 * @return ptr to response command or NULL
 */
static struct iscsi_cmnd *iscsi_cmnd_create_rsp_cmnd(struct iscsi_cmnd *parent)
	struct iscsi_cmnd *rsp;

	rsp = cmnd_alloc(parent->conn, parent);

	/* Link the new response onto its parent's response list */
	spin_lock_bh(&parent->rsp_cmd_lock);
	TRACE_DBG("Adding rsp %p to parent %p", rsp, parent);
	list_add_tail(&rsp->rsp_cmd_list_entry, &parent->rsp_cmd_list);
	spin_unlock_bh(&parent->rsp_cmd_lock);
480 static inline struct iscsi_cmnd *get_rsp_cmnd(struct iscsi_cmnd *req)
482 struct iscsi_cmnd *res = NULL;
484 /* Currently this lock isn't needed, but just in case.. */
485 spin_lock_bh(&req->rsp_cmd_lock);
486 if (!list_empty(&req->rsp_cmd_list)) {
487 res = list_entry(req->rsp_cmd_list.prev, struct iscsi_cmnd,
490 spin_unlock_bh(&req->rsp_cmd_lock);
/*
 * Queues a list of response PDUs for transmission: optionally unhashes
 * the parent request, computes data digests if enabled, moves all
 * entries onto the connection's write list, and optionally wakes the
 * write thread.
 *
 * NOTE(review): some continuation lines and closing braces are elided
 * in this excerpt.
 */
static void iscsi_cmnds_init_write(struct list_head *send, int flags)
	struct iscsi_cmnd *rsp = list_entry(send->next, struct iscsi_cmnd,
	struct iscsi_conn *conn = rsp->conn;
	struct list_head *pos, *next;

	sBUG_ON(list_empty(send));

	/*
	 * If we don't remove hashed req cmd from the hash list here, before
	 * submitting it for transmission, we will have a race, when for
	 * some reason cmd's release is delayed after transmission and
	 * initiator sends cmd with the same ITT => this command will be
	 * erroneously rejected as a duplicate.
	 */
	if ((flags & ISCSI_INIT_WRITE_REMOVE_HASH) && rsp->parent_req->hashed &&
	    (rsp->parent_req->r2t_length == 0) &&
	    (rsp->parent_req->outstanding_r2t == 0))
		cmnd_remove_hash(rsp->parent_req);

	/* Data digest is negotiated: digest every PDU that carries data */
	if (!(conn->ddigest_type & DIGEST_NONE)) {
		list_for_each(pos, send) {
			rsp = list_entry(pos, struct iscsi_cmnd,
			if (rsp->pdu.datasize != 0) {
				TRACE_DBG("Doing data digest (%p:%x)", rsp,

	/* Move everything onto the connection write list atomically */
	spin_lock_bh(&conn->write_list_lock);
	list_for_each_safe(pos, next, send) {
		rsp = list_entry(pos, struct iscsi_cmnd, write_list_entry);

		TRACE_DBG("%p:%x", rsp, cmnd_opcode(rsp));

		sBUG_ON(conn != rsp->conn);

		list_del(&rsp->write_list_entry);
		cmd_add_on_write_list(conn, rsp);
	spin_unlock_bh(&conn->write_list_lock);

	if (flags & ISCSI_INIT_WRITE_WAKE)
		iscsi_make_conn_wr_active(conn);
/*
 * Queues a single response PDU for transmission via
 * iscsi_cmnds_init_write(). Complains loudly if the command is already
 * on a write list (would indicate double submission).
 *
 * NOTE(review): the local list-head declaration is elided in this
 * excerpt.
 */
static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags)
	if (unlikely(rsp->on_write_list)) {
		PRINT_CRIT_ERROR("cmd already on write list (%x %x %x %x %u %u "
			"%u %u %u %u %u %d %d",
			cmnd_itt(rsp), cmnd_ttt(rsp), cmnd_opcode(rsp),
			cmnd_scsicode(rsp), rsp->r2t_sn,
			rsp->r2t_length, rsp->is_unsolicited_data,
			rsp->target_task_tag, rsp->outstanding_r2t,
			rsp->hdigest, rsp->ddigest,
			list_empty(&rsp->rsp_cmd_list), rsp->hashed);

	list_add(&rsp->write_list_entry, &head);
	iscsi_cmnds_init_write(&head, flags);
/*
 * Sets the PDU data segment size and zero-fills the pad bytes up to
 * the next 4-byte boundary, as iSCSI framing requires.
 *
 * NOTE(review): the guard around the padding and the loop writing the
 * pad bytes are not visible in this excerpt.
 */
static void iscsi_set_datasize(struct iscsi_cmnd *cmnd, u32 offset, u32 size)
	cmnd->pdu.datasize = size;

		u32 last_off = offset + size;
		int idx = last_off >> PAGE_SHIFT;	/* sg page holding the tail */
		u8 *p = (u8 *)page_address(sg_page(&cmnd->sg[idx])) +
			(last_off & ~PAGE_MASK);
		int i = 4 - (size & 3);	/* number of pad bytes to write */
/*
 * Builds and queues the chain of Data-In PDUs for a READ command,
 * splitting the buffer into max_xmit_data_length-sized PDUs. When
 * @send_status is set, the final PDU carries the S bit, the SCSI
 * @status and the residual count (under/overflow vs. the expected
 * read size).
 *
 * NOTE(review): several lines (loop structure, SN/offset setup) are
 * elided in this excerpt.
 */
static void send_data_rsp(struct iscsi_cmnd *req, u8 status, int send_status)
	struct iscsi_cmnd *rsp;
	struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
	struct iscsi_data_in_hdr *rsp_hdr;
	u32 pdusize, expsize, scsisize, size, offset, sn;

	TRACE_DBG("req %p", req);

	pdusize = req->conn->session->sess_param.max_xmit_data_length;
	expsize = cmnd_read_size(req);
	size = min(expsize, (u32)req->bufflen);

	rsp = iscsi_cmnd_create_rsp_cmnd(req);
	TRACE_DBG("rsp %p", rsp);

	/* Data-In responses borrow the request's data buffer */
	rsp->sg_cnt = req->sg_cnt;
	rsp->bufflen = req->bufflen;
	rsp_hdr = (struct iscsi_data_in_hdr *)&rsp->pdu.bhs;

	rsp_hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
	rsp_hdr->itt = req_hdr->itt;
	rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
	rsp_hdr->buffer_offset = cpu_to_be32(offset);
	rsp_hdr->data_sn = cpu_to_be32(sn);

	/* Last (or only) PDU: optionally piggy-back status + residuals */
	if (size <= pdusize) {
		TRACE_DBG("offset %d, size %d", offset, size);
		iscsi_set_datasize(rsp, offset, size);

		TRACE_DBG("status %x", status);

			ISCSI_FLG_FINAL | ISCSI_FLG_STATUS;
		rsp_hdr->cmd_status = status;

		scsisize = req->bufflen;
		if (scsisize < expsize) {
			rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
			size = expsize - scsisize;
		} else if (scsisize > expsize) {
			rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
			size = scsisize - expsize;

		rsp_hdr->residual_count = cpu_to_be32(size);

		list_add_tail(&rsp->write_list_entry, &send);

	/* Intermediate PDU: full pdusize chunk */
		TRACE_DBG("pdusize %d, offset %d, size %d", pdusize, offset,

		iscsi_set_datasize(rsp, offset, pdusize);

		list_add_tail(&rsp->write_list_entry, &send);

	iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_REMOVE_HASH);
/*
 * Builds a SCSI Response PDU for @req with the given @status. If valid
 * sense data is supplied it is copied into a freshly allocated sg as
 * an iSCSI sense data segment, padded to a 4-byte boundary with zeros.
 * Returns the new response command.
 */
static struct iscsi_cmnd *create_status_rsp(struct iscsi_cmnd *req, int status,
	const u8 *sense_buf, int sense_len)
	struct iscsi_cmnd *rsp;
	struct iscsi_scsi_rsp_hdr *rsp_hdr;
	struct iscsi_sense_data *sense;
	struct scatterlist *sg;

	rsp = iscsi_cmnd_create_rsp_cmnd(req);
	TRACE_DBG("%p", rsp);

	rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
	rsp_hdr->opcode = ISCSI_OP_SCSI_RSP;
	rsp_hdr->flags = ISCSI_FLG_FINAL;
	rsp_hdr->response = ISCSI_RESPONSE_COMMAND_COMPLETED;
	rsp_hdr->cmd_status = status;
	rsp_hdr->itt = cmnd_hdr(req)->itt;

	if (SCST_SENSE_VALID(sense_buf)) {
		TRACE_DBG("%s", "SENSE VALID");
		/* ToDo: __GFP_NOFAIL ?? */
		sg = rsp->sg = scst_alloc(PAGE_SIZE, GFP_KERNEL|__GFP_NOFAIL,

		sense = (struct iscsi_sense_data *)page_address(sg_page(&sg[0]));
		sense->length = cpu_to_be16(sense_len);
		memcpy(sense->data, sense_buf, sense_len);
		rsp->pdu.datasize = sizeof(struct iscsi_sense_data) + sense_len;
		/* Round the buffer length up to a 4-byte boundary */
		rsp->bufflen = (rsp->pdu.datasize + 3) & -4;
		if (rsp->bufflen - rsp->pdu.datasize) {
			int i = rsp->pdu.datasize;
			u8 *p = (u8 *)sense + i;

			/* Zero the pad bytes */
			while (i < rsp->bufflen) {

		rsp->pdu.datasize = 0;
/*
 * Builds a CHECK CONDITION response carrying fixed-format sense data
 * with the given sense key / ASC / ASCQ.
 *
 * NOTE(review): the local sense-buffer declaration and the response
 * code byte assignment are elided in this excerpt.
 */
static struct iscsi_cmnd *create_sense_rsp(struct iscsi_cmnd *req,
	u8 sense_key, u8 asc, u8 ascq)
	memset(sense, 0, sizeof(sense));
	sense[2] = sense_key;
	sense[7] = 6;	/* Additional sense length */

	return create_status_rsp(req, SAM_STAT_CHECK_CONDITION, sense,
/*
 * Builds and queues a Reject PDU for @req with the given @reason,
 * echoing the offending BHS as the Reject data segment, then arranges
 * for any remaining request data to be read and discarded.
 */
static void iscsi_cmnd_reject(struct iscsi_cmnd *req, int reason)
	struct iscsi_cmnd *rsp;
	struct iscsi_reject_hdr *rsp_hdr;
	struct scatterlist *sg;

	TRACE_MGMT_DBG("Reject: req %p, reason %x", req, reason);

	sBUG_ON(req->rejected);

	req->reject_reason = ISCSI_REJECT_CMD;

	rsp = iscsi_cmnd_create_rsp_cmnd(req);
	rsp_hdr = (struct iscsi_reject_hdr *)&rsp->pdu.bhs;

	rsp_hdr->opcode = ISCSI_OP_REJECT;
	rsp_hdr->ffffffff = ISCSI_RESERVED_TAG;
	rsp_hdr->reason = reason;

	/* ToDo: __GFP_NOFAIL ?? */
	sg = rsp->sg = scst_alloc(PAGE_SIZE, GFP_KERNEL|__GFP_NOFAIL,

	/* The Reject data segment is the header of the rejected PDU */
	addr = page_address(sg_page(&sg[0]));

	memcpy(addr, &req->pdu.bhs, sizeof(struct iscsi_hdr));
	rsp->bufflen = rsp->pdu.datasize = sizeof(struct iscsi_hdr);

	iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
		ISCSI_INIT_WRITE_WAKE);

	cmnd_prepare_get_rejected_cmd_data(req);
748 static inline int iscsi_get_allowed_cmds(struct iscsi_session *sess)
750 int res = max(-1, (int)sess->max_queued_cmnds -
751 atomic_read(&sess->active_cmds)-1);
752 TRACE_DBG("allowed cmds %d (sess %p, active_cmds %d)", res,
753 sess, atomic_read(&sess->active_cmds));
/*
 * Fills the StatSN/ExpCmdSN/MaxCmdSN fields of an outgoing PDU under
 * the session sn_lock. Returns the (next) StatSN in wire byte order.
 *
 * NOTE(review): the conditional around the StatSN assignment is elided
 * in this excerpt.
 */
static u32 cmnd_set_sn(struct iscsi_cmnd *cmnd, int set_stat_sn)
	struct iscsi_conn *conn = cmnd->conn;
	struct iscsi_session *sess = conn->session;

	spin_lock(&sess->sn_lock);

	cmnd->pdu.bhs.sn = cpu_to_be32(conn->stat_sn++);
	cmnd->pdu.bhs.exp_sn = cpu_to_be32(sess->exp_cmd_sn);
	/* MaxCmdSN = ExpCmdSN + window derived from queued-command budget */
	cmnd->pdu.bhs.max_sn = cpu_to_be32(sess->exp_cmd_sn +
		iscsi_get_allowed_cmds(sess));

	res = cpu_to_be32(conn->stat_sn);

	spin_unlock(&sess->sn_lock);
/* Called under sn_lock */
static void __update_stat_sn(struct iscsi_cmnd *cmnd)
	struct iscsi_conn *conn = cmnd->conn;

	/* Convert ExpStatSN to CPU order in place, then advance the
	 * connection's exp_stat_sn if the new value is within the valid
	 * (exp_stat_sn, stat_sn] window (serial-number arithmetic). */
	cmnd->pdu.bhs.exp_sn = exp_stat_sn = be32_to_cpu(cmnd->pdu.bhs.exp_sn);
	TRACE_DBG("%x,%x", cmnd_opcode(cmnd), exp_stat_sn);
	if ((int)(exp_stat_sn - conn->exp_stat_sn) > 0 &&
	    (int)(exp_stat_sn - conn->stat_sn) <= 0) {
		/* free pdu resources */
		cmnd->conn->exp_stat_sn = exp_stat_sn;
792 static inline void update_stat_sn(struct iscsi_cmnd *cmnd)
794 spin_lock(&cmnd->conn->session->sn_lock);
795 __update_stat_sn(cmnd);
796 spin_unlock(&cmnd->conn->session->sn_lock);
/* Called under sn_lock */
static int check_cmd_sn(struct iscsi_cmnd *cmnd)
	struct iscsi_session *session = cmnd->conn->session;

	/* Convert CmdSN to CPU order in place, then verify it is not
	 * behind ExpCmdSN (serial-number comparison). */
	cmnd->pdu.bhs.sn = cmd_sn = be32_to_cpu(cmnd->pdu.bhs.sn);
	TRACE_DBG("%d(%d)", cmd_sn, session->exp_cmd_sn);
	if (likely((s32)(cmd_sn - session->exp_cmd_sn) >= 0))

	PRINT_ERROR("sequence error (%x,%x)", cmd_sn, session->exp_cmd_sn);
	return -ISCSI_REASON_PROTOCOL_ERROR;
/*
 * Hash lookup by ITT (and TTT unless TTT is the reserved tag).
 * Caller must hold cmnd_hash_lock.
 *
 * NOTE(review): the loop's match/mismatch returns are elided in this
 * excerpt.
 */
static inline struct iscsi_cmnd *__cmnd_find_hash(struct iscsi_session *session,
	struct list_head *head;
	struct iscsi_cmnd *cmnd;

	head = &session->cmnd_hash[cmnd_hashfn(itt)];

	list_for_each_entry(cmnd, head, hash_list_entry) {
		if (cmnd->pdu.bhs.itt == itt) {
			/* TTT must also match unless caller passed the
			 * reserved tag as a wildcard */
			if ((ttt != ISCSI_RESERVED_TAG) && (ttt != cmnd->target_task_tag))
831 static struct iscsi_cmnd *cmnd_find_hash(struct iscsi_session *session,
834 struct iscsi_cmnd *cmnd;
836 spin_lock(&session->cmnd_hash_lock);
837 cmnd = __cmnd_find_hash(session, itt, ttt);
838 spin_unlock(&session->cmnd_hash_lock);
/*
 * Like cmnd_find_hash(), but also takes a reference on the found
 * command while still holding the hash lock.
 *
 * NOTE(review): the NULL-on-failed-get handling and the return are
 * elided in this excerpt.
 */
static struct iscsi_cmnd *cmnd_find_hash_get(struct iscsi_session *session,
	struct iscsi_cmnd *cmnd;

	spin_lock(&session->cmnd_hash_lock);
	cmnd = __cmnd_find_hash(session, itt, ttt);
		if (unlikely(cmnd_get_check(cmnd)))
	spin_unlock(&session->cmnd_hash_lock);
/*
 * Inserts a request into the session's command hash, rejecting the
 * reserved ITT and duplicate ITTs, then updates StatSN bookkeeping and
 * validates CmdSN. Returns 0 or a negative iSCSI reason code.
 */
static int cmnd_insert_hash(struct iscsi_cmnd *cmnd)
	struct iscsi_session *session = cmnd->conn->session;
	struct iscsi_cmnd *tmp;
	struct list_head *head;
	u32 itt = cmnd->pdu.bhs.itt;

	TRACE_DBG("%p:%x", cmnd, itt);
	if (unlikely(itt == ISCSI_RESERVED_TAG)) {
		PRINT_ERROR("%s", "ITT is RESERVED_TAG");
		PRINT_BUFFER("Incorrect BHS", &cmnd->pdu.bhs,
			sizeof(cmnd->pdu.bhs));
		err = -ISCSI_REASON_PROTOCOL_ERROR;

	spin_lock(&session->cmnd_hash_lock);

	head = &session->cmnd_hash[cmnd_hashfn(cmnd->pdu.bhs.itt)];

	/* An existing entry with this ITT means a protocol violation */
	tmp = __cmnd_find_hash(session, itt, ISCSI_RESERVED_TAG);
		list_add_tail(&cmnd->hash_list_entry, head);
		PRINT_ERROR("Task %x in progress, cmnd %p", itt, cmnd);
		err = -ISCSI_REASON_TASK_IN_PROGRESS;

	spin_unlock(&session->cmnd_hash_lock);

	spin_lock(&session->sn_lock);
	__update_stat_sn(cmnd);
	err = check_cmd_sn(cmnd);
	spin_unlock(&session->sn_lock);
/*
 * Removes a request from the session's command hash. Complains if the
 * hash entry for the ITT is missing or is a different command.
 */
static void cmnd_remove_hash(struct iscsi_cmnd *cmnd)
	struct iscsi_session *session = cmnd->conn->session;
	struct iscsi_cmnd *tmp;

	spin_lock(&session->cmnd_hash_lock);

	tmp = __cmnd_find_hash(session, cmnd->pdu.bhs.itt, ISCSI_RESERVED_TAG);

	if (likely(tmp && tmp == cmnd)) {
		list_del(&cmnd->hash_list_entry);
		PRINT_ERROR("%p:%x not found", cmnd, cmnd_itt(cmnd));

	spin_unlock(&session->cmnd_hash_lock);
/*
 * Sets up the connection's read iovecs so the data segment of a
 * rejected command is read from the socket and discarded: a single
 * page is allocated (if the command has no sg yet) and reused for
 * every iovec slot.
 *
 * NOTE(review): several lines (early-out for zero datasize, loop
 * closing) are elided in this excerpt.
 */
static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd)
	struct iscsi_conn *conn = cmnd->conn;
	struct scatterlist *sg = cmnd->sg;

	TRACE_MGMT_DBG("Skipping (%p, %x %x %x %u, %p, scst state %d)", cmnd,
		cmnd_itt(cmnd), cmnd_opcode(cmnd), cmnd_hdr(cmnd)->scb[0],
		cmnd->pdu.datasize, cmnd->scst_cmd, cmnd->scst_state);

	iscsi_extracheck_is_rd_thread(conn);

	size = cmnd->pdu.datasize;

	/* ToDo: __GFP_NOFAIL ?? */
	sg = cmnd->sg = scst_alloc(PAGE_SIZE, GFP_KERNEL|__GFP_NOFAIL,

	cmnd->bufflen = PAGE_SIZE;

	addr = page_address(sg_page(&sg[0]));
	sBUG_ON(addr == NULL);
	/* Round the transfer up to the iSCSI 4-byte padding boundary */
	size = (size + 3) & -4;
	conn->read_size = size;
	/* All iovecs point at the same throw-away page */
	for (i = 0; size > PAGE_SIZE; i++, size -= cmnd->bufflen) {
		sBUG_ON(i >= ISCSI_CONN_IOV_MAX);
		conn->read_iov[i].iov_base = addr;
		conn->read_iov[i].iov_len = cmnd->bufflen;

	conn->read_iov[i].iov_base = addr;
	conn->read_iov[i].iov_len = size;
	conn->read_msg.msg_iov = conn->read_iov;
	conn->read_msg.msg_iovlen = ++i;
/*
 * Completes a SCSI command that must be rejected after preprocessing:
 * fills the residual fields of its already-created SCSI Response PDU,
 * queues it for transmission, and arranges for any pending data to be
 * read and discarded.
 */
static void cmnd_reject_scsi_cmd(struct iscsi_cmnd *req)
	struct iscsi_cmnd *rsp;
	struct iscsi_scsi_rsp_hdr *rsp_hdr;

	TRACE_DBG("%p", req);

	sBUG_ON(req->rejected);

	req->reject_reason = ISCSI_REJECT_SCSI_CMD;

	rsp = get_rsp_cmnd(req);

	/* That can be true for aborted commands */

	rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;

	sBUG_ON(cmnd_opcode(rsp) != ISCSI_OP_SCSI_RSP);

	/* No data was transferred, so the whole expected size is residual */
	size = cmnd_write_size(req);
		rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
		rsp_hdr->residual_count = cpu_to_be32(size);

	size = cmnd_read_size(req);
		/* Bidirectional command: read residual goes in bi_ fields */
		if (cmnd_hdr(req)->flags & ISCSI_CMD_WRITE) {
			rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
			rsp_hdr->bi_residual_count = cpu_to_be32(size);
			rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
			rsp_hdr->residual_count = cpu_to_be32(size);

	iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
		ISCSI_INIT_WRITE_WAKE);

	cmnd_prepare_get_rejected_cmd_data(req);
/*
 * Points the connection's read iovecs at the region [offset, offset +
 * size) of @cmd's scatterlist so the next data segment lands directly
 * in the command buffer. Closes the connection on a bad range or when
 * the data would need more than ISCSI_CONN_IOV_MAX iovecs.
 *
 * NOTE(review): loop framing and the final return are elided in this
 * excerpt. Also note the range check uses u32 arithmetic — presumably
 * offset + size cannot wrap for negotiated sizes, but worth confirming.
 */
static int cmnd_prepare_recv_pdu(struct iscsi_conn *conn,
	struct iscsi_cmnd *cmd, u32 offset, u32 size)
	struct scatterlist *sg = cmd->sg;
	int bufflen = cmd->bufflen;

	TRACE_DBG("%p %u,%u", cmd->sg, offset, size);

	iscsi_extracheck_is_rd_thread(conn);

	if (unlikely((offset >= bufflen) ||
		     (offset + size > bufflen))) {
		PRINT_ERROR("Wrong ltn (%u %u %u)", offset, size, bufflen);
		mark_conn_closed(conn);

	/* Translate the byte offset into an sg index + in-page offset */
	offset += sg[0].offset;
	idx = offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	conn->read_msg.msg_iov = conn->read_iov;
	/* Round the read size up to the iSCSI 4-byte padding boundary */
	conn->read_size = size = (size + 3) & -4;

	addr = page_address(sg_page(&sg[idx]));
	sBUG_ON(addr == NULL);
	conn->read_iov[i].iov_base = addr + offset;
	/* Fits in the current page: single final iovec */
	if (offset + size <= PAGE_SIZE) {
		TRACE_DBG("idx=%d, offset=%u, size=%d, addr=%p",
			idx, offset, size, addr);
		conn->read_iov[i].iov_len = size;
		conn->read_msg.msg_iovlen = ++i;

	/* Spans pages: consume the rest of this page and continue */
	conn->read_iov[i].iov_len = PAGE_SIZE - offset;
	TRACE_DBG("idx=%d, offset=%u, size=%d, iov_len=%zd, addr=%p",
		idx, offset, size, conn->read_iov[i].iov_len, addr);
	size -= conn->read_iov[i].iov_len;

	if (unlikely(++i >= ISCSI_CONN_IOV_MAX)) {
		PRINT_ERROR("Initiator %s violated negotiated "
			"parameters by sending too much data (size "
			"left %d)", conn->session->initiator_name, size);
		mark_conn_closed(conn);

	TRACE_DBG("msg_iov=%p, msg_iovlen=%zd",
		conn->read_msg.msg_iov, conn->read_msg.msg_iovlen);
/*
 * Builds and queues R2T PDUs for @req until either its remaining
 * r2t_length is covered or the negotiated MaxOutstandingR2T limit is
 * reached. Each R2T asks for at most MaxBurstLength bytes. Aborted
 * requests with no outstanding R2Ts are failed instead.
 *
 * NOTE(review): loop framing and some declarations are elided in this
 * excerpt.
 */
static void send_r2t(struct iscsi_cmnd *req)
	struct iscsi_session *session = req->conn->session;
	struct iscsi_cmnd *rsp;
	struct iscsi_r2t_hdr *rsp_hdr;

	if (unlikely(req->tm_aborted)) {
		TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted on R2T "
			"(r2t_length %d, outstanding_r2t %d)", req,
			req->scst_cmd, req->r2t_length, req->outstanding_r2t);
		if (req->outstanding_r2t == 0)
			iscsi_fail_waiting_cmnd(req);

	/*
	 * There is no race with data_out_start() and conn_abort(), since
	 * all functions called from single read thread
	 */
	iscsi_extracheck_is_rd_thread(req->conn);

	burst = session->sess_param.max_burst_length;
	/* Next requested offset = EDTL minus what is still to be asked for */
	offset = be32_to_cpu(cmnd_hdr(req)->data_length) - req->r2t_length;

	rsp = iscsi_cmnd_create_rsp_cmnd(req);
	rsp->pdu.bhs.ttt = req->target_task_tag;
	rsp_hdr = (struct iscsi_r2t_hdr *)&rsp->pdu.bhs;
	rsp_hdr->opcode = ISCSI_OP_R2T;
	rsp_hdr->flags = ISCSI_FLG_FINAL;
	rsp_hdr->lun = cmnd_hdr(req)->lun;
	rsp_hdr->itt = cmnd_hdr(req)->itt;
	rsp_hdr->r2t_sn = cpu_to_be32(req->r2t_sn++);
	rsp_hdr->buffer_offset = cpu_to_be32(offset);
	/* Cap each R2T at the negotiated MaxBurstLength */
	if (req->r2t_length > burst) {
		rsp_hdr->data_length = cpu_to_be32(burst);
		req->r2t_length -= burst;
		rsp_hdr->data_length = cpu_to_be32(req->r2t_length);
		req->r2t_length = 0;

	TRACE_WRITE("%x %u %u %u %u", cmnd_itt(req),
		be32_to_cpu(rsp_hdr->data_length),
		be32_to_cpu(rsp_hdr->buffer_offset),
		be32_to_cpu(rsp_hdr->r2t_sn), req->outstanding_r2t);

	list_add_tail(&rsp->write_list_entry, &send);

	/* Stop once the outstanding-R2T window is full */
	if (++req->outstanding_r2t >= session->sess_param.max_outstanding_r2t)

	} while (req->r2t_length != 0);

	iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_WAKE);
/*
 * SCST pre-exec callback. For READs it marks long transmissions; for
 * other directions it verifies the received data digests queued on the
 * request and sets a CRC sense error if any digest check fails.
 * Returns an SCST_PREPROCESS_STATUS_* code.
 */
static int iscsi_pre_exec(struct scst_cmd *scst_cmd)
	int res = SCST_PREPROCESS_STATUS_SUCCESS;
	struct iscsi_cmnd *req = (struct iscsi_cmnd *)
		scst_cmd_get_tgt_priv(scst_cmd);
	struct iscsi_cmnd *c, *t;

	EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));

	if (scst_cmd_get_data_direction(scst_cmd) == SCST_DATA_READ) {
		/* Data digest (or a large transfer without page callbacks)
		 * makes the transmit path slow: tell SCST */
		if (!(req->conn->ddigest_type & DIGEST_NONE))
			scst_set_long_xmit(scst_cmd);
#ifndef NET_PAGE_CALLBACKS_DEFINED
		else if (cmnd_hdr(req)->data_length > 8*1024)
			scst_set_long_xmit(scst_cmd);
		EXTRACHECKS_BUG_ON(!list_empty(&req->rx_ddigest_cmd_list));

	/* If data digest isn't used this list will be empty */
	list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
				rx_ddigest_cmd_list_entry) {
		TRACE_DBG("Checking digest of RX ddigest cmd %p", c);
		if (digest_rx_data(c) != 0) {
			scst_set_cmd_error(scst_cmd,
				SCST_LOAD_SENSE(iscsi_sense_crc_error));
			res = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
			/*
			 * The rest of rx_ddigest_cmd_list will be freed
			 * in req_cmnd_release()
			 */
		cmd_del_from_rx_ddigest_list(c);

	TRACE_EXIT_RES(res);
/*
 * Begins processing of a NOP-Out PDU: validates TTT/ITT per RFC 3720
 * (target never solicits NOP-Out here, reserved ITT must be immediate),
 * updates SN bookkeeping or inserts the command into the hash, and sets
 * up read iovecs for any ping data — into a real sg for tagged
 * NOP-Outs, or into the throw-away dummy_data buffer for reserved-ITT
 * ones. Returns 0 or a negative iSCSI reason code.
 *
 * NOTE(review): some branch framing and error-path lines are elided in
 * this excerpt.
 */
static int noop_out_start(struct iscsi_cmnd *cmnd)
	struct iscsi_conn *conn = cmnd->conn;

	TRACE_DBG("%p", cmnd);

	iscsi_extracheck_is_rd_thread(conn);

	if (unlikely(cmnd_ttt(cmnd) != cpu_to_be32(ISCSI_RESERVED_TAG))) {
		/*
		 * We don't request a NOP-Out by sending a NOP-In.
		 * See 10.18.2 in the draft 20.
		 */
		PRINT_ERROR("Initiator sent command with not RESERVED tag and "
			"TTT %x", cmnd_itt(cmnd));
		err = -ISCSI_REASON_PROTOCOL_ERROR;

	if (cmnd_itt(cmnd) == cpu_to_be32(ISCSI_RESERVED_TAG)) {
		if (unlikely(!(cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE)))
			PRINT_ERROR("%s", "Initiator sent RESERVED tag for "
				"non-immediate command");
		/* No response expected: only update SN state */
		spin_lock(&conn->session->sn_lock);
		__update_stat_sn(cmnd);
		err = check_cmd_sn(cmnd);
		spin_unlock(&conn->session->sn_lock);

		err = cmnd_insert_hash(cmnd);
		if (unlikely(err < 0)) {
			PRINT_ERROR("Can't insert in hash: ignore this request %x",

	size = cmnd->pdu.datasize;

	/* Round up to the iSCSI 4-byte padding boundary */
	size = (size + 3) & -4;
	conn->read_msg.msg_iov = conn->read_iov;
	if (cmnd->pdu.bhs.itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
		struct scatterlist *sg;

		/* ToDo: __GFP_NOFAIL ?? */
		cmnd->sg = sg = scst_alloc(size,
			GFP_KERNEL|__GFP_NOFAIL, &cmnd->sg_cnt);

		if (cmnd->sg_cnt > ISCSI_CONN_IOV_MAX) {

		cmnd->bufflen = size;

		for (i = 0; i < cmnd->sg_cnt; i++) {
			conn->read_iov[i].iov_base =
				page_address(sg_page(&sg[i]));
			tmp = min_t(u32, size, PAGE_SIZE);
			conn->read_iov[i].iov_len = tmp;
			conn->read_size += tmp;

		/*
		 * There are no problems with the safety from concurrent
		 * accesses to dummy_data, since for ISCSI_RESERVED_TAG
		 * the data only read and then discarded.
		 */
		for (i = 0; i < ISCSI_CONN_IOV_MAX; i++) {
			conn->read_iov[i].iov_base = dummy_data;
			tmp = min_t(u32, size, sizeof(dummy_data));
			conn->read_iov[i].iov_len = tmp;
			conn->read_size += tmp;

	conn->read_msg.msg_iovlen = i;
	TRACE_DBG("msg_iov=%p, msg_iovlen=%zd", conn->read_msg.msg_iov,
		conn->read_msg.msg_iovlen);
1273 static inline u32 get_next_ttt(struct iscsi_conn *conn)
1276 struct iscsi_session *session = conn->session;
1278 iscsi_extracheck_is_rd_thread(conn);
1280 if (session->next_ttt == ISCSI_RESERVED_TAG)
1281 session->next_ttt++;
1282 ttt = session->next_ttt++;
1284 return cpu_to_be32(ttt);
/*
 * First-stage processing of an incoming SCSI Command PDU: allocates the
 * SCST command, copies CDB/LUN/attributes into it, runs SCST preprocessing
 * (blocking in wait_event until stage 1 completes), then validates the
 * data-direction and negotiated-parameter constraints and, for WRITEs with
 * immediate data, arms reception of the data segment.
 *
 * Fixes vs. original: two "paremeters" typos corrected and the missing
 * closing parenthesis added in the "Unexpected unsolicited data" message
 * (only string literals changed; logic untouched).
 */
1287 static int scsi_cmnd_start(struct iscsi_cmnd *req)
1289 struct iscsi_conn *conn = req->conn;
1290 struct iscsi_session *session = conn->session;
1291 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
1292 struct scst_cmd *scst_cmd;
1293 scst_data_direction dir;
1298 TRACE_DBG("scsi command: %02x", req_hdr->scb[0]);
1300 TRACE_DBG("Incrementing active_cmds (cmd %p, sess %p, "
1301 "new value %d)", req, session,
1302 atomic_read(&session->active_cmds)+1);
1303 atomic_inc(&session->active_cmds);
1304 req->dec_active_cmnds = 1;
1306 scst_cmd = scst_rx_cmd(session->scst_sess,
1307 (uint8_t *)&req_hdr->lun, sizeof(req_hdr->lun),
1308 req_hdr->scb, sizeof(req_hdr->scb), SCST_NON_ATOMIC);
1309 if (scst_cmd == NULL) {
/* Allocation failure: answer BUSY rather than dropping the command. */
1310 create_status_rsp(req, SAM_STAT_BUSY, NULL, 0);
1311 cmnd_reject_scsi_cmd(req);
1315 req->scst_cmd = scst_cmd;
1316 scst_cmd_set_tag(scst_cmd, req_hdr->itt);
1317 scst_cmd_set_tgt_priv(scst_cmd, req);
1318 #ifndef NET_PAGE_CALLBACKS_DEFINED
1319 scst_cmd_set_data_buf_tgt_alloc(scst_cmd);
1322 if (req_hdr->flags & ISCSI_CMD_READ)
1323 dir = SCST_DATA_READ;
1324 else if (req_hdr->flags & ISCSI_CMD_WRITE)
1325 dir = SCST_DATA_WRITE;
1327 dir = SCST_DATA_NONE;
1328 scst_cmd_set_expected(scst_cmd, dir, be32_to_cpu(req_hdr->data_length));
/* Map iSCSI task attributes onto SCST queue types. */
1330 switch (req_hdr->flags & ISCSI_CMD_ATTR_MASK) {
1331 case ISCSI_CMD_SIMPLE:
1332 scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1334 case ISCSI_CMD_HEAD_OF_QUEUE:
1335 scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1337 case ISCSI_CMD_ORDERED:
1338 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1341 scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
1343 case ISCSI_CMD_UNTAGGED:
1344 scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
1347 PRINT_ERROR("Unknown task code %x, use ORDERED instead",
1348 req_hdr->flags & ISCSI_CMD_ATTR_MASK);
1349 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1353 /* cmd_sn is already in CPU format converted in check_cmd_sn() */
1354 scst_cmd_set_tgt_sn(scst_cmd, req_hdr->cmd_sn);
1356 TRACE_DBG("START Command (tag %d, queue_type %d)",
1357 req_hdr->itt, scst_cmd->queue_type);
1358 req->scst_state = ISCSI_CMD_STATE_RX_CMD;
1359 scst_cmd_init_stage1_done(scst_cmd, SCST_CONTEXT_DIRECT, 0);
/* Block until SCST preprocessing moves the command out of RX_CMD. */
1361 wait_event(req->scst_waitQ, (req->scst_state != ISCSI_CMD_STATE_RX_CMD));
1363 if (unlikely(req->scst_state != ISCSI_CMD_STATE_AFTER_PREPROC)) {
1364 TRACE_DBG("req %p is in %x state", req, req->scst_state);
1365 if (req->scst_state == ISCSI_CMD_STATE_PROCESSED) {
1366 cmnd_reject_scsi_cmd(req);
1369 if (unlikely(req->tm_aborted)) {
1370 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
1372 cmnd_prepare_get_rejected_cmd_data(req);
1378 dir = scst_cmd_get_data_direction(scst_cmd);
1379 if (dir != SCST_DATA_WRITE) {
1380 if (unlikely(!(req_hdr->flags & ISCSI_CMD_FINAL) ||
1381 req->pdu.datasize)) {
1382 PRINT_ERROR("Unexpected unsolicited data (ITT %x "
1383 "CDB %x)", cmnd_itt(req), req_hdr->scb[0]);
1384 create_sense_rsp(req, ABORTED_COMMAND, 0xc, 0xc);
1385 cmnd_reject_scsi_cmd(req);
1390 if (dir == SCST_DATA_WRITE) {
1391 req->is_unsolicited_data = !(req_hdr->flags & ISCSI_CMD_FINAL);
1392 req->r2t_length = be32_to_cpu(req_hdr->data_length) - req->pdu.datasize;
1393 if (req->r2t_length > 0)
1394 req->data_waiting = 1;
1396 req->target_task_tag = get_next_ttt(conn);
1397 req->sg = scst_cmd_get_sg(scst_cmd);
1398 req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
1399 req->bufflen = scst_cmd_get_bufflen(scst_cmd);
1400 if (unlikely(req->r2t_length > req->bufflen)) {
1401 PRINT_ERROR("req->r2t_length %d > req->bufflen %d",
1402 req->r2t_length, req->bufflen);
1403 req->r2t_length = req->bufflen;
1406 TRACE_DBG("req=%p, dir=%d, is_unsolicited_data=%d, "
1407 "r2t_length=%d, bufflen=%d", req, dir,
1408 req->is_unsolicited_data, req->r2t_length, req->bufflen);
1410 if (unlikely(!session->sess_param.immediate_data &&
1411 req->pdu.datasize)) {
1412 PRINT_ERROR("Initiator %s violated negotiated parameters: "
1413 "forbidden immediate data sent (ITT %x, op %x)",
1414 session->initiator_name, cmnd_itt(req), req_hdr->scb[0]);
1419 if (unlikely(session->sess_param.initial_r2t &&
1420 !(req_hdr->flags & ISCSI_CMD_FINAL))) {
1421 PRINT_ERROR("Initiator %s violated negotiated parameters: "
1422 "initial R2T is required (ITT %x, op %x)",
1423 session->initiator_name, cmnd_itt(req), req_hdr->scb[0]);
1428 if (req->pdu.datasize) {
1429 if (unlikely(dir != SCST_DATA_WRITE)) {
1430 PRINT_ERROR("pdu.datasize(%d) >0, but dir(%x) isn't WRITE",
1431 req->pdu.datasize, dir);
1432 create_sense_rsp(req, ABORTED_COMMAND, 0xc, 0xc);
1433 cmnd_reject_scsi_cmd(req);
1435 res = cmnd_prepare_recv_pdu(conn, req, 0, req->pdu.datasize);
1438 /* Aborted commands will be freed in cmnd_rx_end() */
1439 TRACE_EXIT_RES(res);
/*
 * Begin reception of a SCSI Data-Out PDU: locate the originating WRITE
 * request by ITT/TTT in the per-session hash, validate the transfer against
 * the outstanding R2T accounting, and set up the connection to receive the
 * data segment directly into the original request's buffer at the PDU's
 * buffer offset.  Runs only in the read thread.
 */
1443 static int data_out_start(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
1445 struct iscsi_data_out_hdr *req_hdr = (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1446 struct iscsi_cmnd *orig_req = NULL;
1447 u32 offset = be32_to_cpu(req_hdr->buffer_offset);
1453 * There is no race with send_r2t() and conn_abort(), since
1454 * all functions called from single read thread
1456 iscsi_extracheck_is_rd_thread(cmnd->conn);
1458 update_stat_sn(cmnd);
1460 cmnd->cmd_req = orig_req = cmnd_find_hash(conn->session, req_hdr->itt,
1462 if (unlikely(orig_req == NULL)) {
1463 /* It might happen if req was aborted and then freed */
1464 TRACE(TRACE_MGMT_MINOR, "Unable to find scsi task %x %x",
1465 cmnd_itt(cmnd), cmnd_ttt(cmnd));
1469 if (orig_req->is_unsolicited_data) {
/* Initiator sent more unsolicited data than we agreed to accept. */
1470 if (unlikely(orig_req->r2t_length < cmnd->pdu.datasize)) {
1471 PRINT_ERROR("Data size (%d) > R2T length (%d)",
1472 cmnd->pdu.datasize, orig_req->r2t_length);
1473 mark_conn_closed(conn);
1477 orig_req->r2t_length -= cmnd->pdu.datasize;
1480 /* Check unsolicited burst data */
1481 if (unlikely((req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) &&
1482 (orig_req->pdu.bhs.flags & ISCSI_FLG_FINAL))) {
1483 PRINT_ERROR("Unexpected data from %x %x",
1484 cmnd_itt(cmnd), cmnd_ttt(cmnd));
1485 mark_conn_closed(conn);
1490 TRACE_WRITE("%u %p %p %u %u", req_hdr->ttt, cmnd, orig_req,
1491 offset, cmnd->pdu.datasize);
1493 res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
1496 TRACE_EXIT_RES(res);
1500 sBUG_ON(cmnd->rejected);
1502 cmnd->reject_reason = ISCSI_REJECT_DATA;
1503 cmnd_prepare_get_rejected_cmd_data(cmnd);
/*
 * Finish processing of a received Data-Out PDU for its originating request
 * cmnd->cmd_req.  Defers digest verification if needed, updates the
 * unsolicited/R2T bookkeeping, and restarts the waiting WRITE command once
 * all expected data has arrived.  Runs only in the read thread.
 */
1507 static void data_out_end(struct iscsi_cmnd *cmnd)
1509 struct iscsi_data_out_hdr *req_hdr = (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1510 struct iscsi_cmnd *req;
1512 sBUG_ON(cmnd == NULL);
1513 req = cmnd->cmd_req;
1514 sBUG_ON(req == NULL);
1516 TRACE_DBG("cmnd %p, req %p", cmnd, req);
1518 iscsi_extracheck_is_rd_thread(cmnd->conn);
/* Data digest enabled but not yet verified: queue for later checking. */
1520 if (!(cmnd->conn->ddigest_type & DIGEST_NONE) &&
1521 !cmnd->ddigest_checked) {
1522 cmd_add_on_rx_ddigest_list(req, cmnd);
/* Reserved TTT means this PDU carried unsolicited data. */
1526 if (req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1527 TRACE_DBG("ISCSI_RESERVED_TAG, FINAL %x",
1528 req_hdr->flags & ISCSI_FLG_FINAL);
1530 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1531 req->is_unsolicited_data = 0;
1537 TRACE_DBG("FINAL %x, outstanding_r2t %d, r2t_length %d",
1538 req_hdr->flags & ISCSI_FLG_FINAL,
1539 req->outstanding_r2t, req->r2t_length);
1541 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1542 if (unlikely(req->is_unsolicited_data)) {
1543 PRINT_ERROR("Unexpected unsolicited data "
1544 "(r2t_length %u, outstanding_r2t %d)",
1545 req->r2t_length, req->is_unsolicited_data)
1546 mark_conn_closed(req->conn);
1549 req->outstanding_r2t--;
1554 if (req->r2t_length != 0) {
1555 if (!req->is_unsolicited_data)
1558 iscsi_restart_waiting_cmnd(req);
/*
 * Mark a single command as aborted by task management (sets tm_aborted).
 * Intentionally does NOT fail data-waiting commands here; see the long
 * comment below for why that is deferred and what the consequences are.
 */
1565 static void __cmnd_abort(struct iscsi_cmnd *cmnd)
1568 * Here, if cmnd is data_waiting, we should iscsi_fail_waiting_cmnd()
1569 * it. But, since this function can be called from any thread, not only
1570 * from the read one, we at the moment can't do that, because of
1571 * absence of appropriate locking protection. But this isn't a stuff
1572 * for 1.0.0. So, currently a misbehaving initiator, not sending
1573 * data in R2T state for a sharing between targets device, for which
1574 * for some reason an aborting TM command, e.g. TARGET RESET, from
1575 * another initiator is issued, can block response for this TM command
1576 * virtually forever and by this make the issuing initiator eventually
1577 * put the device offline.
1579 * ToDo in the next version, possibly a simple connection mutex, taken
1580 * by the read thread before starting any processing and by this
1581 * function, should be sufficient.
1584 TRACE_MGMT_DBG("Aborting cmd %p, scst_cmd %p (scst state %x, "
1585 "ref_cnt %d, itt %x, sn %u, op %x, r2t_len %x, CDB op %x, "
1586 "size to write %u, is_unsolicited_data %d, "
1587 "outstanding_r2t %d, data_waiting %d, sess->exp_cmd_sn %u, "
1588 "conn %p, rd_task %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
1589 atomic_read(&cmnd->ref_cnt), cmnd_itt(cmnd), cmnd->pdu.bhs.sn,
1590 cmnd_opcode(cmnd), cmnd->r2t_length, cmnd_scsicode(cmnd),
1591 cmnd_write_size(cmnd), cmnd->is_unsolicited_data,
1592 cmnd->outstanding_r2t, cmnd->data_waiting,
1593 cmnd->conn->session->exp_cmd_sn, cmnd->conn,
1594 cmnd->conn->rd_task);
1596 #ifdef NET_PAGE_CALLBACKS_DEFINED
1597 TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
1600 cmnd->tm_aborted = 1;
1605 /* Must be called from the read thread */
/*
 * Handle ABORT TASK: find the referenced command by RTT, validate LUN and
 * the RefCmdSN/CmdSN relationships required by RFC 3720, and mark it
 * aborted under the connection's cmd_list_lock.  Returns an iSCSI TM
 * response code (rejected / unknown task / success path not shown here).
 */
1606 static int cmnd_abort(struct iscsi_cmnd *req)
1608 struct iscsi_session *session = req->conn->session;
1609 struct iscsi_task_mgt_hdr *req_hdr =
1610 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1611 struct iscsi_cmnd *cmnd;
1614 req_hdr->ref_cmd_sn = be32_to_cpu(req_hdr->ref_cmd_sn);
1616 if (after(req_hdr->ref_cmd_sn, req_hdr->cmd_sn)) {
1617 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) > CmdSN(%u)",
1618 req_hdr->ref_cmd_sn, req_hdr->cmd_sn);
1619 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1623 cmnd = cmnd_find_hash_get(session, req_hdr->rtt, ISCSI_RESERVED_TAG);
1625 struct iscsi_conn *conn = cmnd->conn;
1626 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1628 if (req_hdr->lun != hdr->lun) {
1629 PRINT_ERROR("ABORT TASK: LUN mismatch: req LUN "
1630 "%Lx, cmd LUN %Lx, rtt %u",
1631 (long long unsigned int)req_hdr->lun,
1632 (long long unsigned int)hdr->lun,
1634 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1638 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
1639 if (req_hdr->ref_cmd_sn != req_hdr->cmd_sn) {
1640 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != TM cmd "
1641 "CmdSN(%u) for immediate command %p",
1642 req_hdr->ref_cmd_sn, req_hdr->cmd_sn,
1644 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1648 if (req_hdr->ref_cmd_sn != hdr->cmd_sn) {
/*
 * NOTE(review): the condition compares against hdr->cmd_sn but the
 * message below prints req_hdr->cmd_sn — looks like the second printed
 * value should be hdr->cmd_sn; confirm against upstream.
 */
1649 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != "
1650 "CmdSN(%u) for command %p",
1651 req_hdr->ref_cmd_sn, req_hdr->cmd_sn,
1653 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1658 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1659 (req_hdr->cmd_sn == hdr->cmd_sn)) {
1660 PRINT_ERROR("ABORT TASK: SN mismatch: req SN %x, "
1661 "cmd SN %x, rtt %u", req_hdr->cmd_sn,
1662 hdr->cmd_sn, req_hdr->rtt);
1663 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1667 spin_lock_bh(&conn->cmd_list_lock);
1669 spin_unlock_bh(&conn->cmd_list_lock);
1674 TRACE_MGMT_DBG("cmd RTT %x not found", req_hdr->rtt);
1675 err = ISCSI_RESPONSE_UNKNOWN_TASK;
1686 /* Must be called from the read thread */
/*
 * Abort commands across the whole target: walk every session and every
 * connection, aborting either all commands ("all" nonzero, target reset)
 * or only those matching the TM request's LUN (LU reset).  target_mutex
 * guards the session/conn lists; cmd_list_lock guards each cmd list.
 */
1687 static int target_abort(struct iscsi_cmnd *req, int all)
1689 struct iscsi_target *target = req->conn->session->target;
1690 struct iscsi_task_mgt_hdr *req_hdr =
1691 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1692 struct iscsi_session *session;
1693 struct iscsi_conn *conn;
1694 struct iscsi_cmnd *cmnd;
1696 mutex_lock(&target->target_mutex);
1698 list_for_each_entry(session, &target->session_list, session_list_entry) {
1699 list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
1700 spin_lock_bh(&conn->cmd_list_lock);
1701 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1706 else if (req_hdr->lun == cmnd_hdr(cmnd)->lun)
1709 spin_unlock_bh(&conn->cmd_list_lock);
1713 mutex_unlock(&target->target_mutex);
1717 /* Must be called from the read thread */
/*
 * ABORT TASK SET / CLEAR TASK SET helper: abort every command in this
 * session whose LUN matches the TM request and whose CmdSN precedes (or
 * equals) the TM command's CmdSN, across all of the session's connections.
 */
1718 static void task_set_abort(struct iscsi_cmnd *req)
1720 struct iscsi_session *session = req->conn->session;
1721 struct iscsi_task_mgt_hdr *req_hdr =
1722 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1723 struct iscsi_target *target = session->target;
1724 struct iscsi_conn *conn;
1725 struct iscsi_cmnd *cmnd;
1727 mutex_lock(&target->target_mutex);
1729 list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
1730 spin_lock_bh(&conn->cmd_list_lock);
1731 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1732 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1735 if (req_hdr->lun != hdr->lun)
/* Skip commands newer than the TM command itself. */
1737 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1738 req_hdr->cmd_sn == hdr->cmd_sn)
1742 spin_unlock_bh(&conn->cmd_list_lock);
1745 mutex_unlock(&target->target_mutex);
1749 /* Must be called from the read thread */
/*
 * Abort every command on a connection.  Data-waiting commands are failed
 * via iscsi_fail_waiting_cmnd(); the cmd_list_lock must be dropped around
 * that call, so the command is pinned with cmnd_get_check() first and the
 * list walk restarts afterwards.
 */
1750 void conn_abort(struct iscsi_conn *conn)
1752 struct iscsi_cmnd *cmnd;
1754 TRACE_MGMT_DBG("Aborting conn %p", conn);
1756 iscsi_extracheck_is_rd_thread(conn);
1758 spin_lock_bh(&conn->cmd_list_lock);
1760 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1762 if (cmnd->data_waiting) {
1763 if (!cmnd_get_check(cmnd)) {
1764 spin_unlock_bh(&conn->cmd_list_lock);
1766 /* ToDo: this is racy for MC/S */
1767 TRACE_MGMT_DBG("Restarting data waiting cmd %p",
1769 iscsi_fail_waiting_cmnd(cmnd);
1774 * We are in the read thread, so we may not
1775 * worry that after cmnd release conn gets
1778 spin_lock_bh(&conn->cmd_list_lock);
1783 spin_unlock_bh(&conn->cmd_list_lock);
/*
 * Dispatch an iSCSI Task Management Function request: record the TM CmdSN,
 * drop any stale delayed TM response, locally abort the affected commands,
 * then forward the corresponding SCST management function via
 * scst_rx_mgmt_fn().  Unsupported or malformed requests are answered
 * directly through iscsi_send_task_mgmt_resp().  Read-thread only.
 */
1788 static void execute_task_management(struct iscsi_cmnd *req)
1790 struct iscsi_conn *conn = req->conn;
1791 struct iscsi_session *sess = conn->session;
1792 struct iscsi_task_mgt_hdr *req_hdr =
1793 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1794 int rc, status, function = req_hdr->function & ISCSI_FUNCTION_MASK;
1795 struct scst_rx_mgmt_params params;
1797 TRACE((function == ISCSI_FUNCTION_ABORT_TASK) ? TRACE_MGMT_MINOR : TRACE_MGMT,
1798 "TM fn %d", function);
1800 TRACE_MGMT_DBG("TM req %p, itt %x, rtt %x, sn %u, con %p", req,
1801 cmnd_itt(req), req_hdr->rtt, req_hdr->cmd_sn, conn);
1803 iscsi_extracheck_is_rd_thread(conn);
1805 spin_lock(&sess->sn_lock);
1807 sess->tm_sn = req_hdr->cmd_sn;
/* A newer TM request supersedes a still-pending delayed TM response. */
1808 if (sess->tm_rsp != NULL) {
1809 struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
1811 TRACE(TRACE_MGMT_MINOR, "Dropping delayed TM rsp %p", tm_rsp);
1813 sess->tm_rsp = NULL;
1816 spin_unlock(&sess->sn_lock);
1818 sBUG_ON(sess->tm_active < 0);
1820 rsp_cmnd_release(tm_rsp);
1822 spin_unlock(&sess->sn_lock);
1824 memset(¶ms, 0, sizeof(params));
1825 params.atomic = SCST_NON_ATOMIC;
1826 params.tgt_priv = req;
/* RTT must be reserved for every TM function except ABORT TASK. */
1828 if ((function != ISCSI_FUNCTION_ABORT_TASK) &&
1829 (req_hdr->rtt != ISCSI_RESERVED_TAG)) {
1830 PRINT_ERROR("Invalid RTT %x (TM fn %x)", req_hdr->rtt,
1833 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1837 /* cmd_sn is already in CPU format converted in check_cmd_sn() */
1840 case ISCSI_FUNCTION_ABORT_TASK:
1842 status = cmnd_abort(req);
1844 params.fn = SCST_ABORT_TASK;
1845 params.tag = req_hdr->rtt;
1847 params.lun = (uint8_t *)&req_hdr->lun;
1848 params.lun_len = sizeof(req_hdr->lun);
1850 params.cmd_sn = req_hdr->cmd_sn;
1851 params.cmd_sn_set = 1;
1852 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1854 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1857 case ISCSI_FUNCTION_ABORT_TASK_SET:
1858 task_set_abort(req);
1859 params.fn = SCST_ABORT_TASK_SET;
1860 params.lun = (uint8_t *)&req_hdr->lun;
1861 params.lun_len = sizeof(req_hdr->lun);
1863 params.cmd_sn = req_hdr->cmd_sn;
1864 params.cmd_sn_set = 1;
1865 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1867 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1869 case ISCSI_FUNCTION_CLEAR_TASK_SET:
1870 task_set_abort(req);
1871 params.fn = SCST_CLEAR_TASK_SET;
1872 params.lun = (uint8_t *)&req_hdr->lun;
1873 params.lun_len = sizeof(req_hdr->lun);
1875 params.cmd_sn = req_hdr->cmd_sn;
1876 params.cmd_sn_set = 1;
1877 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1879 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1881 case ISCSI_FUNCTION_CLEAR_ACA:
1882 params.fn = SCST_CLEAR_ACA;
1883 params.lun = (uint8_t *)&req_hdr->lun;
1884 params.lun_len = sizeof(req_hdr->lun);
1886 params.cmd_sn = req_hdr->cmd_sn;
1887 params.cmd_sn_set = 1;
1888 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1890 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1892 case ISCSI_FUNCTION_TARGET_COLD_RESET:
1893 case ISCSI_FUNCTION_TARGET_WARM_RESET:
1894 target_abort(req, 1);
1895 params.fn = SCST_TARGET_RESET;
1896 params.cmd_sn = req_hdr->cmd_sn;
1897 params.cmd_sn_set = 1;
1898 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1900 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1902 case ISCSI_FUNCTION_LOGICAL_UNIT_RESET:
1903 target_abort(req, 0);
1904 params.fn = SCST_LUN_RESET;
1905 params.lun = (uint8_t *)&req_hdr->lun;
1906 params.lun_len = sizeof(req_hdr->lun);
1908 params.cmd_sn = req_hdr->cmd_sn;
1909 params.cmd_sn_set = 1;
1910 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1912 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1914 case ISCSI_FUNCTION_TASK_REASSIGN:
1916 status = ISCSI_RESPONSE_FUNCTION_UNSUPPORTED;
1919 PRINT_ERROR("Unknown TM function %d", function);
1921 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1927 iscsi_send_task_mgmt_resp(req, status);
/*
 * Execute a NOP-Out request: unless the ITT is reserved (initiator ping
 * with no response expected), build a NOP-In response echoing the ITT and
 * any ping data, queue it for transmission, and release the request.
 */
1932 static void noop_out_exec(struct iscsi_cmnd *req)
1934 struct iscsi_cmnd *rsp;
1935 struct iscsi_nop_in_hdr *rsp_hdr;
1937 TRACE_DBG("%p", req);
1939 if (cmnd_itt(req) != cpu_to_be32(ISCSI_RESERVED_TAG)) {
1940 rsp = iscsi_cmnd_create_rsp_cmnd(req);
1942 rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
1943 rsp_hdr->opcode = ISCSI_OP_NOOP_IN;
1944 rsp_hdr->flags = ISCSI_FLG_FINAL;
1945 rsp_hdr->itt = req->pdu.bhs.itt;
1946 rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
/* Ping data must have an sg; no data must mean no sg. */
1948 if (req->pdu.datasize)
1949 sBUG_ON(req->sg == NULL);
1951 sBUG_ON(req->sg != NULL);
1955 rsp->sg_cnt = req->sg_cnt;
1956 rsp->bufflen = req->bufflen;
1959 sBUG_ON(get_pgcnt(req->pdu.datasize, 0) > ISCSI_CONN_IOV_MAX);
1961 rsp->pdu.datasize = req->pdu.datasize;
1962 iscsi_cmnd_init_write(rsp,
1963 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
1964 req_cmnd_release(req);
/*
 * Execute a Logout request: build and queue the Logout Response and flag
 * the response so the connection is closed after it has been sent.
 */
1969 static void logout_exec(struct iscsi_cmnd *req)
1971 struct iscsi_logout_req_hdr *req_hdr;
1972 struct iscsi_cmnd *rsp;
1973 struct iscsi_logout_rsp_hdr *rsp_hdr;
1975 PRINT_INFO("Logout received from initiator %s",
1976 req->conn->session->initiator_name);
1977 TRACE_DBG("%p", req);
1979 req_hdr = (struct iscsi_logout_req_hdr *)&req->pdu.bhs;
1980 rsp = iscsi_cmnd_create_rsp_cmnd(req);
1981 rsp_hdr = (struct iscsi_logout_rsp_hdr *)&rsp->pdu.bhs;
1982 rsp_hdr->opcode = ISCSI_OP_LOGOUT_RSP;
1983 rsp_hdr->flags = ISCSI_FLG_FINAL;
1984 rsp_hdr->itt = req_hdr->itt;
/* Close the connection after this response goes out (see cmnd_tx_end()). */
1985 rsp->should_close_conn = 1;
1986 iscsi_cmnd_init_write(rsp,
1987 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
1988 req_cmnd_release(req);
/*
 * Execute a fully-received request PDU in CmdSN order: handles aborts and
 * rejects first, then dispatches by opcode (SCSI command restart, NOP-Out,
 * TM, Logout).  Called from the read thread via iscsi_session_push_cmnd().
 */
1991 static void iscsi_cmnd_exec(struct iscsi_cmnd *cmnd)
1995 TRACE_DBG("%p,%x,%u", cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn);
1997 iscsi_extracheck_is_rd_thread(cmnd->conn);
1999 if (unlikely(cmnd->tm_aborted)) {
2000 TRACE_MGMT_DBG("cmnd %p (scst_cmd %p) aborted", cmnd,
2002 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
2006 if (unlikely(cmnd->rejected))
2009 switch (cmnd_opcode(cmnd)) {
2010 case ISCSI_OP_SCSI_CMD:
/* WRITE still waiting for data: don't restart until it all arrives. */
2011 if (cmnd->r2t_length != 0) {
2012 if (!cmnd->is_unsolicited_data) {
2017 iscsi_restart_cmnd(cmnd);
2019 case ISCSI_OP_NOOP_OUT:
2020 noop_out_exec(cmnd);
2022 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2023 execute_task_management(cmnd);
2025 case ISCSI_OP_LOGOUT_CMD:
2029 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2030 req_cmnd_release(cmnd);
2038 TRACE_MGMT_DBG("Rejected cmd %p (reason %d)", cmnd,
2039 cmnd->reject_reason);
2040 switch (cmnd->reject_reason) {
2042 PRINT_ERROR("Unexpected reject reason %d", cmnd->reject_reason);
2044 case ISCSI_REJECT_SCSI_CMD:
2045 req_cmnd_release(cmnd);
/*
 * Arm the connection to transmit [offset, offset+size) of cmnd's data
 * buffer; the range must lie within cmnd->bufflen.  Write-thread only.
 */
2051 static void __cmnd_send_pdu(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd,
2052 u32 offset, u32 size)
2054 TRACE_DBG("%p %u,%u,%u", cmnd, offset, size, cmnd->bufflen);
2056 iscsi_extracheck_is_wr_thread(conn);
2058 sBUG_ON(offset > cmnd->bufflen);
2059 sBUG_ON(offset + size > cmnd->bufflen);
2061 conn->write_offset = offset;
2062 conn->write_size += size;
/*
 * Queue the whole data segment of cmnd for transmission, padded up to the
 * next 4-byte boundary as the iSCSI wire format requires.  No-op when the
 * PDU carries no data.
 */
2065 static void cmnd_send_pdu(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
2069 if (!cmnd->pdu.datasize)
/* Round datasize up to a multiple of 4 (iSCSI segment padding). */
2072 size = (cmnd->pdu.datasize + 3) & -4;
2073 sBUG_ON(cmnd->sg == NULL);
2074 sBUG_ON(cmnd->bufflen != size);
2075 __cmnd_send_pdu(conn, cmnd, 0, size);
/*
 * Toggle TCP_CORK on the connection socket so header and data segments of
 * a PDU are coalesced into full frames; uncorking flushes pending output.
 * The setsockopt return value is deliberately ignored (best effort).
 */
2078 static void set_cork(struct socket *sock, int on)
2085 sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (void *)&opt, sizeof(opt));
/*
 * Begin transmission of a response PDU: cork the socket, point the write
 * iovec at the BHS, then per opcode assign StatSN/ExpStatSN (cmnd_set_sn)
 * and queue any data segment.  Total write_size is padded to 4 bytes.
 * Write-thread only.
 */
2089 void cmnd_tx_start(struct iscsi_cmnd *cmnd)
2091 struct iscsi_conn *conn = cmnd->conn;
2093 TRACE_DBG("%p:%p:%x", conn, cmnd, cmnd_opcode(cmnd));
2094 iscsi_cmnd_set_length(&cmnd->pdu);
2096 iscsi_extracheck_is_wr_thread(conn);
2098 set_cork(conn->sock, 1);
2100 conn->write_iop = conn->write_iov;
2101 conn->write_iop->iov_base = &cmnd->pdu.bhs;
2102 conn->write_iop->iov_len = sizeof(cmnd->pdu.bhs);
2103 conn->write_iop_used = 1;
2104 conn->write_size = sizeof(cmnd->pdu.bhs);
2106 switch (cmnd_opcode(cmnd)) {
2107 case ISCSI_OP_NOOP_IN:
2108 cmnd_set_sn(cmnd, 1);
2109 cmnd_send_pdu(conn, cmnd);
2111 case ISCSI_OP_SCSI_RSP:
2112 cmnd_set_sn(cmnd, 1);
2113 cmnd_send_pdu(conn, cmnd);
2115 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2116 cmnd_set_sn(cmnd, 1);
2118 case ISCSI_OP_TEXT_RSP:
2119 cmnd_set_sn(cmnd, 1);
2121 case ISCSI_OP_SCSI_DATA_IN:
2123 struct iscsi_data_in_hdr *rsp = (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
2124 u32 offset = cpu_to_be32(rsp->buffer_offset);
/* Only the final Data-In of a sequence advances StatSN. */
2126 cmnd_set_sn(cmnd, (rsp->flags & ISCSI_FLG_FINAL) ? 1 : 0);
2127 __cmnd_send_pdu(conn, cmnd, offset, cmnd->pdu.datasize);
2130 case ISCSI_OP_LOGOUT_RSP:
2131 cmnd_set_sn(cmnd, 1);
2134 cmnd->pdu.bhs.sn = cmnd_set_sn(cmnd, 0);
2136 case ISCSI_OP_ASYNC_MSG:
2137 cmnd_set_sn(cmnd, 1);
2139 case ISCSI_OP_REJECT:
2140 cmnd_set_sn(cmnd, 1);
2141 cmnd_send_pdu(conn, cmnd);
2144 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
/* Pad the PDU to a 4-byte boundary as required by the wire format. */
2149 conn->write_size = (conn->write_size + 3) & -4;
2150 iscsi_dump_pdu(&cmnd->pdu);
/*
 * Finish transmission of a response PDU: sanity-check the opcode, honor a
 * pending logout-initiated close (should_close_conn), and uncork the
 * socket to flush the coalesced output.
 */
2153 void cmnd_tx_end(struct iscsi_cmnd *cmnd)
2155 struct iscsi_conn *conn = cmnd->conn;
2157 TRACE_DBG("%p:%x (should_close_conn %d)", cmnd, cmnd_opcode(cmnd),
2158 cmnd->should_close_conn);
2160 switch (cmnd_opcode(cmnd)) {
2161 case ISCSI_OP_NOOP_IN:
2162 case ISCSI_OP_SCSI_RSP:
2163 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2164 case ISCSI_OP_TEXT_RSP:
2166 case ISCSI_OP_ASYNC_MSG:
2167 case ISCSI_OP_REJECT:
2168 case ISCSI_OP_SCSI_DATA_IN:
2169 case ISCSI_OP_LOGOUT_RSP:
2172 PRINT_CRIT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd))
2177 if (cmnd->should_close_conn) {
2178 PRINT_INFO("Closing connection at initiator %s request",
2179 conn->session->initiator_name);
2180 mark_conn_closed(conn);
2183 set_cork(cmnd->conn->sock, 0);
2187 * Push the command for execution. This functions reorders the commands.
2188 * Called from the read thread.
/*
 * Immediate commands execute at once; non-immediate commands execute when
 * their CmdSN equals the session's ExpCmdSN, draining any now-in-order
 * entries from the pending list afterwards.  Out-of-order commands are
 * inserted into the pending list sorted by CmdSN; commands behind an
 * active TM request have their in-flight state aborted first.
 */
2190 static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd)
2192 struct iscsi_session *session = cmnd->conn->session;
2193 struct list_head *entry;
2196 TRACE_DBG("%p:%x %u,%u",
2197 cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn, session->exp_cmd_sn);
2199 iscsi_extracheck_is_rd_thread(cmnd->conn);
2201 sBUG_ON(cmnd->parent_req != NULL);
2203 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
2204 TRACE_DBG("Immediate cmd %p (cmd_sn %u)", cmnd,
2206 iscsi_cmnd_exec(cmnd);
2210 spin_lock(&session->sn_lock);
2212 cmd_sn = cmnd->pdu.bhs.sn;
2213 if (cmd_sn == session->exp_cmd_sn) {
2215 session->exp_cmd_sn = ++cmd_sn;
2217 if (unlikely(session->tm_active > 0)) {
2218 if (before(cmd_sn, session->tm_sn)) {
2219 struct iscsi_conn *conn = cmnd->conn;
2221 spin_unlock(&session->sn_lock);
2223 spin_lock_bh(&conn->cmd_list_lock);
2225 spin_unlock_bh(&conn->cmd_list_lock);
2227 spin_lock(&session->sn_lock);
2229 iscsi_check_send_delayed_tm_resp(session);
2232 spin_unlock(&session->sn_lock);
2234 iscsi_cmnd_exec(cmnd);
/* Drain pending commands that are now next in CmdSN order. */
2236 if (list_empty(&session->pending_list))
2238 cmnd = list_entry(session->pending_list.next, struct iscsi_cmnd,
2239 pending_list_entry);
2240 if (cmnd->pdu.bhs.sn != cmd_sn)
2243 list_del(&cmnd->pending_list_entry);
2246 TRACE_DBG("Processing pending cmd %p (cmd_sn %u)",
2249 spin_lock(&session->sn_lock);
2254 TRACE_DBG("Pending cmd %p (cmd_sn %u, exp_cmd_sn %u)",
2255 cmnd, cmd_sn, session->exp_cmd_sn);
2258 * iSCSI RFC 3720: "The target MUST silently ignore any
2259 * non-immediate command outside of [from ExpCmdSN to MaxCmdSN
2260 * inclusive] range". But we won't honor the MaxCmdSN
2261 * requirement, because, since we adjust MaxCmdSN from the
2262 * separate write thread, rarely it is possible that initiator
2263 * can legally send command with CmdSN>MaxSN. But it won't
2264 * hurt anything, in the worst case it will lead to
2265 * additional QUEUE FULL status.
2268 if (unlikely(before(cmd_sn, session->exp_cmd_sn))) {
2269 PRINT_ERROR("Unexpected cmd_sn (%u,%u)", cmd_sn,
2270 session->exp_cmd_sn);
2275 if (unlikely(after(cmd_sn, session->exp_cmd_sn +
2276 iscsi_get_allowed_cmds(session)))) {
2277 TRACE_MGMT_DBG("Too large cmd_sn %u (exp_cmd_sn %u, "
2278 "max_sn %u)", cmd_sn, session->exp_cmd_sn,
2279 iscsi_get_allowed_cmds(session));
2283 spin_unlock(&session->sn_lock);
2285 if (unlikely(drop)) {
2286 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
/*
 * An aborted pending command is replaced by a lightweight TM clone so
 * its slot in the ordering is preserved while the original executes.
 */
2290 if (unlikely(cmnd->tm_aborted)) {
2291 struct iscsi_cmnd *tm_clone;
2293 TRACE_MGMT_DBG("Pending aborted cmnd %p, creating TM "
2294 "clone (scst cmd %p, state %d)", cmnd,
2295 cmnd->scst_cmd, cmnd->scst_state);
2297 tm_clone = cmnd_alloc(cmnd->conn, NULL);
2298 if (tm_clone != NULL) {
2299 tm_clone->tm_aborted = 1;
2300 tm_clone->pdu = cmnd->pdu;
2302 TRACE_MGMT_DBG("TM clone %p created", tm_clone);
2304 iscsi_cmnd_exec(cmnd);
2307 PRINT_ERROR("%s", "Unable to create TM clone");
/* Insert into the pending list keeping it sorted by CmdSN. */
2310 list_for_each(entry, &session->pending_list) {
2311 struct iscsi_cmnd *tmp = list_entry(entry, struct iscsi_cmnd,
2312 pending_list_entry);
2313 if (before(cmd_sn, tmp->pdu.bhs.sn))
2317 list_add_tail(&cmnd->pending_list_entry, entry);
/*
 * Reject a PDU whose data segment exceeds the negotiated
 * MaxRecvDataSegmentLength: log the violation and close the connection.
 *
 * Fix vs. original: closing parenthesis added after
 * "max_recv_data_length %u" in the error message (string literal only).
 */
2324 static int check_segment_length(struct iscsi_cmnd *cmnd)
2326 struct iscsi_conn *conn = cmnd->conn;
2327 struct iscsi_session *session = conn->session;
2329 if (unlikely(cmnd->pdu.datasize > session->sess_param.max_recv_data_length)) {
2330 PRINT_ERROR("Initiator %s violated negotiated parameters: "
2331 "data too long (ITT %x, datasize %u, "
2332 "max_recv_data_length %u)", session->initiator_name,
2333 cmnd_itt(cmnd), cmnd->pdu.datasize,
2334 session->sess_param.max_recv_data_length);
2335 mark_conn_closed(conn);
/*
 * Entry point for a newly received PDU header: validate segment length,
 * then dispatch per opcode (hash insertion for tracked requests, SCSI
 * command start, Data-Out setup).  Negative rc values are iSCSI reject
 * reasons and trigger iscsi_cmnd_reject().
 */
2341 int cmnd_rx_start(struct iscsi_cmnd *cmnd)
2343 struct iscsi_conn *conn = cmnd->conn;
2346 iscsi_dump_pdu(&cmnd->pdu);
2348 res = check_segment_length(cmnd);
2352 switch (cmnd_opcode(cmnd)) {
2353 case ISCSI_OP_NOOP_OUT:
2354 rc = noop_out_start(cmnd);
2356 case ISCSI_OP_SCSI_CMD:
2357 rc = cmnd_insert_hash(cmnd);
2358 if (likely(rc == 0)) {
2359 res = scsi_cmnd_start(cmnd);
2360 if (unlikely(res != 0))
2364 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2365 rc = cmnd_insert_hash(cmnd);
2367 case ISCSI_OP_SCSI_DATA_OUT:
2368 res = data_out_start(conn, cmnd);
2369 rc = 0; /* to avoid compiler warning */
2370 if (unlikely(res != 0))
2373 case ISCSI_OP_LOGOUT_CMD:
2374 rc = cmnd_insert_hash(cmnd);
2376 case ISCSI_OP_TEXT_CMD:
2377 case ISCSI_OP_SNACK_CMD:
2378 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2381 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2385 if (unlikely(rc < 0)) {
2386 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
2387 PRINT_ERROR("Error %d (iSCSI opcode %x, ITT %x, op %x)", rc,
2388 cmnd_opcode(cmnd), cmnd_itt(cmnd),
2389 (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD ?
2391 iscsi_cmnd_reject(cmnd, -rc);
2395 TRACE_EXIT_RES(res);
/*
 * Completion of PDU reception (header + data): ordered requests are pushed
 * through iscsi_session_push_cmnd(); Data-Out finishes via data_out_end()
 * (fallthrough path); rejected commands are released per reject reason.
 */
2399 void cmnd_rx_end(struct iscsi_cmnd *cmnd)
2403 TRACE_DBG("%p:%x", cmnd, cmnd_opcode(cmnd));
2405 if (unlikely(cmnd->rejected))
2409 switch (cmnd_opcode(cmnd)) {
2410 case ISCSI_OP_SCSI_CMD:
2411 case ISCSI_OP_NOOP_OUT:
2412 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2413 case ISCSI_OP_LOGOUT_CMD:
2414 iscsi_session_push_cmnd(cmnd);
2416 case ISCSI_OP_SCSI_DATA_OUT:
2420 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2421 req_cmnd_release(cmnd);
2430 switch (cmnd->reject_reason) {
2432 PRINT_ERROR("Unexpected reject reason %d", cmnd->reject_reason);
2434 case ISCSI_REJECT_CMD:
2435 case ISCSI_REJECT_DATA:
2436 req_cmnd_release(cmnd);
2438 case ISCSI_REJECT_SCSI_CMD:
2444 #ifndef NET_PAGE_CALLBACKS_DEFINED
/*
 * SCST data-buffer allocation hook used when the put_page_callback kernel
 * patch is absent: READ buffers must bypass the SGV cache, because
 * sendpage() is asynchronous zero-copy and the buffer cannot be reused
 * until the network stack is done with its pages.
 */
2445 static int iscsi_alloc_data_buf(struct scst_cmd *cmd)
2447 if (scst_cmd_get_data_direction(cmd) == SCST_DATA_READ) {
2449 * sock->ops->sendpage() is async zero copy operation,
2450 * so we must be sure not to free and reuse
2451 * the command's buffer before the sending was completed
2452 * by the network layers. It is possible only if we
2453 * don't use SGV cache.
2455 scst_cmd_set_no_sgv(cmd);
/*
 * Set req->scst_state and wake its waiter.  The extra cmnd_get_ordered()
 * reference keeps req alive across the wake_up(), since wait_event()
 * checks its condition without any locking (see comment below).
 */
2461 static inline void iscsi_set_state_wake_up(struct iscsi_cmnd *req,
2465 * We use wait_event() to wait for the state change, but it checks its
2466 * condition without any protection, so without cmnd_get() it is
2467 * possible that req will die "immediately" after the state assignment
2468 * and wake_up() will operate on dead data.
2470 cmnd_get_ordered(req);
2471 req->scst_state = new_state;
2472 wake_up(&req->scst_waitQ);
/*
 * SCST preprocessing-done callback: moves the request out of RX_CMD state
 * and wakes scsi_cmnd_start(), which is blocked in wait_event().
 */
2477 static void iscsi_preprocessing_done(struct scst_cmd *scst_cmd)
2479 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2480 scst_cmd_get_tgt_priv(scst_cmd);
2482 TRACE_DBG("req %p", req);
2484 EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_RX_CMD);
2486 iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_AFTER_PREPROC);
2493 * IMPORTANT! Connection conn must be protected by additional conn_get()
2494 * upon entrance in this function, because otherwise it could be destroyed
2495 * inside as a result of iscsi_send(), which releases sent commands.
/*
 * Try to transmit this connection's pending output directly in the current
 * thread instead of handing it to the write thread: claim the connection
 * (removing it from the wr list or taking it from idle), send while data
 * is ready, then either requeue it for the write thread or return it to
 * idle depending on whether output remains.
 */
2497 static void iscsi_try_local_processing(struct iscsi_conn *conn,
2504 spin_lock_bh(&iscsi_wr_lock);
2505 switch (conn->wr_state) {
2506 case ISCSI_CONN_WR_STATE_IN_LIST:
2507 list_del(&conn->wr_list_entry);
2509 case ISCSI_CONN_WR_STATE_IDLE:
2511 conn->wr_task = current;
2513 conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
2514 conn->wr_space_ready = 0;
2521 spin_unlock_bh(&iscsi_wr_lock);
2525 while (test_write_ready(conn)) {
2526 rc = iscsi_send(conn);
2527 if ((rc <= 0) || single_only)
2531 spin_lock_bh(&iscsi_wr_lock);
2533 conn->wr_task = NULL;
/* Leftover output (or a send error): let the write thread take over. */
2535 if ((rc <= 0) || test_write_ready(conn)) {
2536 list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
2537 conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
2538 wake_up(&iscsi_wr_waitQ);
2540 conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
2541 spin_unlock_bh(&iscsi_wr_lock);
/*
 * SCST xmit_response callback: builds and queues the Data-In and/or SCSI
 * Response PDUs for a completed command, computing residual under/overflow
 * counts, handles aborted commands and unexpected states, then attempts
 * local transmission via iscsi_try_local_processing().  Must not run in
 * atomic context (returns SCST_TGT_RES_NEED_THREAD_CTX if it would).
 */
2548 static int iscsi_xmit_response(struct scst_cmd *scst_cmd)
2550 int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2551 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2552 scst_cmd_get_tgt_priv(scst_cmd);
2553 struct iscsi_conn *conn = req->conn;
2554 int status = scst_cmd_get_status(scst_cmd);
2555 u8 *sense = scst_cmd_get_sense_buffer(scst_cmd);
2556 int sense_len = scst_cmd_get_sense_buffer_len(scst_cmd);
2557 int old_state = req->scst_state;
2558 #if 0 /* temp. ToDo */
2559 bool single_only = !scst_get_long_xmit(scst_cmd);
2561 bool single_only = 0;
2564 if (scst_cmd_atomic(scst_cmd))
2565 return SCST_TGT_RES_NEED_THREAD_CTX;
2567 scst_cmd_set_tgt_priv(scst_cmd, NULL);
2569 req->tm_aborted |= scst_cmd_aborted(scst_cmd) ? 1 : 0;
2570 if (unlikely(req->tm_aborted)) {
2571 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
2574 scst_set_delivery_status(req->scst_cmd,
2575 SCST_CMD_DELIVERY_ABORTED);
2577 if (old_state == ISCSI_CMD_STATE_RESTARTED) {
2578 req->scst_state = ISCSI_CMD_STATE_PROCESSED;
2579 req_cmnd_release_force(req, ISCSI_FORCE_RELEASE_WRITE);
2581 iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_PROCESSED);
2586 if (unlikely(old_state != ISCSI_CMD_STATE_RESTARTED)) {
2587 TRACE_DBG("req %p on %d state", req, old_state);
2589 create_status_rsp(req, status, sense, sense_len);
2591 switch (old_state) {
2592 case ISCSI_CMD_STATE_RX_CMD:
2593 case ISCSI_CMD_STATE_AFTER_PREPROC:
2599 iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_PROCESSED);
2603 req->scst_state = ISCSI_CMD_STATE_PROCESSED;
2605 req->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2606 req->sg = scst_cmd_get_sg(scst_cmd);
2607 req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2609 TRACE_DBG("req %p, is_send_status=%x, req->bufflen=%d, req->sg=%p, "
2610 "req->sg_cnt %d", req, is_send_status, req->bufflen, req->sg,
2613 if (unlikely((req->bufflen != 0) && !is_send_status)) {
2614 PRINT_CRIT_ERROR("%s", "Sending DATA without STATUS is unsupported");
2615 scst_set_cmd_error(scst_cmd,
2616 SCST_LOAD_SENSE(scst_sense_hardw_error));
2620 if (req->bufflen != 0) {
2622 * Check above makes sure that is_send_status is set,
2623 * so status is valid here, but in future that could change.
/* GOOD status piggybacks on the final Data-In; CHECK CONDITION needs a
 * separate response PDU carrying sense and residual counts. */
2626 if (status != SAM_STAT_CHECK_CONDITION) {
2627 send_data_rsp(req, status, is_send_status);
2629 struct iscsi_cmnd *rsp;
2630 struct iscsi_scsi_rsp_hdr *rsp_hdr;
2632 send_data_rsp(req, 0, 0);
2633 if (is_send_status) {
2634 rsp = create_status_rsp(req, status, sense,
2636 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
2637 resid = cmnd_read_size(req) - req->bufflen;
2640 ISCSI_FLG_RESIDUAL_UNDERFLOW;
2641 rsp_hdr->residual_count = cpu_to_be32(resid);
2642 } else if (resid < 0) {
2644 ISCSI_FLG_RESIDUAL_OVERFLOW;
2645 rsp_hdr->residual_count = cpu_to_be32(-resid);
2647 iscsi_cmnd_init_write(rsp,
2648 ISCSI_INIT_WRITE_REMOVE_HASH);
2651 } else if (is_send_status) {
2652 struct iscsi_cmnd *rsp;
2653 struct iscsi_scsi_rsp_hdr *rsp_hdr;
2655 rsp = create_status_rsp(req, status, sense, sense_len);
2656 rsp_hdr = (struct iscsi_scsi_rsp_hdr *) &rsp->pdu.bhs;
2657 resid = cmnd_read_size(req);
2659 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
2660 rsp_hdr->residual_count = cpu_to_be32(resid);
2662 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH);
/* conn pinned here so iscsi_send() releasing cmds can't destroy it. */
2669 conn_get_ordered(conn);
2670 req_cmnd_release(req);
2671 iscsi_try_local_processing(conn, single_only);
2675 return SCST_TGT_RES_SUCCESS;
2678 /* Called under sn_lock */
/*
 * iscsi_is_delay_tm_resp() - decide whether the Task Management response
 * @rsp must be delayed until all commands the TM request may affect have
 * been received by the session.
 *
 * NOTE(review): this listing is non-contiguous (the embedded original line
 * numbers jump), so parts of the body — including the declaration of the
 * result variable and the return — are not visible here; the comments
 * below describe only the visible lines.
 */
2679 static bool iscsi_is_delay_tm_resp(struct iscsi_cmnd *rsp)
/* Header of the originating TM request, reached through the parent request */
2682 struct iscsi_task_mgt_hdr *req_hdr =
2683 (struct iscsi_task_mgt_hdr *)&rsp->parent_req->pdu.bhs;
2684 int function = req_hdr->function & ISCSI_FUNCTION_MASK;
2685 struct iscsi_session *sess = rsp->conn->session;
2689 /* This should be checked for immediate TM commands as well */
/*
 * Delay while the session still expects command SNs older than the TM
 * request's CmdSN (serial-number-arithmetic comparison via before()).
 */
2693 if (before(sess->exp_cmd_sn, req_hdr->cmd_sn))
2698 TRACE_EXIT_RES(res);
2702 /* Called under sn_lock, but might drop it inside, then reaquire */
/*
 * iscsi_check_send_delayed_tm_resp() - if the session holds a delayed TM
 * response that no longer needs delaying, detach it from the session and
 * queue it for transmission.
 *
 * sn_lock is dropped around iscsi_cmnd_init_write() (which may sleep/wake
 * the write path) and re-acquired before returning, as the comment above
 * the function states.
 *
 * NOTE(review): listing is non-contiguous; the early-return paths for a
 * NULL tm_rsp and for a still-delayed response are not fully visible.
 */
2703 static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess)
2705 struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
/* Still waiting for affected commands — keep it parked on the session */
2712 if (iscsi_is_delay_tm_resp(tm_rsp))
2715 TRACE(TRACE_MGMT_MINOR, "Sending delayed rsp %p", tm_rsp);
/* Take ownership of the response away from the session */
2717 sess->tm_rsp = NULL;
2720 spin_unlock(&sess->sn_lock);
2722 sBUG_ON(sess->tm_active < 0);
/* Queue the response for write; remove from hash and wake the writer */
2724 iscsi_cmnd_init_write(tm_rsp,
2725 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2727 spin_lock(&sess->sn_lock);
/*
 * iscsi_send_task_mgmt_resp() - build and send the iSCSI Task Management
 * response PDU for finished TM request @req with iSCSI response code
 * @status, then release @req.
 *
 * If the response must wait for not-yet-received affected commands
 * (iscsi_is_delay_tm_resp()), it is parked on the session instead of being
 * sent immediately — presumably in sess->tm_rsp, later flushed by
 * iscsi_check_send_delayed_tm_resp(); the storing line is not visible in
 * this non-contiguous listing, so confirm against the full source.
 */
2734 static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
2736 struct iscsi_cmnd *rsp;
2737 struct iscsi_task_mgt_hdr *req_hdr =
2738 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
2739 struct iscsi_task_rsp_hdr *rsp_hdr;
2740 struct iscsi_session *sess = req->conn->session;
2741 int fn = req_hdr->function & ISCSI_FUNCTION_MASK;
2745 TRACE_MGMT_DBG("TM req %p finished", req);
2746 TRACE((req_hdr->function == ISCSI_FUNCTION_ABORT_TASK) ?
2747 TRACE_MGMT_MINOR : TRACE_MGMT,
2748 "TM fn %d finished, status %d", fn, status);
/* Build the TM response PDU on a response command chained to @req */
2750 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2751 rsp_hdr = (struct iscsi_task_rsp_hdr *)&rsp->pdu.bhs;
2753 rsp_hdr->opcode = ISCSI_OP_SCSI_TASK_MGT_RSP;
2754 rsp_hdr->flags = ISCSI_FLG_FINAL;
2755 rsp_hdr->itt = req_hdr->itt;
2756 rsp_hdr->response = status;
/* A cold reset requires the connection to be closed after the response */
2758 if (fn == ISCSI_FUNCTION_TARGET_COLD_RESET)
2759 rsp->should_close_conn = 1;
/* Only one TM response may be pending per session at a time */
2761 sBUG_ON(sess->tm_rsp != NULL);
2763 spin_lock(&sess->sn_lock);
2764 if (iscsi_is_delay_tm_resp(rsp)) {
2765 TRACE(TRACE_MGMT_MINOR, "Delaying TM fn %x response %p "
2766 "(req %p), because not all affected commands received "
2767 "(TM cmd sn %u, exp sn %u)",
2768 req_hdr->function & ISCSI_FUNCTION_MASK, rsp, req,
2769 req_hdr->cmd_sn, sess->exp_cmd_sn);
2771 spin_unlock(&sess->sn_lock);
2775 spin_unlock(&sess->sn_lock);
2777 sBUG_ON(sess->tm_active < 0);
/* Send immediately: unhash and wake the write path */
2779 iscsi_cmnd_init_write(rsp,
2780 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2783 req_cmnd_release(req);
/*
 * iscsi_get_mgmt_response() - translate an SCST task-management status
 * code into the corresponding iSCSI TM response code (RFC 3720 Task
 * Management Function Response values).
 *
 * NOTE(review): the `switch` line, any `default:` label and the closing
 * brace are not visible in this non-contiguous listing; only the visible
 * case mappings are documented here.
 */
2789 static inline int iscsi_get_mgmt_response(int status)
2792 case SCST_MGMT_STATUS_SUCCESS:
2793 return ISCSI_RESPONSE_FUNCTION_COMPLETE;
2795 case SCST_MGMT_STATUS_TASK_NOT_EXIST:
2796 return ISCSI_RESPONSE_UNKNOWN_TASK;
2798 case SCST_MGMT_STATUS_LUN_NOT_EXIST:
2799 return ISCSI_RESPONSE_UNKNOWN_LUN;
2801 case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
2802 return ISCSI_RESPONSE_FUNCTION_UNSUPPORTED;
/* Both rejection and generic failure map to the same iSCSI code */
2804 case SCST_MGMT_STATUS_REJECTED:
2805 case SCST_MGMT_STATUS_FAILED:
2807 return ISCSI_RESPONSE_FUNCTION_REJECTED;
/*
 * iscsi_task_mgmt_fn_done() - SCST callback invoked when a task-management
 * function has finished. Recovers the originating iSCSI request from the
 * mgmt command's target-private pointer, translates the SCST status to an
 * iSCSI response code, sends the TM response, and clears the private
 * pointer so the mgmt command no longer references the released request.
 */
2811 static void iscsi_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
2813 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2814 scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
2815 int status = iscsi_get_mgmt_response(scst_mgmt_cmd_get_status(scst_mcmd));
2817 TRACE_MGMT_DBG("req %p, scst_mcmd %p, fn %d, scst status %d",
2818 req, scst_mcmd, scst_mgmt_cmd_get_fn(scst_mcmd),
2819 scst_mgmt_cmd_get_status(scst_mcmd));
/* iscsi_send_task_mgmt_resp() also releases @req */
2821 iscsi_send_task_mgmt_resp(req, status);
2823 scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
/*
 * iscsi_target_detect() - SCST template .detect callback.
 * Body not visible in this non-contiguous listing; presumably a no-op
 * returning 0 — confirm against the full source.
 */
2828 static int iscsi_target_detect(struct scst_tgt_template *templ)
/*
 * iscsi_target_release() - SCST template .release callback.
 * Body not visible in this non-contiguous listing; presumably a no-op
 * returning 0 — confirm against the full source.
 */
2834 static int iscsi_target_release(struct scst_tgt *scst_tgt)
/*
 * SCST target template for the iSCSI-SCST driver: wires the callbacks
 * defined in this file into the SCST core.
 *
 * NOTE(review): listing is non-contiguous — the .name initializer, the
 * #endif matching the #ifndef below, and the closing brace are not
 * visible here.
 */
2840 struct scst_tgt_template iscsi_template = {
2842 .sg_tablesize = ISCSI_CONN_IOV_MAX,
/* xmit_response may sleep, so it must not be called in atomic context */
2845 .xmit_response_atomic = 0,
2846 .detect = iscsi_target_detect,
2847 .release = iscsi_target_release,
2848 .xmit_response = iscsi_xmit_response,
/* Without the put_page_callback patch, buffers are allocated locally */
2849 #ifndef NET_PAGE_CALLBACKS_DEFINED
2850 .alloc_data_buf = iscsi_alloc_data_buf,
2852 .preprocessing_done = iscsi_preprocessing_done,
2853 .pre_exec = iscsi_pre_exec,
2854 .task_mgmt_fn_done = iscsi_task_mgmt_fn_done,
/*
 * iscsi_run_threads() - start @count kernel threads named "<name>0..N-1"
 * running @fn and register each on iscsi_threads_list so that
 * iscsi_stop_threads() can stop them later.
 *
 * NOTE(review): listing is non-contiguous — the result-variable
 * declaration, the kmalloc NULL check, the error-path kfree/goto lines
 * and the return are not visible here.
 */
2857 static __init int iscsi_run_threads(int count, char *name, int (*fn)(void *))
2861 struct iscsi_thread_t *thr;
2863 for (i = 0; i < count; i++) {
2864 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
2867 PRINT_ERROR("Failed to allocate thr %d", res);
/* kthread_run() both creates and wakes the thread */
2870 thr->thr = kthread_run(fn, NULL, "%s%d", name, i);
2871 if (IS_ERR(thr->thr)) {
2872 res = PTR_ERR(thr->thr);
2873 PRINT_ERROR("kthread_create() failed: %d", res);
/* Track the thread so iscsi_stop_threads() can find and stop it */
2877 list_add(&thr->threads_list_entry, &iscsi_threads_list);
/*
 * iscsi_stop_threads() - stop every thread registered on
 * iscsi_threads_list and unlink each entry.
 *
 * NOTE(review): listing is non-contiguous — the kfree() of each entry and
 * the closing braces are not visible here; confirm the entries are freed
 * in the full source.
 */
2884 static void iscsi_stop_threads(void)
2886 struct iscsi_thread_t *t, *tmp;
/* _safe variant: entries are removed from the list while iterating */
2888 list_for_each_entry_safe(t, tmp, &iscsi_threads_list,
2889 threads_list_entry) {
2890 int rc = kthread_stop(t->thr);
2892 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
2893 list_del(&t->threads_list_entry);
/*
 * iscsi_init() - module initialization.
 *
 * Visible sequence: optionally install the network put_page callbacks
 * (only when the kernel carries the NET_PAGE_CALLBACKS patch), register
 * the control character device, create the iscsi_cmnd slab cache,
 * register the SCST target template, set up procfs, and start the read
 * ("iscsird") and write ("iscsiwr") thread pools sized to
 * max(num_online_cpus(), 2).
 *
 * The tail (original lines 2959+) is the error-unwind ladder that tears
 * these down in reverse order on failure.
 *
 * NOTE(review): listing is non-contiguous — the err/num declarations,
 * several `goto`/label lines of the unwind ladder, and the final return
 * are not visible here.
 */
2898 static int __init iscsi_init(void)
2903 PRINT_INFO("iSCSI SCST Target - version %s", ISCSI_VERSION_STRING);
2905 #ifdef NET_PAGE_CALLBACKS_DEFINED
2906 err = net_set_get_put_page_callbacks(iscsi_get_page_callback,
2907 iscsi_put_page_callback);
2909 PRINT_INFO("Unable to set page callbackes: %d", err);
/* Unpatched kernel: warn that zero-copy TX accounting is unavailable */
2913 PRINT_INFO("%s", "Patch put_page_callback-<kernel-version>.patch "
2914 "not applied on your kernel. Running in the performance "
2915 "degraded mode. Refer README file for details");
/* Dynamic major number (first argument 0) for the control device */
2918 ctr_major = register_chrdev(0, ctr_name, &ctr_fops);
2919 if (ctr_major < 0) {
2920 PRINT_ERROR("failed to register the control device %d", ctr_major);
2929 iscsi_cmnd_cache = KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS);
2930 if (!iscsi_cmnd_cache) {
2935 err = scst_register_target_template(&iscsi_template);
2939 iscsi_template_registered = 1;
2941 err = iscsi_procfs_init();
/* At least two threads per pool, one per CPU when more are online */
2945 num = max(num_online_cpus(), 2);
2947 err = iscsi_run_threads(num, "iscsird", istrd);
2951 err = iscsi_run_threads(num, "iscsiwr", istwr);
/* Error-unwind ladder: undo the successful steps in reverse order */
2959 iscsi_procfs_exit();
2960 iscsi_stop_threads();
2963 scst_unregister_target_template(&iscsi_template);
2966 kmem_cache_destroy(iscsi_cmnd_cache);
2972 unregister_chrdev(ctr_major, ctr_name);
2975 #ifdef NET_PAGE_CALLBACKS_DEFINED
2976 net_set_get_put_page_callbacks(NULL, NULL);
/*
 * iscsi_exit() - module teardown: undo iscsi_init() in reverse order —
 * stop the worker threads, unregister the control device, remove the
 * procfs entries, destroy the command slab cache, unregister the SCST
 * template and (on patched kernels) clear the network page callbacks.
 */
2981 static void __exit iscsi_exit(void)
2983 iscsi_stop_threads();
2985 unregister_chrdev(ctr_major, ctr_name);
2987 iscsi_procfs_exit();
2990 kmem_cache_destroy(iscsi_cmnd_cache);
2992 scst_unregister_target_template(&iscsi_template);
2994 #ifdef NET_PAGE_CALLBACKS_DEFINED
2995 net_set_get_put_page_callbacks(NULL, NULL);
/* Kernel module entry/exit registration and license declaration */
2999 module_init(iscsi_init);
3000 module_exit(iscsi_exit);
3002 MODULE_LICENSE("GPL");