2 * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
3 * Copyright (C) 2007 - 2008 Vladislav Bolkhovitin
4 * Copyright (C) 2007 - 2008 CMS Distribution Limited
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
17 #include <linux/module.h>
18 #include <linux/hash.h>
19 #include <linux/kthread.h>
21 #include <scsi/scsi.h>
26 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
27 #warning "Patch put_page_callback-<kernel-version>.patch not applied on your \
28 kernel or CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION \
29 config option not set. ISCSI-SCST will be working with not the best \
30 performance. Refer README file for details."
33 #define ISCSI_INIT_WRITE_WAKE 0x1
34 #define ISCSI_INIT_WRITE_REMOVE_HASH 0x2
37 static char ctr_name[] = "iscsi-scst-ctl";
38 static int iscsi_template_registered;
40 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
41 unsigned long iscsi_trace_flag = ISCSI_DEFAULT_LOG_FLAGS;
44 static struct kmem_cache *iscsi_cmnd_cache;
46 DEFINE_SPINLOCK(iscsi_rd_lock);
47 LIST_HEAD(iscsi_rd_list);
48 DECLARE_WAIT_QUEUE_HEAD(iscsi_rd_waitQ);
50 DEFINE_SPINLOCK(iscsi_wr_lock);
51 LIST_HEAD(iscsi_wr_list);
52 DECLARE_WAIT_QUEUE_HEAD(iscsi_wr_waitQ);
54 static struct page *dummy_page;
55 static struct scatterlist dummy_sg;
57 struct iscsi_thread_t {
58 struct task_struct *thr;
59 struct list_head threads_list_entry;
62 static LIST_HEAD(iscsi_threads_list);
64 static void cmnd_remove_hash(struct iscsi_cmnd *cmnd);
65 static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
66 static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd);
67 static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess);
68 static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd);
69 static void req_cmnd_release(struct iscsi_cmnd *req);
71 static inline u32 cmnd_write_size(struct iscsi_cmnd *cmnd)
73 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
75 if (hdr->flags & ISCSI_CMD_WRITE)
76 return be32_to_cpu(hdr->data_length);
80 static inline u32 cmnd_read_size(struct iscsi_cmnd *cmnd)
82 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
84 if (hdr->flags & ISCSI_CMD_READ) {
85 struct iscsi_rlength_ahdr *ahdr =
86 (struct iscsi_rlength_ahdr *)cmnd->pdu.ahs;
88 if (!(hdr->flags & ISCSI_CMD_WRITE))
89 return be32_to_cpu(hdr->data_length);
90 if (ahdr && ahdr->ahstype == ISCSI_AHSTYPE_RLENGTH)
91 return be32_to_cpu(ahdr->read_length);
96 static inline void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd)
98 EXTRACHECKS_BUG_ON(cmnd->data_waiting);
100 cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
101 scst_restart_cmd(cmnd->scst_cmd, SCST_PREPROCESS_STATUS_SUCCESS,
102 SCST_CONTEXT_THREAD);
105 static inline void iscsi_restart_waiting_cmnd(struct iscsi_cmnd *cmnd)
108 * There is no race with conn_abort(), since all functions
109 * called from single read thread
111 iscsi_extracheck_is_rd_thread(cmnd->conn);
112 cmnd->data_waiting = 0;
114 iscsi_restart_cmnd(cmnd);
117 static inline void iscsi_fail_waiting_cmnd(struct iscsi_cmnd *cmnd)
119 TRACE_MGMT_DBG("Failing data waiting cmd %p", cmnd);
122 * There is no race with conn_abort(), since all functions
123 * called from single read thread
125 iscsi_extracheck_is_rd_thread(cmnd->conn);
126 cmnd->data_waiting = 0;
128 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
/*
 * cmnd_alloc() - allocate and initialize an iscsi_cmnd.
 *
 * @conn:   connection the command belongs to
 * @parent: NULL for a request PDU coming from the initiator; non-NULL
 *          for a response command attached to that request
 *
 * A parentless (request) command additionally gets its response/digest
 * lists initialized and is linked onto the connection's cmd_list.
 * Reference count starts at 1; released via cmnd_put()/cmnd_done().
 *
 * NOTE(review): leading numerals are original-line-number residue from
 * extraction; several lines (e.g. the #else/#endif of the version
 * check, cmnd->conn assignment, closing braces and the return) were
 * elided by the extraction — confirm against the pristine source.
 */
131 struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *conn,
132 struct iscsi_cmnd *parent)
134 struct iscsi_cmnd *cmnd;
136 /* ToDo: __GFP_NOFAIL?? */
137 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
138 cmnd = kmem_cache_alloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
139 memset(cmnd, 0, sizeof(*cmnd));
141 cmnd = kmem_cache_zalloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
144 atomic_set(&cmnd->ref_cnt, 1);
145 cmnd->scst_state = ISCSI_CMD_STATE_NEW;
147 cmnd->parent_req = parent;
148 init_waitqueue_head(&cmnd->scst_waitQ);
150 if (parent == NULL) {
153 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
154 atomic_set(&cmnd->net_ref_cnt, 0);
156 spin_lock_init(&cmnd->rsp_cmd_lock);
157 INIT_LIST_HEAD(&cmnd->rsp_cmd_list);
158 INIT_LIST_HEAD(&cmnd->rx_ddigest_cmd_list);
160 spin_lock_bh(&conn->cmd_list_lock);
161 list_add_tail(&cmnd->cmd_list_entry, &conn->cmd_list);
162 spin_unlock_bh(&conn->cmd_list_lock);
165 TRACE_DBG("conn %p, parent %p, cmnd %p", conn, parent, cmnd);
/*
 * cmnd_free() - final destruction of a command.
 *
 * Must only run when the refcount has dropped to zero (asserted below).
 * Frees the optional AHS buffer and returns the command to the slab
 * cache. The write/written-list membership check is a sanity trap:
 * a command still on one of those lists at free time is a use-after-free
 * in the making, hence the CRIT dump of its header fields.
 *
 * NOTE(review): leading numerals are extraction residue; some lines
 * (closing braces, part of the diagnostics block) were elided.
 */
169 /* Frees a command. Also frees the additional header. */
170 static void cmnd_free(struct iscsi_cmnd *cmnd)
172 TRACE_DBG("%p", cmnd);
174 if (unlikely(cmnd->tm_aborted)) {
175 TRACE_MGMT_DBG("Free aborted cmd %p (scst cmd %p, state %d, "
176 "parent_req %p)", cmnd, cmnd->scst_cmd,
177 cmnd->scst_state, cmnd->parent_req);
180 /* Catch users from cmd_list or rsp_cmd_list */
181 EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) != 0);
183 kfree(cmnd->pdu.ahs);
185 if (unlikely(cmnd->on_write_list || cmnd->on_written_list)) {
186 struct iscsi_scsi_cmd_hdr *req = cmnd_hdr(cmnd);
188 PRINT_CRIT_ERROR("cmnd %p still on some list?, %x, %x, %x, "
189 "%x, %x, %x, %x", cmnd, req->opcode, req->scb[0],
190 req->flags, req->itt, be32_to_cpu(req->data_length),
191 req->cmd_sn, be32_to_cpu(cmnd->pdu.datasize));
193 if (unlikely(cmnd->parent_req)) {
194 struct iscsi_scsi_cmd_hdr *preq =
195 cmnd_hdr(cmnd->parent_req);
196 PRINT_CRIT_ERROR("%p %x %u", preq, preq->opcode,
202 kmem_cache_free(iscsi_cmnd_cache, cmnd);
/*
 * cmnd_done() - teardown path run when a command's refcount reaches 0.
 *
 * For a request (parent_req == NULL): unlink from the connection's
 * cmd_list, assert the response/digest sublists are empty, and finish
 * any still-attached SCST command according to its state (PROCESSED ->
 * scst_tgt_cmd_done(); AFTER_PREPROC -> fatally restart it).
 * For a response: unlink from the parent's rsp_cmd_list and drop the
 * reference held on the parent.
 * Finally frees an owned scatterlist (unless it is the shared dummy_sg)
 * and decrements the session's active_cmds accounting if this command
 * had incremented it.
 *
 * NOTE(review): leading numerals are extraction residue; `break`s,
 * braces and a few statements between the visible lines were elided —
 * the statement ORDER here is load-bearing (see the two "Order between
 * above and below code" comments), so do not reorder when restoring.
 */
206 void cmnd_done(struct iscsi_cmnd *cmnd)
208 TRACE_DBG("%p", cmnd);
210 if (unlikely(cmnd->tm_aborted)) {
211 TRACE_MGMT_DBG("Done aborted cmd %p (scst cmd %p, state %d, "
212 "parent_req %p)", cmnd, cmnd->scst_cmd,
213 cmnd->scst_state, cmnd->parent_req);
216 EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
218 if (cmnd->on_written_list) {
219 struct iscsi_conn *conn = cmnd->conn;
220 TRACE_DBG("Deleting cmd %p from conn %p written_list", cmnd,
222 spin_lock_bh(&conn->write_list_lock);
223 list_del(&cmnd->write_list_entry);
224 cmnd->on_written_list = 0;
225 spin_unlock_bh(&conn->write_list_lock);
228 if (cmnd->parent_req == NULL) {
229 struct iscsi_conn *conn = cmnd->conn;
230 TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);
232 spin_lock_bh(&conn->cmd_list_lock);
233 list_del(&cmnd->cmd_list_entry);
234 spin_unlock_bh(&conn->cmd_list_lock);
238 EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rsp_cmd_list));
239 EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rx_ddigest_cmd_list));
241 /* Order between above and below code is important! */
243 if (cmnd->scst_cmd) {
244 switch (cmnd->scst_state) {
245 case ISCSI_CMD_STATE_PROCESSED:
246 TRACE_DBG("cmd %p PROCESSED", cmnd);
247 scst_tgt_cmd_done(cmnd->scst_cmd,
248 SCST_CONTEXT_DIRECT);
250 case ISCSI_CMD_STATE_AFTER_PREPROC:
252 struct scst_cmd *scst_cmd = cmnd->scst_cmd;
253 TRACE_DBG("cmd %p AFTER_PREPROC", cmnd);
254 cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
255 cmnd->scst_cmd = NULL;
256 scst_restart_cmd(scst_cmd,
257 SCST_PREPROCESS_STATUS_ERROR_FATAL,
258 SCST_CONTEXT_THREAD);
262 PRINT_CRIT_ERROR("Unexpected cmnd scst state "
263 "%d", cmnd->scst_state);
269 EXTRACHECKS_BUG_ON(cmnd->scst_cmd != NULL);
270 TRACE_DBG("Deleting rsp %p from parent %p", cmnd,
273 spin_lock_bh(&cmnd->parent_req->rsp_cmd_lock);
274 list_del(&cmnd->rsp_cmd_list_entry);
275 spin_unlock_bh(&cmnd->parent_req->rsp_cmd_lock);
277 cmnd_put(cmnd->parent_req);
280 /* Order between above and below code is important! */
283 TRACE_DBG("%s", "own_sg");
284 if (cmnd->sg != &dummy_sg)
285 scst_free(cmnd->sg, cmnd->sg_cnt);
286 #ifdef CONFIG_SCST_DEBUG
293 if (cmnd->dec_active_cmnds) {
294 struct iscsi_session *sess = cmnd->conn->session;
295 TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
296 "new value %d)", cmnd, sess,
297 atomic_read(&sess->active_cmds)-1);
298 atomic_dec(&sess->active_cmds);
299 #ifdef CONFIG_SCST_EXTRACHECKS
300 if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
301 PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
302 atomic_read(&sess->active_cmds));
313 * Corresponding conn may also get destroyed after this function, except only
314 * if it's called from the read thread!
316 * It can't be called in parallel with iscsi_cmnds_init_write()!
/*
 * req_cmnd_release_force() - forcibly release a request and its
 * responses (abort/cleanup path).
 *
 * @req:   request to release; must not be the connection's current
 *         read_cmnd (asserted)
 * @flags: ISCSI_FORCE_RELEASE_WRITE drops this request's responses
 *         still sitting on the connection's write_list
 *
 * First pass (under write_list_lock): move this request's queued write
 * responses onto a private list, then put them outside the lock.
 * Second pass (under rsp_cmd_lock): walk rsp_cmd_list in reverse,
 * marking each response force_cleanup_done and putting the ones that
 * are neither on the write list nor already being transmitted.
 * Finishes with the normal req_cmnd_release().
 *
 * NOTE(review): leading numerals are extraction residue; `continue`s,
 * lock re-acquisition and the cmnd_put() calls between the visible
 * lines were elided — the lock drop/retake pattern here is delicate.
 */
318 void req_cmnd_release_force(struct iscsi_cmnd *req, int flags)
320 struct iscsi_cmnd *rsp, *t;
321 struct iscsi_conn *conn = req->conn;
322 LIST_HEAD(cmds_list);
326 TRACE_MGMT_DBG("%p", req);
328 sBUG_ON(req == conn->read_cmnd);
330 if (flags & ISCSI_FORCE_RELEASE_WRITE) {
331 spin_lock_bh(&conn->write_list_lock);
332 list_for_each_entry_safe(rsp, t, &conn->write_list,
334 if (rsp->parent_req != req)
337 cmd_del_from_write_list(rsp);
339 list_add_tail(&rsp->write_list_entry, &cmds_list);
341 spin_unlock_bh(&conn->write_list_lock);
343 list_for_each_entry_safe(rsp, t, &cmds_list,
345 TRACE_MGMT_DBG("Putting write rsp %p", rsp);
346 list_del(&rsp->write_list_entry);
352 spin_lock_bh(&req->rsp_cmd_lock);
353 list_for_each_entry_reverse(rsp, &req->rsp_cmd_list,
354 rsp_cmd_list_entry) {
357 if (rsp->force_cleanup_done)
360 rsp->force_cleanup_done = 1;
362 if (cmnd_get_check(rsp))
365 spin_unlock_bh(&req->rsp_cmd_lock);
367 spin_lock_bh(&conn->write_list_lock);
368 r = rsp->on_write_list || rsp->write_processing_started;
369 spin_unlock_bh(&conn->write_list_lock);
377 * If both on_write_list and write_processing_started not set,
378 * we can safely put() rsp.
380 TRACE_MGMT_DBG("Putting rsp %p", rsp);
384 spin_unlock_bh(&req->rsp_cmd_lock);
386 req_cmnd_release(req);
393 * Corresponding conn may also get destroyed after this function, except only
394 * if it's called from the read thread!
/*
 * req_cmnd_release() - normal release path for a request command.
 *
 * Drains the rx data-digest sublist, removes the command from the ITT
 * hash (if still hashed), and reverses the active_cmds accounting done
 * at scsi_cmnd_start() time. May be called exactly once per request
 * (release_called is asserted under CONFIG_SCST_EXTRACHECKS).
 *
 * NOTE(review): leading numerals are extraction residue; closing
 * braces, the cmnd_put() of drained digest commands and the final
 * cmnd_put(req) were elided between the visible lines.
 */
396 static void req_cmnd_release(struct iscsi_cmnd *req)
398 struct iscsi_cmnd *c, *t;
402 TRACE_DBG("%p", req);
404 #ifdef CONFIG_SCST_EXTRACHECKS
405 sBUG_ON(req->release_called);
406 req->release_called = 1;
409 if (unlikely(req->tm_aborted)) {
410 TRACE_MGMT_DBG("Release aborted req cmd %p (scst cmd %p, "
411 "state %d)", req, req->scst_cmd, req->scst_state);
414 sBUG_ON(req->parent_req != NULL);
416 list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
417 rx_ddigest_cmd_list_entry) {
418 cmd_del_from_rx_ddigest_list(c);
423 cmnd_remove_hash(req);
425 if (req->dec_active_cmnds) {
426 struct iscsi_session *sess = req->conn->session;
427 TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
428 "new value %d)", req, sess,
429 atomic_read(&sess->active_cmds)-1);
430 atomic_dec(&sess->active_cmds);
431 req->dec_active_cmnds = 0;
432 #ifdef CONFIG_SCST_EXTRACHECKS
433 if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
434 PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
435 atomic_read(&sess->active_cmds));
448 * Corresponding conn may also get destroyed after this function, except only
449 * if it's called from the read thread!
/*
 * rsp_cmnd_release() - release path for a response command.
 *
 * A response must never still be in the ITT hash and must always have
 * a parent request (both asserted). Single-call discipline is enforced
 * via release_called under CONFIG_SCST_EXTRACHECKS.
 *
 * NOTE(review): leading numerals are extraction residue; the tail of
 * the function (presumably the final cmnd_put()) was elided — confirm
 * against the pristine source.
 */
451 void rsp_cmnd_release(struct iscsi_cmnd *cmnd)
453 TRACE_DBG("%p", cmnd);
455 #ifdef CONFIG_SCST_EXTRACHECKS
456 sBUG_ON(cmnd->release_called);
457 cmnd->release_called = 1;
460 sBUG_ON(cmnd->hashed);
461 sBUG_ON(cmnd->parent_req == NULL);
468 * create a new command used as response.
470 * iscsi_cmnd_create_rsp_cmnd -
471 * @cmnd: ptr to request command
473 * @return ptr to response command or NULL
475 static struct iscsi_cmnd *iscsi_cmnd_create_rsp_cmnd(struct iscsi_cmnd *parent)
477 struct iscsi_cmnd *rsp;
479 rsp = cmnd_alloc(parent->conn, parent);
481 spin_lock_bh(&parent->rsp_cmd_lock);
482 TRACE_DBG("Adding rsp %p to parent %p", rsp, parent);
483 list_add_tail(&rsp->rsp_cmd_list_entry, &parent->rsp_cmd_list);
484 spin_unlock_bh(&parent->rsp_cmd_lock);
489 static inline struct iscsi_cmnd *get_rsp_cmnd(struct iscsi_cmnd *req)
491 struct iscsi_cmnd *res = NULL;
493 /* Currently this lock isn't needed, but just in case.. */
494 spin_lock_bh(&req->rsp_cmd_lock);
495 if (!list_empty(&req->rsp_cmd_list)) {
496 res = list_entry(req->rsp_cmd_list.prev, struct iscsi_cmnd,
499 spin_unlock_bh(&req->rsp_cmd_lock);
/*
 * iscsi_cmnds_init_write() - queue a list of response PDUs for
 * transmission on their connection.
 *
 * @send:  private list of responses, all belonging to the same conn
 * @flags: ISCSI_INIT_WRITE_REMOVE_HASH unhashes the parent request
 *         first; ISCSI_INIT_WRITE_WAKE kicks the write thread
 *
 * Computes data digests up front when enabled, then moves every entry
 * from @send onto the connection's write_list under write_list_lock.
 * Must not run in parallel with req_cmnd_release_force() (see its
 * header comment).
 *
 * NOTE(review): leading numerals are extraction residue; digest-call
 * lines and closing braces between the visible lines were elided.
 */
504 static void iscsi_cmnds_init_write(struct list_head *send, int flags)
506 struct iscsi_cmnd *rsp = list_entry(send->next, struct iscsi_cmnd,
508 struct iscsi_conn *conn = rsp->conn;
509 struct list_head *pos, *next;
511 sBUG_ON(list_empty(send));
514 * If we don't remove hashed req cmd from the hash list here, before
515 * submitting it for transmission, we will have a race, when for
516 * some reason cmd's release is delayed after transmission and
517 * initiator sends cmd with the same ITT => this command will be
518 * erroneously rejected as a duplicate.
520 if ((flags & ISCSI_INIT_WRITE_REMOVE_HASH) &&
521 rsp->parent_req->hashed &&
522 (rsp->parent_req->r2t_length == 0) &&
523 (rsp->parent_req->outstanding_r2t == 0))
524 cmnd_remove_hash(rsp->parent_req);
526 if (!(conn->ddigest_type & DIGEST_NONE)) {
527 list_for_each(pos, send) {
528 rsp = list_entry(pos, struct iscsi_cmnd,
531 if (rsp->pdu.datasize != 0) {
532 TRACE_DBG("Doing data digest (%p:%x)", rsp,
539 spin_lock_bh(&conn->write_list_lock);
540 list_for_each_safe(pos, next, send) {
541 rsp = list_entry(pos, struct iscsi_cmnd, write_list_entry);
543 TRACE_DBG("%p:%x", rsp, cmnd_opcode(rsp));
545 sBUG_ON(conn != rsp->conn);
547 list_del(&rsp->write_list_entry);
548 cmd_add_on_write_list(conn, rsp);
550 spin_unlock_bh(&conn->write_list_lock);
552 if (flags & ISCSI_INIT_WRITE_WAKE)
553 iscsi_make_conn_wr_active(conn);
/*
 * iscsi_cmnd_init_write() - single-response convenience wrapper around
 * iscsi_cmnds_init_write().
 *
 * Dumps full command state and BUGs (elided) if the response is
 * already on a write list — that would corrupt the list.
 *
 * NOTE(review): leading numerals are extraction residue; the local
 * LIST_HEAD(head) declaration and the closing of the error branch
 * were elided between the visible lines.
 */
558 static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags)
562 if (unlikely(rsp->on_write_list)) {
563 PRINT_CRIT_ERROR("cmd already on write list (%x %x %x %x %u "
564 "%u %u %u %u %u %u %d %d",
565 cmnd_itt(rsp), cmnd_ttt(rsp), cmnd_opcode(rsp),
566 cmnd_scsicode(rsp), rsp->r2t_sn,
567 rsp->r2t_length, rsp->is_unsolicited_data,
568 rsp->target_task_tag, rsp->outstanding_r2t,
569 rsp->hdigest, rsp->ddigest,
570 list_empty(&rsp->rsp_cmd_list), rsp->hashed);
573 list_add(&rsp->write_list_entry, &head);
574 iscsi_cmnds_init_write(&head, flags);
/*
 * iscsi_set_datasize() - set a Data-In PDU's payload size and zero the
 * pad bytes up to the next 4-byte boundary in the scatterlist.
 *
 * @offset: byte offset of this PDU's payload within cmnd->sg
 * @size:   payload size in bytes (may be unaligned)
 *
 * NOTE(review): leading numerals are extraction residue; the
 * size-alignment guard around this padding code and the loop writing
 * the zero pad bytes through *p were elided between the visible lines.
 */
578 static void iscsi_set_datasize(struct iscsi_cmnd *cmnd, u32 offset, u32 size)
580 cmnd->pdu.datasize = size;
583 u32 last_off = offset + size;
584 int idx = last_off >> PAGE_SHIFT;
585 u8 *p = (u8 *)page_address(sg_page(&cmnd->sg[idx])) +
586 (last_off & ~PAGE_MASK);
587 int i = 4 - (size & 3);
/*
 * send_data_rsp() - build the chain of Data-In PDUs for a READ command
 * and queue them for transmission.
 *
 * @req:         the SCSI request being answered
 * @status:      SCSI status to piggy-back on the final Data-In PDU
 * @send_status: nonzero to set the S (status) bit on the last PDU
 *
 * Splits min(expected read size, buffer length) into PDUs of at most
 * max_xmit_data_length. The final PDU carries the F flag, optionally
 * the status, and residual under/overflow accounting per RFC 3720.
 *
 * NOTE(review): leading numerals are extraction residue; the loop
 * structure (while/do braces), sn/offset bookkeeping between PDUs and
 * several assignments were elided between the visible lines.
 */
594 static void send_data_rsp(struct iscsi_cmnd *req, u8 status, int send_status)
596 struct iscsi_cmnd *rsp;
597 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
598 struct iscsi_data_in_hdr *rsp_hdr;
599 u32 pdusize, expsize, scsisize, size, offset, sn;
602 TRACE_DBG("req %p", req);
604 pdusize = req->conn->session->sess_param.max_xmit_data_length;
605 expsize = cmnd_read_size(req);
606 size = min(expsize, (u32)req->bufflen);
611 rsp = iscsi_cmnd_create_rsp_cmnd(req);
612 TRACE_DBG("rsp %p", rsp);
614 rsp->sg_cnt = req->sg_cnt;
615 rsp->bufflen = req->bufflen;
616 rsp_hdr = (struct iscsi_data_in_hdr *)&rsp->pdu.bhs;
618 rsp_hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
619 rsp_hdr->itt = req_hdr->itt;
620 rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
621 rsp_hdr->buffer_offset = cpu_to_be32(offset);
622 rsp_hdr->data_sn = cpu_to_be32(sn);
624 if (size <= pdusize) {
625 TRACE_DBG("offset %d, size %d", offset, size);
626 iscsi_set_datasize(rsp, offset, size);
628 TRACE_DBG("status %x", status);
630 ISCSI_FLG_FINAL | ISCSI_FLG_STATUS;
631 rsp_hdr->cmd_status = status;
633 scsisize = req->bufflen;
634 if (scsisize < expsize) {
635 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
636 size = expsize - scsisize;
637 } else if (scsisize > expsize) {
638 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
639 size = scsisize - expsize;
642 rsp_hdr->residual_count = cpu_to_be32(size);
643 list_add_tail(&rsp->write_list_entry, &send);
647 TRACE_DBG("pdusize %d, offset %d, size %d", pdusize, offset,
650 iscsi_set_datasize(rsp, offset, pdusize);
656 list_add_tail(&rsp->write_list_entry, &send);
658 iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_REMOVE_HASH);
/*
 * create_status_rsp() - build a SCSI Response PDU, optionally carrying
 * sense data.
 *
 * @req:       request being answered
 * @status:    SCSI status byte
 * @sense_buf: sense bytes, or invalid/NULL for a sense-free response
 * @sense_len: length of @sense_buf
 *
 * When sense is valid, a one-page scatterlist is allocated for the
 * iSCSI sense-data segment (2-byte length + sense bytes), and the
 * buffer length is rounded up to a 4-byte boundary with zero padding.
 *
 * NOTE(review): leading numerals are extraction residue; the
 * allocation-failure branch, the zero-pad loop body and the else
 * branch (datasize = 0) were elided between the visible lines.
 */
662 static struct iscsi_cmnd *create_status_rsp(struct iscsi_cmnd *req, int status,
663 const u8 *sense_buf, int sense_len)
665 struct iscsi_cmnd *rsp;
666 struct iscsi_scsi_rsp_hdr *rsp_hdr;
667 struct iscsi_sense_data *sense;
668 struct scatterlist *sg;
670 rsp = iscsi_cmnd_create_rsp_cmnd(req);
671 TRACE_DBG("%p", rsp);
673 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
674 rsp_hdr->opcode = ISCSI_OP_SCSI_RSP;
675 rsp_hdr->flags = ISCSI_FLG_FINAL;
676 rsp_hdr->response = ISCSI_RESPONSE_COMMAND_COMPLETED;
677 rsp_hdr->cmd_status = status;
678 rsp_hdr->itt = cmnd_hdr(req)->itt;
680 if (SCST_SENSE_VALID(sense_buf)) {
681 TRACE_DBG("%s", "SENSE VALID");
682 /* ToDo: __GFP_NOFAIL ?? */
683 sg = rsp->sg = scst_alloc(PAGE_SIZE, GFP_KERNEL|__GFP_NOFAIL,
689 sense = (struct iscsi_sense_data *)page_address(sg_page(&sg[0]));
690 sense->length = cpu_to_be16(sense_len);
691 memcpy(sense->data, sense_buf, sense_len);
692 rsp->pdu.datasize = sizeof(struct iscsi_sense_data) + sense_len;
693 rsp->bufflen = (rsp->pdu.datasize + 3) & -4;
694 if (rsp->bufflen - rsp->pdu.datasize) {
695 unsigned int i = rsp->pdu.datasize;
696 u8 *p = (u8 *)sense + i;
698 while (i < rsp->bufflen) {
704 rsp->pdu.datasize = 0;
711 static struct iscsi_cmnd *create_sense_rsp(struct iscsi_cmnd *req,
712 u8 sense_key, u8 asc, u8 ascq)
715 memset(sense, 0, sizeof(sense));
717 sense[2] = sense_key;
718 sense[7] = 6; /* Additional sense length */
721 return create_status_rsp(req, SAM_STAT_CHECK_CONDITION, sense,
/*
 * iscsi_cmnd_reject() - send a Reject PDU for a malformed/unacceptable
 * request, echoing the offending BHS as the Reject payload
 * (RFC 3720 10.17), then arrange for the request's remaining data
 * segment to be read and discarded.
 *
 * NOTE(review): leading numerals are extraction residue; the
 * req->rejected assignment, the sg allocation-failure branch and the
 * local addr declaration were elided between the visible lines.
 */
725 static void iscsi_cmnd_reject(struct iscsi_cmnd *req, int reason)
727 struct iscsi_cmnd *rsp;
728 struct iscsi_reject_hdr *rsp_hdr;
729 struct scatterlist *sg;
732 TRACE_MGMT_DBG("Reject: req %p, reason %x", req, reason);
734 sBUG_ON(req->rejected);
736 req->reject_reason = ISCSI_REJECT_CMD;
738 rsp = iscsi_cmnd_create_rsp_cmnd(req);
739 rsp_hdr = (struct iscsi_reject_hdr *)&rsp->pdu.bhs;
741 rsp_hdr->opcode = ISCSI_OP_REJECT;
742 rsp_hdr->ffffffff = ISCSI_RESERVED_TAG;
743 rsp_hdr->reason = reason;
745 /* ToDo: __GFP_NOFAIL ?? */
746 sg = rsp->sg = scst_alloc(PAGE_SIZE, GFP_KERNEL|__GFP_NOFAIL,
752 addr = page_address(sg_page(&sg[0]));
754 memcpy(addr, &req->pdu.bhs, sizeof(struct iscsi_hdr));
755 rsp->bufflen = rsp->pdu.datasize = sizeof(struct iscsi_hdr);
757 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
758 ISCSI_INIT_WRITE_WAKE);
760 cmnd_prepare_get_rejected_cmd_data(req);
763 static inline int iscsi_get_allowed_cmds(struct iscsi_session *sess)
765 int res = max(-1, (int)sess->max_queued_cmnds -
766 atomic_read(&sess->active_cmds)-1);
767 TRACE_DBG("allowed cmds %d (sess %p, active_cmds %d)", res,
768 sess, atomic_read(&sess->active_cmds));
772 static u32 cmnd_set_sn(struct iscsi_cmnd *cmnd, int set_stat_sn)
774 struct iscsi_conn *conn = cmnd->conn;
775 struct iscsi_session *sess = conn->session;
778 spin_lock(&sess->sn_lock);
781 cmnd->pdu.bhs.sn = cpu_to_be32(conn->stat_sn++);
782 cmnd->pdu.bhs.exp_sn = cpu_to_be32(sess->exp_cmd_sn);
783 cmnd->pdu.bhs.max_sn = cpu_to_be32(sess->exp_cmd_sn +
784 iscsi_get_allowed_cmds(sess));
786 res = cpu_to_be32(conn->stat_sn);
788 spin_unlock(&sess->sn_lock);
792 /* Called under sn_lock */
793 static void __update_stat_sn(struct iscsi_cmnd *cmnd)
795 struct iscsi_conn *conn = cmnd->conn;
798 cmnd->pdu.bhs.exp_sn = exp_stat_sn = be32_to_cpu(cmnd->pdu.bhs.exp_sn);
799 TRACE_DBG("%x,%x", cmnd_opcode(cmnd), exp_stat_sn);
800 if ((int)(exp_stat_sn - conn->exp_stat_sn) > 0 &&
801 (int)(exp_stat_sn - conn->stat_sn) <= 0) {
802 /* free pdu resources */
803 cmnd->conn->exp_stat_sn = exp_stat_sn;
807 static inline void update_stat_sn(struct iscsi_cmnd *cmnd)
809 spin_lock(&cmnd->conn->session->sn_lock);
810 __update_stat_sn(cmnd);
811 spin_unlock(&cmnd->conn->session->sn_lock);
814 /* Called under sn_lock */
815 static int check_cmd_sn(struct iscsi_cmnd *cmnd)
817 struct iscsi_session *session = cmnd->conn->session;
820 cmnd->pdu.bhs.sn = cmd_sn = be32_to_cpu(cmnd->pdu.bhs.sn);
821 TRACE_DBG("%d(%d)", cmd_sn, session->exp_cmd_sn);
822 if (likely((s32)(cmd_sn - session->exp_cmd_sn) >= 0))
824 PRINT_ERROR("sequence error (%x,%x)", cmd_sn, session->exp_cmd_sn);
825 return -ISCSI_REASON_PROTOCOL_ERROR;
828 static inline struct iscsi_cmnd *__cmnd_find_hash(
829 struct iscsi_session *session, u32 itt, u32 ttt)
831 struct list_head *head;
832 struct iscsi_cmnd *cmnd;
834 head = &session->cmnd_hash[cmnd_hashfn(itt)];
836 list_for_each_entry(cmnd, head, hash_list_entry) {
837 if (cmnd->pdu.bhs.itt == itt) {
838 if (ttt != ISCSI_RESERVED_TAG &&
839 ttt != cmnd->target_task_tag)
847 static struct iscsi_cmnd *cmnd_find_hash(struct iscsi_session *session,
850 struct iscsi_cmnd *cmnd;
852 spin_lock(&session->cmnd_hash_lock);
853 cmnd = __cmnd_find_hash(session, itt, ttt);
854 spin_unlock(&session->cmnd_hash_lock);
859 static struct iscsi_cmnd *cmnd_find_hash_get(struct iscsi_session *session,
862 struct iscsi_cmnd *cmnd;
864 spin_lock(&session->cmnd_hash_lock);
865 cmnd = __cmnd_find_hash(session, itt, ttt);
867 if (unlikely(cmnd_get_check(cmnd)))
870 spin_unlock(&session->cmnd_hash_lock);
/*
 * cmnd_insert_hash() - register an incoming request in the session's
 * ITT hash and run StatSN/CmdSN bookkeeping.
 *
 * Rejects the reserved ITT, and a duplicate ITT already in flight
 * (-ISCSI_REASON_TASK_IN_PROGRESS). On success also acknowledges
 * ExpStatSN and validates CmdSN under sn_lock.
 *
 * Returns 0 or a negative -ISCSI_REASON_* code.
 *
 * NOTE(review): leading numerals are extraction residue; the
 * cmnd->hashed = 1 assignment, goto/out labels and the final return
 * were elided between the visible lines.
 */
875 static int cmnd_insert_hash(struct iscsi_cmnd *cmnd)
877 struct iscsi_session *session = cmnd->conn->session;
878 struct iscsi_cmnd *tmp;
879 struct list_head *head;
881 u32 itt = cmnd->pdu.bhs.itt;
883 TRACE_DBG("%p:%x", cmnd, itt);
884 if (unlikely(itt == ISCSI_RESERVED_TAG)) {
885 PRINT_ERROR("%s", "ITT is RESERVED_TAG");
886 PRINT_BUFFER("Incorrect BHS", &cmnd->pdu.bhs,
887 sizeof(cmnd->pdu.bhs));
888 err = -ISCSI_REASON_PROTOCOL_ERROR;
892 spin_lock(&session->cmnd_hash_lock);
894 head = &session->cmnd_hash[cmnd_hashfn(cmnd->pdu.bhs.itt)];
896 tmp = __cmnd_find_hash(session, itt, ISCSI_RESERVED_TAG);
898 list_add_tail(&cmnd->hash_list_entry, head);
901 PRINT_ERROR("Task %x in progress, cmnd %p", itt, cmnd);
902 err = -ISCSI_REASON_TASK_IN_PROGRESS;
905 spin_unlock(&session->cmnd_hash_lock);
908 spin_lock(&session->sn_lock);
909 __update_stat_sn(cmnd);
910 err = check_cmd_sn(cmnd);
911 spin_unlock(&session->sn_lock);
918 static void cmnd_remove_hash(struct iscsi_cmnd *cmnd)
920 struct iscsi_session *session = cmnd->conn->session;
921 struct iscsi_cmnd *tmp;
923 spin_lock(&session->cmnd_hash_lock);
925 tmp = __cmnd_find_hash(session, cmnd->pdu.bhs.itt, ISCSI_RESERVED_TAG);
927 if (likely(tmp && tmp == cmnd)) {
928 list_del(&cmnd->hash_list_entry);
931 PRINT_ERROR("%p:%x not found", cmnd, cmnd_itt(cmnd));
934 spin_unlock(&session->cmnd_hash_lock);
/*
 * cmnd_prepare_get_rejected_cmd_data() - set up the connection's read
 * iovecs so the data segment of a rejected command is consumed off the
 * wire and discarded.
 *
 * If the command has no real buffer, the shared dummy_sg/dummy_page is
 * used as a sink (safe, since the data is only read then dropped).
 * The segment length is rounded up to 4 bytes to swallow pad bytes.
 * Runs on the single read thread only.
 *
 * NOTE(review): leading numerals are extraction residue; the
 * early-return for size == 0, local declarations (addr, size, i) and
 * the sg presence check were elided between the visible lines.
 */
937 static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd)
939 struct iscsi_conn *conn = cmnd->conn;
940 struct scatterlist *sg = cmnd->sg;
945 TRACE_MGMT_DBG("Skipping (%p, %x %x %x %u, %p, scst state %d)", cmnd,
946 cmnd_itt(cmnd), cmnd_opcode(cmnd), cmnd_hdr(cmnd)->scb[0],
947 cmnd->pdu.datasize, cmnd->scst_cmd, cmnd->scst_state);
949 iscsi_extracheck_is_rd_thread(conn);
951 size = cmnd->pdu.datasize;
957 * There are no problems with the safety from concurrent
958 * accesses to dummy_page in dummy_sg, since data only
959 * will be read and then discarded.
961 sg = cmnd->sg = &dummy_sg;
962 cmnd->bufflen = PAGE_SIZE;
966 addr = (char __force __user *)(page_address(sg_page(&sg[0])));
967 sBUG_ON(addr == NULL);
968 size = (size + 3) & -4;
969 conn->read_size = size;
970 for (i = 0; size > PAGE_SIZE; i++, size -= cmnd->bufflen) {
971 sBUG_ON(i >= ISCSI_CONN_IOV_MAX);
972 conn->read_iov[i].iov_base = addr;
973 conn->read_iov[i].iov_len = cmnd->bufflen;
975 conn->read_iov[i].iov_base = addr;
976 conn->read_iov[i].iov_len = size;
977 conn->read_msg.msg_iov = conn->read_iov;
978 conn->read_msg.msg_iovlen = ++i;
/*
 * cmnd_reject_scsi_cmd() - fail a SCSI command at the iSCSI level:
 * fix up the already-created SCSI Response PDU with residual counts
 * (full write underflow; read underflow, bi-directional when the
 * command also writes), queue it, and discard the request's pending
 * data segment.
 *
 * NOTE(review): leading numerals are extraction residue; the
 * req->rejected assignment, the NULL-rsp early return for aborted
 * commands and local declarations were elided between the visible
 * lines.
 */
983 static void cmnd_reject_scsi_cmd(struct iscsi_cmnd *req)
985 struct iscsi_cmnd *rsp;
986 struct iscsi_scsi_rsp_hdr *rsp_hdr;
989 TRACE_DBG("%p", req);
991 sBUG_ON(req->rejected);
993 req->reject_reason = ISCSI_REJECT_SCSI_CMD;
995 rsp = get_rsp_cmnd(req);
997 /* That can be true for aborted commands */
1001 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
1003 sBUG_ON(cmnd_opcode(rsp) != ISCSI_OP_SCSI_RSP);
1005 size = cmnd_write_size(req);
1007 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
1008 rsp_hdr->residual_count = cpu_to_be32(size);
1010 size = cmnd_read_size(req);
1012 if (cmnd_hdr(req)->flags & ISCSI_CMD_WRITE) {
1013 rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
1014 rsp_hdr->bi_residual_count = cpu_to_be32(size);
1016 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
1017 rsp_hdr->residual_count = cpu_to_be32(size);
1021 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
1022 ISCSI_INIT_WRITE_WAKE);
1025 cmnd_prepare_get_rejected_cmd_data(req);
/*
 * cmnd_prepare_recv_pdu() - map a window [offset, offset+size) of the
 * command's scatterlist into the connection's read iovec array so the
 * incoming data segment lands directly in the command buffer
 * (zero-copy receive).
 *
 * Validates the window against bufflen (closing the connection on a
 * violation), rounds the receive size up to 4 bytes for pad, and walks
 * the sg page by page. Also closes the connection if the initiator
 * tries to send more pages than ISCSI_CONN_IOV_MAX allows. Runs on the
 * read thread.
 *
 * Returns 0 on success, negative on error (connection marked closed).
 *
 * NOTE(review): leading numerals are extraction residue; error-path
 * assignments/gotos, the loop head for the multi-page walk, idx/offset
 * advancement and the final return were elided between the visible
 * lines.
 */
1029 static int cmnd_prepare_recv_pdu(struct iscsi_conn *conn,
1030 struct iscsi_cmnd *cmd, u32 offset, u32 size)
1032 struct scatterlist *sg = cmd->sg;
1033 unsigned int bufflen = cmd->bufflen;
1034 unsigned int idx, i;
1038 TRACE_DBG("%p %u,%u", cmd->sg, offset, size);
1040 iscsi_extracheck_is_rd_thread(conn);
1042 if (unlikely((offset >= bufflen) ||
1043 (offset + size > bufflen))) {
1044 PRINT_ERROR("Wrong ltn (%u %u %u)", offset, size, bufflen);
1045 mark_conn_closed(conn);
1050 offset += sg[0].offset;
1051 idx = offset >> PAGE_SHIFT;
1052 offset &= ~PAGE_MASK;
1054 conn->read_msg.msg_iov = conn->read_iov;
1055 conn->read_size = size = (size + 3) & -4;
1059 addr = (char __force __user *)(page_address(sg_page(&sg[idx])));
1060 sBUG_ON(addr == NULL);
1061 conn->read_iov[i].iov_base = addr + offset;
1062 if (offset + size <= PAGE_SIZE) {
1063 TRACE_DBG("idx=%d, offset=%u, size=%d, addr=%p",
1064 idx, offset, size, addr);
1065 conn->read_iov[i].iov_len = size;
1066 conn->read_msg.msg_iovlen = ++i;
1069 conn->read_iov[i].iov_len = PAGE_SIZE - offset;
1070 TRACE_DBG("idx=%d, offset=%u, size=%d, iov_len=%zd, addr=%p",
1071 idx, offset, size, conn->read_iov[i].iov_len, addr);
1072 size -= conn->read_iov[i].iov_len;
1074 if (unlikely(++i >= ISCSI_CONN_IOV_MAX)) {
1075 PRINT_ERROR("Initiator %s violated negotiated "
1076 "parameters by sending too much data (size "
1077 "left %d)", conn->session->initiator_name,
1079 mark_conn_closed(conn);
1085 TRACE_DBG("msg_iov=%p, msg_iovlen=%zd",
1086 conn->read_msg.msg_iov, conn->read_msg.msg_iovlen);
/*
 * send_r2t() - issue Ready-To-Transfer PDUs for the unsolicited-free
 * part of a WRITE command.
 *
 * Aborted requests are failed instead (only if no R2T is already
 * outstanding — otherwise data_out completion will handle it).
 * Otherwise slices req->r2t_length into R2Ts of at most
 * MaxBurstLength, stopping early when MaxOutstandingR2T is reached,
 * and queues the batch for transmission. Runs on the read thread, so
 * there is no race with data_out_start()/conn_abort().
 *
 * NOTE(review): leading numerals are extraction residue; local
 * declarations (burst, offset, LIST_HEAD(send)), do/while braces, an
 * early return and the else of the burst split were elided between the
 * visible lines.
 */
1092 static void send_r2t(struct iscsi_cmnd *req)
1094 struct iscsi_session *session = req->conn->session;
1095 struct iscsi_cmnd *rsp;
1096 struct iscsi_r2t_hdr *rsp_hdr;
1100 if (unlikely(req->tm_aborted)) {
1101 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted on R2T "
1102 "(r2t_length %d, outstanding_r2t %d)", req,
1103 req->scst_cmd, req->r2t_length, req->outstanding_r2t);
1104 if (req->outstanding_r2t == 0)
1105 iscsi_fail_waiting_cmnd(req);
1110 * There is no race with data_out_start() and conn_abort(), since
1111 * all functions called from single read thread
1113 iscsi_extracheck_is_rd_thread(req->conn);
1115 burst = session->sess_param.max_burst_length;
1116 offset = be32_to_cpu(cmnd_hdr(req)->data_length) - req->r2t_length;
1119 rsp = iscsi_cmnd_create_rsp_cmnd(req);
1120 rsp->pdu.bhs.ttt = req->target_task_tag;
1121 rsp_hdr = (struct iscsi_r2t_hdr *)&rsp->pdu.bhs;
1122 rsp_hdr->opcode = ISCSI_OP_R2T;
1123 rsp_hdr->flags = ISCSI_FLG_FINAL;
1124 rsp_hdr->lun = cmnd_hdr(req)->lun;
1125 rsp_hdr->itt = cmnd_hdr(req)->itt;
1126 rsp_hdr->r2t_sn = cpu_to_be32(req->r2t_sn++);
1127 rsp_hdr->buffer_offset = cpu_to_be32(offset);
1128 if (req->r2t_length > burst) {
1129 rsp_hdr->data_length = cpu_to_be32(burst);
1130 req->r2t_length -= burst;
1133 rsp_hdr->data_length = cpu_to_be32(req->r2t_length);
1134 req->r2t_length = 0;
1137 TRACE_WRITE("%x %u %u %u %u", cmnd_itt(req),
1138 be32_to_cpu(rsp_hdr->data_length),
1139 be32_to_cpu(rsp_hdr->buffer_offset),
1140 be32_to_cpu(rsp_hdr->r2t_sn), req->outstanding_r2t);
1142 list_add_tail(&rsp->write_list_entry, &send);
1144 if (++req->outstanding_r2t >= session->sess_param.max_outstanding_r2t)
1147 } while (req->r2t_length != 0);
1149 iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_WAKE);
/*
 * iscsi_pre_exec() - SCST pre-exec callback: verify the data digests of
 * all received Data-Out PDUs before the WRITE command is executed.
 *
 * On a digest mismatch, sets an iSCSI CRC-error sense on the SCST
 * command and returns SCST_PREPROCESS_STATUS_ERROR_SENSE_SET; remaining
 * list entries are left for req_cmnd_release() to free. For READ
 * commands the list must already be empty (asserted).
 *
 * NOTE(review): leading numerals are extraction residue; the goto-out
 * control flow, break on error and the cmnd_put() of checked entries
 * were elided between the visible lines.
 */
1155 static int iscsi_pre_exec(struct scst_cmd *scst_cmd)
1157 int res = SCST_PREPROCESS_STATUS_SUCCESS;
1158 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
1159 scst_cmd_get_tgt_priv(scst_cmd);
1160 struct iscsi_cmnd *c, *t;
1164 EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
1166 if (scst_cmd_get_data_direction(scst_cmd) == SCST_DATA_READ) {
1167 EXTRACHECKS_BUG_ON(!list_empty(&req->rx_ddigest_cmd_list));
1171 /* If data digest isn't used this list will be empty */
1172 list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
1173 rx_ddigest_cmd_list_entry) {
1174 TRACE_DBG("Checking digest of RX ddigest cmd %p", c);
1175 if (digest_rx_data(c) != 0) {
1176 scst_set_cmd_error(scst_cmd,
1177 SCST_LOAD_SENSE(iscsi_sense_crc_error));
1178 res = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
1180 * The rest of rx_ddigest_cmd_list will be freed
1181 * in req_cmnd_release()
1185 cmd_del_from_rx_ddigest_list(c);
1190 TRACE_EXIT_RES(res);
/*
 * noop_out_start() - begin processing a NOP-Out PDU on the read thread.
 *
 * Protocol checks: the TTT must be the reserved tag (the target never
 * solicits NOP-Outs); a reserved ITT is only legal on an immediate
 * "ping-free" NOP-Out, which only updates sequence numbers; otherwise
 * the command is inserted into the ITT hash. Any payload is then
 * mapped into the read iovecs — into a freshly allocated sg for a
 * pingable NOP-Out (the data is echoed in the NOP-In), or into
 * dummy_page sinks for the reserved-ITT case where it is discarded.
 *
 * Returns 0 or a negative -ISCSI_REASON_* code.
 *
 * NOTE(review): leading numerals are extraction residue; goto labels,
 * several local declarations (err, size, i, tmp), sg allocation
 * parameters, per-iteration size decrements and the final return were
 * elided between the visible lines.
 */
1194 static int noop_out_start(struct iscsi_cmnd *cmnd)
1196 struct iscsi_conn *conn = cmnd->conn;
1200 TRACE_DBG("%p", cmnd);
1202 iscsi_extracheck_is_rd_thread(conn);
1204 if (unlikely(cmnd_ttt(cmnd) != cpu_to_be32(ISCSI_RESERVED_TAG))) {
1206 * We don't request a NOP-Out by sending a NOP-In.
1207 * See 10.18.2 in the draft 20.
1209 PRINT_ERROR("Initiator sent command with not RESERVED tag and "
1210 "TTT %x", cmnd_itt(cmnd));
1211 err = -ISCSI_REASON_PROTOCOL_ERROR;
1215 if (cmnd_itt(cmnd) == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1216 if (unlikely(!(cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE)))
1217 PRINT_ERROR("%s", "Initiator sent RESERVED tag for "
1218 "non-immediate command");
1219 spin_lock(&conn->session->sn_lock);
1220 __update_stat_sn(cmnd);
1221 err = check_cmd_sn(cmnd);
1222 spin_unlock(&conn->session->sn_lock);
1226 err = cmnd_insert_hash(cmnd);
1227 if (unlikely(err < 0)) {
1228 PRINT_ERROR("Can't insert in hash: ignore this "
1229 "request %x", cmnd_itt(cmnd));
1234 size = cmnd->pdu.datasize;
1237 size = (size + 3) & -4;
1238 conn->read_msg.msg_iov = conn->read_iov;
1239 if (cmnd->pdu.bhs.itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
1240 struct scatterlist *sg;
1242 cmnd->sg = sg = scst_alloc(size, GFP_KERNEL,
1245 TRACE(TRACE_OUT_OF_MEM, "Allocating buffer for"
1246 " %d NOP-Out payload failed", size);
1247 err = -ISCSI_REASON_OUT_OF_RESOURCES;
1251 /* We already checked it in check_segment_length() */
1252 sBUG_ON(cmnd->sg_cnt > (signed)ISCSI_CONN_IOV_MAX);
1255 cmnd->bufflen = size;
1257 for (i = 0; i < cmnd->sg_cnt; i++) {
1258 conn->read_iov[i].iov_base =
1259 (void __force __user *)(page_address(sg_page(&sg[i])));
1260 tmp = min_t(u32, size, PAGE_SIZE);
1261 conn->read_iov[i].iov_len = tmp;
1262 conn->read_size += tmp;
1268 * There are no problems with the safety from concurrent
1269 * accesses to dummy_page, since for ISCSI_RESERVED_TAG
1270 * the data only read and then discarded.
1272 for (i = 0; i < (signed)ISCSI_CONN_IOV_MAX; i++) {
1273 conn->read_iov[i].iov_base =
1274 (void __force __user *)(page_address(dummy_page));
1275 tmp = min_t(u32, size, PAGE_SIZE);
1276 conn->read_iov[i].iov_len = tmp;
1277 conn->read_size += tmp;
1281 /* We already checked size in check_segment_length() */
1285 conn->read_msg.msg_iovlen = i;
1286 TRACE_DBG("msg_iov=%p, msg_iovlen=%zd", conn->read_msg.msg_iov,
1287 conn->read_msg.msg_iovlen);
1294 static inline u32 get_next_ttt(struct iscsi_conn *conn)
1297 struct iscsi_session *session = conn->session;
1299 iscsi_extracheck_is_rd_thread(conn);
1301 if (session->next_ttt == ISCSI_RESERVED_TAG)
1302 session->next_ttt++;
1303 ttt = session->next_ttt++;
1305 return cpu_to_be32(ttt);
/*
 * Start processing of a received SCSI Command PDU.
 *
 * Registers the command with the SCST core (scst_rx_cmd()), sets the
 * expected data direction and queue type from the PDU flags, then
 * hands the command to SCST stage-1 processing and waits for it to
 * leave the RX_CMD state.  On success the WRITE-direction bookkeeping
 * (unsolicited data, R2T length, TTT, sg list) is set up and, if the
 * PDU carries immediate data, reception of that data is prepared.
 *
 * Returns 0 or a negative/positive status propagated from
 * cmnd_prepare_recv_pdu() (exact convention not fully visible here).
 *
 * Fixes vs. previous revision: "paremeters" -> "parameters" and the
 * unbalanced '(' in the "Unexpected unsolicited data" message.
 */
1308 static int scsi_cmnd_start(struct iscsi_cmnd *req)
1310 struct iscsi_conn *conn = req->conn;
1311 struct iscsi_session *session = conn->session;
1312 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
1313 struct scst_cmd *scst_cmd;
1314 scst_data_direction dir;
1319 TRACE_DBG("scsi command: %02x", req_hdr->scb[0]);
1321 TRACE_DBG("Incrementing active_cmds (cmd %p, sess %p, "
1322 "new value %d)", req, session,
1323 atomic_read(&session->active_cmds)+1);
1324 atomic_inc(&session->active_cmds);
1325 req->dec_active_cmnds = 1;
1327 scst_cmd = scst_rx_cmd(session->scst_sess,
1328 (uint8_t *)&req_hdr->lun, sizeof(req_hdr->lun),
1329 req_hdr->scb, sizeof(req_hdr->scb), SCST_NON_ATOMIC);
1330 if (scst_cmd == NULL) {
 /* SCST could not allocate a command: report BUSY and reject */
1331 create_status_rsp(req, SAM_STAT_BUSY, NULL, 0);
1332 cmnd_reject_scsi_cmd(req);
1336 req->scst_cmd = scst_cmd;
1337 scst_cmd_set_tag(scst_cmd, req_hdr->itt);
1338 scst_cmd_set_tgt_priv(scst_cmd, req);
1340 if (req_hdr->flags & ISCSI_CMD_READ) {
1341 dir = SCST_DATA_READ;
1342 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
 /* Without zero-copy completion notification the target must
  * allocate its own data buffer (no SGV cache reuse). */
1343 scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
1345 } else if (req_hdr->flags & ISCSI_CMD_WRITE)
1346 dir = SCST_DATA_WRITE;
1348 dir = SCST_DATA_NONE;
1349 scst_cmd_set_expected(scst_cmd, dir,
1350 be32_to_cpu(req_hdr->data_length));
 /* Map iSCSI task attributes onto SCST queue types */
1352 switch (req_hdr->flags & ISCSI_CMD_ATTR_MASK) {
1353 case ISCSI_CMD_SIMPLE:
1354 scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1356 case ISCSI_CMD_HEAD_OF_QUEUE:
1357 scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1359 case ISCSI_CMD_ORDERED:
1360 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1363 scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
1365 case ISCSI_CMD_UNTAGGED:
1366 scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
1369 PRINT_ERROR("Unknown task code %x, use ORDERED instead",
1370 req_hdr->flags & ISCSI_CMD_ATTR_MASK);
1371 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1375 /* cmd_sn is already in CPU format converted in check_cmd_sn() */
1376 scst_cmd_set_tgt_sn(scst_cmd, req_hdr->cmd_sn);
1378 TRACE_DBG("START Command (tag %d, queue_type %d)",
1379 req_hdr->itt, scst_cmd->queue_type);
1380 req->scst_state = ISCSI_CMD_STATE_RX_CMD;
1381 scst_cmd_init_stage1_done(scst_cmd, SCST_CONTEXT_DIRECT, 0);
 /* Block until SCST preprocessing moves us out of RX_CMD */
1383 wait_event(req->scst_waitQ, req->scst_state != ISCSI_CMD_STATE_RX_CMD);
1385 if (unlikely(req->scst_state != ISCSI_CMD_STATE_AFTER_PREPROC)) {
1386 TRACE_DBG("req %p is in %x state", req, req->scst_state);
1387 if (req->scst_state == ISCSI_CMD_STATE_PROCESSED) {
1388 cmnd_reject_scsi_cmd(req);
1391 if (unlikely(req->tm_aborted)) {
1392 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
1394 cmnd_prepare_get_rejected_cmd_data(req);
1400 dir = scst_cmd_get_data_direction(scst_cmd);
1401 if (dir != SCST_DATA_WRITE) {
 /* Non-WRITE commands must be FINAL and carry no data segment */
1402 if (unlikely(!(req_hdr->flags & ISCSI_CMD_FINAL) ||
1403 req->pdu.datasize)) {
1404 PRINT_ERROR("Unexpected unsolicited data (ITT %x "
1405 "CDB %x)", cmnd_itt(req), req_hdr->scb[0]);
1406 create_sense_rsp(req, ABORTED_COMMAND, 0xc, 0xc);
1407 cmnd_reject_scsi_cmd(req);
1412 if (dir == SCST_DATA_WRITE) {
1413 req->is_unsolicited_data = !(req_hdr->flags & ISCSI_CMD_FINAL);
1414 req->r2t_length = be32_to_cpu(req_hdr->data_length) -
1416 if (req->r2t_length > 0)
1417 req->data_waiting = 1;
1419 req->target_task_tag = get_next_ttt(conn);
1420 req->sg = scst_cmd_get_sg(scst_cmd);
1421 req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
1422 req->bufflen = scst_cmd_get_bufflen(scst_cmd);
1423 if (unlikely(req->r2t_length > req->bufflen)) {
 /* Clamp: never solicit more data than the buffer can hold */
1424 PRINT_ERROR("req->r2t_length %d > req->bufflen %d",
1425 req->r2t_length, req->bufflen);
1426 req->r2t_length = req->bufflen;
1429 TRACE_DBG("req=%p, dir=%d, is_unsolicited_data=%d, "
1430 "r2t_length=%d, bufflen=%d", req, dir,
1431 req->is_unsolicited_data, req->r2t_length, req->bufflen);
1433 if (unlikely(!session->sess_param.immediate_data &&
1434 req->pdu.datasize)) {
1435 PRINT_ERROR("Initiator %s violated negotiated parameters: "
1436 "forbidden immediate data sent (ITT %x, op %x)",
1437 session->initiator_name, cmnd_itt(req),
1443 if (unlikely(session->sess_param.initial_r2t &&
1444 !(req_hdr->flags & ISCSI_CMD_FINAL))) {
1445 PRINT_ERROR("Initiator %s violated negotiated parameters: "
1446 "initial R2T is required (ITT %x, op %x)",
1447 session->initiator_name, cmnd_itt(req),
1453 if (req->pdu.datasize) {
1454 if (unlikely(dir != SCST_DATA_WRITE)) {
1455 PRINT_ERROR("pdu.datasize(%d) >0, but dir(%x) isn't "
1456 "WRITE", req->pdu.datasize, dir);
1457 create_sense_rsp(req, ABORTED_COMMAND, 0xc, 0xc);
1458 cmnd_reject_scsi_cmd(req);
1460 res = cmnd_prepare_recv_pdu(conn, req, 0,
1464 /* Aborted commands will be freed in cmnd_rx_end() */
1465 TRACE_EXIT_RES(res);
/*
 * Begin reception of a SCSI Data-Out PDU.
 *
 * Looks up the original WRITE request by ITT in the session hash,
 * validates the data size against the outstanding R2T length for
 * unsolicited data, rejects data that arrives after a FINAL
 * unsolicited burst, and finally prepares the receive iov via
 * cmnd_prepare_recv_pdu() at the PDU's buffer offset.
 */
1469 static int data_out_start(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
1471 struct iscsi_data_out_hdr *req_hdr =
1472 (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1473 struct iscsi_cmnd *orig_req = NULL;
1474 u32 offset = be32_to_cpu(req_hdr->buffer_offset);
1480 * There is no race with send_r2t() and conn_abort(), since
1481 * all functions called from single read thread
1483 iscsi_extracheck_is_rd_thread(cmnd->conn);
1485 update_stat_sn(cmnd);
1487 cmnd->cmd_req = orig_req = cmnd_find_hash(conn->session, req_hdr->itt,
1489 if (unlikely(orig_req == NULL)) {
1490 /* It might happen if req was aborted and then freed */
1491 TRACE(TRACE_MGMT_MINOR, "Unable to find scsi task %x %x",
1492 cmnd_itt(cmnd), cmnd_ttt(cmnd))
1496 if (orig_req->is_unsolicited_data) {
 /* Initiator sent more unsolicited data than was left to solicit */
1497 if (unlikely(orig_req->r2t_length < cmnd->pdu.datasize)) {
1498 PRINT_ERROR("Data size (%d) > R2T length (%d)",
1499 cmnd->pdu.datasize, orig_req->r2t_length);
1500 mark_conn_closed(conn);
1504 orig_req->r2t_length -= cmnd->pdu.datasize;
1507 /* Check unsolicited burst data */
1508 if (unlikely((req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) &&
1509 (orig_req->pdu.bhs.flags & ISCSI_FLG_FINAL))) {
1510 PRINT_ERROR("Unexpected data from %x %x",
1511 cmnd_itt(cmnd), cmnd_ttt(cmnd));
1512 mark_conn_closed(conn);
1517 TRACE_WRITE("%u %p %p %u %u", req_hdr->ttt, cmnd, orig_req,
1518 offset, cmnd->pdu.datasize);
1520 res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
1523 TRACE_EXIT_RES(res);
 /* Reject path: mark the Data-Out PDU and drain its payload */
1527 sBUG_ON(cmnd->rejected);
1529 cmnd->reject_reason = ISCSI_REJECT_DATA;
1530 cmnd_prepare_get_rejected_cmd_data(cmnd);
/*
 * Complete reception of a Data-Out PDU for request cmnd->cmd_req.
 *
 * If a data digest is configured and not yet verified, the PDU is
 * queued on the rx-ddigest list first.  For unsolicited data
 * (TTT == ISCSI_RESERVED_TAG) the FINAL flag ends the unsolicited
 * phase; for solicited data FINAL decrements outstanding_r2t and, per
 * the visible tail, may restart the waiting request.  Runs only in
 * the read thread.
 */
1534 static void data_out_end(struct iscsi_cmnd *cmnd)
1536 struct iscsi_data_out_hdr *req_hdr =
1537 (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1538 struct iscsi_cmnd *req;
1540 sBUG_ON(cmnd == NULL);
1541 req = cmnd->cmd_req;
1542 sBUG_ON(req == NULL);
1544 TRACE_DBG("cmnd %p, req %p", cmnd, req);
1546 iscsi_extracheck_is_rd_thread(cmnd->conn);
1548 if (!(cmnd->conn->ddigest_type & DIGEST_NONE) &&
1549 !cmnd->ddigest_checked) {
1550 cmd_add_on_rx_ddigest_list(req, cmnd);
1554 if (req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1555 TRACE_DBG("ISCSI_RESERVED_TAG, FINAL %x",
1556 req_hdr->flags & ISCSI_FLG_FINAL);
1558 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1559 req->is_unsolicited_data = 0;
1565 TRACE_DBG("FINAL %x, outstanding_r2t %d, r2t_length %d",
1566 req_hdr->flags & ISCSI_FLG_FINAL,
1567 req->outstanding_r2t, req->r2t_length);
1569 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1570 if (unlikely(req->is_unsolicited_data)) {
 /* FINAL solicited data while unsolicited phase still open:
  * protocol violation, drop the connection */
1571 PRINT_ERROR("Unexpected unsolicited data "
1572 "(r2t_length %u, outstanding_r2t %d)",
1574 req->is_unsolicited_data);
1575 mark_conn_closed(req->conn);
1578 req->outstanding_r2t--;
1583 if (req->r2t_length != 0) {
1584 if (!req->is_unsolicited_data)
1587 iscsi_restart_waiting_cmnd(req);
/*
 * Mark a command as aborted by a TM function: logs extensive state for
 * debugging and sets cmnd->tm_aborted = 1.  May be called from any
 * thread (see the limitation comment below).
 */
1594 static void __cmnd_abort(struct iscsi_cmnd *cmnd)
1597 * Here, if cmnd is data_waiting, we should iscsi_fail_waiting_cmnd()
1598 * it. But, since this function can be called from any thread, not only
1599 * from the read one, we at the moment can't do that, because of
1600 * absence of appropriate locking protection. But this isn't a stuff
1601 * for 1.0.0. So, currently a misbehaving initiator, not sending
1602 * data in R2T state for a sharing between targets device, for which
1603 * for some reason an aborting TM command, e.g. TARGET RESET, from
1604 * another initiator is issued, can block response for this TM command
1605 * virtually forever and by this make the issuing initiator eventually
1606 * put the device offline.
1608 * ToDo in the next version, possibly a simple connection mutex, taken
1609 * by the read thread before starting any processing and by this
1610 * function, should be sufficient.
1613 TRACE_MGMT_DBG("Aborting cmd %p, scst_cmd %p (scst state %x, "
1614 "ref_cnt %d, itt %x, sn %u, op %x, r2t_len %x, CDB op %x, "
1615 "size to write %u, is_unsolicited_data %d, "
1616 "outstanding_r2t %d, data_waiting %d, sess->exp_cmd_sn %u, "
1617 "conn %p, rd_task %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
1618 atomic_read(&cmnd->ref_cnt), cmnd_itt(cmnd), cmnd->pdu.bhs.sn,
1619 cmnd_opcode(cmnd), cmnd->r2t_length, cmnd_scsicode(cmnd),
1620 cmnd_write_size(cmnd), cmnd->is_unsolicited_data,
1621 cmnd->outstanding_r2t, cmnd->data_waiting,
1622 cmnd->conn->session->exp_cmd_sn, cmnd->conn,
1623 cmnd->conn->rd_task);
1625 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
1626 TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
 /* The actual abort: a simple flag checked along the command's path */
1629 cmnd->tm_aborted = 1;
1634 /* Must be called from the read thread */
/*
 * Handle the ABORT TASK TM function for the command referenced by rtt.
 *
 * Performs the RFC 3720 validity checks: RefCmdSN must not exceed the
 * TM command's CmdSN, the LUN must match the aborted command's LUN,
 * an immediate command's RefCmdSN must equal the TM CmdSN, a
 * non-immediate command's RefCmdSN must equal its own CmdSN, and the
 * TM CmdSN must be greater than the command's CmdSN.  Returns an
 * ISCSI_RESPONSE_* code (FUNCTION_REJECTED on any check failure,
 * UNKNOWN_TASK when the rtt is not found in the hash).
 */
1635 static int cmnd_abort(struct iscsi_cmnd *req)
1637 struct iscsi_session *session = req->conn->session;
1638 struct iscsi_task_mgt_hdr *req_hdr =
1639 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1640 struct iscsi_cmnd *cmnd;
 /* Converted in place: ref_cmd_sn is used in CPU byte order below */
1643 req_hdr->ref_cmd_sn = be32_to_cpu(req_hdr->ref_cmd_sn);
1645 if (after(req_hdr->ref_cmd_sn, req_hdr->cmd_sn)) {
1646 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) > CmdSN(%u)",
1647 req_hdr->ref_cmd_sn, req_hdr->cmd_sn);
1648 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1652 cmnd = cmnd_find_hash_get(session, req_hdr->rtt, ISCSI_RESERVED_TAG);
1654 struct iscsi_conn *conn = cmnd->conn;
1655 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1657 if (req_hdr->lun != hdr->lun) {
1658 PRINT_ERROR("ABORT TASK: LUN mismatch: req LUN "
1659 "%llx, cmd LUN %llx, rtt %u",
1660 (long long unsigned int)req_hdr->lun,
1661 (long long unsigned int)hdr->lun,
1663 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1667 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
1668 if (req_hdr->ref_cmd_sn != req_hdr->cmd_sn) {
1669 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != TM "
1670 "cmd CmdSN(%u) for immediate command "
1671 "%p", req_hdr->ref_cmd_sn,
1672 req_hdr->cmd_sn, cmnd);
1673 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1677 if (req_hdr->ref_cmd_sn != hdr->cmd_sn) {
1678 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != "
1679 "CmdSN(%u) for command %p",
1680 req_hdr->ref_cmd_sn, req_hdr->cmd_sn,
1682 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1687 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1688 (req_hdr->cmd_sn == hdr->cmd_sn)) {
1689 PRINT_ERROR("ABORT TASK: SN mismatch: req SN %x, "
1690 "cmd SN %x, rtt %u", req_hdr->cmd_sn,
1691 hdr->cmd_sn, req_hdr->rtt);
1692 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
 /* All checks passed: abort under the connection's cmd list lock */
1696 spin_lock_bh(&conn->cmd_list_lock);
1698 spin_unlock_bh(&conn->cmd_list_lock);
1703 TRACE_MGMT_DBG("cmd RTT %x not found", req_hdr->rtt);
1704 err = ISCSI_RESPONSE_UNKNOWN_TASK;
1715 /* Must be called from the read thread */
/*
 * Abort commands across the whole target: walks every session and every
 * connection of req's target under target_mutex, taking each
 * connection's cmd_list_lock.  When 'all' is false only commands whose
 * LUN matches the TM request's LUN are affected (TARGET RESET vs
 * LUN RESET callers).
 */
1716 static int target_abort(struct iscsi_cmnd *req, int all)
1718 struct iscsi_target *target = req->conn->session->target;
1719 struct iscsi_task_mgt_hdr *req_hdr =
1720 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1721 struct iscsi_session *session;
1722 struct iscsi_conn *conn;
1723 struct iscsi_cmnd *cmnd;
1725 mutex_lock(&target->target_mutex);
1727 list_for_each_entry(session, &target->session_list,
1728 session_list_entry) {
1729 list_for_each_entry(conn, &session->conn_list,
1731 spin_lock_bh(&conn->cmd_list_lock);
1732 list_for_each_entry(cmnd, &conn->cmd_list,
1738 else if (req_hdr->lun == cmnd_hdr(cmnd)->lun)
1741 spin_unlock_bh(&conn->cmd_list_lock);
1745 mutex_unlock(&target->target_mutex);
1749 /* Must be called from the read thread */
/*
 * ABORT TASK SET / CLEAR TASK SET helper: walks all connections of the
 * TM request's own session and aborts commands on the matching LUN
 * whose CmdSN does not exceed the TM command's CmdSN.  target_mutex
 * protects the conn list, cmd_list_lock each connection's cmd list.
 */
1750 static void task_set_abort(struct iscsi_cmnd *req)
1752 struct iscsi_session *session = req->conn->session;
1753 struct iscsi_task_mgt_hdr *req_hdr =
1754 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1755 struct iscsi_target *target = session->target;
1756 struct iscsi_conn *conn;
1757 struct iscsi_cmnd *cmnd;
1759 mutex_lock(&target->target_mutex);
1761 list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
1762 spin_lock_bh(&conn->cmd_list_lock);
1763 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1764 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1767 if (req_hdr->lun != hdr->lun)
1769 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1770 req_hdr->cmd_sn == hdr->cmd_sn)
1774 spin_unlock_bh(&conn->cmd_list_lock);
1777 mutex_unlock(&target->target_mutex);
1781 /* Must be called from the read thread */
/*
 * Abort all commands on a closing connection.  Commands waiting for
 * data are failed via iscsi_fail_waiting_cmnd(); for those the cmd
 * list lock must be dropped first, which is why the loop re-takes the
 * lock afterwards (the elided part presumably restarts the scan —
 * NOTE(review): confirm against full source).
 */
1782 void conn_abort(struct iscsi_conn *conn)
1784 struct iscsi_cmnd *cmnd;
1786 TRACE_MGMT_DBG("Aborting conn %p", conn);
1788 iscsi_extracheck_is_rd_thread(conn);
1790 spin_lock_bh(&conn->cmd_list_lock);
1792 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1794 if (cmnd->data_waiting) {
1795 if (!cmnd_get_check(cmnd)) {
1796 spin_unlock_bh(&conn->cmd_list_lock);
1798 /* ToDo: this is racy for MC/S */
1799 TRACE_MGMT_DBG("Restarting data waiting cmd "
1801 iscsi_fail_waiting_cmnd(cmnd);
1806 * We are in the read thread, so we may not
1807 * worry that after cmnd release conn gets
1810 spin_lock_bh(&conn->cmd_list_lock);
1815 spin_unlock_bh(&conn->cmd_list_lock);
/*
 * Execute an iSCSI Task Management Function Request.
 *
 * Records the TM CmdSN, drops any delayed TM response still pending on
 * the session, then dispatches on the function code: ABORT TASK (with
 * local cmnd_abort() validation), ABORT/CLEAR TASK SET, CLEAR ACA,
 * warm/cold TARGET RESET, LUN RESET — each forwarded to the SCST core
 * via scst_rx_mgmt_fn() with a zeroed scst_rx_mgmt_params.  TASK
 * REASSIGN and unknown functions are rejected.  The response is sent
 * via iscsi_send_task_mgmt_resp() (on paths where SCST does not
 * deliver it asynchronously).  Must run in the read thread.
 *
 * Fix vs. previous revision: "memset(¶ms, ...)" was HTML-entity
 * corruption of "memset(&params, ...)" ("&para" rendered as '¶').
 */
1820 static void execute_task_management(struct iscsi_cmnd *req)
1822 struct iscsi_conn *conn = req->conn;
1823 struct iscsi_session *sess = conn->session;
1824 struct iscsi_task_mgt_hdr *req_hdr =
1825 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1826 int rc, status, function = req_hdr->function & ISCSI_FUNCTION_MASK;
1827 struct scst_rx_mgmt_params params;
1829 TRACE((function == ISCSI_FUNCTION_ABORT_TASK) ?
1830 TRACE_MGMT_MINOR : TRACE_MGMT,
1831 "TM fn %d", function);
1833 TRACE_MGMT_DBG("TM req %p, itt %x, rtt %x, sn %u, con %p", req,
1834 cmnd_itt(req), req_hdr->rtt, req_hdr->cmd_sn, conn);
1836 iscsi_extracheck_is_rd_thread(conn);
1838 spin_lock(&sess->sn_lock);
1840 sess->tm_sn = req_hdr->cmd_sn;
1841 if (sess->tm_rsp != NULL) {
 /* A previous TM response is still delayed: drop it, the new TM
  * request supersedes it */
1842 struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
1844 TRACE(TRACE_MGMT_MINOR, "Dropping delayed TM rsp %p", tm_rsp);
1846 sess->tm_rsp = NULL;
1849 spin_unlock(&sess->sn_lock);
1851 sBUG_ON(sess->tm_active < 0);
1853 rsp_cmnd_release(tm_rsp);
1855 spin_unlock(&sess->sn_lock);
1857 memset(&params, 0, sizeof(params));
1858 params.atomic = SCST_NON_ATOMIC;
1859 params.tgt_priv = req;
 /* RFC 3720: only ABORT TASK carries a meaningful Referenced Task Tag */
1861 if ((function != ISCSI_FUNCTION_ABORT_TASK) &&
1862 (req_hdr->rtt != ISCSI_RESERVED_TAG)) {
1863 PRINT_ERROR("Invalid RTT %x (TM fn %x)", req_hdr->rtt,
1866 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1870 /* cmd_sn is already in CPU format converted in check_cmd_sn() */
1873 case ISCSI_FUNCTION_ABORT_TASK:
1875 status = cmnd_abort(req);
1877 params.fn = SCST_ABORT_TASK;
1878 params.tag = req_hdr->rtt;
1880 params.lun = (uint8_t *)&req_hdr->lun;
1881 params.lun_len = sizeof(req_hdr->lun);
1883 params.cmd_sn = req_hdr->cmd_sn;
1884 params.cmd_sn_set = 1;
1885 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1887 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1890 case ISCSI_FUNCTION_ABORT_TASK_SET:
1891 task_set_abort(req);
1892 params.fn = SCST_ABORT_TASK_SET;
1893 params.lun = (uint8_t *)&req_hdr->lun;
1894 params.lun_len = sizeof(req_hdr->lun);
1896 params.cmd_sn = req_hdr->cmd_sn;
1897 params.cmd_sn_set = 1;
1898 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1900 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1902 case ISCSI_FUNCTION_CLEAR_TASK_SET:
1903 task_set_abort(req);
1904 params.fn = SCST_CLEAR_TASK_SET;
1905 params.lun = (uint8_t *)&req_hdr->lun;
1906 params.lun_len = sizeof(req_hdr->lun);
1908 params.cmd_sn = req_hdr->cmd_sn;
1909 params.cmd_sn_set = 1;
1910 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1912 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1914 case ISCSI_FUNCTION_CLEAR_ACA:
1915 params.fn = SCST_CLEAR_ACA;
1916 params.lun = (uint8_t *)&req_hdr->lun;
1917 params.lun_len = sizeof(req_hdr->lun);
1919 params.cmd_sn = req_hdr->cmd_sn;
1920 params.cmd_sn_set = 1;
1921 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1923 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1925 case ISCSI_FUNCTION_TARGET_COLD_RESET:
1926 case ISCSI_FUNCTION_TARGET_WARM_RESET:
1927 target_abort(req, 1);
1928 params.fn = SCST_TARGET_RESET;
1929 params.cmd_sn = req_hdr->cmd_sn;
1930 params.cmd_sn_set = 1;
1931 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1933 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1935 case ISCSI_FUNCTION_LOGICAL_UNIT_RESET:
1936 target_abort(req, 0);
1937 params.fn = SCST_LUN_RESET;
1938 params.lun = (uint8_t *)&req_hdr->lun;
1939 params.lun_len = sizeof(req_hdr->lun);
1941 params.cmd_sn = req_hdr->cmd_sn;
1942 params.cmd_sn_set = 1;
1943 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
1945 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1947 case ISCSI_FUNCTION_TASK_REASSIGN:
 /* ERL0 target: allegiance reassignment is not supported */
1949 status = ISCSI_RESPONSE_ALLEGIANCE_REASSIGNMENT_UNSUPPORTED;
1952 PRINT_ERROR("Unknown TM function %d", function);
1954 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1960 iscsi_send_task_mgmt_resp(req, status);
/*
 * Execute a NOP-Out request.  For a real (non-reserved ITT) NOP-Out a
 * NOP-In response is built, echoing the request's ITT and any ping
 * payload (the response reuses the request's sg/bufflen), and queued
 * for writing.  A NOP-Out with ISCSI_RESERVED_TAG needs no response;
 * the request is simply released.
 */
1965 static void noop_out_exec(struct iscsi_cmnd *req)
1967 struct iscsi_cmnd *rsp;
1968 struct iscsi_nop_in_hdr *rsp_hdr;
1970 TRACE_DBG("%p", req);
1972 if (cmnd_itt(req) != cpu_to_be32(ISCSI_RESERVED_TAG)) {
1973 rsp = iscsi_cmnd_create_rsp_cmnd(req);
1975 rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
1976 rsp_hdr->opcode = ISCSI_OP_NOOP_IN;
1977 rsp_hdr->flags = ISCSI_FLG_FINAL;
1978 rsp_hdr->itt = req->pdu.bhs.itt;
1979 rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
1981 if (req->pdu.datasize)
1982 sBUG_ON(req->sg == NULL);
1984 sBUG_ON(req->sg != NULL);
 /* Ping data is echoed back: share the request's buffer */
1988 rsp->sg_cnt = req->sg_cnt;
1989 rsp->bufflen = req->bufflen;
1992 sBUG_ON(get_pgcnt(req->pdu.datasize, 0) > ISCSI_CONN_IOV_MAX);
1994 rsp->pdu.datasize = req->pdu.datasize;
1995 iscsi_cmnd_init_write(rsp,
1996 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
1997 req_cmnd_release(req);
/*
 * Execute a Logout request: build a Logout Response echoing the ITT,
 * flag the response so the connection is closed once it has been sent
 * (should_close_conn), queue it for writing and release the request.
 */
2002 static void logout_exec(struct iscsi_cmnd *req)
2004 struct iscsi_logout_req_hdr *req_hdr;
2005 struct iscsi_cmnd *rsp;
2006 struct iscsi_logout_rsp_hdr *rsp_hdr;
2008 PRINT_INFO("Logout received from initiator %s",
2009 req->conn->session->initiator_name);
2010 TRACE_DBG("%p", req);
2012 req_hdr = (struct iscsi_logout_req_hdr *)&req->pdu.bhs;
2013 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2014 rsp_hdr = (struct iscsi_logout_rsp_hdr *)&rsp->pdu.bhs;
2015 rsp_hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2016 rsp_hdr->flags = ISCSI_FLG_FINAL;
2017 rsp_hdr->itt = req_hdr->itt;
2018 rsp->should_close_conn = 1;
2019 iscsi_cmnd_init_write(rsp,
2020 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2021 req_cmnd_release(req);
/*
 * Execute a fully received request PDU from the read thread.
 *
 * Aborted commands are force-released; rejected ones take the reject
 * path at the bottom.  Otherwise dispatch by opcode: SCSI commands are
 * restarted in SCST (unless still waiting for solicited data), NOP-Out,
 * TM and Logout go to their handlers.
 */
2024 static void iscsi_cmnd_exec(struct iscsi_cmnd *cmnd)
2028 TRACE_DBG("%p,%x,%u", cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn);
2030 iscsi_extracheck_is_rd_thread(cmnd->conn);
2032 if (unlikely(cmnd->tm_aborted)) {
2033 TRACE_MGMT_DBG("cmnd %p (scst_cmd %p) aborted", cmnd,
2035 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
2039 if (unlikely(cmnd->rejected))
2042 switch (cmnd_opcode(cmnd)) {
2043 case ISCSI_OP_SCSI_CMD:
2044 if (cmnd->r2t_length != 0) {
2045 if (!cmnd->is_unsolicited_data) {
2050 iscsi_restart_cmnd(cmnd);
2052 case ISCSI_OP_NOOP_OUT:
2053 noop_out_exec(cmnd);
2055 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2056 execute_task_management(cmnd);
2058 case ISCSI_OP_LOGOUT_CMD:
2062 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2063 req_cmnd_release(cmnd);
 /* Reject path: only SCSI-command rejects are expected here */
2071 TRACE_MGMT_DBG("Rejected cmd %p (reason %d)", cmnd,
2072 cmnd->reject_reason);
2073 switch (cmnd->reject_reason) {
2075 PRINT_ERROR("Unexpected reject reason %d",
2076 cmnd->reject_reason);
2078 case ISCSI_REJECT_SCSI_CMD:
2079 req_cmnd_release(cmnd);
/*
 * Set up transmission of a slice [offset, offset+size) of cmnd's data
 * buffer on conn: records the write offset and adds size to the
 * pending write_size.  Both bounds are sanity-checked against
 * cmnd->bufflen.  Runs only in the write thread.
 */
2085 static void __cmnd_send_pdu(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd,
2086 u32 offset, u32 size)
2088 TRACE_DBG("%p %u,%u,%u", cmnd, offset, size, cmnd->bufflen);
2090 iscsi_extracheck_is_wr_thread(conn);
2092 sBUG_ON(offset > cmnd->bufflen);
2093 sBUG_ON(offset + size > cmnd->bufflen);
2095 conn->write_offset = offset;
2096 conn->write_size += size;
/*
 * Queue cmnd's whole data segment for transmission, padded up to a
 * 4-byte multiple as iSCSI requires.  No-op when the PDU carries no
 * data.
 */
2099 static void cmnd_send_pdu(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
2103 if (!cmnd->pdu.datasize)
 /* Round datasize up to the next multiple of 4 (iSCSI padding) */
2106 size = (cmnd->pdu.datasize + 3) & -4;
2107 sBUG_ON(cmnd->sg == NULL);
2108 sBUG_ON(cmnd->bufflen != size);
2109 __cmnd_send_pdu(conn, cmnd, 0, size);
/*
 * Toggle TCP_CORK on the connection's socket ('on' nonzero corks,
 * zero uncorks/flushes), used to coalesce the PDU header and data
 * into fewer TCP segments.
 */
2113 * Note: the code belows passes a kernel space pointer (&opt) to setsockopt()
2114 * while the declaration of setsockopt specifies that it expects a user space
2115 * pointer. This seems to work fine, and this approach is also used in some
2116 * other parts of the Linux kernel (see e.g. fs/ocfs2/cluster/tcp.c).
2118 static void set_cork(struct socket *sock, int on)
2125 sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK,
2126 (void __force __user *)&opt, sizeof(opt));
/*
 * Begin transmission of a response PDU: cork the socket, point the
 * write iov at the BHS, then per opcode assign the StatSN (advancing
 * it for final/status-bearing responses, see cmnd_set_sn() second
 * argument) and queue any data segment.  Total write_size is padded
 * to a 4-byte multiple at the end.  Runs only in the write thread.
 */
2130 void cmnd_tx_start(struct iscsi_cmnd *cmnd)
2132 struct iscsi_conn *conn = cmnd->conn;
2134 TRACE_DBG("%p:%p:%x", conn, cmnd, cmnd_opcode(cmnd));
2135 iscsi_cmnd_set_length(&cmnd->pdu);
2137 iscsi_extracheck_is_wr_thread(conn);
2139 set_cork(conn->sock, 1);
2141 conn->write_iop = conn->write_iov;
2142 conn->write_iop->iov_base = (void __force __user *)(&cmnd->pdu.bhs);
2143 conn->write_iop->iov_len = sizeof(cmnd->pdu.bhs);
2144 conn->write_iop_used = 1;
2145 conn->write_size = sizeof(cmnd->pdu.bhs);
2147 switch (cmnd_opcode(cmnd)) {
2148 case ISCSI_OP_NOOP_IN:
2149 cmnd_set_sn(cmnd, 1);
2150 cmnd_send_pdu(conn, cmnd);
2152 case ISCSI_OP_SCSI_RSP:
2153 cmnd_set_sn(cmnd, 1);
2154 cmnd_send_pdu(conn, cmnd);
2156 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2157 cmnd_set_sn(cmnd, 1);
2159 case ISCSI_OP_TEXT_RSP:
2160 cmnd_set_sn(cmnd, 1);
2162 case ISCSI_OP_SCSI_DATA_IN:
2164 struct iscsi_data_in_hdr *rsp =
2165 (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
2166 u32 offset = cpu_to_be32(rsp->buffer_offset);
 /* Only a FINAL Data-In advances StatSN */
2168 cmnd_set_sn(cmnd, (rsp->flags & ISCSI_FLG_FINAL) ? 1 : 0);
2169 __cmnd_send_pdu(conn, cmnd, offset, cmnd->pdu.datasize);
2172 case ISCSI_OP_LOGOUT_RSP:
2173 cmnd_set_sn(cmnd, 1);
2176 cmnd->pdu.bhs.sn = cmnd_set_sn(cmnd, 0);
2178 case ISCSI_OP_ASYNC_MSG:
2179 cmnd_set_sn(cmnd, 1);
2181 case ISCSI_OP_REJECT:
2182 cmnd_set_sn(cmnd, 1);
2183 cmnd_send_pdu(conn, cmnd);
2186 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
 /* iSCSI PDUs are padded to 4-byte boundaries */
2191 conn->write_size = (conn->write_size + 3) & -4;
2192 iscsi_dump_pdu(&cmnd->pdu);
/*
 * Finish transmission of a response PDU: validate the opcode, close
 * the connection if the (Logout) response requested it, and uncork
 * the socket to flush the queued segments.
 */
2195 void cmnd_tx_end(struct iscsi_cmnd *cmnd)
2197 struct iscsi_conn *conn = cmnd->conn;
2199 TRACE_DBG("%p:%x (should_close_conn %d)", cmnd, cmnd_opcode(cmnd),
2200 cmnd->should_close_conn);
2202 switch (cmnd_opcode(cmnd)) {
2203 case ISCSI_OP_NOOP_IN:
2204 case ISCSI_OP_SCSI_RSP:
2205 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2206 case ISCSI_OP_TEXT_RSP:
2208 case ISCSI_OP_ASYNC_MSG:
2209 case ISCSI_OP_REJECT:
2210 case ISCSI_OP_SCSI_DATA_IN:
2211 case ISCSI_OP_LOGOUT_RSP:
2214 PRINT_CRIT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2219 if (cmnd->should_close_conn) {
2220 PRINT_INFO("Closing connection at initiator %s request",
2221 conn->session->initiator_name);
2222 mark_conn_closed(conn);
 /* Uncork to push out everything queued by cmnd_tx_start() */
2225 set_cork(cmnd->conn->sock, 0);
2229 * Push the command for execution. This functions reorders the commands.
2230 * Called from the read thread.
/*
 * CmdSN ordering point of the session.  Immediate commands execute at
 * once.  A command matching exp_cmd_sn executes and then drains any
 * now-in-order commands from the pending list; during active TM,
 * commands older than tm_sn are aborted first.  Out-of-order commands
 * are validated against the [ExpCmdSN, ExpCmdSN+allowed] window (too
 * old or too far ahead -> dropped) and otherwise inserted CmdSN-sorted
 * into session->pending_list.  Aborted pending commands get a TM clone
 * so the original can still be executed.
 */
2232 static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd)
2234 struct iscsi_session *session = cmnd->conn->session;
2235 struct list_head *entry;
2238 TRACE_DBG("%p:%x %u,%u",
2239 cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn,
2240 session->exp_cmd_sn);
2242 iscsi_extracheck_is_rd_thread(cmnd->conn);
2244 sBUG_ON(cmnd->parent_req != NULL);
2246 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
2247 TRACE_DBG("Immediate cmd %p (cmd_sn %u)", cmnd,
2249 iscsi_cmnd_exec(cmnd);
2253 spin_lock(&session->sn_lock);
2255 cmd_sn = cmnd->pdu.bhs.sn;
2256 if (cmd_sn == session->exp_cmd_sn) {
2258 session->exp_cmd_sn = ++cmd_sn;
2260 if (unlikely(session->tm_active > 0)) {
2261 if (before(cmd_sn, session->tm_sn)) {
2262 struct iscsi_conn *conn = cmnd->conn;
2264 spin_unlock(&session->sn_lock);
2266 spin_lock_bh(&conn->cmd_list_lock);
2268 spin_unlock_bh(&conn->cmd_list_lock);
2270 spin_lock(&session->sn_lock);
2272 iscsi_check_send_delayed_tm_resp(session);
2275 spin_unlock(&session->sn_lock);
2277 iscsi_cmnd_exec(cmnd);
 /* Drain pending commands that have become in-order */
2279 if (list_empty(&session->pending_list))
2281 cmnd = list_entry(session->pending_list.next,
2283 pending_list_entry);
2284 if (cmnd->pdu.bhs.sn != cmd_sn)
2287 list_del(&cmnd->pending_list_entry);
2290 TRACE_DBG("Processing pending cmd %p (cmd_sn %u)",
2293 spin_lock(&session->sn_lock);
2298 TRACE_DBG("Pending cmd %p (cmd_sn %u, exp_cmd_sn %u)",
2299 cmnd, cmd_sn, session->exp_cmd_sn);
2302 * iSCSI RFC 3720: "The target MUST silently ignore any
2303 * non-immediate command outside of [from ExpCmdSN to MaxCmdSN
2304 * inclusive] range". But we won't honor the MaxCmdSN
2305 * requirement, because, since we adjust MaxCmdSN from the
2306 * separate write thread, rarery it is possible that initiator
2307 * can legally send command with CmdSN>MaxSN. But it won't
2308 * hurt anything, in the worst case it will lead to
2309 * additional QUEUE FULL status.
2312 if (unlikely(before(cmd_sn, session->exp_cmd_sn))) {
2313 PRINT_ERROR("Unexpected cmd_sn (%u,%u)", cmd_sn,
2314 session->exp_cmd_sn);
2319 if (unlikely(after(cmd_sn, session->exp_cmd_sn +
2320 iscsi_get_allowed_cmds(session)))) {
2321 TRACE_MGMT_DBG("Too large cmd_sn %u (exp_cmd_sn %u, "
2322 "max_sn %u)", cmd_sn, session->exp_cmd_sn,
2323 iscsi_get_allowed_cmds(session));
2327 spin_unlock(&session->sn_lock);
2329 if (unlikely(drop)) {
2330 req_cmnd_release_force(cmnd,
2331 ISCSI_FORCE_RELEASE_WRITE);
2335 if (unlikely(cmnd->tm_aborted)) {
2336 struct iscsi_cmnd *tm_clone;
2338 TRACE_MGMT_DBG("Pending aborted cmnd %p, creating TM "
2339 "clone (scst cmd %p, state %d)", cmnd,
2340 cmnd->scst_cmd, cmnd->scst_state);
2342 tm_clone = cmnd_alloc(cmnd->conn, NULL);
2343 if (tm_clone != NULL) {
2344 tm_clone->tm_aborted = 1;
2345 tm_clone->pdu = cmnd->pdu;
2347 TRACE_MGMT_DBG("TM clone %p created",
2350 iscsi_cmnd_exec(cmnd);
2353 PRINT_ERROR("%s", "Unable to create TM clone");
 /* Insert into pending_list keeping it sorted by CmdSN */
2356 list_for_each(entry, &session->pending_list) {
2357 struct iscsi_cmnd *tmp =
2358 list_entry(entry, struct iscsi_cmnd,
2359 pending_list_entry);
2360 if (before(cmd_sn, tmp->pdu.bhs.sn))
2364 list_add_tail(&cmnd->pending_list_entry, entry);
/*
 * Validate a received PDU's data segment length against the session's
 * negotiated MaxRecvDataSegmentLength.  On violation logs the
 * offending initiator and closes the connection.
 *
 * Fix vs. previous revision: closing ')' added to the error message.
 */
2371 static int check_segment_length(struct iscsi_cmnd *cmnd)
2373 struct iscsi_conn *conn = cmnd->conn;
2374 struct iscsi_session *session = conn->session;
2376 if (unlikely(cmnd->pdu.datasize > session->sess_param.max_recv_data_length)) {
2377 PRINT_ERROR("Initiator %s violated negotiated parameters: "
2378 "data too long (ITT %x, datasize %u, "
2379 "max_recv_data_length %u)", session->initiator_name,
2380 cmnd_itt(cmnd), cmnd->pdu.datasize,
2381 session->sess_param.max_recv_data_length);
2382 mark_conn_closed(conn);
/*
 * Entry point for a newly received PDU header: checks the segment
 * length, then dispatches by opcode (hash insertion for commands that
 * carry an ITT, payload setup for NOP-Out and Data-Out).  A negative
 * rc from any dispatcher leads to iscsi_cmnd_reject() with -rc as the
 * iSCSI reject reason; TEXT/SNACK and unknown opcodes are rejected as
 * unsupported.
 */
2388 int cmnd_rx_start(struct iscsi_cmnd *cmnd)
2390 struct iscsi_conn *conn = cmnd->conn;
2393 iscsi_dump_pdu(&cmnd->pdu);
2395 res = check_segment_length(cmnd);
2399 switch (cmnd_opcode(cmnd)) {
2400 case ISCSI_OP_NOOP_OUT:
2401 rc = noop_out_start(cmnd);
2403 case ISCSI_OP_SCSI_CMD:
2404 rc = cmnd_insert_hash(cmnd);
2405 if (likely(rc == 0)) {
2406 res = scsi_cmnd_start(cmnd);
2407 if (unlikely(res != 0))
2411 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2412 rc = cmnd_insert_hash(cmnd);
2414 case ISCSI_OP_SCSI_DATA_OUT:
2415 res = data_out_start(conn, cmnd);
2416 rc = 0; /* to avoid compiler warning */
2417 if (unlikely(res != 0))
2420 case ISCSI_OP_LOGOUT_CMD:
2421 rc = cmnd_insert_hash(cmnd);
2423 case ISCSI_OP_TEXT_CMD:
2424 case ISCSI_OP_SNACK_CMD:
2425 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2428 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2432 if (unlikely(rc < 0)) {
2433 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
2434 PRINT_ERROR("Error %d (iSCSI opcode %x, ITT %x, op %x)", rc,
2435 cmnd_opcode(cmnd), cmnd_itt(cmnd),
2436 (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD ?
 /* rc is a negated ISCSI_REASON_* code */
2438 iscsi_cmnd_reject(cmnd, -rc);
2442 TRACE_EXIT_RES(res);
2446 void cmnd_rx_end(struct iscsi_cmnd *cmnd)
2450 TRACE_DBG("%p:%x", cmnd, cmnd_opcode(cmnd));
2452 if (unlikely(cmnd->rejected))
2456 switch (cmnd_opcode(cmnd)) {
2457 case ISCSI_OP_SCSI_CMD:
2458 case ISCSI_OP_NOOP_OUT:
2459 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2460 case ISCSI_OP_LOGOUT_CMD:
2461 iscsi_session_push_cmnd(cmnd);
2463 case ISCSI_OP_SCSI_DATA_OUT:
2467 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2468 req_cmnd_release(cmnd);
2477 switch (cmnd->reject_reason) {
2479 PRINT_ERROR("Unexpected reject reason %d",
2480 cmnd->reject_reason);
2482 case ISCSI_REJECT_CMD:
2483 case ISCSI_REJECT_DATA:
2484 req_cmnd_release(cmnd);
2486 case ISCSI_REJECT_SCSI_CMD:
/*
 * SCST alloc_data_buf callback used when the zero-copy completion
 * notification patch is NOT applied: forces READ buffers out of the
 * SGV cache so they are never reused while the network layer may still
 * reference the pages (see comment below).
 */
2492 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
2493 static int iscsi_alloc_data_buf(struct scst_cmd *cmd)
2496 * sock->ops->sendpage() is async zero copy operation,
2497 * so we must be sure not to free and reuse
2498 * the command's buffer before the sending was completed
2499 * by the network layers. It is possible only if we
2500 * don't use SGV cache.
2502 EXTRACHECKS_BUG_ON(scst_cmd_get_data_direction(cmd) != SCST_DATA_READ);
2503 scst_cmd_set_no_sgv(cmd);
/*
 * Move req to new_state and wake sleepers on req->scst_waitQ.  An
 * ordered cmnd_get() is taken first so the command cannot be freed
 * between the state store and the wake_up() (see comment below).
 */
2508 static inline void iscsi_set_state_wake_up(struct iscsi_cmnd *req,
2512 * We use wait_event() to wait for the state change, but it checks its
2513 * condition without any protection, so without cmnd_get() it is
2514 * possible that req will die "immediately" after the state assignment
2515 * and wake_up() will operate on dead data. We use the ordered version
2516 * of cmnd_get(), because "get" must be done before the state
2519 cmnd_get_ordered(req);
2520 req->scst_state = new_state;
2521 wake_up(&req->scst_waitQ);
/*
 * SCST preprocessing_done callback: transitions the iSCSI command from
 * RX_CMD to AFTER_PREPROC, waking scsi_cmnd_start() which is blocked
 * in wait_event() on that state.
 */
2526 static void iscsi_preprocessing_done(struct scst_cmd *scst_cmd)
2528 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2529 scst_cmd_get_tgt_priv(scst_cmd);
2531 TRACE_DBG("req %p", req);
2533 EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_RX_CMD);
2535 iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_AFTER_PREPROC);
/*
 * Try to do the connection's pending write work inline instead of
 * waking the write thread: claim the connection (removing it from the
 * wr list or taking it from IDLE to PROCESSING), call iscsi_send() if
 * the socket is write-ready, then either re-queue the connection for
 * the write thread (send error or more data ready) or return it to
 * IDLE.
 */
2542 * IMPORTANT! Connection conn must be protected by additional conn_get()
2543 * upon entrance in this function, because otherwise it could be destroyed
2544 * inside as a result of iscsi_send(), which releases sent commands.
2546 static void iscsi_try_local_processing(struct iscsi_conn *conn)
2552 spin_lock_bh(&iscsi_wr_lock);
2553 switch (conn->wr_state) {
2554 case ISCSI_CONN_WR_STATE_IN_LIST:
2555 list_del(&conn->wr_list_entry);
2557 case ISCSI_CONN_WR_STATE_IDLE:
2558 #ifdef CONFIG_SCST_EXTRACHECKS
2559 conn->wr_task = current;
2561 conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
2562 conn->wr_space_ready = 0;
2569 spin_unlock_bh(&iscsi_wr_lock);
2574 if (test_write_ready(conn))
2575 rc = iscsi_send(conn);
2577 spin_lock_bh(&iscsi_wr_lock);
2578 #ifdef CONFIG_SCST_EXTRACHECKS
2579 conn->wr_task = NULL;
 /* Work remains or send failed: hand the conn to the write thread */
2581 if ((rc <= 0) || test_write_ready(conn)) {
2582 list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
2583 conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
2584 wake_up(&iscsi_wr_waitQ);
2586 conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
2587 spin_unlock_bh(&iscsi_wr_lock);
/*
 * SCST xmit_response() callback: build and queue the iSCSI Data-In and/or
 * SCSI Response PDUs for a finished scst_cmd, then kick write processing.
 * Returns SCST_TGT_RES_* codes.
 * NOTE(review): this listing is truncated — closing braces, some statements
 * and the out/error paths of the original are missing from this excerpt.
 */
2594 static int iscsi_xmit_response(struct scst_cmd *scst_cmd)
2596 int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2597 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2598 scst_cmd_get_tgt_priv(scst_cmd);
2599 struct iscsi_conn *conn = req->conn;
2600 int status = scst_cmd_get_status(scst_cmd);
2601 u8 *sense = scst_cmd_get_sense_buffer(scst_cmd);
2602 int sense_len = scst_cmd_get_sense_buffer_len(scst_cmd);
2603 int old_state = req->scst_state;
/* This path may sleep; ask SCST to retry in thread context if atomic. */
2605 if (scst_cmd_atomic(scst_cmd))
2606 return SCST_TGT_RES_NEED_THREAD_CTX;
/* Break the scst_cmd -> req link; from here on req owns the response. */
2608 scst_cmd_set_tgt_priv(scst_cmd, NULL);
/* Aborted commands are released without sending a normal response. */
2610 req->tm_aborted |= scst_cmd_aborted(scst_cmd) ? 1 : 0;
2611 if (unlikely(req->tm_aborted)) {
2612 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
2615 scst_set_delivery_status(req->scst_cmd,
2616 SCST_CMD_DELIVERY_ABORTED);
2618 if (old_state == ISCSI_CMD_STATE_RESTARTED) {
2619 req->scst_state = ISCSI_CMD_STATE_PROCESSED;
2620 req_cmnd_release_force(req, ISCSI_FORCE_RELEASE_WRITE);
2622 iscsi_set_state_wake_up(req,
2623 ISCSI_CMD_STATE_PROCESSED);
/*
 * Not yet restarted: queue a status response and let the state machine
 * deliver it once the command reaches the right state.
 */
2628 if (unlikely(old_state != ISCSI_CMD_STATE_RESTARTED)) {
2629 TRACE_DBG("req %p on %d state", req, old_state);
2631 create_status_rsp(req, status, sense, sense_len);
2633 switch (old_state) {
2634 case ISCSI_CMD_STATE_RX_CMD:
2635 case ISCSI_CMD_STATE_AFTER_PREPROC:
2641 iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_PROCESSED);
2645 req->scst_state = ISCSI_CMD_STATE_PROCESSED;
/* Adopt SCST's response buffer for Data-In transfer. */
2647 req->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2648 req->sg = scst_cmd_get_sg(scst_cmd);
2649 req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2651 TRACE_DBG("req %p, is_send_status=%x, req->bufflen=%d, req->sg=%p, "
2652 "req->sg_cnt %d", req, is_send_status, req->bufflen, req->sg,
/* Data without status is not supported; force a hardware-error sense. */
2655 if (unlikely((req->bufflen != 0) && !is_send_status)) {
2656 PRINT_CRIT_ERROR("%s", "Sending DATA without STATUS is "
2658 scst_set_cmd_error(scst_cmd,
2659 SCST_LOAD_SENSE(scst_sense_hardw_error));
2663 if (req->bufflen != 0) {
2665 * Check above makes sure that is_send_status is set,
2666 * so status is valid here, but in future that could change.
2669 if (status != SAM_STAT_CHECK_CONDITION) {
/* Good/non-CC status can ride piggybacked on the final Data-In PDU. */
2670 send_data_rsp(req, status, is_send_status);
2672 struct iscsi_cmnd *rsp;
2673 struct iscsi_scsi_rsp_hdr *rsp_hdr;
/* CHECK CONDITION: send data without status, then a separate rsp PDU. */
2675 send_data_rsp(req, 0, 0);
2676 if (is_send_status) {
2677 rsp = create_status_rsp(req, status, sense,
2680 (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
/* Residual = initiator's expected transfer length minus actual data. */
2681 resid = cmnd_read_size(req) - req->bufflen;
2684 ISCSI_FLG_RESIDUAL_UNDERFLOW;
2685 rsp_hdr->residual_count =
2687 } else if (resid < 0) {
2689 ISCSI_FLG_RESIDUAL_OVERFLOW;
2690 rsp_hdr->residual_count =
2691 cpu_to_be32(-resid);
2693 iscsi_cmnd_init_write(rsp,
2694 ISCSI_INIT_WRITE_REMOVE_HASH);
/* No data at all: status-only response; whole expected size is residual. */
2697 } else if (is_send_status) {
2698 struct iscsi_cmnd *rsp;
2699 struct iscsi_scsi_rsp_hdr *rsp_hdr;
2701 rsp = create_status_rsp(req, status, sense, sense_len);
2702 rsp_hdr = (struct iscsi_scsi_rsp_hdr *) &rsp->pdu.bhs;
2703 resid = cmnd_read_size(req);
2705 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
2706 rsp_hdr->residual_count = cpu_to_be32(resid);
2708 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH);
2710 #ifdef CONFIG_SCST_EXTRACHECKS
2716 * "_ordered" here to protect from reorder, which can lead to
2717 * preliminary connection destroy in req_cmnd_release(). Just in
2718 * case, actually, because reordering shouldn't go so far, but who
/* Pin the connection across release, then try to send inline. */
2721 conn_get_ordered(conn);
2722 req_cmnd_release(req);
2723 iscsi_try_local_processing(conn);
2727 return SCST_TGT_RES_SUCCESS;
2730 /* Called under sn_lock */
/*
 * Decide whether a task-management response must be delayed until all
 * commands affected by it (those with lower CmdSNs) have been received.
 * Returns true when the TM CmdSN is still ahead of the session's exp_cmd_sn.
 * NOTE(review): excerpt is truncated — the function/res declarations and the
 * per-function checks between lines 2738-2744 are not visible here.
 */
2731 static bool iscsi_is_delay_tm_resp(struct iscsi_cmnd *rsp)
2734 struct iscsi_task_mgt_hdr *req_hdr =
2735 (struct iscsi_task_mgt_hdr *)&rsp->parent_req->pdu.bhs;
2736 int function = req_hdr->function & ISCSI_FUNCTION_MASK;
2737 struct iscsi_session *sess = rsp->conn->session;
2741 /* This should be checked for immediate TM commands as well */
/* Delay while commands ordered before the TM request are still expected. */
2745 if (before(sess->exp_cmd_sn, req_hdr->cmd_sn))
2750 TRACE_EXIT_RES(res);
2754 /* Called under sn_lock, but might drop it inside, then reacquire */
/*
 * If the session has a delayed TM response pending and the delay condition
 * has cleared, detach it from the session and queue it for sending.
 * The sn_lock is dropped around iscsi_cmnd_init_write() (which may sleep /
 * take other locks) and re-taken before returning.
 */
2755 static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess)
2757 struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
/* Still waiting for affected commands to arrive — keep it delayed. */
2764 if (iscsi_is_delay_tm_resp(tm_rsp))
2767 TRACE(TRACE_MGMT_MINOR, "Sending delayed rsp %p", tm_rsp);
2769 sess->tm_rsp = NULL;
2772 spin_unlock(&sess->sn_lock);
2774 sBUG_ON(sess->tm_active < 0);
2776 iscsi_cmnd_init_write(tm_rsp,
2777 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2779 spin_lock(&sess->sn_lock);
/*
 * Build the iSCSI Task Management Function Response PDU for @req with the
 * given iSCSI response code @status, and either send it immediately or park
 * it on the session as a delayed response (see iscsi_is_delay_tm_resp()).
 * NOTE(review): excerpt is truncated — braces and some statements between
 * the visible lines are missing from this view.
 */
2786 static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
2788 struct iscsi_cmnd *rsp;
2789 struct iscsi_task_mgt_hdr *req_hdr =
2790 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
2791 struct iscsi_task_rsp_hdr *rsp_hdr;
2792 struct iscsi_session *sess = req->conn->session;
2793 int fn = req_hdr->function & ISCSI_FUNCTION_MASK;
2797 TRACE_MGMT_DBG("TM req %p finished", req);
2798 TRACE((req_hdr->function == ISCSI_FUNCTION_ABORT_TASK) ?
2799 TRACE_MGMT_MINOR : TRACE_MGMT,
2800 "TM fn %d finished, status %d", fn, status);
/* Fill in the TM response BHS; ITT echoes the request's ITT. */
2802 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2803 rsp_hdr = (struct iscsi_task_rsp_hdr *)&rsp->pdu.bhs;
2805 rsp_hdr->opcode = ISCSI_OP_SCSI_TASK_MGT_RSP;
2806 rsp_hdr->flags = ISCSI_FLG_FINAL;
2807 rsp_hdr->itt = req_hdr->itt;
2808 rsp_hdr->response = status;
/* A cold reset implies the connection goes down after this response. */
2810 if (fn == ISCSI_FUNCTION_TARGET_COLD_RESET)
2811 rsp->should_close_conn = 1;
/* Only one TM response may be pending per session at a time. */
2813 sBUG_ON(sess->tm_rsp != NULL);
2815 spin_lock(&sess->sn_lock);
2816 if (iscsi_is_delay_tm_resp(rsp)) {
2817 TRACE(TRACE_MGMT_MINOR, "Delaying TM fn %x response %p "
2818 "(req %p), because not all affected commands received "
2819 "(TM cmd sn %u, exp sn %u)",
2820 req_hdr->function & ISCSI_FUNCTION_MASK, rsp, req,
2821 req_hdr->cmd_sn, sess->exp_cmd_sn);
/* Parked as sess->tm_rsp (presumably set in the elided lines) — TODO confirm. */
2823 spin_unlock(&sess->sn_lock);
2827 spin_unlock(&sess->sn_lock);
2829 sBUG_ON(sess->tm_active < 0);
2831 iscsi_cmnd_init_write(rsp,
2832 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2835 req_cmnd_release(req);
/*
 * Map an SCST management-command status to the corresponding iSCSI
 * Task Management Function response code (RFC 3720 TMF responses).
 * Rejected/failed (and, presumably, any unknown status) map to
 * FUNCTION_REJECTED.
 */
2841 static inline int iscsi_get_mgmt_response(int status)
2844 case SCST_MGMT_STATUS_SUCCESS:
2845 return ISCSI_RESPONSE_FUNCTION_COMPLETE;
2847 case SCST_MGMT_STATUS_TASK_NOT_EXIST:
2848 return ISCSI_RESPONSE_UNKNOWN_TASK;
2850 case SCST_MGMT_STATUS_LUN_NOT_EXIST:
2851 return ISCSI_RESPONSE_UNKNOWN_LUN;
2853 case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
2854 return ISCSI_RESPONSE_FUNCTION_UNSUPPORTED;
2856 case SCST_MGMT_STATUS_REJECTED:
2857 case SCST_MGMT_STATUS_FAILED:
2859 return ISCSI_RESPONSE_FUNCTION_REJECTED;
/*
 * SCST task_mgmt_fn_done() callback: translate the SCST management status
 * to an iSCSI TM response code and send the TM response PDU, then detach
 * the target-private pointer from the management command.
 */
2863 static void iscsi_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
2865 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2866 scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
2868 iscsi_get_mgmt_response(scst_mgmt_cmd_get_status(scst_mcmd));
2870 TRACE_MGMT_DBG("req %p, scst_mcmd %p, fn %d, scst status %d",
2871 req, scst_mcmd, scst_mgmt_cmd_get_fn(scst_mcmd),
2872 scst_mgmt_cmd_get_status(scst_mcmd));
2874 iscsi_send_task_mgmt_resp(req, status);
2876 scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
/* Mandatory scst_tgt_template detect() hook; body not visible in this excerpt. */
2881 static int iscsi_target_detect(struct scst_tgt_template *templ)
/* Mandatory scst_tgt_template release() hook; body not visible in this excerpt. */
2887 static int iscsi_target_release(struct scst_tgt *scst_tgt)
/*
 * SCST target template registration for iscsi-scst: wires the callbacks
 * defined in this file into the SCST core. xmit_response is never called
 * in atomic context (xmit_response_atomic = 0).
 */
2893 struct scst_tgt_template iscsi_template = {
2895 .sg_tablesize = ISCSI_CONN_IOV_MAX,
2898 .xmit_response_atomic = 0,
2899 .detect = iscsi_target_detect,
2900 .release = iscsi_target_release,
2901 .xmit_response = iscsi_xmit_response,
/* Custom buffer allocation is only needed without zero-copy TCP support. */
2902 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
2903 .alloc_data_buf = iscsi_alloc_data_buf,
2905 .preprocessing_done = iscsi_preprocessing_done,
2906 .pre_exec = iscsi_pre_exec,
2907 .task_mgmt_fn_done = iscsi_task_mgmt_fn_done,
/*
 * Start @count kernel threads running @fn, named "<name>0".."<name>N-1",
 * and track them on iscsi_threads_list so iscsi_stop_threads() can stop
 * them. Returns 0 on success or a negative errno (error paths/labels are
 * not visible in this excerpt).
 */
2910 static __init int iscsi_run_threads(int count, char *name, int (*fn)(void *))
2914 struct iscsi_thread_t *thr;
2916 for (i = 0; i < count; i++) {
2917 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
2920 PRINT_ERROR("Failed to allocate thr %d", res);
/* kthread_run() both creates and wakes the thread. */
2923 thr->thr = kthread_run(fn, NULL, "%s%d", name, i);
2924 if (IS_ERR(thr->thr)) {
2925 res = PTR_ERR(thr->thr);
2926 PRINT_ERROR("kthread_create() failed: %d", res);
2930 list_add(&thr->threads_list_entry, &iscsi_threads_list);
/*
 * Stop every thread started by iscsi_run_threads(), unlink its tracking
 * entry from iscsi_threads_list, and (presumably, in the elided lines)
 * free it. kthread_stop() blocks until the thread exits.
 */
2937 static void iscsi_stop_threads(void)
2939 struct iscsi_thread_t *t, *tmp;
2941 list_for_each_entry_safe(t, tmp, &iscsi_threads_list,
2942 threads_list_entry) {
2943 int rc = kthread_stop(t->thr);
2945 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
2946 list_del(&t->threads_list_entry);
/*
 * Module init: allocate the dummy page/sg used for padding transfers,
 * install zero-copy page callbacks (when the kernel supports them),
 * register the control chardev, command slab cache, SCST target template
 * and procfs entries, then start the reader ("iscsird") and writer
 * ("iscsiwr") thread pools — one per online CPU, minimum two.
 * Error handling uses the usual goto-cleanup ladder; several labels and
 * statements are not visible in this truncated excerpt.
 */
2951 static int __init iscsi_init(void)
2956 PRINT_INFO("iSCSI SCST Target - version %s", ISCSI_VERSION_STRING);
2958 dummy_page = alloc_pages(GFP_KERNEL, 0);
2959 if (dummy_page == NULL) {
2960 PRINT_ERROR("%s", "Dummy page allocation failed");
2964 sg_init_table(&dummy_sg, 1);
2965 sg_set_page(&dummy_sg, dummy_page, PAGE_SIZE, 0);
/* Zero-copy path: hook get/put page callbacks into the network stack. */
2967 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
2968 err = net_set_get_put_page_callbacks(iscsi_get_page_callback,
2969 iscsi_put_page_callback);
2971 PRINT_INFO("Unable to set page callbackes: %d", err);
2972 goto out_free_dummy;
2976 "CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION "
2977 "not enabled in your kernel. ISCSI-SCST will be working with "
2978 "not the best performance. Refer README file for details.");
/* Dynamic major number (first arg 0) for the iscsi-scst-ctl device. */
2981 ctr_major = register_chrdev(0, ctr_name, &ctr_fops);
2982 if (ctr_major < 0) {
2983 PRINT_ERROR("failed to register the control device %d",
2993 iscsi_cmnd_cache = KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS);
2994 if (!iscsi_cmnd_cache) {
2999 err = scst_register_target_template(&iscsi_template);
3003 iscsi_template_registered = 1;
3005 err = iscsi_procfs_init();
/* At least two threads of each kind, scaling with online CPUs. */
3009 num = max(num_online_cpus(), 2);
3011 err = iscsi_run_threads(num, "iscsird", istrd);
3015 err = iscsi_run_threads(num, "iscsiwr", istwr);
/* Error unwinding: undo each successful step in reverse order. */
3023 iscsi_procfs_exit();
3024 iscsi_stop_threads();
3027 scst_unregister_target_template(&iscsi_template);
3030 kmem_cache_destroy(iscsi_cmnd_cache);
3036 unregister_chrdev(ctr_major, ctr_name);
3039 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3040 net_set_get_put_page_callbacks(NULL, NULL);
3044 __free_pages(dummy_page, 0);
/*
 * Module exit: tear down everything iscsi_init() set up, in reverse
 * order — threads, control chardev, procfs, slab cache, SCST template,
 * zero-copy page callbacks, and finally the dummy page.
 */
3048 static void __exit iscsi_exit(void)
3050 iscsi_stop_threads();
3052 unregister_chrdev(ctr_major, ctr_name);
3054 iscsi_procfs_exit();
3057 kmem_cache_destroy(iscsi_cmnd_cache);
3059 scst_unregister_target_template(&iscsi_template);
3061 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
/* Restore the network stack's default page callbacks. */
3062 net_set_get_put_page_callbacks(NULL, NULL);
3065 __free_pages(dummy_page, 0);
/* Module entry/exit points and license declaration. */
3069 module_init(iscsi_init);
3070 module_exit(iscsi_exit);
3072 MODULE_LICENSE("GPL");