2 * Copyright (C) 2002 - 2003 Ardis Technologies <roman@ardistech.com>
3 * Copyright (C) 2007 - 2009 Vladislav Bolkhovitin
4 * Copyright (C) 2007 - 2009 ID7 Ltd.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
17 #include <linux/module.h>
18 #include <linux/hash.h>
19 #include <linux/kthread.h>
20 #include <linux/scatterlist.h>
22 #include <scsi/scsi.h>
27 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
28 #warning "Patch put_page_callback-<kernel-version>.patch not applied on your\
29 kernel or CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION\
30 config option not set. ISCSI-SCST will be working with degraded\
31 performance. Refer to the README file for details."
34 #define ISCSI_INIT_WRITE_WAKE 0x1
35 #define ISCSI_INIT_WRITE_REMOVE_HASH 0x2
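/*
 * Flags for iscsi_cmnd_init_write()/iscsi_cmnds_init_write():
 * ISCSI_INIT_WRITE_WAKE wakes the write thread after the response is
 * queued; ISCSI_INIT_WRITE_REMOVE_HASH drops the parent request from
 * the per-session ITT hash once no R2T data remains outstanding.
 */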
38 static char ctr_name[] = "iscsi-scst-ctl";
39 static int iscsi_template_registered;
41 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
42 unsigned long iscsi_trace_flag = ISCSI_DEFAULT_LOG_FLAGS;
45 static struct kmem_cache *iscsi_cmnd_cache;
47 DEFINE_SPINLOCK(iscsi_rd_lock);
48 LIST_HEAD(iscsi_rd_list);
49 DECLARE_WAIT_QUEUE_HEAD(iscsi_rd_waitQ);
51 DEFINE_SPINLOCK(iscsi_wr_lock);
52 LIST_HEAD(iscsi_wr_list);
53 DECLARE_WAIT_QUEUE_HEAD(iscsi_wr_waitQ);
55 static struct page *dummy_page;
56 static struct scatterlist dummy_sg;
58 struct iscsi_thread_t {
59 struct task_struct *thr;
60 struct list_head threads_list_entry;
63 static LIST_HEAD(iscsi_threads_list);
65 static void cmnd_remove_hash(struct iscsi_cmnd *cmnd);
66 static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
67 static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd);
68 static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess);
69 static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd);
70 static void req_cmnd_release(struct iscsi_cmnd *req);
72 static inline u32 cmnd_write_size(struct iscsi_cmnd *cmnd)
74 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
76 if (hdr->flags & ISCSI_CMD_WRITE)
77 return be32_to_cpu(hdr->data_length);
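/*
 * Returns the expected READ transfer length of a SCSI command: for a
 * plain READ it is taken from the BHS data_length, while for a
 * bidirectional command (READ and WRITE flags both set) it is taken
 * from the Bidirectional Read Expected Data Transfer Length AHS.
 * Callers treat a negative return as "required AHS missing" (see
 * scsi_cmnd_start()).
 */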
81 static inline int cmnd_read_size(struct iscsi_cmnd *cmnd)
83 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
85 if (hdr->flags & ISCSI_CMD_READ) {
86 struct iscsi_ahs_hdr *ahdr;
88 if (!(hdr->flags & ISCSI_CMD_WRITE))
89 return be32_to_cpu(hdr->data_length);
91 ahdr = (struct iscsi_ahs_hdr *)cmnd->pdu.ahs;
93 uint8_t *p = (uint8_t *)ahdr;
98 ahdr = (struct iscsi_ahs_hdr *)p;
100 if (ahdr->ahstype == ISCSI_AHSTYPE_RLENGTH) {
101 struct iscsi_rlength_ahdr *rh =
102 (struct iscsi_rlength_ahdr *)ahdr;
103 return be32_to_cpu(rh->read_length);
106 s = 3 + be16_to_cpu(ahdr->ahslength);
110 } while (size < cmnd->pdu.ahssize);
117 void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd)
119 EXTRACHECKS_BUG_ON(cmnd->data_waiting);
121 if (unlikely(test_bit(ISCSI_CONN_REINSTATING,
122 &cmnd->conn->conn_aflags))) {
123 struct iscsi_target *target = cmnd->conn->session->target;
126 mutex_lock(&target->target_mutex);
128 get_out = test_bit(ISCSI_CONN_REINSTATING,
129 &cmnd->conn->conn_aflags);
130 /* Let's not look dead */
131 if (scst_cmd_get_cdb(cmnd->scst_cmd)[0] == TEST_UNIT_READY)
137 TRACE_MGMT_DBG("Pending cmnd %p, because conn %p is "
138 "reinstated", cmnd, cmnd->conn);
140 cmnd->scst_state = ISCSI_CMD_STATE_REINST_PENDING;
141 list_add_tail(&cmnd->reinst_pending_cmd_list_entry,
142 &cmnd->conn->reinst_pending_cmd_list);
145 mutex_unlock(&target->target_mutex);
151 cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
152 scst_restart_cmd(cmnd->scst_cmd, SCST_PREPROCESS_STATUS_SUCCESS,
153 SCST_CONTEXT_THREAD);
159 static inline void iscsi_restart_waiting_cmnd(struct iscsi_cmnd *cmnd)
162 * There is no race with conn_abort(), since all functions
163 * are called from the single read thread.
165 iscsi_extracheck_is_rd_thread(cmnd->conn);
166 cmnd->data_waiting = 0;
168 iscsi_restart_cmnd(cmnd);
172 static inline void iscsi_fail_waiting_cmnd(struct iscsi_cmnd *cmnd)
174 TRACE_MGMT_DBG("Failing data waiting cmd %p", cmnd);
177 * There is no race with conn_abort(), since all functions
178 * are called from the single read thread.
180 iscsi_extracheck_is_rd_thread(cmnd->conn);
181 cmnd->data_waiting = 0;
183 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
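/*
 * Allocates a new iscsi_cmnd with one reference held. Requests
 * (parent == NULL) get their own rsp_cmd_list and rx_ddigest_cmd_list
 * and are linked into the connection's cmd_list; responses merely
 * record their parent request.
 */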
186 struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *conn,
187 struct iscsi_cmnd *parent)
189 struct iscsi_cmnd *cmnd;
191 /* ToDo: __GFP_NOFAIL?? */
192 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
193 cmnd = kmem_cache_alloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
194 memset(cmnd, 0, sizeof(*cmnd));
196 cmnd = kmem_cache_zalloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
199 atomic_set(&cmnd->ref_cnt, 1);
200 cmnd->scst_state = ISCSI_CMD_STATE_NEW;
202 cmnd->parent_req = parent;
204 if (parent == NULL) {
207 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
208 atomic_set(&cmnd->net_ref_cnt, 0);
210 spin_lock_init(&cmnd->rsp_cmd_lock);
211 INIT_LIST_HEAD(&cmnd->rsp_cmd_list);
212 INIT_LIST_HEAD(&cmnd->rx_ddigest_cmd_list);
214 spin_lock_bh(&conn->cmd_list_lock);
215 list_add_tail(&cmnd->cmd_list_entry, &conn->cmd_list);
216 spin_unlock_bh(&conn->cmd_list_lock);
219 TRACE_DBG("conn %p, parent %p, cmnd %p", conn, parent, cmnd);
223 /* Frees a command. Also frees the additional header. */
224 static void cmnd_free(struct iscsi_cmnd *cmnd)
226 TRACE_DBG("%p", cmnd);
228 if (unlikely(cmnd->tm_aborted)) {
229 TRACE_MGMT_DBG("Free aborted cmd %p (scst cmd %p, state %d, "
230 "parent_req %p)", cmnd, cmnd->scst_cmd,
231 cmnd->scst_state, cmnd->parent_req);
234 /* Catch users from cmd_list or rsp_cmd_list */
235 EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) != 0);
237 kfree(cmnd->pdu.ahs);
239 if (unlikely(cmnd->on_write_list || cmnd->on_written_list)) {
240 struct iscsi_scsi_cmd_hdr *req = cmnd_hdr(cmnd);
242 PRINT_CRIT_ERROR("cmnd %p still on some list?, %x, %x, %x, "
243 "%x, %x, %x, %x", cmnd, req->opcode, req->scb[0],
244 req->flags, req->itt, be32_to_cpu(req->data_length),
245 req->cmd_sn, be32_to_cpu(cmnd->pdu.datasize));
247 if (unlikely(cmnd->parent_req)) {
248 struct iscsi_scsi_cmd_hdr *preq =
249 cmnd_hdr(cmnd->parent_req);
250 PRINT_CRIT_ERROR("%p %x %u", preq, preq->opcode,
256 kmem_cache_free(iscsi_cmnd_cache, cmnd);
260 /* Might be called under a lock and in SIRQ context */
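/*
 * Final cleanup of a command, presumably reached when its last
 * reference is put: the cmnd is unlinked from the written_list and
 * from the conn's cmd_list (requests) or the parent's rsp_cmd_list
 * (responses), any pending SCST command/AEN is completed, an owned sg
 * is freed and the session's active_cmds counter is decremented.
 */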
261 void cmnd_done(struct iscsi_cmnd *cmnd)
263 TRACE_DBG("%p", cmnd);
265 if (unlikely(cmnd->tm_aborted)) {
266 TRACE_MGMT_DBG("Done aborted cmd %p (scst cmd %p, state %d, "
267 "parent_req %p)", cmnd, cmnd->scst_cmd,
268 cmnd->scst_state, cmnd->parent_req);
271 EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
273 if (cmnd->on_written_list) {
274 struct iscsi_conn *conn = cmnd->conn;
275 TRACE_DBG("Deleting cmd %p from conn %p written_list", cmnd,
277 spin_lock_bh(&conn->write_list_lock);
278 list_del(&cmnd->written_list_entry);
279 cmnd->on_written_list = 0;
280 spin_unlock_bh(&conn->write_list_lock);
283 if (cmnd->parent_req == NULL) {
284 struct iscsi_conn *conn = cmnd->conn;
285 TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);
287 spin_lock_bh(&conn->cmd_list_lock);
288 list_del(&cmnd->cmd_list_entry);
289 spin_unlock_bh(&conn->cmd_list_lock);
293 EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rsp_cmd_list));
294 EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rx_ddigest_cmd_list));
296 /* The order of the code above and below is important! */
298 if ((cmnd->scst_cmd != NULL) || (cmnd->scst_aen != NULL)) {
299 switch (cmnd->scst_state) {
300 case ISCSI_CMD_STATE_PROCESSED:
301 TRACE_DBG("cmd %p PROCESSED", cmnd);
302 scst_tgt_cmd_done(cmnd->scst_cmd,
303 SCST_CONTEXT_DIRECT);
306 case ISCSI_CMD_STATE_AFTER_PREPROC:
308 struct scst_cmd *scst_cmd = cmnd->scst_cmd;
309 TRACE_DBG("cmd %p AFTER_PREPROC", cmnd);
310 cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
311 cmnd->scst_cmd = NULL;
312 scst_restart_cmd(scst_cmd,
313 SCST_PREPROCESS_STATUS_ERROR_FATAL,
314 SCST_CONTEXT_THREAD);
318 case ISCSI_CMD_STATE_AEN:
319 TRACE_DBG("cmd %p AEN PROCESSED", cmnd);
320 scst_aen_done(cmnd->scst_aen);
324 PRINT_CRIT_ERROR("Unexpected cmnd scst state "
325 "%d", cmnd->scst_state);
331 TRACE_DBG("Deleting rsp %p from parent %p", cmnd,
334 spin_lock_bh(&cmnd->parent_req->rsp_cmd_lock);
335 list_del(&cmnd->rsp_cmd_list_entry);
336 spin_unlock_bh(&cmnd->parent_req->rsp_cmd_lock);
338 cmnd_put(cmnd->parent_req);
341 /* The order of the code above and below is important! */
344 TRACE_DBG("%s", "own_sg");
345 if ((cmnd->sg != &dummy_sg) && (cmnd->sg != cmnd->rsp_sg))
346 scst_free(cmnd->sg, cmnd->sg_cnt);
347 #ifdef CONFIG_SCST_DEBUG
354 if (cmnd->dec_active_cmnds) {
355 struct iscsi_session *sess = cmnd->conn->session;
356 TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
357 "new value %d)", cmnd, sess,
358 atomic_read(&sess->active_cmds)-1);
359 atomic_dec(&sess->active_cmds);
360 #ifdef CONFIG_SCST_EXTRACHECKS
361 if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
362 PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
363 atomic_read(&sess->active_cmds));
374 * The corresponding conn may also get destroyed after this function returns,
375 * unless it's called from the read thread!
377 * It can't be called in parallel with iscsi_cmnds_init_write()!
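 *
 * With ISCSI_FORCE_RELEASE_WRITE, responses of this request that are
 * still queued on the connection's write_list are pulled off and put;
 * remaining responses that haven't started write processing are put as
 * well, and finally the request itself is released via
 * req_cmnd_release(). Used on abort/cleanup paths.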
379 void req_cmnd_release_force(struct iscsi_cmnd *req, int flags)
381 struct iscsi_cmnd *rsp, *t;
382 struct iscsi_conn *conn = req->conn;
383 LIST_HEAD(cmds_list);
387 TRACE_MGMT_DBG("%p", req);
389 sBUG_ON(req == conn->read_cmnd);
391 if (flags & ISCSI_FORCE_RELEASE_WRITE) {
392 spin_lock_bh(&conn->write_list_lock);
393 list_for_each_entry_safe(rsp, t, &conn->write_list,
395 if (rsp->parent_req != req)
398 cmd_del_from_write_list(rsp);
400 list_add_tail(&rsp->write_list_entry, &cmds_list);
402 spin_unlock_bh(&conn->write_list_lock);
404 list_for_each_entry_safe(rsp, t, &cmds_list,
406 TRACE_MGMT_DBG("Putting write rsp %p", rsp);
407 list_del(&rsp->write_list_entry);
413 spin_lock_bh(&req->rsp_cmd_lock);
414 list_for_each_entry_reverse(rsp, &req->rsp_cmd_list,
415 rsp_cmd_list_entry) {
418 if (rsp->force_cleanup_done)
421 rsp->force_cleanup_done = 1;
423 if (cmnd_get_check(rsp))
426 spin_unlock_bh(&req->rsp_cmd_lock);
428 spin_lock_bh(&conn->write_list_lock);
429 r = rsp->on_write_list || rsp->write_processing_started;
430 spin_unlock_bh(&conn->write_list_lock);
438 * If neither on_write_list nor write_processing_started is set,
439 * we can safely put() rsp.
441 TRACE_MGMT_DBG("Putting rsp %p", rsp);
445 spin_unlock_bh(&req->rsp_cmd_lock);
447 req_cmnd_release(req);
454 * The corresponding conn may also get destroyed after this function returns,
455 * unless it's called from the read thread!
457 static void req_cmnd_release(struct iscsi_cmnd *req)
459 struct iscsi_cmnd *c, *t;
463 TRACE_DBG("%p", req);
465 #ifdef CONFIG_SCST_EXTRACHECKS
466 sBUG_ON(req->release_called);
467 req->release_called = 1;
470 if (unlikely(req->tm_aborted)) {
471 TRACE_MGMT_DBG("Release aborted req cmd %p (scst cmd %p, "
472 "state %d)", req, req->scst_cmd, req->scst_state);
475 sBUG_ON(req->parent_req != NULL);
477 list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
478 rx_ddigest_cmd_list_entry) {
479 cmd_del_from_rx_ddigest_list(c);
484 cmnd_remove_hash(req);
486 if (req->dec_active_cmnds) {
487 struct iscsi_session *sess = req->conn->session;
488 TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
489 "new value %d)", req, sess,
490 atomic_read(&sess->active_cmds)-1);
491 atomic_dec(&sess->active_cmds);
492 req->dec_active_cmnds = 0;
493 #ifdef CONFIG_SCST_EXTRACHECKS
494 if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
495 PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
496 atomic_read(&sess->active_cmds));
509 * The corresponding conn may also get destroyed after this function returns,
510 * unless it's called from the read thread!
512 void rsp_cmnd_release(struct iscsi_cmnd *cmnd)
514 TRACE_DBG("%p", cmnd);
516 #ifdef CONFIG_SCST_EXTRACHECKS
517 sBUG_ON(cmnd->release_called);
518 cmnd->release_called = 1;
521 sBUG_ON(cmnd->hashed);
522 sBUG_ON(cmnd->parent_req == NULL);
529 * iscsi_cmnd_create_rsp_cmnd - create a new command used as a response.
531 *
532 * @parent: ptr to the request command
534 * @return: ptr to the response command
536 static struct iscsi_cmnd *iscsi_cmnd_create_rsp_cmnd(struct iscsi_cmnd *parent)
538 struct iscsi_cmnd *rsp;
540 rsp = cmnd_alloc(parent->conn, parent);
542 spin_lock_bh(&parent->rsp_cmd_lock);
543 TRACE_DBG("Adding rsp %p to parent %p", rsp, parent);
544 list_add_tail(&rsp->rsp_cmd_list_entry, &parent->rsp_cmd_list);
545 spin_unlock_bh(&parent->rsp_cmd_lock);
550 static inline struct iscsi_cmnd *get_rsp_cmnd(struct iscsi_cmnd *req)
552 struct iscsi_cmnd *res = NULL;
554 /* Currently this lock isn't needed, but just in case.. */
555 spin_lock_bh(&req->rsp_cmd_lock);
556 if (!list_empty(&req->rsp_cmd_list)) {
557 res = list_entry(req->rsp_cmd_list.prev, struct iscsi_cmnd,
560 spin_unlock_bh(&req->rsp_cmd_lock);
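/*
 * Takes a local list of prepared response PDUs, optionally removes the
 * parent request from the ITT hash (see the comment below), computes
 * data digests when enabled, moves the responses onto the connection's
 * write_list and, with ISCSI_INIT_WRITE_WAKE, wakes up the write
 * thread.
 */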
565 static void iscsi_cmnds_init_write(struct list_head *send, int flags)
567 struct iscsi_cmnd *rsp = list_entry(send->next, struct iscsi_cmnd,
569 struct iscsi_conn *conn = rsp->conn;
570 struct list_head *pos, *next;
572 sBUG_ON(list_empty(send));
575 * If we don't remove hashed req cmd from the hash list here, before
576 * submitting it for transmission, we will have a race: if for
577 * some reason the cmd's release is delayed after transmission and the
578 * initiator sends a cmd with the same ITT => this command will be
579 * erroneously rejected as a duplicate.
581 if ((flags & ISCSI_INIT_WRITE_REMOVE_HASH) &&
582 rsp->parent_req->hashed &&
583 (rsp->parent_req->r2t_length == 0) &&
584 (rsp->parent_req->outstanding_r2t == 0))
585 cmnd_remove_hash(rsp->parent_req);
587 if (!(conn->ddigest_type & DIGEST_NONE)) {
588 list_for_each(pos, send) {
589 rsp = list_entry(pos, struct iscsi_cmnd,
592 if (rsp->pdu.datasize != 0) {
593 TRACE_DBG("Doing data digest (%p:%x)", rsp,
600 spin_lock_bh(&conn->write_list_lock);
601 list_for_each_safe(pos, next, send) {
602 rsp = list_entry(pos, struct iscsi_cmnd, write_list_entry);
604 TRACE_DBG("%p:%x", rsp, cmnd_opcode(rsp));
606 sBUG_ON(conn != rsp->conn);
608 list_del(&rsp->write_list_entry);
609 cmd_add_on_write_list(conn, rsp);
611 spin_unlock_bh(&conn->write_list_lock);
613 if (flags & ISCSI_INIT_WRITE_WAKE)
614 iscsi_make_conn_wr_active(conn);
619 static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags)
623 if (unlikely(rsp->on_write_list)) {
624 PRINT_CRIT_ERROR("cmd already on write list (%x %x %x %x %u "
625 "%u %u %u %u %u %u %d %d",
626 cmnd_itt(rsp), cmnd_ttt(rsp), cmnd_opcode(rsp),
627 cmnd_scsicode(rsp), rsp->r2t_sn,
628 rsp->r2t_length, rsp->is_unsolicited_data,
629 rsp->target_task_tag, rsp->outstanding_r2t,
630 rsp->hdigest, rsp->ddigest,
631 list_empty(&rsp->rsp_cmd_list), rsp->hashed);
634 list_add_tail(&rsp->write_list_entry, &head);
635 iscsi_cmnds_init_write(&head, flags);
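/*
 * Builds the chain of Data-In PDUs for a READ command, each carrying
 * at most max_xmit_data_length bytes of payload. If send_status is
 * set, the final PDU also carries the SCSI status (ISCSI_FLG_STATUS)
 * together with the residual underflow/overflow information.
 */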
639 static void send_data_rsp(struct iscsi_cmnd *req, u8 status, int send_status)
641 struct iscsi_cmnd *rsp;
642 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
643 struct iscsi_data_in_hdr *rsp_hdr;
644 u32 pdusize, expsize, size, offset, sn;
647 TRACE_DBG("req %p", req);
649 pdusize = req->conn->session->sess_param.max_xmit_data_length;
650 expsize = req->read_size;
651 size = min(expsize, (u32)req->bufflen);
656 rsp = iscsi_cmnd_create_rsp_cmnd(req);
657 TRACE_DBG("rsp %p", rsp);
659 rsp->sg_cnt = req->sg_cnt;
660 rsp->bufflen = req->bufflen;
661 rsp_hdr = (struct iscsi_data_in_hdr *)&rsp->pdu.bhs;
663 rsp_hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
664 rsp_hdr->itt = req_hdr->itt;
665 rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
666 rsp_hdr->buffer_offset = cpu_to_be32(offset);
667 rsp_hdr->data_sn = cpu_to_be32(sn);
669 if (size <= pdusize) {
670 TRACE_DBG("offset %d, size %d", offset, size);
671 rsp->pdu.datasize = size;
675 TRACE_DBG("status %x", status);
677 EXTRACHECKS_BUG_ON((cmnd_hdr(req)->flags & ISCSI_CMD_WRITE) != 0);
679 rsp_hdr->flags = ISCSI_FLG_FINAL | ISCSI_FLG_STATUS;
680 rsp_hdr->cmd_status = status;
682 scsisize = req->bufflen;
683 if (scsisize < expsize) {
684 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
685 size = expsize - scsisize;
686 } else if (scsisize > expsize) {
687 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
688 size = scsisize - expsize;
691 rsp_hdr->residual_count = cpu_to_be32(size);
693 list_add_tail(&rsp->write_list_entry, &send);
697 TRACE_DBG("pdusize %d, offset %d, size %d", pdusize, offset,
700 rsp->pdu.datasize = pdusize;
706 list_add_tail(&rsp->write_list_entry, &send);
708 iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_REMOVE_HASH);
712 static struct iscsi_cmnd *create_status_rsp(struct iscsi_cmnd *req, int status,
713 const u8 *sense_buf, int sense_len)
715 struct iscsi_cmnd *rsp;
716 struct iscsi_scsi_rsp_hdr *rsp_hdr;
717 struct scatterlist *sg;
719 rsp = iscsi_cmnd_create_rsp_cmnd(req);
720 TRACE_DBG("%p", rsp);
722 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
723 rsp_hdr->opcode = ISCSI_OP_SCSI_RSP;
724 rsp_hdr->flags = ISCSI_FLG_FINAL;
725 rsp_hdr->response = ISCSI_RESPONSE_COMMAND_COMPLETED;
726 rsp_hdr->cmd_status = status;
727 rsp_hdr->itt = cmnd_hdr(req)->itt;
729 if (SCST_SENSE_VALID(sense_buf)) {
730 TRACE_DBG("%s", "SENSE VALID");
732 sg = rsp->sg = rsp->rsp_sg;
736 sg_init_table(sg, 2);
737 sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
738 sg_set_buf(&sg[1], sense_buf, sense_len);
740 rsp->sense_hdr.length = cpu_to_be16(sense_len);
742 rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
743 rsp->bufflen = rsp->pdu.datasize;
745 rsp->pdu.datasize = 0;
752 static void iscsi_cmnd_reject(struct iscsi_cmnd *req, int reason)
754 struct iscsi_cmnd *rsp;
755 struct iscsi_reject_hdr *rsp_hdr;
756 struct scatterlist *sg;
758 TRACE_MGMT_DBG("Reject: req %p, reason %x", req, reason);
760 sBUG_ON(req->rejected);
762 req->reject_reason = ISCSI_REJECT_CMD;
764 rsp = iscsi_cmnd_create_rsp_cmnd(req);
765 rsp_hdr = (struct iscsi_reject_hdr *)&rsp->pdu.bhs;
767 rsp_hdr->opcode = ISCSI_OP_REJECT;
768 rsp_hdr->ffffffff = ISCSI_RESERVED_TAG;
769 rsp_hdr->reason = reason;
771 sg = rsp->sg = rsp->rsp_sg;
774 sg_init_one(sg, &req->pdu.bhs, sizeof(struct iscsi_hdr));
775 rsp->bufflen = rsp->pdu.datasize = sizeof(struct iscsi_hdr);
777 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
778 ISCSI_INIT_WRITE_WAKE);
780 cmnd_prepare_get_rejected_cmd_data(req);
784 static inline int iscsi_get_allowed_cmds(struct iscsi_session *sess)
786 int res = max(-1, (int)sess->max_queued_cmnds -
787 atomic_read(&sess->active_cmds)-1);
788 TRACE_DBG("allowed cmds %d (sess %p, active_cmds %d)", res,
789 sess, atomic_read(&sess->active_cmds));
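/*
 * Stamps the outgoing PDU with the session's ExpCmdSN/MaxCmdSN window
 * and, when set_stat_sn is true, with the connection's current StatSN.
 * MaxCmdSN is exp_cmd_sn plus the number of commands the session may
 * still queue, as computed by iscsi_get_allowed_cmds() above.
 */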
793 static u32 cmnd_set_sn(struct iscsi_cmnd *cmnd, int set_stat_sn)
795 struct iscsi_conn *conn = cmnd->conn;
796 struct iscsi_session *sess = conn->session;
799 spin_lock(&sess->sn_lock);
802 cmnd->pdu.bhs.sn = cpu_to_be32(conn->stat_sn++);
803 cmnd->pdu.bhs.exp_sn = cpu_to_be32(sess->exp_cmd_sn);
804 cmnd->pdu.bhs.max_sn = cpu_to_be32(sess->exp_cmd_sn +
805 iscsi_get_allowed_cmds(sess));
807 res = cpu_to_be32(conn->stat_sn);
809 spin_unlock(&sess->sn_lock);
813 /* Called under sn_lock */
814 static void __update_stat_sn(struct iscsi_cmnd *cmnd)
816 struct iscsi_conn *conn = cmnd->conn;
819 cmnd->pdu.bhs.exp_sn = exp_stat_sn = be32_to_cpu(cmnd->pdu.bhs.exp_sn);
820 TRACE_DBG("%x,%x", cmnd_opcode(cmnd), exp_stat_sn);
821 if ((int)(exp_stat_sn - conn->exp_stat_sn) > 0 &&
822 (int)(exp_stat_sn - conn->stat_sn) <= 0) {
823 /* free pdu resources */
824 cmnd->conn->exp_stat_sn = exp_stat_sn;
829 static inline void update_stat_sn(struct iscsi_cmnd *cmnd)
831 spin_lock(&cmnd->conn->session->sn_lock);
832 __update_stat_sn(cmnd);
833 spin_unlock(&cmnd->conn->session->sn_lock);
837 /* Called under sn_lock */
838 static int check_cmd_sn(struct iscsi_cmnd *cmnd)
840 struct iscsi_session *session = cmnd->conn->session;
843 cmnd->pdu.bhs.sn = cmd_sn = be32_to_cpu(cmnd->pdu.bhs.sn);
844 TRACE_DBG("%d(%d)", cmd_sn, session->exp_cmd_sn);
845 if (likely((s32)(cmd_sn - session->exp_cmd_sn) >= 0))
847 PRINT_ERROR("sequence error (%x,%x)", cmd_sn, session->exp_cmd_sn);
848 return -ISCSI_REASON_PROTOCOL_ERROR;
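/*
 * The per-session cmnd_hash table is keyed by cmnd_hashfn(ITT).
 * __cmnd_find_hash() matches on ITT and, when ttt != ISCSI_RESERVED_TAG,
 * also on the target task tag; it must be called with cmnd_hash_lock
 * held, as the wrappers below do.
 */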
851 static inline struct iscsi_cmnd *__cmnd_find_hash(
852 struct iscsi_session *session, u32 itt, u32 ttt)
854 struct list_head *head;
855 struct iscsi_cmnd *cmnd;
857 head = &session->cmnd_hash[cmnd_hashfn(itt)];
859 list_for_each_entry(cmnd, head, hash_list_entry) {
860 if (cmnd->pdu.bhs.itt == itt) {
861 if (ttt != ISCSI_RESERVED_TAG &&
862 ttt != cmnd->target_task_tag)
870 static struct iscsi_cmnd *cmnd_find_hash(struct iscsi_session *session,
873 struct iscsi_cmnd *cmnd;
875 spin_lock(&session->cmnd_hash_lock);
876 cmnd = __cmnd_find_hash(session, itt, ttt);
877 spin_unlock(&session->cmnd_hash_lock);
882 static struct iscsi_cmnd *cmnd_find_hash_get(struct iscsi_session *session,
885 struct iscsi_cmnd *cmnd;
887 spin_lock(&session->cmnd_hash_lock);
888 cmnd = __cmnd_find_hash(session, itt, ttt);
890 if (unlikely(cmnd_get_check(cmnd)))
893 spin_unlock(&session->cmnd_hash_lock);
898 static int cmnd_insert_hash(struct iscsi_cmnd *cmnd)
900 struct iscsi_session *session = cmnd->conn->session;
901 struct iscsi_cmnd *tmp;
902 struct list_head *head;
904 u32 itt = cmnd->pdu.bhs.itt;
906 TRACE_DBG("%p:%x", cmnd, itt);
907 if (unlikely(itt == ISCSI_RESERVED_TAG)) {
908 PRINT_ERROR("%s", "ITT is RESERVED_TAG");
909 PRINT_BUFFER("Incorrect BHS", &cmnd->pdu.bhs,
910 sizeof(cmnd->pdu.bhs));
911 err = -ISCSI_REASON_PROTOCOL_ERROR;
915 spin_lock(&session->cmnd_hash_lock);
917 head = &session->cmnd_hash[cmnd_hashfn(cmnd->pdu.bhs.itt)];
919 tmp = __cmnd_find_hash(session, itt, ISCSI_RESERVED_TAG);
921 list_add_tail(&cmnd->hash_list_entry, head);
924 PRINT_ERROR("Task %x in progress, cmnd %p", itt, cmnd);
925 err = -ISCSI_REASON_TASK_IN_PROGRESS;
928 spin_unlock(&session->cmnd_hash_lock);
931 spin_lock(&session->sn_lock);
932 __update_stat_sn(cmnd);
933 err = check_cmd_sn(cmnd);
934 spin_unlock(&session->sn_lock);
941 static void cmnd_remove_hash(struct iscsi_cmnd *cmnd)
943 struct iscsi_session *session = cmnd->conn->session;
944 struct iscsi_cmnd *tmp;
946 spin_lock(&session->cmnd_hash_lock);
948 tmp = __cmnd_find_hash(session, cmnd->pdu.bhs.itt, ISCSI_RESERVED_TAG);
950 if (likely(tmp && tmp == cmnd)) {
951 list_del(&cmnd->hash_list_entry);
954 PRINT_ERROR("%p:%x not found", cmnd, cmnd_itt(cmnd));
957 spin_unlock(&session->cmnd_hash_lock);
961 static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd)
963 struct iscsi_conn *conn = cmnd->conn;
964 struct scatterlist *sg = cmnd->sg;
969 TRACE_MGMT_DBG("Skipping (%p, %x %x %x %u, %p, scst state %d)", cmnd,
970 cmnd_itt(cmnd), cmnd_opcode(cmnd), cmnd_hdr(cmnd)->scb[0],
971 cmnd->pdu.datasize, cmnd->scst_cmd, cmnd->scst_state);
973 iscsi_extracheck_is_rd_thread(conn);
975 size = cmnd->pdu.datasize;
981 * There are no problems with concurrent accesses to
982 * dummy_page in dummy_sg, since the data will only be
983 * read and then discarded.
985 sg = cmnd->sg = &dummy_sg;
986 cmnd->bufflen = PAGE_SIZE;
990 addr = (char __force __user *)(page_address(sg_page(&sg[0])));
991 sBUG_ON(addr == NULL);
992 conn->read_size = size;
993 for (i = 0; size > PAGE_SIZE; i++, size -= cmnd->bufflen) {
994 /* We already checked pdu.datasize in check_segment_length() */
995 sBUG_ON(i >= ISCSI_CONN_IOV_MAX);
996 conn->read_iov[i].iov_base = addr;
997 conn->read_iov[i].iov_len = cmnd->bufflen;
999 conn->read_iov[i].iov_base = addr;
1000 conn->read_iov[i].iov_len = size;
1001 conn->read_msg.msg_iov = conn->read_iov;
1002 conn->read_msg.msg_iovlen = ++i;
1007 static void iscsi_set_resid(struct iscsi_cmnd *req, struct iscsi_cmnd *rsp,
1010 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
1011 struct iscsi_scsi_rsp_hdr *rsp_hdr;
1012 int resid, resp_len, in_resp_len;
1014 if ((req_hdr->flags & ISCSI_CMD_READ) &&
1015 (req_hdr->flags & ISCSI_CMD_WRITE)) {
1016 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
1019 resp_len = req->bufflen;
1020 if (req->scst_cmd != NULL)
1021 in_resp_len = scst_cmd_get_in_bufflen(req->scst_cmd);
1029 resid = be32_to_cpu(req_hdr->data_length) - in_resp_len;
1031 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
1032 rsp_hdr->residual_count = cpu_to_be32(resid);
1033 } else if (resid < 0) {
1035 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
1036 rsp_hdr->residual_count = cpu_to_be32(resid);
1039 resid = req->read_size - resp_len;
1041 rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
1042 rsp_hdr->bi_residual_count = cpu_to_be32(resid);
1043 } else if (resid < 0) {
1045 rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_OVERFLOW;
1046 rsp_hdr->bi_residual_count = cpu_to_be32(resid);
1050 resp_len = req->bufflen;
1054 resid = req->read_size - resp_len;
1056 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
1057 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
1058 rsp_hdr->residual_count = cpu_to_be32(resid);
1059 } else if (resid < 0) {
1060 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
1062 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
1063 rsp_hdr->residual_count = cpu_to_be32(resid);
1069 static void cmnd_reject_scsi_cmd(struct iscsi_cmnd *req)
1071 struct iscsi_cmnd *rsp;
1073 TRACE_DBG("%p", req);
1075 sBUG_ON(req->rejected);
1077 req->reject_reason = ISCSI_REJECT_SCSI_CMD;
1079 rsp = get_rsp_cmnd(req);
1081 /* That can be true for aborted commands */
1085 sBUG_ON(cmnd_opcode(rsp) != ISCSI_OP_SCSI_RSP);
1087 iscsi_set_resid(req, rsp, false);
1089 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
1090 ISCSI_INIT_WRITE_WAKE);
1093 cmnd_prepare_get_rejected_cmd_data(req);
1097 static int cmnd_prepare_recv_pdu(struct iscsi_conn *conn,
1098 struct iscsi_cmnd *cmd, u32 offset, u32 size)
1100 struct scatterlist *sg = cmd->sg;
1101 unsigned int bufflen = cmd->bufflen;
1102 unsigned int idx, i;
1106 TRACE_DBG("%p %u,%u", cmd->sg, offset, size);
1108 iscsi_extracheck_is_rd_thread(conn);
1110 if (unlikely((offset >= bufflen) ||
1111 (offset + size > bufflen))) {
1112 PRINT_ERROR("Wrong ltn (%u %u %u)", offset, size, bufflen);
1113 mark_conn_closed(conn);
1118 offset += sg[0].offset;
1119 idx = offset >> PAGE_SHIFT;
1120 offset &= ~PAGE_MASK;
1122 conn->read_msg.msg_iov = conn->read_iov;
1123 conn->read_size = size;
1127 addr = (char __force __user *)(page_address(sg_page(&sg[idx])));
1128 sBUG_ON(addr == NULL);
1129 conn->read_iov[i].iov_base = addr + offset;
1130 if (offset + size <= PAGE_SIZE) {
1131 TRACE_DBG("idx=%d, offset=%u, size=%d, addr=%p",
1132 idx, offset, size, addr);
1133 conn->read_iov[i].iov_len = size;
1134 conn->read_msg.msg_iovlen = ++i;
1137 conn->read_iov[i].iov_len = PAGE_SIZE - offset;
1138 TRACE_DBG("idx=%d, offset=%u, size=%d, iov_len=%zd, addr=%p",
1139 idx, offset, size, conn->read_iov[i].iov_len, addr);
1140 size -= conn->read_iov[i].iov_len;
1141 if (unlikely(++i >= ISCSI_CONN_IOV_MAX)) {
1142 PRINT_ERROR("Initiator %s violated negotiated "
1143 "parameters by sending too much data (size "
1144 "left %d)", conn->session->initiator_name,
1146 mark_conn_closed(conn);
1151 offset = sg[idx].offset;
1153 TRACE_DBG("msg_iov=%p, msg_iovlen=%zd",
1154 conn->read_msg.msg_iov, conn->read_msg.msg_iovlen);
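/*
 * Generates R2T PDUs requesting the WRITE data that is still missing
 * (req->r2t_length), splitting it into bursts of at most
 * MaxBurstLength bytes and stopping once MaxOutstandingR2T R2Ts are
 * outstanding.
 */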
1160 static void send_r2t(struct iscsi_cmnd *req)
1162 struct iscsi_session *session = req->conn->session;
1163 struct iscsi_cmnd *rsp;
1164 struct iscsi_r2t_hdr *rsp_hdr;
1168 if (unlikely(req->tm_aborted)) {
1169 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted on R2T "
1170 "(r2t_length %d, outstanding_r2t %d)", req,
1171 req->scst_cmd, req->r2t_length, req->outstanding_r2t);
1172 if (req->outstanding_r2t == 0)
1173 iscsi_fail_waiting_cmnd(req);
1178 * There is no race with data_out_start() and conn_abort(), since
1179 * all functions are called from the single read thread.
1181 iscsi_extracheck_is_rd_thread(req->conn);
1183 burst = session->sess_param.max_burst_length;
1184 offset = be32_to_cpu(cmnd_hdr(req)->data_length) - req->r2t_length;
1187 rsp = iscsi_cmnd_create_rsp_cmnd(req);
1188 rsp->pdu.bhs.ttt = req->target_task_tag;
1189 rsp_hdr = (struct iscsi_r2t_hdr *)&rsp->pdu.bhs;
1190 rsp_hdr->opcode = ISCSI_OP_R2T;
1191 rsp_hdr->flags = ISCSI_FLG_FINAL;
1192 rsp_hdr->lun = cmnd_hdr(req)->lun;
1193 rsp_hdr->itt = cmnd_hdr(req)->itt;
1194 rsp_hdr->r2t_sn = cpu_to_be32(req->r2t_sn++);
1195 rsp_hdr->buffer_offset = cpu_to_be32(offset);
1196 if (req->r2t_length > burst) {
1197 rsp_hdr->data_length = cpu_to_be32(burst);
1198 req->r2t_length -= burst;
1201 rsp_hdr->data_length = cpu_to_be32(req->r2t_length);
1202 req->r2t_length = 0;
1205 TRACE_WRITE("%x %u %u %u %u", cmnd_itt(req),
1206 be32_to_cpu(rsp_hdr->data_length),
1207 be32_to_cpu(rsp_hdr->buffer_offset),
1208 be32_to_cpu(rsp_hdr->r2t_sn), req->outstanding_r2t);
1210 list_add_tail(&rsp->write_list_entry, &send);
1212 if (++req->outstanding_r2t >= session->sess_param.max_outstanding_r2t)
1215 } while (req->r2t_length != 0);
1217 iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_WAKE);
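/*
 * SCST pre_exec() callback (registered in the target template, not
 * shown here): before the command is executed, the data digests of all
 * received Data-Out PDUs queued on rx_ddigest_cmd_list are verified;
 * on a CRC error the command is failed with an iscsi_sense_crc_error
 * sense.
 */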
1223 static int iscsi_pre_exec(struct scst_cmd *scst_cmd)
1225 int res = SCST_PREPROCESS_STATUS_SUCCESS;
1226 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
1227 scst_cmd_get_tgt_priv(scst_cmd);
1228 struct iscsi_cmnd *c, *t;
1232 EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
1234 /* If data digest isn't used this list will be empty */
1235 list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
1236 rx_ddigest_cmd_list_entry) {
1237 TRACE_DBG("Checking digest of RX ddigest cmd %p", c);
1238 if (digest_rx_data(c) != 0) {
1239 scst_set_cmd_error(scst_cmd,
1240 SCST_LOAD_SENSE(iscsi_sense_crc_error));
1241 res = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
1243 * The rest of rx_ddigest_cmd_list will be freed
1244 * in req_cmnd_release()
1248 cmd_del_from_rx_ddigest_list(c);
1253 TRACE_EXIT_RES(res);
1257 static int noop_out_start(struct iscsi_cmnd *cmnd)
1259 struct iscsi_conn *conn = cmnd->conn;
1263 TRACE_DBG("%p", cmnd);
1265 iscsi_extracheck_is_rd_thread(conn);
1267 if (unlikely(cmnd_ttt(cmnd) != cpu_to_be32(ISCSI_RESERVED_TAG))) {
1269 * We don't request a NOP-Out by sending a NOP-In.
1270 * See section 10.18.2 of RFC 3720 (draft 20).
1272 PRINT_ERROR("Initiator sent command with not RESERVED tag and "
1273 "TTT %x", cmnd_itt(cmnd));
1274 err = -ISCSI_REASON_PROTOCOL_ERROR;
1278 if (cmnd_itt(cmnd) == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1279 if (unlikely(!(cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE)))
1280 PRINT_ERROR("%s", "Initiator sent RESERVED tag for "
1281 "non-immediate command");
1282 spin_lock(&conn->session->sn_lock);
1283 __update_stat_sn(cmnd);
1284 err = check_cmd_sn(cmnd);
1285 spin_unlock(&conn->session->sn_lock);
1289 err = cmnd_insert_hash(cmnd);
1290 if (unlikely(err < 0)) {
1291 PRINT_ERROR("Can't insert in hash: ignore this "
1292 "request %x", cmnd_itt(cmnd));
1297 size = cmnd->pdu.datasize;
1300 conn->read_msg.msg_iov = conn->read_iov;
1301 if (cmnd->pdu.bhs.itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
1302 struct scatterlist *sg;
1304 cmnd->sg = sg = scst_alloc(size, GFP_KERNEL,
1307 TRACE(TRACE_OUT_OF_MEM, "Allocating buffer for"
1308 " %d NOP-Out payload failed", size);
1309 err = -ISCSI_REASON_OUT_OF_RESOURCES;
1313 /* We already checked it in check_segment_length() */
1314 sBUG_ON(cmnd->sg_cnt > (signed)ISCSI_CONN_IOV_MAX);
1317 cmnd->bufflen = size;
1319 for (i = 0; i < cmnd->sg_cnt; i++) {
1320 conn->read_iov[i].iov_base =
1321 (void __force __user *)(page_address(sg_page(&sg[i])));
1322 tmp = min_t(u32, size, PAGE_SIZE);
1323 conn->read_iov[i].iov_len = tmp;
1324 conn->read_size += tmp;
1330 * There are no problems with concurrent accesses to
1331 * dummy_page, since for ISCSI_RESERVED_TAG the data is
1332 * only read and then discarded.
1334 for (i = 0; i < (signed)ISCSI_CONN_IOV_MAX; i++) {
1335 conn->read_iov[i].iov_base =
1336 (void __force __user *)(page_address(dummy_page));
1337 tmp = min_t(u32, size, PAGE_SIZE);
1338 conn->read_iov[i].iov_len = tmp;
1339 conn->read_size += tmp;
1343 /* We already checked size in check_segment_length() */
1347 conn->read_msg.msg_iovlen = i;
1348 TRACE_DBG("msg_iov=%p, msg_iovlen=%zd", conn->read_msg.msg_iov,
1349 conn->read_msg.msg_iovlen);
1356 static inline u32 get_next_ttt(struct iscsi_conn *conn)
1359 struct iscsi_session *session = conn->session;
1361 iscsi_extracheck_is_rd_thread(conn);
1363 if (session->next_ttt == ISCSI_RESERVED_TAG)
1364 session->next_ttt++;
1365 ttt = session->next_ttt++;
1367 return cpu_to_be32(ttt);
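/*
 * Continues reception of a SCSI command after SCST preprocessing has
 * provided the data buffer: computes how much WRITE data still has to
 * be requested via R2T (r2t_length), selects the scatterlist to receive
 * into (bidi-aware), enforces the negotiated ImmediateData/InitialR2T
 * parameters and prepares the receive iovecs for any immediate data
 * carried by this PDU.
 */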
1370 int cmnd_rx_continue(struct iscsi_cmnd *req)
1372 struct iscsi_conn *conn = req->conn;
1373 struct iscsi_session *session = conn->session;
1374 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
1375 struct scst_cmd *scst_cmd = req->scst_cmd;
1376 scst_data_direction dir;
1381 TRACE_DBG("scsi command: %02x", req_hdr->scb[0]);
1383 if (unlikely(req->scst_state != ISCSI_CMD_STATE_AFTER_PREPROC)) {
1384 TRACE_DBG("req %p is in %x state", req, req->scst_state);
1385 if (req->scst_state == ISCSI_CMD_STATE_PROCESSED) {
1386 cmnd_reject_scsi_cmd(req);
1389 if (unlikely(req->tm_aborted)) {
1390 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
1392 cmnd_prepare_get_rejected_cmd_data(req);
1398 dir = scst_cmd_get_data_direction(scst_cmd);
1399 if (dir & SCST_DATA_WRITE) {
1400 req->is_unsolicited_data = !(req_hdr->flags & ISCSI_CMD_FINAL);
1401 req->r2t_length = be32_to_cpu(req_hdr->data_length) -
1403 if (req->r2t_length > 0)
1404 req->data_waiting = 1;
1406 if (unlikely(!(req_hdr->flags & ISCSI_CMD_FINAL) ||
1407 req->pdu.datasize)) {
1408 PRINT_ERROR("Unexpected unsolicited data (ITT %x "
1409 "CDB %x", cmnd_itt(req), req_hdr->scb[0]);
1410 scst_set_cmd_error(scst_cmd,
1411 SCST_LOAD_SENSE(iscsi_sense_unexpected_unsolicited_data));
1412 if (scst_cmd_get_sense_buffer(scst_cmd) != NULL)
1413 create_status_rsp(req, SAM_STAT_CHECK_CONDITION,
1414 scst_cmd_get_sense_buffer(scst_cmd),
1415 scst_cmd_get_sense_buffer_len(scst_cmd));
1417 create_status_rsp(req, SAM_STAT_BUSY, NULL, 0);
1418 cmnd_reject_scsi_cmd(req);
1423 req->target_task_tag = get_next_ttt(conn);
1424 if (dir != SCST_DATA_BIDI) {
1425 req->sg = scst_cmd_get_sg(scst_cmd);
1426 req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
1427 req->bufflen = scst_cmd_get_bufflen(scst_cmd);
1429 req->sg = scst_cmd_get_in_sg(scst_cmd);
1430 req->sg_cnt = scst_cmd_get_in_sg_cnt(scst_cmd);
1431 req->bufflen = scst_cmd_get_in_bufflen(scst_cmd);
1433 if (unlikely(req->r2t_length > req->bufflen)) {
1434 PRINT_ERROR("req->r2t_length %d > req->bufflen %d",
1435 req->r2t_length, req->bufflen);
1436 req->r2t_length = req->bufflen;
1439 TRACE_DBG("req=%p, dir=%d, is_unsolicited_data=%d, "
1440 "r2t_length=%d, bufflen=%d", req, dir,
1441 req->is_unsolicited_data, req->r2t_length, req->bufflen);
1443 if (unlikely(!session->sess_param.immediate_data &&
1444 req->pdu.datasize)) {
1445 PRINT_ERROR("Initiator %s violated negotiated paremeters: "
1446 "forbidden immediate data sent (ITT %x, op %x)",
1447 session->initiator_name, cmnd_itt(req),
1453 if (unlikely(session->sess_param.initial_r2t &&
1454 !(req_hdr->flags & ISCSI_CMD_FINAL))) {
1455 PRINT_ERROR("Initiator %s violated negotiated paremeters: "
1456 "initial R2T is required (ITT %x, op %x)",
1457 session->initiator_name, cmnd_itt(req),
1463 if (req->pdu.datasize)
1464 res = cmnd_prepare_recv_pdu(conn, req, 0, req->pdu.datasize);
1467 /* Aborted commands will be freed in cmnd_rx_end() */
1468 TRACE_EXIT_RES(res);
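/*
 * First stage of SCSI command processing: creates the SCST command,
 * sets the expected data direction and transfer length(s) (including
 * the bidirectional case), maps the iSCSI task attribute onto the SCST
 * queue type, parses an Extended CDB AHS if present and then starts
 * SCST's stage-1 init; cmnd_rx_continue() runs either directly from
 * here or later, once preprocessing has finished.
 */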
1472 static int scsi_cmnd_start(struct iscsi_cmnd *req)
1474 struct iscsi_conn *conn = req->conn;
1475 struct iscsi_session *session = conn->session;
1476 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
1477 struct scst_cmd *scst_cmd;
1478 scst_data_direction dir;
1479 struct iscsi_ahs_hdr *ahdr;
1484 TRACE_DBG("scsi command: %02x", req_hdr->scb[0]);
1486 TRACE_DBG("Incrementing active_cmds (cmd %p, sess %p, "
1487 "new value %d)", req, session,
1488 atomic_read(&session->active_cmds)+1);
1489 atomic_inc(&session->active_cmds);
1490 req->dec_active_cmnds = 1;
1492 scst_cmd = scst_rx_cmd(session->scst_sess,
1493 (uint8_t *)&req_hdr->lun, sizeof(req_hdr->lun),
1494 req_hdr->scb, sizeof(req_hdr->scb), SCST_NON_ATOMIC);
1495 if (scst_cmd == NULL) {
1496 create_status_rsp(req, SAM_STAT_BUSY, NULL, 0);
1497 cmnd_reject_scsi_cmd(req);
1501 req->scst_cmd = scst_cmd;
1502 scst_cmd_set_tag(scst_cmd, req_hdr->itt);
1503 scst_cmd_set_tgt_priv(scst_cmd, req);
1505 if ((req_hdr->flags & ISCSI_CMD_READ) &&
1506 (req_hdr->flags & ISCSI_CMD_WRITE)) {
1507 int sz = cmnd_read_size(req);
1508 if (unlikely(sz < 0)) {
1509 PRINT_ERROR("%s", "BIDI data transfer, but initiator "
1510 "not supplied Bidirectional Read Expected Data "
1511 "Transfer Length AHS");
1512 scst_set_cmd_error(scst_cmd,
1513 SCST_LOAD_SENSE(scst_sense_parameter_value_invalid));
1515 * scst_cmd_init_done() will treat commands that already
1516 * have a status set as preliminarily completed
1519 req->read_size = sz;
1520 dir = SCST_DATA_BIDI;
1521 scst_cmd_set_expected(scst_cmd, dir, sz);
1522 scst_cmd_set_expected_in_transfer_len(scst_cmd,
1523 be32_to_cpu(req_hdr->data_length));
1524 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
1525 scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
1528 } else if (req_hdr->flags & ISCSI_CMD_READ) {
1529 req->read_size = be32_to_cpu(req_hdr->data_length);
1530 dir = SCST_DATA_READ;
1531 scst_cmd_set_expected(scst_cmd, dir, req->read_size);
1532 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
1533 scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
1535 } else if (req_hdr->flags & ISCSI_CMD_WRITE) {
1536 dir = SCST_DATA_WRITE;
1537 scst_cmd_set_expected(scst_cmd, dir,
1538 be32_to_cpu(req_hdr->data_length));
1540 dir = SCST_DATA_NONE;
1541 scst_cmd_set_expected(scst_cmd, dir, 0);
1544 switch (req_hdr->flags & ISCSI_CMD_ATTR_MASK) {
1545 case ISCSI_CMD_SIMPLE:
1546 scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1548 case ISCSI_CMD_HEAD_OF_QUEUE:
1549 scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1551 case ISCSI_CMD_ORDERED:
1552 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1555 scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
1557 case ISCSI_CMD_UNTAGGED:
1558 scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
1561 PRINT_ERROR("Unknown task code %x, use ORDERED instead",
1562 req_hdr->flags & ISCSI_CMD_ATTR_MASK);
1563 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1567 /* cmd_sn was already converted to CPU format in check_cmd_sn() */
1568 scst_cmd_set_tgt_sn(scst_cmd, req_hdr->cmd_sn);
1570 ahdr = (struct iscsi_ahs_hdr *)req->pdu.ahs;
1572 uint8_t *p = (uint8_t *)ahdr;
1577 ahdr = (struct iscsi_ahs_hdr *)p;
1579 if (ahdr->ahstype == ISCSI_AHSTYPE_CDB) {
1580 struct iscsi_cdb_ahdr *eca =
1581 (struct iscsi_cdb_ahdr *)ahdr;
1582 scst_cmd_set_ext_cdb(scst_cmd, eca->cdb,
1583 be16_to_cpu(ahdr->ahslength) - 1);
1586 s = 3 + be16_to_cpu(ahdr->ahslength);
1590 } while (size < req->pdu.ahssize);
1593 TRACE_DBG("START Command (tag %d, queue_type %d)",
1594 req_hdr->itt, scst_cmd->queue_type);
1595 req->scst_state = ISCSI_CMD_STATE_RX_CMD;
1596 conn->rx_task = current;
1597 scst_cmd_init_stage1_done(scst_cmd, SCST_CONTEXT_DIRECT, 0);
1599 if (req->scst_state != ISCSI_CMD_STATE_RX_CMD)
1600 res = cmnd_rx_continue(req);
1602 TRACE_DBG("Delaying req %p post processing (scst_state %d)",
1603 req, req->scst_state);
1608 /* Aborted commands will be freed in cmnd_rx_end() */
1609 TRACE_EXIT_RES(res);
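/*
 * Handles the header of an incoming Data-Out PDU: the original WRITE
 * request is looked up in the session hash by its ITT (and TTT), the
 * payload is accounted against the outstanding unsolicited or R2T
 * data, and the receive iovecs are set up to point into that request's
 * buffer at the given BufferOffset.
 */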
1613 static int data_out_start(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
1615 struct iscsi_data_out_hdr *req_hdr =
1616 (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1617 struct iscsi_cmnd *orig_req = NULL;
1618 u32 offset = be32_to_cpu(req_hdr->buffer_offset);
1624 * There is no race with send_r2t() and conn_abort(), since
1625 * all functions are called from the single read thread.
1627 iscsi_extracheck_is_rd_thread(cmnd->conn);
1629 update_stat_sn(cmnd);
1631 cmnd->cmd_req = orig_req = cmnd_find_hash(conn->session, req_hdr->itt,
1633 if (unlikely(orig_req == NULL)) {
1634 /* It might happen if req was aborted and then freed */
1635 TRACE(TRACE_MGMT_MINOR, "Unable to find scsi task %x %x",
1636 cmnd_itt(cmnd), cmnd_ttt(cmnd));
1640 if (orig_req->is_unsolicited_data) {
1641 if (unlikely(orig_req->r2t_length < cmnd->pdu.datasize)) {
1642 PRINT_ERROR("Data size (%d) > R2T length (%d)",
1643 cmnd->pdu.datasize, orig_req->r2t_length);
1644 mark_conn_closed(conn);
1648 orig_req->r2t_length -= cmnd->pdu.datasize;
1651 /* Check unsolicited burst data */
1652 if (unlikely((req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) &&
1653 (orig_req->pdu.bhs.flags & ISCSI_FLG_FINAL))) {
1654 PRINT_ERROR("Unexpected data from %x %x",
1655 cmnd_itt(cmnd), cmnd_ttt(cmnd));
1656 mark_conn_closed(conn);
1661 TRACE_WRITE("%u %p %p %u %u", req_hdr->ttt, cmnd, orig_req,
1662 offset, cmnd->pdu.datasize);
1664 res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
1667 TRACE_EXIT_RES(res);
1671 sBUG_ON(cmnd->rejected);
1673 cmnd->reject_reason = ISCSI_REJECT_DATA;
1674 cmnd_prepare_get_rejected_cmd_data(cmnd);
1678 static void data_out_end(struct iscsi_cmnd *cmnd)
1680 struct iscsi_data_out_hdr *req_hdr =
1681 (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1682 struct iscsi_cmnd *req;
1684 sBUG_ON(cmnd == NULL);
1685 req = cmnd->cmd_req;
1686 sBUG_ON(req == NULL);
1688 TRACE_DBG("cmnd %p, req %p", cmnd, req);
1690 iscsi_extracheck_is_rd_thread(cmnd->conn);
1692 if (!(cmnd->conn->ddigest_type & DIGEST_NONE) &&
1693 !cmnd->ddigest_checked) {
1694 cmd_add_on_rx_ddigest_list(req, cmnd);
1698 if (req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1699 TRACE_DBG("ISCSI_RESERVED_TAG, FINAL %x",
1700 req_hdr->flags & ISCSI_FLG_FINAL);
1702 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1703 req->is_unsolicited_data = 0;
1709 TRACE_DBG("FINAL %x, outstanding_r2t %d, r2t_length %d",
1710 req_hdr->flags & ISCSI_FLG_FINAL,
1711 req->outstanding_r2t, req->r2t_length);
1713 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1714 if (unlikely(req->is_unsolicited_data)) {
1715 PRINT_ERROR("Unexpected unsolicited data "
1716 "(r2t_length %u, outstanding_r2t %d)",
1718 req->is_unsolicited_data);
1719 mark_conn_closed(req->conn);
1722 req->outstanding_r2t--;
1727 if (req->r2t_length != 0) {
1728 if (!req->is_unsolicited_data)
1731 iscsi_restart_waiting_cmnd(req);
1738 static void __cmnd_abort(struct iscsi_cmnd *cmnd)
1741 * Here, if cmnd is data_waiting, we should iscsi_fail_waiting_cmnd()
1742 * it. But, since this function can be called from any thread, not only
1743 * from the read one, we currently can't do that, because there is no
1744 * appropriate locking protection. This isn't something we must fix
1745 * for 1.0.0. So, currently a misbehaving initiator that stops sending
1746 * data in the R2T state for a device shared between targets, while an
1747 * aborting TM command, e.g. TARGET RESET, is issued for that device
1748 * from another initiator, can block the response for this TM command
1749 * virtually forever and thereby make the issuing initiator eventually
1750 * put the device offline.
1752 * ToDo for the next version: a simple connection mutex, taken by the
1753 * read thread before starting any processing and also by this
1754 * function, should be sufficient.
1757 TRACE_MGMT_DBG("Aborting cmd %p, scst_cmd %p (scst state %x, "
1758 "ref_cnt %d, itt %x, sn %u, op %x, r2t_len %x, CDB op %x, "
1759 "size to write %u, is_unsolicited_data %d, "
1760 "outstanding_r2t %d, data_waiting %d, sess->exp_cmd_sn %u, "
1761 "conn %p, rd_task %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
1762 atomic_read(&cmnd->ref_cnt), cmnd_itt(cmnd), cmnd->pdu.bhs.sn,
1763 cmnd_opcode(cmnd), cmnd->r2t_length, cmnd_scsicode(cmnd),
1764 cmnd_write_size(cmnd), cmnd->is_unsolicited_data,
1765 cmnd->outstanding_r2t, cmnd->data_waiting,
1766 cmnd->conn->session->exp_cmd_sn, cmnd->conn,
1767 cmnd->conn->rd_task);
1769 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
1770 TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
1773 cmnd->tm_aborted = 1;
1778 /* Must be called from the read or conn close thread */
1779 static int cmnd_abort(struct iscsi_cmnd *req)
1781 struct iscsi_session *session = req->conn->session;
1782 struct iscsi_task_mgt_hdr *req_hdr =
1783 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1784 struct iscsi_cmnd *cmnd;
1787 req_hdr->ref_cmd_sn = be32_to_cpu(req_hdr->ref_cmd_sn);
1789 if (after(req_hdr->ref_cmd_sn, req_hdr->cmd_sn)) {
1790 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) > CmdSN(%u)",
1791 req_hdr->ref_cmd_sn, req_hdr->cmd_sn);
1792 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1796 cmnd = cmnd_find_hash_get(session, req_hdr->rtt, ISCSI_RESERVED_TAG);
1798 struct iscsi_conn *conn = cmnd->conn;
1799 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1801 if (req_hdr->lun != hdr->lun) {
1802 PRINT_ERROR("ABORT TASK: LUN mismatch: req LUN "
1803 "%llx, cmd LUN %llx, rtt %u",
1804 (long long unsigned int)req_hdr->lun,
1805 (long long unsigned int)hdr->lun,
1807 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1811 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
1812 if (req_hdr->ref_cmd_sn != req_hdr->cmd_sn) {
1813 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != TM "
1814 "cmd CmdSN(%u) for immediate command "
1815 "%p", req_hdr->ref_cmd_sn,
1816 req_hdr->cmd_sn, cmnd);
1817 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1821 if (req_hdr->ref_cmd_sn != hdr->cmd_sn) {
1822 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != "
1823 "CmdSN(%u) for command %p",
1824 req_hdr->ref_cmd_sn, req_hdr->cmd_sn,
1826 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1831 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1832 (req_hdr->cmd_sn == hdr->cmd_sn)) {
1833 PRINT_ERROR("ABORT TASK: SN mismatch: req SN %x, "
1834 "cmd SN %x, rtt %u", req_hdr->cmd_sn,
1835 hdr->cmd_sn, req_hdr->rtt);
1836 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1840 spin_lock_bh(&conn->cmd_list_lock);
1842 spin_unlock_bh(&conn->cmd_list_lock);
1847 TRACE_MGMT_DBG("cmd RTT %x not found", req_hdr->rtt);
1848 err = ISCSI_RESPONSE_UNKNOWN_TASK;
1859 /* Must be called from the read or conn close thread */
1860 static int target_abort(struct iscsi_cmnd *req, int all)
1862 struct iscsi_target *target = req->conn->session->target;
1863 struct iscsi_task_mgt_hdr *req_hdr =
1864 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1865 struct iscsi_session *session;
1866 struct iscsi_conn *conn;
1867 struct iscsi_cmnd *cmnd;
1869 mutex_lock(&target->target_mutex);
1871 list_for_each_entry(session, &target->session_list,
1872 session_list_entry) {
1873 list_for_each_entry(conn, &session->conn_list,
1875 spin_lock_bh(&conn->cmd_list_lock);
1876 list_for_each_entry(cmnd, &conn->cmd_list,
1882 else if (req_hdr->lun == cmnd_hdr(cmnd)->lun)
1885 spin_unlock_bh(&conn->cmd_list_lock);
1889 mutex_unlock(&target->target_mutex);
1893 /* Must be called from the read or conn close thread */
1894 static void task_set_abort(struct iscsi_cmnd *req)
1896 struct iscsi_session *session = req->conn->session;
1897 struct iscsi_task_mgt_hdr *req_hdr =
1898 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1899 struct iscsi_target *target = session->target;
1900 struct iscsi_conn *conn;
1901 struct iscsi_cmnd *cmnd;
1903 mutex_lock(&target->target_mutex);
1905 list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
1906 spin_lock_bh(&conn->cmd_list_lock);
1907 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1908 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1911 if (req_hdr->lun != hdr->lun)
1913 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1914 req_hdr->cmd_sn == hdr->cmd_sn)
1918 spin_unlock_bh(&conn->cmd_list_lock);
1921 mutex_unlock(&target->target_mutex);
1925 /* Must be called from the read or conn close thread */
1926 void conn_abort(struct iscsi_conn *conn)
1928 struct iscsi_cmnd *cmnd;
1930 TRACE_MGMT_DBG("Aborting conn %p", conn);
1932 iscsi_extracheck_is_rd_thread(conn);
1934 spin_lock_bh(&conn->cmd_list_lock);
1936 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1938 if (cmnd->data_waiting) {
1939 if (!cmnd_get_check(cmnd)) {
1940 spin_unlock_bh(&conn->cmd_list_lock);
1942 /* ToDo: this is racy for MC/S */
1943 TRACE_MGMT_DBG("Restarting data waiting cmd "
1945 iscsi_fail_waiting_cmnd(cmnd);
1950 * We are in the read thread, so we need not
1951 * worry that after the cmnd release the conn gets
1954 spin_lock_bh(&conn->cmd_list_lock);
1959 spin_unlock_bh(&conn->cmd_list_lock);
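/*
 * Dispatches an incoming Task Management Function request: local
 * pre-abort bookkeeping (cmnd_abort(), task_set_abort(), target_abort())
 * is done first, then the request is translated into the corresponding
 * SCST management function and handed to scst_rx_mgmt_fn(); rejected or
 * invalid requests are answered directly via iscsi_send_task_mgmt_resp().
 */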
1964 static void execute_task_management(struct iscsi_cmnd *req)
1966 struct iscsi_conn *conn = req->conn;
1967 struct iscsi_session *sess = conn->session;
1968 struct iscsi_task_mgt_hdr *req_hdr =
1969 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1970 int rc, status, function = req_hdr->function & ISCSI_FUNCTION_MASK;
1971 struct scst_rx_mgmt_params params;
1973 TRACE((function == ISCSI_FUNCTION_ABORT_TASK) ?
1974 TRACE_MGMT_MINOR : TRACE_MGMT,
1975 "TM fn %d", function);
1977 TRACE_MGMT_DBG("TM req %p, itt %x, rtt %x, sn %u, con %p", req,
1978 cmnd_itt(req), req_hdr->rtt, req_hdr->cmd_sn, conn);
1980 iscsi_extracheck_is_rd_thread(conn);
1982 spin_lock(&sess->sn_lock);
1984 sess->tm_sn = req_hdr->cmd_sn;
1985 if (sess->tm_rsp != NULL) {
1986 struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
1988 TRACE(TRACE_MGMT_MINOR, "Dropping delayed TM rsp %p", tm_rsp);
1990 sess->tm_rsp = NULL;
1993 spin_unlock(&sess->sn_lock);
1995 sBUG_ON(sess->tm_active < 0);
1997 rsp_cmnd_release(tm_rsp);
1999 spin_unlock(&sess->sn_lock);
2001 memset(&params, 0, sizeof(params));
2002 params.atomic = SCST_NON_ATOMIC;
2003 params.tgt_priv = req;
2005 if ((function != ISCSI_FUNCTION_ABORT_TASK) &&
2006 (req_hdr->rtt != ISCSI_RESERVED_TAG)) {
2007 PRINT_ERROR("Invalid RTT %x (TM fn %d)", req_hdr->rtt,
2010 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2014 /* cmd_sn was already converted to CPU format in check_cmd_sn() */
2017 case ISCSI_FUNCTION_ABORT_TASK:
2019 status = cmnd_abort(req);
2021 params.fn = SCST_ABORT_TASK;
2022 params.tag = req_hdr->rtt;
2024 params.lun = (uint8_t *)&req_hdr->lun;
2025 params.lun_len = sizeof(req_hdr->lun);
2027 params.cmd_sn = req_hdr->cmd_sn;
2028 params.cmd_sn_set = 1;
2029 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2031 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2034 case ISCSI_FUNCTION_ABORT_TASK_SET:
2035 task_set_abort(req);
2036 params.fn = SCST_ABORT_TASK_SET;
2037 params.lun = (uint8_t *)&req_hdr->lun;
2038 params.lun_len = sizeof(req_hdr->lun);
2040 params.cmd_sn = req_hdr->cmd_sn;
2041 params.cmd_sn_set = 1;
2042 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2044 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2046 case ISCSI_FUNCTION_CLEAR_TASK_SET:
2047 task_set_abort(req);
2048 params.fn = SCST_CLEAR_TASK_SET;
2049 params.lun = (uint8_t *)&req_hdr->lun;
2050 params.lun_len = sizeof(req_hdr->lun);
2052 params.cmd_sn = req_hdr->cmd_sn;
2053 params.cmd_sn_set = 1;
2054 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2056 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2058 case ISCSI_FUNCTION_CLEAR_ACA:
2059 params.fn = SCST_CLEAR_ACA;
2060 params.lun = (uint8_t *)&req_hdr->lun;
2061 params.lun_len = sizeof(req_hdr->lun);
2063 params.cmd_sn = req_hdr->cmd_sn;
2064 params.cmd_sn_set = 1;
2065 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2067 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2069 case ISCSI_FUNCTION_TARGET_COLD_RESET:
2070 case ISCSI_FUNCTION_TARGET_WARM_RESET:
2071 target_abort(req, 1);
2072 params.fn = SCST_TARGET_RESET;
2073 params.cmd_sn = req_hdr->cmd_sn;
2074 params.cmd_sn_set = 1;
2075 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2077 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2079 case ISCSI_FUNCTION_LOGICAL_UNIT_RESET:
2080 target_abort(req, 0);
2081 params.fn = SCST_LUN_RESET;
2082 params.lun = (uint8_t *)&req_hdr->lun;
2083 params.lun_len = sizeof(req_hdr->lun);
2085 params.cmd_sn = req_hdr->cmd_sn;
2086 params.cmd_sn_set = 1;
2087 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2089 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2091 case ISCSI_FUNCTION_TASK_REASSIGN:
2093 status = ISCSI_RESPONSE_ALLEGIANCE_REASSIGNMENT_UNSUPPORTED;
2096 PRINT_ERROR("Unknown TM function %d", function);
2098 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2104 iscsi_send_task_mgmt_resp(req, status);
2109 static void noop_out_exec(struct iscsi_cmnd *req)
2111 struct iscsi_cmnd *rsp;
2112 struct iscsi_nop_in_hdr *rsp_hdr;
2114 TRACE_DBG("%p", req);
2116 if (cmnd_itt(req) != cpu_to_be32(ISCSI_RESERVED_TAG)) {
2117 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2119 rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
2120 rsp_hdr->opcode = ISCSI_OP_NOOP_IN;
2121 rsp_hdr->flags = ISCSI_FLG_FINAL;
2122 rsp_hdr->itt = req->pdu.bhs.itt;
2123 rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
2125 if (req->pdu.datasize)
2126 sBUG_ON(req->sg == NULL);
2128 sBUG_ON(req->sg != NULL);
2132 rsp->sg_cnt = req->sg_cnt;
2133 rsp->bufflen = req->bufflen;
2136 /* We already checked it in check_segment_length() */
2137 sBUG_ON(get_pgcnt(req->pdu.datasize, 0) > ISCSI_CONN_IOV_MAX);
2139 rsp->pdu.datasize = req->pdu.datasize;
2140 iscsi_cmnd_init_write(rsp,
2141 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2142 req_cmnd_release(req);
2148 static void logout_exec(struct iscsi_cmnd *req)
2150 struct iscsi_logout_req_hdr *req_hdr;
2151 struct iscsi_cmnd *rsp;
2152 struct iscsi_logout_rsp_hdr *rsp_hdr;
2154 PRINT_INFO("Logout received from initiator %s",
2155 req->conn->session->initiator_name);
2156 TRACE_DBG("%p", req);
2158 req_hdr = (struct iscsi_logout_req_hdr *)&req->pdu.bhs;
2159 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2160 rsp_hdr = (struct iscsi_logout_rsp_hdr *)&rsp->pdu.bhs;
2161 rsp_hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2162 rsp_hdr->flags = ISCSI_FLG_FINAL;
2163 rsp_hdr->itt = req_hdr->itt;
2164 rsp->should_close_conn = 1;
2165 iscsi_cmnd_init_write(rsp,
2166 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2167 req_cmnd_release(req);
2171 static void iscsi_cmnd_exec(struct iscsi_cmnd *cmnd)
2175 TRACE_DBG("%p,%x,%u", cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn);
2177 iscsi_extracheck_is_rd_thread(cmnd->conn);
2179 if (unlikely(cmnd->tm_aborted)) {
2180 TRACE_MGMT_DBG("cmnd %p (scst_cmd %p) aborted", cmnd,
2182 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
2186 if (unlikely(cmnd->rejected))
2189 switch (cmnd_opcode(cmnd)) {
2190 case ISCSI_OP_SCSI_CMD:
2191 if (cmnd->r2t_length != 0) {
2192 if (!cmnd->is_unsolicited_data) {
2197 iscsi_restart_cmnd(cmnd);
2199 case ISCSI_OP_NOOP_OUT:
2200 noop_out_exec(cmnd);
2202 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2203 execute_task_management(cmnd);
2205 case ISCSI_OP_LOGOUT_CMD:
2209 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2210 req_cmnd_release(cmnd);
2218 TRACE_MGMT_DBG("Rejected cmd %p (reason %d)", cmnd,
2219 cmnd->reject_reason);
2220 switch (cmnd->reject_reason) {
2222 PRINT_ERROR("Unexpected reject reason %d",
2223 cmnd->reject_reason);
2225 case ISCSI_REJECT_SCSI_CMD:
2226 req_cmnd_release(cmnd);
2233 * Note: the code below passes a kernel space pointer (&opt) to setsockopt()
2234 * while the declaration of setsockopt specifies that it expects a user space
2235 * pointer. This seems to work fine, and this approach is also used in some
2236 * other parts of the Linux kernel (see e.g. fs/ocfs2/cluster/tcp.c).
2238 static void set_cork(struct socket *sock, int on)
2245 sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK,
2246 (void __force __user *)&opt, sizeof(opt));
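/*
 * Called by the write thread when transmission of a response PDU
 * starts: it corks the socket, points write_iov at the BHS and stamps
 * the PDU with StatSN/ExpCmdSN/MaxCmdSN via cmnd_set_sn(). For Data-In
 * PDUs the write offset into the command buffer is taken from
 * BufferOffset, and StatSN is only advanced on the final PDU of the
 * sequence.
 */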
2251 void cmnd_tx_start(struct iscsi_cmnd *cmnd)
2253 struct iscsi_conn *conn = cmnd->conn;
2255 TRACE_DBG("conn %p, cmnd %p, opcode %x", conn, cmnd, cmnd_opcode(cmnd));
2256 iscsi_cmnd_set_length(&cmnd->pdu);
2258 iscsi_extracheck_is_wr_thread(conn);
2260 set_cork(conn->sock, 1);
2262 conn->write_iop = conn->write_iov;
2263 conn->write_iop->iov_base = (void __force __user *)(&cmnd->pdu.bhs);
2264 conn->write_iop->iov_len = sizeof(cmnd->pdu.bhs);
2265 conn->write_iop_used = 1;
2266 conn->write_size = sizeof(cmnd->pdu.bhs) + cmnd->pdu.datasize;
2267 conn->write_offset = 0;
2269 switch (cmnd_opcode(cmnd)) {
2270 case ISCSI_OP_NOOP_IN:
2271 cmnd_set_sn(cmnd, 1);
2273 case ISCSI_OP_SCSI_RSP:
2274 cmnd_set_sn(cmnd, 1);
2276 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2277 cmnd_set_sn(cmnd, 1);
2279 case ISCSI_OP_TEXT_RSP:
2280 cmnd_set_sn(cmnd, 1);
2282 case ISCSI_OP_SCSI_DATA_IN:
2284 struct iscsi_data_in_hdr *rsp =
2285 (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
2286 u32 offset = be32_to_cpu(rsp->buffer_offset);
2288 TRACE_DBG("cmnd %p, offset %u, datasize %u, bufflen %u", cmnd,
2289 offset, cmnd->pdu.datasize, cmnd->bufflen);
2291 sBUG_ON(offset > cmnd->bufflen);
2292 sBUG_ON(offset + cmnd->pdu.datasize > cmnd->bufflen);
2294 conn->write_offset = offset;
2296 cmnd_set_sn(cmnd, (rsp->flags & ISCSI_FLG_FINAL) ? 1 : 0);
2299 case ISCSI_OP_LOGOUT_RSP:
2300 cmnd_set_sn(cmnd, 1);
2303 cmnd->pdu.bhs.sn = cmnd_set_sn(cmnd, 0);
2305 case ISCSI_OP_ASYNC_MSG:
2306 cmnd_set_sn(cmnd, 1);
2308 case ISCSI_OP_REJECT:
2309 cmnd_set_sn(cmnd, 1);
2312 PRINT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
2316 iscsi_dump_pdu(&cmnd->pdu);
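/*
 * Finish transmission of a PDU: close the connection(s) if the just
 * sent response requested it (e.g. Logout Response or Target Cold
 * Reset) and uncork the socket.
 */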
2320 void cmnd_tx_end(struct iscsi_cmnd *cmnd)
2322 struct iscsi_conn *conn = cmnd->conn;
2324 TRACE_DBG("%p:%x (should_close_conn %d, should_close_all_conn %d)",
2325 cmnd, cmnd_opcode(cmnd), cmnd->should_close_conn,
2326 cmnd->should_close_all_conn);
2328 switch (cmnd_opcode(cmnd)) {
2329 case ISCSI_OP_NOOP_IN:
2330 case ISCSI_OP_SCSI_RSP:
2331 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2332 case ISCSI_OP_TEXT_RSP:
2334 case ISCSI_OP_ASYNC_MSG:
2335 case ISCSI_OP_REJECT:
2336 case ISCSI_OP_SCSI_DATA_IN:
2337 case ISCSI_OP_LOGOUT_RSP:
2340 PRINT_CRIT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2345 if (unlikely(cmnd->should_close_conn)) {
2346 if (cmnd->should_close_all_conn) {
2347 PRINT_INFO("Closing all connections for target %x at "
2348 "initiator's %s request",
2349 cmnd->conn->session->target->tid,
2350 conn->session->initiator_name);
2351 target_del_all_sess(cmnd->conn->session->target, 0);
2353 PRINT_INFO("Closing connection at initiator's %s "
2354 "request", conn->session->initiator_name);
2355 mark_conn_closed(conn);
2359 set_cork(cmnd->conn->sock, 0);
2364 * Push the command for execution. This function reorders the commands.
2365 * Called from the read thread.
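 *
 * Immediate commands are executed right away. Non-immediate commands
 * are executed in CmdSN order: a command with CmdSN == ExpCmdSN is
 * executed, together with any now-unblocked commands from the pending
 * list, while out-of-order commands are queued on the session's
 * pending_list until ExpCmdSN catches up.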
2367 static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd)
2369 struct iscsi_session *session = cmnd->conn->session;
2370 struct list_head *entry;
2373 TRACE_DBG("%p:%x %u,%u",
2374 cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn,
2375 session->exp_cmd_sn);
2377 iscsi_extracheck_is_rd_thread(cmnd->conn);
2379 sBUG_ON(cmnd->parent_req != NULL);
2381 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
2382 TRACE_DBG("Immediate cmd %p (cmd_sn %u)", cmnd,
2384 iscsi_cmnd_exec(cmnd);
2388 spin_lock(&session->sn_lock);
2390 cmd_sn = cmnd->pdu.bhs.sn;
2391 if (cmd_sn == session->exp_cmd_sn) {
2393 session->exp_cmd_sn = ++cmd_sn;
2395 if (unlikely(session->tm_active > 0)) {
2396 if (before(cmd_sn, session->tm_sn)) {
2397 struct iscsi_conn *conn = cmnd->conn;
2399 spin_unlock(&session->sn_lock);
2401 spin_lock_bh(&conn->cmd_list_lock);
2403 spin_unlock_bh(&conn->cmd_list_lock);
2405 spin_lock(&session->sn_lock);
2407 iscsi_check_send_delayed_tm_resp(session);
2410 spin_unlock(&session->sn_lock);
2412 iscsi_cmnd_exec(cmnd);
2414 spin_lock(&session->sn_lock);
2416 if (list_empty(&session->pending_list))
2418 cmnd = list_entry(session->pending_list.next,
2420 pending_list_entry);
2421 if (cmnd->pdu.bhs.sn != cmd_sn)
2424 list_del(&cmnd->pending_list_entry);
2427 TRACE_DBG("Processing pending cmd %p (cmd_sn %u)",
2433 TRACE_DBG("Pending cmd %p (cmd_sn %u, exp_cmd_sn %u)",
2434 cmnd, cmd_sn, session->exp_cmd_sn);
2437 * iSCSI RFC 3720: "The target MUST silently ignore any
2438 * non-immediate command outside of [from ExpCmdSN to MaxCmdSN
2439 * inclusive] range". But we don't honor the MaxCmdSN
2440 * requirement, because we adjust MaxCmdSN from a separate
2441 * write thread, so rarely an initiator can legally send a
2442 * command with CmdSN > MaxCmdSN. That doesn't hurt anything;
2443 * in the worst case it leads to an additional QUEUE FULL
2444 * status.
2447 if (unlikely(before(cmd_sn, session->exp_cmd_sn))) {
2448 PRINT_ERROR("Unexpected cmd_sn (%u,%u)", cmd_sn,
2449 session->exp_cmd_sn);
2454 if (unlikely(after(cmd_sn, session->exp_cmd_sn +
2455 iscsi_get_allowed_cmds(session)))) {
2456 TRACE_MGMT_DBG("Too large cmd_sn %u (exp_cmd_sn %u, "
2457 "max_sn %u)", cmd_sn, session->exp_cmd_sn,
2458 iscsi_get_allowed_cmds(session));
2462 spin_unlock(&session->sn_lock);
2464 if (unlikely(drop)) {
2465 req_cmnd_release_force(cmnd,
2466 ISCSI_FORCE_RELEASE_WRITE);
2470 if (unlikely(cmnd->tm_aborted)) {
2471 struct iscsi_cmnd *tm_clone;
2473 TRACE_MGMT_DBG("Pending aborted cmnd %p, creating TM "
2474 "clone (scst cmd %p, state %d)", cmnd,
2475 cmnd->scst_cmd, cmnd->scst_state);
2477 tm_clone = cmnd_alloc(cmnd->conn, NULL);
2478 if (tm_clone != NULL) {
2479 tm_clone->tm_aborted = 1;
2480 tm_clone->pdu = cmnd->pdu;
2482 TRACE_MGMT_DBG("TM clone %p created",
2485 iscsi_cmnd_exec(cmnd);
2488 PRINT_ERROR("%s", "Unable to create TM clone");
2491 spin_lock(&session->sn_lock);
2492 list_for_each(entry, &session->pending_list) {
2493 struct iscsi_cmnd *tmp =
2494 list_entry(entry, struct iscsi_cmnd,
2495 pending_list_entry);
2496 if (before(cmd_sn, tmp->pdu.bhs.sn))
2499 list_add_tail(&cmnd->pending_list_entry, entry);
2503 spin_unlock(&session->sn_lock);
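/*
 * Verify that the PDU's data segment does not exceed the negotiated
 * MaxRecvDataSegmentLength; on violation the connection is closed.
 */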
2508 static int check_segment_length(struct iscsi_cmnd *cmnd)
2510 struct iscsi_conn *conn = cmnd->conn;
2511 struct iscsi_session *session = conn->session;
2513 if (unlikely(cmnd->pdu.datasize > session->sess_param.max_recv_data_length)) {
2514 PRINT_ERROR("Initiator %s violated negotiated parameters: "
2515 "data too long (ITT %x, datasize %u, "
2516 "max_recv_data_length %u", session->initiator_name,
2517 cmnd_itt(cmnd), cmnd->pdu.datasize,
2518 session->sess_param.max_recv_data_length);
2519 mark_conn_closed(conn);
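/*
 * Called when a PDU header (BHS + AHS) has been received: validate the
 * data segment length and dispatch per opcode, e.g. insert the command
 * into the ITT hash and start SCSI command processing. Unsupported
 * opcodes are rejected.
 */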
2525 int cmnd_rx_start(struct iscsi_cmnd *cmnd)
2527 struct iscsi_conn *conn = cmnd->conn;
2530 iscsi_dump_pdu(&cmnd->pdu);
2532 res = check_segment_length(cmnd);
2536 switch (cmnd_opcode(cmnd)) {
2537 case ISCSI_OP_NOOP_OUT:
2538 rc = noop_out_start(cmnd);
2540 case ISCSI_OP_SCSI_CMD:
2541 rc = cmnd_insert_hash(cmnd);
2542 if (likely(rc == 0)) {
2543 res = scsi_cmnd_start(cmnd);
2544 if (unlikely(res != 0))
2548 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2549 rc = cmnd_insert_hash(cmnd);
2551 case ISCSI_OP_SCSI_DATA_OUT:
2552 res = data_out_start(conn, cmnd);
2553 rc = 0; /* to avoid compiler warning */
2554 if (unlikely(res != 0))
2557 case ISCSI_OP_LOGOUT_CMD:
2558 rc = cmnd_insert_hash(cmnd);
2560 case ISCSI_OP_TEXT_CMD:
2561 case ISCSI_OP_SNACK_CMD:
2562 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2565 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2569 if (unlikely(rc < 0)) {
2570 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
2571 PRINT_ERROR("Error %d (iSCSI opcode %x, ITT %x, op %x)", rc,
2572 cmnd_opcode(cmnd), cmnd_itt(cmnd),
2573 (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD ?
2575 iscsi_cmnd_reject(cmnd, -rc);
2579 TRACE_EXIT_RES(res);
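/*
 * Called when the whole PDU, including its data segment, has been
 * received: queue the request for in-order execution or, for rejected
 * commands, release them according to the reject reason.
 */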
2583 void cmnd_rx_end(struct iscsi_cmnd *cmnd)
2587 TRACE_DBG("%p:%x", cmnd, cmnd_opcode(cmnd));
2589 if (unlikely(cmnd->rejected))
2593 switch (cmnd_opcode(cmnd)) {
2594 case ISCSI_OP_SCSI_CMD:
2595 case ISCSI_OP_NOOP_OUT:
2596 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2597 case ISCSI_OP_LOGOUT_CMD:
2598 iscsi_session_push_cmnd(cmnd);
2600 case ISCSI_OP_SCSI_DATA_OUT:
2604 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2605 req_cmnd_release(cmnd);
2614 switch (cmnd->reject_reason) {
2616 PRINT_ERROR("Unexpected reject reason %d",
2617 cmnd->reject_reason);
2619 case ISCSI_REJECT_CMD:
2620 case ISCSI_REJECT_DATA:
2621 req_cmnd_release(cmnd);
2623 case ISCSI_REJECT_SCSI_CMD:
2629 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
2630 static int iscsi_alloc_data_buf(struct scst_cmd *cmd)
2633 * sock->ops->sendpage() is an async zero-copy operation,
2634 * so we must be sure not to free and reuse
2635 * the command's buffer before the send has been completed
2636 * by the network layer. That is possible only if we
2637 * don't use the SGV cache.
2639 EXTRACHECKS_BUG_ON(!(scst_cmd_get_data_direction(cmd) & SCST_DATA_READ));
2640 scst_cmd_set_no_sgv(cmd);
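/*
 * Change the command's SCST state and, unless we are already running
 * in the connection's RX thread, wake that thread up to continue
 * processing.
 */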
2645 static inline void iscsi_set_state_wake_up(struct iscsi_cmnd *req,
2648 if (req->conn->rx_task == current)
2649 req->scst_state = new_state;
2652 * We wait for the state change without any protection, so
2653 * without cmnd_get() it is possible that req will die
2654 * "immediately" after the state assignment and
2655 * iscsi_make_conn_rd_active() will operate on dead data.
2656 * We use the ordered version of cmnd_get(), because "get"
2657 * must be done before the state assignment.
2659 cmnd_get_ordered(req);
2660 req->scst_state = new_state;
2661 iscsi_make_conn_rd_active(req->conn);
2662 if (unlikely(req->conn->closing)) {
2663 TRACE_DBG("Waking up closing conn %p", req->conn);
2664 wake_up(&req->conn->read_state_waitQ);
2671 static void iscsi_preprocessing_done(struct scst_cmd *scst_cmd)
2673 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2674 scst_cmd_get_tgt_priv(scst_cmd);
2676 TRACE_DBG("req %p", req);
2678 EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_RX_CMD);
2680 iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_AFTER_PREPROC);
2687 * IMPORTANT! Connection conn must be protected by additional conn_get()
2688 * upon entry to this function, because otherwise it could be destroyed
2689 * inside as a result of iscsi_send(), which releases sent commands.
2691 static void iscsi_try_local_processing(struct iscsi_conn *conn)
2697 spin_lock_bh(&iscsi_wr_lock);
2698 switch (conn->wr_state) {
2699 case ISCSI_CONN_WR_STATE_IN_LIST:
2700 list_del(&conn->wr_list_entry);
2702 case ISCSI_CONN_WR_STATE_IDLE:
2703 #ifdef CONFIG_SCST_EXTRACHECKS
2704 conn->wr_task = current;
2706 conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
2707 conn->wr_space_ready = 0;
2714 spin_unlock_bh(&iscsi_wr_lock);
2719 if (test_write_ready(conn))
2720 rc = iscsi_send(conn);
2722 spin_lock_bh(&iscsi_wr_lock);
2723 #ifdef CONFIG_SCST_EXTRACHECKS
2724 conn->wr_task = NULL;
2726 if ((rc <= 0) || test_write_ready(conn)) {
2727 list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
2728 conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
2729 wake_up(&iscsi_wr_waitQ);
2731 conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
2732 spin_unlock_bh(&iscsi_wr_lock);
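/*
 * SCST xmit_response() callback: build the Data-In and/or SCSI Response
 * PDUs for the finished command and queue them for transmission, then
 * try to send them directly from this context via
 * iscsi_try_local_processing().
 */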
2739 static int iscsi_xmit_response(struct scst_cmd *scst_cmd)
2741 int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2742 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2743 scst_cmd_get_tgt_priv(scst_cmd);
2744 struct iscsi_conn *conn = req->conn;
2745 int status = scst_cmd_get_status(scst_cmd);
2746 u8 *sense = scst_cmd_get_sense_buffer(scst_cmd);
2747 int sense_len = scst_cmd_get_sense_buffer_len(scst_cmd);
2748 int old_state = req->scst_state;
2750 if (scst_cmd_atomic(scst_cmd))
2751 return SCST_TGT_RES_NEED_THREAD_CTX;
2753 scst_cmd_set_tgt_priv(scst_cmd, NULL);
2755 req->tm_aborted |= scst_cmd_aborted(scst_cmd) ? 1 : 0;
2756 if (unlikely(req->tm_aborted)) {
2757 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
2760 scst_set_delivery_status(req->scst_cmd,
2761 SCST_CMD_DELIVERY_ABORTED);
2763 if (old_state == ISCSI_CMD_STATE_RESTARTED) {
2764 req->scst_state = ISCSI_CMD_STATE_PROCESSED;
2765 req_cmnd_release_force(req, ISCSI_FORCE_RELEASE_WRITE);
2767 iscsi_set_state_wake_up(req,
2768 ISCSI_CMD_STATE_PROCESSED);
2773 if (unlikely(old_state != ISCSI_CMD_STATE_RESTARTED)) {
2774 TRACE_DBG("req %p on %d state", req, old_state);
2777 * We could have preliminarily finished the req before we knew its
2778 * device, so check that we return the correct sense format.
2780 scst_check_convert_sense(scst_cmd);
2782 create_status_rsp(req, status, sense, sense_len);
2784 switch (old_state) {
2785 case ISCSI_CMD_STATE_RX_CMD:
2786 case ISCSI_CMD_STATE_AFTER_PREPROC:
2792 iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_PROCESSED);
2796 req->scst_state = ISCSI_CMD_STATE_PROCESSED;
2798 req->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2799 req->sg = scst_cmd_get_sg(scst_cmd);
2800 req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2802 TRACE_DBG("req %p, is_send_status=%x, req->bufflen=%d, req->sg=%p, "
2803 "req->sg_cnt %d", req, is_send_status, req->bufflen, req->sg,
2806 if (unlikely((req->bufflen != 0) && !is_send_status)) {
2807 PRINT_CRIT_ERROR("%s", "Sending DATA without STATUS is "
2809 scst_set_cmd_error(scst_cmd,
2810 SCST_LOAD_SENSE(scst_sense_hardw_error));
2814 if (req->bufflen != 0) {
2816 * The check above makes sure that is_send_status is set,
2817 * so status is valid here, but in the future that could change.
2820 if ((status != SAM_STAT_CHECK_CONDITION) &&
2821 ((cmnd_hdr(req)->flags & (ISCSI_CMD_WRITE|ISCSI_CMD_READ)) !=
2822 (ISCSI_CMD_WRITE|ISCSI_CMD_READ))) {
2823 send_data_rsp(req, status, is_send_status);
2825 struct iscsi_cmnd *rsp;
2826 send_data_rsp(req, 0, 0);
2827 if (is_send_status) {
2828 rsp = create_status_rsp(req, status, sense,
2830 iscsi_set_resid(req, rsp, true);
2831 iscsi_cmnd_init_write(rsp,
2832 ISCSI_INIT_WRITE_REMOVE_HASH);
2835 } else if (is_send_status) {
2836 struct iscsi_cmnd *rsp;
2837 rsp = create_status_rsp(req, status, sense, sense_len);
2838 iscsi_set_resid(req, rsp, false);
2839 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH);
2841 #ifdef CONFIG_SCST_EXTRACHECKS
2847 * "_ordered" here to protect from reorder, which can lead to
2848 * preliminary connection destroy in req_cmnd_release(). Just in
2849 * case, actually, because reordering shouldn't go so far, but who
2852 conn_get_ordered(conn);
2853 req_cmnd_release(req);
2854 iscsi_try_local_processing(conn);
2858 return SCST_TGT_RES_SUCCESS;
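/*
 * Check whether a task management response must be delayed until all
 * commands preceding the TM request (by CmdSN) have been received.
 */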
2861 /* Called under sn_lock */
2862 static bool iscsi_is_delay_tm_resp(struct iscsi_cmnd *rsp)
2865 struct iscsi_task_mgt_hdr *req_hdr =
2866 (struct iscsi_task_mgt_hdr *)&rsp->parent_req->pdu.bhs;
2867 int function = req_hdr->function & ISCSI_FUNCTION_MASK;
2868 struct iscsi_session *sess = rsp->conn->session;
2872 /* This should be checked for immediate TM commands as well */
2876 if (before(sess->exp_cmd_sn, req_hdr->cmd_sn))
2881 TRACE_EXIT_RES(res);
2885 /* Called under sn_lock, but might drop it inside, then reacquire it */
2886 static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess)
2887 __acquires(&sn_lock)
2888 __releases(&sn_lock)
2890 struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
2897 if (iscsi_is_delay_tm_resp(tm_rsp))
2900 TRACE(TRACE_MGMT_MINOR, "Sending delayed rsp %p", tm_rsp);
2902 sess->tm_rsp = NULL;
2905 spin_unlock(&sess->sn_lock);
2907 sBUG_ON(sess->tm_active < 0);
2909 iscsi_cmnd_init_write(tm_rsp,
2910 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2912 spin_lock(&sess->sn_lock);
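/*
 * Build and queue the Task Management Function Response for req. The
 * response may be stored in sess->tm_rsp and delayed if not all
 * affected commands have been received yet; a Target Cold Reset
 * additionally closes all connections of the target.
 */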
2919 static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
2921 struct iscsi_cmnd *rsp;
2922 struct iscsi_task_mgt_hdr *req_hdr =
2923 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
2924 struct iscsi_task_rsp_hdr *rsp_hdr;
2925 struct iscsi_session *sess = req->conn->session;
2926 int fn = req_hdr->function & ISCSI_FUNCTION_MASK;
2930 TRACE_MGMT_DBG("TM req %p finished", req);
2931 TRACE((req_hdr->function == ISCSI_FUNCTION_ABORT_TASK) ?
2932 TRACE_MGMT_MINOR : TRACE_MGMT,
2933 "TM fn %d finished, status %d", fn, status);
2935 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2936 rsp_hdr = (struct iscsi_task_rsp_hdr *)&rsp->pdu.bhs;
2938 rsp_hdr->opcode = ISCSI_OP_SCSI_TASK_MGT_RSP;
2939 rsp_hdr->flags = ISCSI_FLG_FINAL;
2940 rsp_hdr->itt = req_hdr->itt;
2941 rsp_hdr->response = status;
2943 if (fn == ISCSI_FUNCTION_TARGET_COLD_RESET) {
2944 rsp->should_close_conn = 1;
2945 rsp->should_close_all_conn = 1;
2948 sBUG_ON(sess->tm_rsp != NULL);
2950 spin_lock(&sess->sn_lock);
2951 if (iscsi_is_delay_tm_resp(rsp)) {
2952 TRACE(TRACE_MGMT_MINOR, "Delaying TM fn %d response %p "
2953 "(req %p), because not all affected commands received "
2954 "(TM cmd sn %u, exp sn %u)",
2955 req_hdr->function & ISCSI_FUNCTION_MASK, rsp, req,
2956 req_hdr->cmd_sn, sess->exp_cmd_sn);
2958 spin_unlock(&sess->sn_lock);
2962 spin_unlock(&sess->sn_lock);
2964 sBUG_ON(sess->tm_active < 0);
2966 iscsi_cmnd_init_write(rsp,
2967 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2970 req_cmnd_release(req);
2976 static inline int iscsi_get_mgmt_response(int status)
2979 case SCST_MGMT_STATUS_SUCCESS:
2980 return ISCSI_RESPONSE_FUNCTION_COMPLETE;
2982 case SCST_MGMT_STATUS_TASK_NOT_EXIST:
2983 return ISCSI_RESPONSE_UNKNOWN_TASK;
2985 case SCST_MGMT_STATUS_LUN_NOT_EXIST:
2986 return ISCSI_RESPONSE_UNKNOWN_LUN;
2988 case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
2989 return ISCSI_RESPONSE_FUNCTION_UNSUPPORTED;
2991 case SCST_MGMT_STATUS_REJECTED:
2992 case SCST_MGMT_STATUS_FAILED:
2994 return ISCSI_RESPONSE_FUNCTION_REJECTED;
2998 static void iscsi_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
3000 int fn = scst_mgmt_cmd_get_fn(scst_mcmd);
3001 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
3002 scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
3004 iscsi_get_mgmt_response(scst_mgmt_cmd_get_status(scst_mcmd));
3006 TRACE_MGMT_DBG("req %p, scst_mcmd %p, fn %d, scst status %d",
3007 req, scst_mcmd, fn, scst_mgmt_cmd_get_status(scst_mcmd));
3010 case SCST_NEXUS_LOSS_SESS:
3011 case SCST_ABORT_ALL_TASKS_SESS:
3012 /* They are internal */
3015 iscsi_send_task_mgmt_resp(req, status);
3016 scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
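/*
 * Deliver a SCSI AEN to the initiator as an iSCSI Async Message PDU:
 * pick an alive connection of the session, build a fake request and
 * its response and attach the AEN's sense data as the data segment.
 */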
3022 static int iscsi_scsi_aen(struct scst_aen *aen)
3024 int res = SCST_AEN_RES_SUCCESS;
3025 uint64_t lun = scst_aen_get_lun(aen);
3026 const uint8_t *sense = scst_aen_get_sense(aen);
3027 int sense_len = scst_aen_get_sense_len(aen);
3028 struct iscsi_session *sess = scst_sess_get_tgt_priv(
3029 scst_aen_get_sess(aen));
3030 struct iscsi_conn *conn;
3032 struct iscsi_cmnd *fake_req, *rsp;
3033 struct iscsi_async_msg_hdr *rsp_hdr;
3034 struct scatterlist *sg;
3038 TRACE_MGMT_DBG("SCSI AEN to sess %p (initiator %s)", sess,
3039 sess->initiator_name);
3041 mutex_lock(&sess->target->target_mutex);
3044 list_for_each_entry_reverse(conn, &sess->conn_list, conn_list_entry) {
3045 if (!test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags) &&
3046 (conn->conn_reinst_successor == NULL)) {
3052 TRACE_MGMT_DBG("Unable to find alive conn for sess %p", sess);
3056 /* Create a fake request */
3057 fake_req = cmnd_alloc(conn, NULL);
3058 if (fake_req == NULL) {
3059 PRINT_ERROR("%s", "Unable to alloc fake AEN request");
3063 mutex_unlock(&sess->target->target_mutex);
3065 rsp = iscsi_cmnd_create_rsp_cmnd(fake_req);
3067 PRINT_ERROR("%s", "Unable to alloc AEN rsp");
3068 goto out_err_free_req;
3071 fake_req->scst_state = ISCSI_CMD_STATE_AEN;
3072 fake_req->scst_aen = aen;
3074 rsp_hdr = (struct iscsi_async_msg_hdr *)&rsp->pdu.bhs;
3076 rsp_hdr->opcode = ISCSI_OP_ASYNC_MSG;
3077 rsp_hdr->flags = ISCSI_FLG_FINAL;
3078 rsp_hdr->lun = lun; /* it's already in SCSI form */
3079 rsp_hdr->ffffffff = 0xffffffff;
3080 rsp_hdr->async_event = ISCSI_ASYNC_SCSI;
3082 sg = rsp->sg = rsp->rsp_sg;
3086 sg_init_table(sg, 2);
3087 sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
3088 sg_set_buf(&sg[1], sense, sense_len);
3090 rsp->sense_hdr.length = cpu_to_be16(sense_len);
3091 rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
3092 rsp->bufflen = rsp->pdu.datasize;
3094 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_WAKE);
3096 req_cmnd_release(fake_req);
3099 TRACE_EXIT_RES(res);
3103 req_cmnd_release(fake_req);
3106 mutex_unlock(&sess->target->target_mutex);
3107 res = SCST_AEN_RES_FAILED;
3111 static int iscsi_report_aen(struct scst_aen *aen)
3114 int event_fn = scst_aen_get_event_fn(aen);
3120 res = iscsi_scsi_aen(aen);
3123 TRACE_MGMT_DBG("Unsupported AEN %d", event_fn);
3124 res = SCST_AEN_RES_NOT_SUPPORTED;
3128 TRACE_EXIT_RES(res);
3132 static int iscsi_target_detect(struct scst_tgt_template *templ)
3138 static int iscsi_target_release(struct scst_tgt *scst_tgt)
3144 struct scst_tgt_template iscsi_template = {
3146 .sg_tablesize = 0xFFFF /* no limit */,
3149 .xmit_response_atomic = 0,
3150 .detect = iscsi_target_detect,
3151 .release = iscsi_target_release,
3152 .xmit_response = iscsi_xmit_response,
3153 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3154 .alloc_data_buf = iscsi_alloc_data_buf,
3156 .preprocessing_done = iscsi_preprocessing_done,
3157 .pre_exec = iscsi_pre_exec,
3158 .task_mgmt_affected_cmds_done = iscsi_task_mgmt_affected_cmds_done,
3159 .task_mgmt_fn_done = iscsi_task_mgmt_fn_done,
3160 .report_aen = iscsi_report_aen,
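/*
 * Spawn "count" kernel threads running fn() and track them on
 * iscsi_threads_list so iscsi_stop_threads() can stop them later.
 */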
3163 static __init int iscsi_run_threads(int count, char *name, int (*fn)(void *))
3167 struct iscsi_thread_t *thr;
3169 for (i = 0; i < count; i++) {
3170 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
3173 PRINT_ERROR("Failed to allocate thr %d", res);
3176 thr->thr = kthread_run(fn, NULL, "%s%d", name, i);
3177 if (IS_ERR(thr->thr)) {
3178 res = PTR_ERR(thr->thr);
3179 PRINT_ERROR("kthread_create() failed: %d", res);
3183 list_add_tail(&thr->threads_list_entry, &iscsi_threads_list);
3190 static void iscsi_stop_threads(void)
3192 struct iscsi_thread_t *t, *tmp;
3194 list_for_each_entry_safe(t, tmp, &iscsi_threads_list,
3195 threads_list_entry) {
3196 int rc = kthread_stop(t->thr);
3198 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
3199 list_del(&t->threads_list_entry);
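/*
 * Module init: allocate the dummy page and SG entry, register the
 * zero-copy page callbacks (if available) and the control character
 * device, create the command cache, register the SCST target template
 * and finally start the read ("iscsird") and write ("iscsiwr") threads,
 * one of each per online CPU, with a minimum of two.
 */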
3205 static int __init iscsi_init(void)
3210 PRINT_INFO("iSCSI SCST Target - version %s", ISCSI_VERSION_STRING);
3212 dummy_page = alloc_pages(GFP_KERNEL, 0);
3213 if (dummy_page == NULL) {
3214 PRINT_ERROR("%s", "Dummy page allocation failed");
3218 sg_init_table(&dummy_sg, 1);
3219 sg_set_page(&dummy_sg, dummy_page, PAGE_SIZE, 0);
3221 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3222 err = net_set_get_put_page_callbacks(iscsi_get_page_callback,
3223 iscsi_put_page_callback);
3225 PRINT_INFO("Unable to set page callbacks: %d", err);
3226 goto out_free_dummy;
3230 "CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION "
3231 "not enabled in your kernel. ISCSI-SCST will be working with "
3232 "not the best performance. Refer README file for details.");
3235 ctr_major = register_chrdev(0, ctr_name, &ctr_fops);
3236 if (ctr_major < 0) {
3237 PRINT_ERROR("Failed to register the control device %d",
3247 iscsi_cmnd_cache = KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS);
3248 if (!iscsi_cmnd_cache) {
3253 err = scst_register_target_template(&iscsi_template);
3257 iscsi_template_registered = 1;
3259 #ifdef CONFIG_SCST_PROC
3260 err = iscsi_procfs_init();
3265 num = max((int)num_online_cpus(), 2);
3267 err = iscsi_run_threads(num, "iscsird", istrd);
3271 err = iscsi_run_threads(num, "iscsiwr", istwr);
3279 #ifdef CONFIG_SCST_PROC
3280 iscsi_procfs_exit();
3282 iscsi_stop_threads();
3284 #ifdef CONFIG_SCST_PROC
3287 scst_unregister_target_template(&iscsi_template);
3290 kmem_cache_destroy(iscsi_cmnd_cache);
3296 unregister_chrdev(ctr_major, ctr_name);
3299 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3300 net_set_get_put_page_callbacks(NULL, NULL);
3304 __free_pages(dummy_page, 0);
3308 static void __exit iscsi_exit(void)
3310 iscsi_stop_threads();
3312 unregister_chrdev(ctr_major, ctr_name);
3314 #ifdef CONFIG_SCST_PROC
3315 iscsi_procfs_exit();
3319 kmem_cache_destroy(iscsi_cmnd_cache);
3321 scst_unregister_target_template(&iscsi_template);
3323 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3324 net_set_get_put_page_callbacks(NULL, NULL);
3327 __free_pages(dummy_page, 0);
3331 module_init(iscsi_init);
3332 module_exit(iscsi_exit);
3334 MODULE_LICENSE("GPL");