2 * Copyright (C) 2002 - 2003 Ardis Technologies <roman@ardistech.com>
3 * Copyright (C) 2007 - 2008 Vladislav Bolkhovitin
4 * Copyright (C) 2007 - 2008 CMS Distribution Limited
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
17 #include <linux/module.h>
18 #include <linux/hash.h>
19 #include <linux/kthread.h>
20 #include <linux/scatterlist.h>
22 #include <scsi/scsi.h>
27 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
28 #warning "Patch put_page_callback-<kernel-version>.patch not applied on your\
29 kernel or CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION\
30 config option not set. ISCSI-SCST will not achieve the best\
31 performance. Refer to the README file for details."
34 #define ISCSI_INIT_WRITE_WAKE 0x1
35 #define ISCSI_INIT_WRITE_REMOVE_HASH 0x2
38 static char ctr_name[] = "iscsi-scst-ctl";
39 static int iscsi_template_registered;
41 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
42 unsigned long iscsi_trace_flag = ISCSI_DEFAULT_LOG_FLAGS;
45 static struct kmem_cache *iscsi_cmnd_cache;
47 DEFINE_SPINLOCK(iscsi_rd_lock);
48 LIST_HEAD(iscsi_rd_list);
49 DECLARE_WAIT_QUEUE_HEAD(iscsi_rd_waitQ);
51 DEFINE_SPINLOCK(iscsi_wr_lock);
52 LIST_HEAD(iscsi_wr_list);
53 DECLARE_WAIT_QUEUE_HEAD(iscsi_wr_waitQ);
55 static struct page *dummy_page;
56 static struct scatterlist dummy_sg;
/*
 * Descriptor of one iSCSI I/O kernel thread: the thread's task_struct and
 * its link into the global iscsi_threads_list.
 */
58 struct iscsi_thread_t {
59 struct task_struct *thr;
60 struct list_head threads_list_entry;
63 static LIST_HEAD(iscsi_threads_list);
65 static void cmnd_remove_hash(struct iscsi_cmnd *cmnd);
66 static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
67 static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd);
68 static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess);
69 static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd);
70 static void req_cmnd_release(struct iscsi_cmnd *req);
/*
 * Return the expected amount of write (Data-Out) data for a SCSI command:
 * the BHS data_length when the WRITE flag is set in the command's flags.
 */
72 static inline u32 cmnd_write_size(struct iscsi_cmnd *cmnd)
74 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
76 if (hdr->flags & ISCSI_CMD_WRITE)
77 return be32_to_cpu(hdr->data_length);
/*
 * Return the expected amount of read (Data-In) data for a SCSI command.
 * For a plain READ this is data_length from the BHS.  For a bidirectional
 * command (both READ and WRITE flags set) the read length instead comes
 * from the Expected Read-Data Length AHS (ISCSI_AHSTYPE_RLENGTH), which is
 * located by walking the command's AHS area.
 */
81 static inline int cmnd_read_size(struct iscsi_cmnd *cmnd)
83 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
85 if (hdr->flags & ISCSI_CMD_READ) {
86 struct iscsi_ahs_hdr *ahdr;
88 if (!(hdr->flags & ISCSI_CMD_WRITE))
89 return be32_to_cpu(hdr->data_length);
91 ahdr = (struct iscsi_ahs_hdr *)cmnd->pdu.ahs;
93 uint8_t *p = (uint8_t *)ahdr;
/* Walk the AHS entries looking for the read-length AHS. */
98 ahdr = (struct iscsi_ahs_hdr *)p;
100 if (ahdr->ahstype == ISCSI_AHSTYPE_RLENGTH) {
101 struct iscsi_rlength_ahdr *rh =
102 (struct iscsi_rlength_ahdr *)ahdr;
103 return be32_to_cpu(rh->read_length);
/* AHS entries are padded; 3 + ahslength rounds to the next entry. */
106 s = 3 + be16_to_cpu(ahdr->ahslength);
110 } while (size < cmnd->pdu.ahssize);
/*
 * Hand a command back to SCST for further processing.  If the connection
 * is being reinstated, the command is instead parked on the connection's
 * reinst_pending_cmd_list in state REINST_PENDING; otherwise it is marked
 * RESTARTED and continued via scst_restart_cmd() in thread context.
 */
117 void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd)
119 EXTRACHECKS_BUG_ON(cmnd->data_waiting);
121 if (unlikely(test_bit(ISCSI_CONN_REINSTATING,
122 &cmnd->conn->conn_aflags))) {
123 struct iscsi_target *target = cmnd->conn->session->target;
126 mutex_lock(&target->target_mutex);
/* Re-check under target_mutex: reinstatement may have finished. */
128 get_out = test_bit(ISCSI_CONN_REINSTATING,
129 &cmnd->conn->conn_aflags);
130 /* Don't look dead to the initiator */
131 if (scst_cmd_get_cdb(cmnd->scst_cmd)[0] == TEST_UNIT_READY)
137 TRACE_MGMT_DBG("Pending cmnd %p, because conn %p is "
138 "reinstated", cmnd, cmnd->conn);
140 cmnd->scst_state = ISCSI_CMD_STATE_REINST_PENDING;
141 list_add_tail(&cmnd->reinst_pending_cmd_list_entry,
142 &cmnd->conn->reinst_pending_cmd_list);
145 mutex_unlock(&target->target_mutex);
151 cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
152 scst_restart_cmd(cmnd->scst_cmd, SCST_PREPROCESS_STATUS_SUCCESS,
153 SCST_CONTEXT_THREAD);
/* Clear the data-waiting state of a command and restart it in SCST. */
159 static inline void iscsi_restart_waiting_cmnd(struct iscsi_cmnd *cmnd)
162 * There is no race with conn_abort(), since all these functions
163 * are called from the single read thread
165 iscsi_extracheck_is_rd_thread(cmnd->conn);
166 cmnd->data_waiting = 0;
168 iscsi_restart_cmnd(cmnd);
/*
 * Fail a command that was waiting for write data: clear the waiting state
 * and force-release the request together with its queued write responses.
 */
172 static inline void iscsi_fail_waiting_cmnd(struct iscsi_cmnd *cmnd)
174 TRACE_MGMT_DBG("Failing data waiting cmd %p", cmnd);
177 * There is no race with conn_abort(), since all these functions
178 * are called from the single read thread
180 iscsi_extracheck_is_rd_thread(cmnd->conn);
181 cmnd->data_waiting = 0;
183 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
/*
 * Allocate and initialize an iscsi_cmnd from the command cache.
 * @conn:   owning connection
 * @parent: parent request for a response command, or NULL for a request.
 * A request (parent == NULL) additionally gets its response/digest lists
 * initialized and is added to the connection's cmd_list.  The command
 * starts with ref_cnt == 1 and state ISCSI_CMD_STATE_NEW.
 */
186 struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *conn,
187 struct iscsi_cmnd *parent)
189 struct iscsi_cmnd *cmnd;
191 /* ToDo: __GFP_NOFAIL?? */
192 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
193 cmnd = kmem_cache_alloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
194 memset(cmnd, 0, sizeof(*cmnd));
196 cmnd = kmem_cache_zalloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
199 atomic_set(&cmnd->ref_cnt, 1);
200 cmnd->scst_state = ISCSI_CMD_STATE_NEW;
202 cmnd->parent_req = parent;
203 init_waitqueue_head(&cmnd->scst_waitQ);
205 if (parent == NULL) {
208 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
209 atomic_set(&cmnd->net_ref_cnt, 0);
211 spin_lock_init(&cmnd->rsp_cmd_lock);
212 INIT_LIST_HEAD(&cmnd->rsp_cmd_list);
213 INIT_LIST_HEAD(&cmnd->rx_ddigest_cmd_list);
215 spin_lock_bh(&conn->cmd_list_lock);
216 list_add_tail(&cmnd->cmd_list_entry, &conn->cmd_list);
217 spin_unlock_bh(&conn->cmd_list_lock);
220 TRACE_DBG("conn %p, parent %p, cmnd %p", conn, parent, cmnd);
/*
 * Frees a command. Also frees the additional header (pdu.ahs).
 * The command must have no remaining references and must not be linked
 * on any write/written list; both conditions are sanity-checked below.
 */
225 static void cmnd_free(struct iscsi_cmnd *cmnd)
227 TRACE_DBG("%p", cmnd);
229 if (unlikely(cmnd->tm_aborted)) {
230 TRACE_MGMT_DBG("Free aborted cmd %p (scst cmd %p, state %d, "
231 "parent_req %p)", cmnd, cmnd->scst_cmd,
232 cmnd->scst_state, cmnd->parent_req);
235 /* Catch users from cmd_list or rsp_cmd_list */
236 EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) != 0);
238 kfree(cmnd->pdu.ahs);
240 if (unlikely(cmnd->on_write_list || cmnd->on_written_list)) {
241 struct iscsi_scsi_cmd_hdr *req = cmnd_hdr(cmnd);
243 PRINT_CRIT_ERROR("cmnd %p still on some list?, %x, %x, %x, "
244 "%x, %x, %x, %x", cmnd, req->opcode, req->scb[0],
245 req->flags, req->itt, be32_to_cpu(req->data_length),
246 req->cmd_sn, be32_to_cpu(cmnd->pdu.datasize));
248 if (unlikely(cmnd->parent_req)) {
249 struct iscsi_scsi_cmd_hdr *preq =
250 cmnd_hdr(cmnd->parent_req);
251 PRINT_CRIT_ERROR("%p %x %u", preq, preq->opcode,
257 kmem_cache_free(iscsi_cmnd_cache, cmnd);
261 * Might be called under some lock and on SIRQ
262 void cmnd_done(struct iscsi_cmnd *cmnd)
264 TRACE_DBG("%p", cmnd);
266 if (unlikely(cmnd->tm_aborted)) {
267 TRACE_MGMT_DBG("Done aborted cmd %p (scst cmd %p, state %d, "
268 "parent_req %p)", cmnd, cmnd->scst_cmd,
269 cmnd->scst_state, cmnd->parent_req);
272 EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
/* Unlink from the connection's written list, if linked. */
274 if (cmnd->on_written_list) {
275 struct iscsi_conn *conn = cmnd->conn;
276 TRACE_DBG("Deleting cmd %p from conn %p written_list", cmnd,
278 spin_lock_bh(&conn->write_list_lock);
279 list_del(&cmnd->written_list_entry);
280 cmnd->on_written_list = 0;
281 spin_unlock_bh(&conn->write_list_lock);
/* A request (no parent) is unlinked from the connection's cmd_list. */
284 if (cmnd->parent_req == NULL) {
285 struct iscsi_conn *conn = cmnd->conn;
286 TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);
288 spin_lock_bh(&conn->cmd_list_lock);
289 list_del(&cmnd->cmd_list_entry);
290 spin_unlock_bh(&conn->cmd_list_lock);
294 EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rsp_cmd_list));
295 EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rx_ddigest_cmd_list));
297 /* Order between above and below code is important! */
/* Finish or restart the associated SCST command/AEN per scst_state. */
299 if ((cmnd->scst_cmd != NULL) || (cmnd->scst_aen != NULL)) {
300 switch (cmnd->scst_state) {
301 case ISCSI_CMD_STATE_PROCESSED:
302 TRACE_DBG("cmd %p PROCESSED", cmnd);
303 scst_tgt_cmd_done(cmnd->scst_cmd,
304 SCST_CONTEXT_DIRECT);
307 case ISCSI_CMD_STATE_AFTER_PREPROC:
309 struct scst_cmd *scst_cmd = cmnd->scst_cmd;
310 TRACE_DBG("cmd %p AFTER_PREPROC", cmnd);
311 cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
312 cmnd->scst_cmd = NULL;
313 scst_restart_cmd(scst_cmd,
314 SCST_PREPROCESS_STATUS_ERROR_FATAL,
315 SCST_CONTEXT_THREAD);
319 case ISCSI_CMD_STATE_AEN:
320 TRACE_DBG("cmd %p AEN PROCESSED", cmnd);
321 scst_aen_done(cmnd->scst_aen);
325 PRINT_CRIT_ERROR("Unexpected cmnd scst state "
326 "%d", cmnd->scst_state);
/* A response is removed from its parent's rsp list and drops the
 * reference it held on the parent request. */
332 TRACE_DBG("Deleting rsp %p from parent %p", cmnd,
335 spin_lock_bh(&cmnd->parent_req->rsp_cmd_lock);
336 list_del(&cmnd->rsp_cmd_list_entry);
337 spin_unlock_bh(&cmnd->parent_req->rsp_cmd_lock);
339 cmnd_put(cmnd->parent_req);
342 /* Order between above and below code is important! */
/* Free an owned scatterlist, unless it is the shared dummy_sg or the
 * command's embedded rsp_sg. */
345 TRACE_DBG("%s", "own_sg");
346 if ((cmnd->sg != &dummy_sg) && (cmnd->sg != cmnd->rsp_sg))
347 scst_free(cmnd->sg, cmnd->sg_cnt);
348 #ifdef CONFIG_SCST_DEBUG
/* Drop this command's contribution to the session's active count. */
355 if (cmnd->dec_active_cmnds) {
356 struct iscsi_session *sess = cmnd->conn->session;
357 TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
358 "new value %d)", cmnd, sess,
359 atomic_read(&sess->active_cmds)-1);
360 atomic_dec(&sess->active_cmds);
361 #ifdef CONFIG_SCST_EXTRACHECKS
362 if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
363 PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
364 atomic_read(&sess->active_cmds));
375 * The corresponding conn may also get destroyed after this function,
376 * except if it's called from the read thread!
378 * It can't be called in parallel with iscsi_cmnds_init_write()!
380 void req_cmnd_release_force(struct iscsi_cmnd *req, int flags)
382 struct iscsi_cmnd *rsp, *t;
383 struct iscsi_conn *conn = req->conn;
384 LIST_HEAD(cmds_list);
388 TRACE_MGMT_DBG("%p", req);
390 sBUG_ON(req == conn->read_cmnd);
/* Optionally pull this request's responses off the conn write list
 * and put them, so they are never transmitted. */
392 if (flags & ISCSI_FORCE_RELEASE_WRITE) {
393 spin_lock_bh(&conn->write_list_lock);
394 list_for_each_entry_safe(rsp, t, &conn->write_list,
396 if (rsp->parent_req != req)
399 cmd_del_from_write_list(rsp);
401 list_add_tail(&rsp->write_list_entry, &cmds_list);
403 spin_unlock_bh(&conn->write_list_lock);
405 list_for_each_entry_safe(rsp, t, &cmds_list,
407 TRACE_MGMT_DBG("Putting write rsp %p", rsp);
408 list_del(&rsp->write_list_entry);
/* Walk remaining responses (newest first) and force-clean each once. */
414 spin_lock_bh(&req->rsp_cmd_lock);
415 list_for_each_entry_reverse(rsp, &req->rsp_cmd_list,
416 rsp_cmd_list_entry) {
419 if (rsp->force_cleanup_done)
422 rsp->force_cleanup_done = 1;
424 if (cmnd_get_check(rsp))
427 spin_unlock_bh(&req->rsp_cmd_lock);
429 spin_lock_bh(&conn->write_list_lock);
430 r = rsp->on_write_list || rsp->write_processing_started;
431 spin_unlock_bh(&conn->write_list_lock);
439 * If both on_write_list and write_processing_started not set,
440 * we can safely put() rsp.
442 TRACE_MGMT_DBG("Putting rsp %p", rsp);
446 spin_unlock_bh(&req->rsp_cmd_lock);
448 req_cmnd_release(req);
455 * The corresponding conn may also get destroyed after this function,
456 * except if it's called from the read thread!
458 static void req_cmnd_release(struct iscsi_cmnd *req)
460 struct iscsi_cmnd *c, *t;
464 TRACE_DBG("%p", req);
466 #ifdef CONFIG_SCST_EXTRACHECKS
467 sBUG_ON(req->release_called);
468 req->release_called = 1;
471 if (unlikely(req->tm_aborted)) {
472 TRACE_MGMT_DBG("Release aborted req cmd %p (scst cmd %p, "
473 "state %d)", req, req->scst_cmd, req->scst_state);
/* Only requests (never responses) may be released through here. */
476 sBUG_ON(req->parent_req != NULL);
/* Drop any pending RX data-digest commands held by this request. */
478 list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
479 rx_ddigest_cmd_list_entry) {
480 cmd_del_from_rx_ddigest_list(c);
485 cmnd_remove_hash(req);
/* Drop this request's contribution to the session's active count. */
487 if (req->dec_active_cmnds) {
488 struct iscsi_session *sess = req->conn->session;
489 TRACE_DBG("Decrementing active_cmds (cmd %p, sess %p, "
490 "new value %d)", req, sess,
491 atomic_read(&sess->active_cmds)-1);
492 atomic_dec(&sess->active_cmds);
493 req->dec_active_cmnds = 0;
494 #ifdef CONFIG_SCST_EXTRACHECKS
495 if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
496 PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
497 atomic_read(&sess->active_cmds));
510 * The corresponding conn may also get destroyed after this function,
511 * except if it's called from the read thread!
513 void rsp_cmnd_release(struct iscsi_cmnd *cmnd)
515 TRACE_DBG("%p", cmnd);
517 #ifdef CONFIG_SCST_EXTRACHECKS
518 sBUG_ON(cmnd->release_called);
519 cmnd->release_called = 1;
/* Only responses (never hashed requests) may be released here. */
522 sBUG_ON(cmnd->hashed);
523 sBUG_ON(cmnd->parent_req == NULL);
530 * Create a new command used as a response.
532 * iscsi_cmnd_create_rsp_cmnd -
533 * @parent: ptr to request command
535 * @return ptr to response command, linked onto the parent's rsp_cmd_list
537 static struct iscsi_cmnd *iscsi_cmnd_create_rsp_cmnd(struct iscsi_cmnd *parent)
539 struct iscsi_cmnd *rsp;
541 rsp = cmnd_alloc(parent->conn, parent);
543 spin_lock_bh(&parent->rsp_cmd_lock);
544 TRACE_DBG("Adding rsp %p to parent %p", rsp, parent);
545 list_add_tail(&rsp->rsp_cmd_list_entry, &parent->rsp_cmd_list);
546 spin_unlock_bh(&parent->rsp_cmd_lock);
/*
 * Return the most recently added response of @req (tail of its
 * rsp_cmd_list), or NULL if the request has no responses.
 */
551 static inline struct iscsi_cmnd *get_rsp_cmnd(struct iscsi_cmnd *req)
553 struct iscsi_cmnd *res = NULL;
555 /* Currently this lock isn't needed, but just in case.. */
556 spin_lock_bh(&req->rsp_cmd_lock);
557 if (!list_empty(&req->rsp_cmd_list)) {
558 res = list_entry(req->rsp_cmd_list.prev, struct iscsi_cmnd,
561 spin_unlock_bh(&req->rsp_cmd_lock);
/*
 * Queue a list of response commands for transmission: optionally unhash
 * the parent request, compute data digests where enabled, then move every
 * response from @send onto the connection's write list and, if requested
 * by @flags, wake the write thread.
 */
566 static void iscsi_cmnds_init_write(struct list_head *send, int flags)
568 struct iscsi_cmnd *rsp = list_entry(send->next, struct iscsi_cmnd,
570 struct iscsi_conn *conn = rsp->conn;
571 struct list_head *pos, *next;
573 sBUG_ON(list_empty(send));
576 * If we don't remove the hashed req cmd from the hash list here, before
577 * submitting it for transmission, we will have a race, when for
578 * some reason cmd's release is delayed after transmission and the
579 * initiator sends a cmd with the same ITT => this command will be
580 * erroneously rejected as a duplicate.
582 if ((flags & ISCSI_INIT_WRITE_REMOVE_HASH) &&
583 rsp->parent_req->hashed &&
584 (rsp->parent_req->r2t_length == 0) &&
585 (rsp->parent_req->outstanding_r2t == 0))
586 cmnd_remove_hash(rsp->parent_req);
588 if (!(conn->ddigest_type & DIGEST_NONE)) {
589 list_for_each(pos, send) {
590 rsp = list_entry(pos, struct iscsi_cmnd,
593 if (rsp->pdu.datasize != 0) {
594 TRACE_DBG("Doing data digest (%p:%x)", rsp,
601 spin_lock_bh(&conn->write_list_lock);
602 list_for_each_safe(pos, next, send) {
603 rsp = list_entry(pos, struct iscsi_cmnd, write_list_entry);
605 TRACE_DBG("%p:%x", rsp, cmnd_opcode(rsp));
607 sBUG_ON(conn != rsp->conn);
609 list_del(&rsp->write_list_entry);
610 cmd_add_on_write_list(conn, rsp);
612 spin_unlock_bh(&conn->write_list_lock);
614 if (flags & ISCSI_INIT_WRITE_WAKE)
615 iscsi_make_conn_wr_active(conn);
/*
 * Queue a single response for transmission: sanity-check it is not
 * already on a write list, then submit it via iscsi_cmnds_init_write()
 * as a one-element list.
 */
620 static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags)
624 if (unlikely(rsp->on_write_list)) {
625 PRINT_CRIT_ERROR("cmd already on write list (%x %x %x %x %u "
626 "%u %u %u %u %u %u %d %d",
627 cmnd_itt(rsp), cmnd_ttt(rsp), cmnd_opcode(rsp),
628 cmnd_scsicode(rsp), rsp->r2t_sn,
629 rsp->r2t_length, rsp->is_unsolicited_data,
630 rsp->target_task_tag, rsp->outstanding_r2t,
631 rsp->hdigest, rsp->ddigest,
632 list_empty(&rsp->rsp_cmd_list), rsp->hashed);
635 list_add_tail(&rsp->write_list_entry, &head);
636 iscsi_cmnds_init_write(&head, flags);
/*
 * Build and queue Data-In responses carrying the read data of @req,
 * split into PDUs of at most the session's max_xmit_data_length.  The
 * last PDU may carry the SCSI status (with residual over/underflow
 * accounting) when @send_status is set.
 */
640 static void send_data_rsp(struct iscsi_cmnd *req, u8 status, int send_status)
642 struct iscsi_cmnd *rsp;
643 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
644 struct iscsi_data_in_hdr *rsp_hdr;
645 u32 pdusize, expsize, size, offset, sn;
648 TRACE_DBG("req %p", req);
650 pdusize = req->conn->session->sess_param.max_xmit_data_length;
651 expsize = req->read_size;
652 size = min(expsize, (u32)req->bufflen);
/* One Data-In response per PDU-sized chunk of the buffer. */
657 rsp = iscsi_cmnd_create_rsp_cmnd(req);
658 TRACE_DBG("rsp %p", rsp);
/* The response shares the request's data buffer. */
660 rsp->sg_cnt = req->sg_cnt;
661 rsp->bufflen = req->bufflen;
662 rsp_hdr = (struct iscsi_data_in_hdr *)&rsp->pdu.bhs;
664 rsp_hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
665 rsp_hdr->itt = req_hdr->itt;
666 rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
667 rsp_hdr->buffer_offset = cpu_to_be32(offset);
668 rsp_hdr->data_sn = cpu_to_be32(sn);
670 if (size <= pdusize) {
/* Final chunk: optionally piggy-back status and residuals. */
671 TRACE_DBG("offset %d, size %d", offset, size);
672 rsp->pdu.datasize = size;
676 TRACE_DBG("status %x", status);
678 EXTRACHECKS_BUG_ON((cmnd_hdr(req)->flags & ISCSI_CMD_WRITE) != 0);
680 rsp_hdr->flags = ISCSI_FLG_FINAL | ISCSI_FLG_STATUS;
681 rsp_hdr->cmd_status = status;
683 scsisize = req->bufflen;
684 if (scsisize < expsize) {
685 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
686 size = expsize - scsisize;
687 } else if (scsisize > expsize) {
688 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
689 size = scsisize - expsize;
692 rsp_hdr->residual_count = cpu_to_be32(size);
694 list_add_tail(&rsp->write_list_entry, &send);
698 TRACE_DBG("pdusize %d, offset %d, size %d", pdusize, offset,
701 rsp->pdu.datasize = pdusize;
707 list_add_tail(&rsp->write_list_entry, &send);
709 iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_REMOVE_HASH);
/*
 * Build a SCSI Response PDU for @req carrying @status and, when the
 * sense buffer is valid, the sense data (length-prefixed via the
 * command's embedded sense_hdr/rsp_sg).  The response is created but
 * not queued; callers submit it separately.
 */
713 static struct iscsi_cmnd *create_status_rsp(struct iscsi_cmnd *req, int status,
714 const u8 *sense_buf, int sense_len)
716 struct iscsi_cmnd *rsp;
717 struct iscsi_scsi_rsp_hdr *rsp_hdr;
718 struct scatterlist *sg;
720 rsp = iscsi_cmnd_create_rsp_cmnd(req);
721 TRACE_DBG("%p", rsp);
723 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
724 rsp_hdr->opcode = ISCSI_OP_SCSI_RSP;
725 rsp_hdr->flags = ISCSI_FLG_FINAL;
726 rsp_hdr->response = ISCSI_RESPONSE_COMMAND_COMPLETED;
727 rsp_hdr->cmd_status = status;
728 rsp_hdr->itt = cmnd_hdr(req)->itt;
730 if (SCST_SENSE_VALID(sense_buf)) {
731 TRACE_DBG("%s", "SENSE VALID");
733 sg = rsp->sg = rsp->rsp_sg;
/* Two-element sg: 2-byte sense length header, then the sense data. */
737 sg_init_table(sg, 2);
738 sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
739 sg_set_buf(&sg[1], sense_buf, sense_len);
741 rsp->sense_hdr.length = cpu_to_be16(sense_len);
743 rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
744 rsp->bufflen = rsp->pdu.datasize;
746 rsp->pdu.datasize = 0;
/*
 * Reject a request PDU: build a Reject response whose data segment
 * echoes the offending BHS, queue it for transmission, and arrange for
 * the request's remaining data to be read and discarded.
 */
753 static void iscsi_cmnd_reject(struct iscsi_cmnd *req, int reason)
755 struct iscsi_cmnd *rsp;
756 struct iscsi_reject_hdr *rsp_hdr;
757 struct scatterlist *sg;
759 TRACE_MGMT_DBG("Reject: req %p, reason %x", req, reason);
761 sBUG_ON(req->rejected);
763 req->reject_reason = ISCSI_REJECT_CMD;
765 rsp = iscsi_cmnd_create_rsp_cmnd(req);
766 rsp_hdr = (struct iscsi_reject_hdr *)&rsp->pdu.bhs;
768 rsp_hdr->opcode = ISCSI_OP_REJECT;
769 rsp_hdr->ffffffff = ISCSI_RESERVED_TAG;
770 rsp_hdr->reason = reason;
772 sg = rsp->sg = rsp->rsp_sg;
/* The reject payload is the rejected PDU's basic header segment. */
775 sg_init_one(sg, &req->pdu.bhs, sizeof(struct iscsi_hdr));
776 rsp->bufflen = rsp->pdu.datasize = sizeof(struct iscsi_hdr);
778 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
779 ISCSI_INIT_WRITE_WAKE);
781 cmnd_prepare_get_rejected_cmd_data(req);
/*
 * Number of additional commands the session may queue (used for the
 * MaxCmdSN window), clamped below at -1.
 */
785 static inline int iscsi_get_allowed_cmds(struct iscsi_session *sess)
787 int res = max(-1, (int)sess->max_queued_cmnds -
788 atomic_read(&sess->active_cmds)-1);
789 TRACE_DBG("allowed cmds %d (sess %p, active_cmds %d)", res,
790 sess, atomic_read(&sess->active_cmds));
/*
 * Fill the response PDU's StatSN (optionally advancing conn->stat_sn),
 * ExpCmdSN and MaxCmdSN fields under the session's sn_lock, and return
 * the connection's StatSN in wire (big-endian) order.
 */
794 static u32 cmnd_set_sn(struct iscsi_cmnd *cmnd, int set_stat_sn)
796 struct iscsi_conn *conn = cmnd->conn;
797 struct iscsi_session *sess = conn->session;
800 spin_lock(&sess->sn_lock);
803 cmnd->pdu.bhs.sn = cpu_to_be32(conn->stat_sn++);
804 cmnd->pdu.bhs.exp_sn = cpu_to_be32(sess->exp_cmd_sn);
805 cmnd->pdu.bhs.max_sn = cpu_to_be32(sess->exp_cmd_sn +
806 iscsi_get_allowed_cmds(sess));
808 res = cpu_to_be32(conn->stat_sn);
810 spin_unlock(&sess->sn_lock);
814 /* Called under sn_lock */
/*
 * Record the initiator's ExpStatSN from an incoming PDU.  The value is
 * accepted only if it moves forward and does not pass the StatSN we have
 * issued (serial-number arithmetic).  Also converts the header field to
 * host order in place.
 */
815 static void __update_stat_sn(struct iscsi_cmnd *cmnd)
817 struct iscsi_conn *conn = cmnd->conn;
820 cmnd->pdu.bhs.exp_sn = exp_stat_sn = be32_to_cpu(cmnd->pdu.bhs.exp_sn);
821 TRACE_DBG("%x,%x", cmnd_opcode(cmnd), exp_stat_sn);
822 if ((int)(exp_stat_sn - conn->exp_stat_sn) > 0 &&
823 (int)(exp_stat_sn - conn->stat_sn) <= 0) {
824 /* free pdu resources */
825 cmnd->conn->exp_stat_sn = exp_stat_sn;
/* Locked wrapper around __update_stat_sn() taking the session sn_lock. */
830 static inline void update_stat_sn(struct iscsi_cmnd *cmnd)
832 spin_lock(&cmnd->conn->session->sn_lock);
833 __update_stat_sn(cmnd);
834 spin_unlock(&cmnd->conn->session->sn_lock);
838 /* Called under sn_lock */
/*
 * Validate an incoming CmdSN against the session's ExpCmdSN (serial
 * arithmetic); also converts the header field to host order in place.
 * Returns negative -ISCSI_REASON_PROTOCOL_ERROR on a sequence error.
 */
839 static int check_cmd_sn(struct iscsi_cmnd *cmnd)
841 struct iscsi_session *session = cmnd->conn->session;
844 cmnd->pdu.bhs.sn = cmd_sn = be32_to_cpu(cmnd->pdu.bhs.sn);
845 TRACE_DBG("%d(%d)", cmd_sn, session->exp_cmd_sn);
846 if (likely((s32)(cmd_sn - session->exp_cmd_sn) >= 0))
848 PRINT_ERROR("sequence error (%x,%x)", cmd_sn, session->exp_cmd_sn);
849 return -ISCSI_REASON_PROTOCOL_ERROR;
/*
 * Look up a command by ITT (and, unless @ttt is ISCSI_RESERVED_TAG, by
 * target task tag too) in the session's command hash.  Caller holds
 * cmnd_hash_lock.
 */
852 static inline struct iscsi_cmnd *__cmnd_find_hash(
853 struct iscsi_session *session, u32 itt, u32 ttt)
855 struct list_head *head;
856 struct iscsi_cmnd *cmnd;
858 head = &session->cmnd_hash[cmnd_hashfn(itt)];
860 list_for_each_entry(cmnd, head, hash_list_entry) {
861 if (cmnd->pdu.bhs.itt == itt) {
862 if (ttt != ISCSI_RESERVED_TAG &&
863 ttt != cmnd->target_task_tag)
/* Locked lookup wrapper: find a hashed command by ITT/TTT. */
871 static struct iscsi_cmnd *cmnd_find_hash(struct iscsi_session *session,
874 struct iscsi_cmnd *cmnd;
876 spin_lock(&session->cmnd_hash_lock);
877 cmnd = __cmnd_find_hash(session, itt, ttt);
878 spin_unlock(&session->cmnd_hash_lock);
/*
 * Like cmnd_find_hash(), but also takes a reference on the found command
 * (via cmnd_get_check()) while still holding the hash lock.
 */
883 static struct iscsi_cmnd *cmnd_find_hash_get(struct iscsi_session *session,
886 struct iscsi_cmnd *cmnd;
888 spin_lock(&session->cmnd_hash_lock);
889 cmnd = __cmnd_find_hash(session, itt, ttt);
891 if (unlikely(cmnd_get_check(cmnd)))
894 spin_unlock(&session->cmnd_hash_lock);
/*
 * Insert a request into the session's command hash by its ITT, then
 * update StatSN bookkeeping and validate its CmdSN under sn_lock.
 * Fails with a negative iSCSI reason code for a reserved ITT or a
 * duplicate task in progress.
 */
899 static int cmnd_insert_hash(struct iscsi_cmnd *cmnd)
901 struct iscsi_session *session = cmnd->conn->session;
902 struct iscsi_cmnd *tmp;
903 struct list_head *head;
905 u32 itt = cmnd->pdu.bhs.itt;
907 TRACE_DBG("%p:%x", cmnd, itt);
908 if (unlikely(itt == ISCSI_RESERVED_TAG)) {
909 PRINT_ERROR("%s", "ITT is RESERVED_TAG");
910 PRINT_BUFFER("Incorrect BHS", &cmnd->pdu.bhs,
911 sizeof(cmnd->pdu.bhs));
912 err = -ISCSI_REASON_PROTOCOL_ERROR;
916 spin_lock(&session->cmnd_hash_lock);
918 head = &session->cmnd_hash[cmnd_hashfn(cmnd->pdu.bhs.itt)];
920 tmp = __cmnd_find_hash(session, itt, ISCSI_RESERVED_TAG);
922 list_add_tail(&cmnd->hash_list_entry, head);
925 PRINT_ERROR("Task %x in progress, cmnd %p", itt, cmnd);
926 err = -ISCSI_REASON_TASK_IN_PROGRESS;
929 spin_unlock(&session->cmnd_hash_lock);
932 spin_lock(&session->sn_lock);
933 __update_stat_sn(cmnd);
934 err = check_cmd_sn(cmnd);
935 spin_unlock(&session->sn_lock);
/*
 * Remove a command from the session's command hash, verifying the hash
 * actually contains this exact command for its ITT.
 */
942 static void cmnd_remove_hash(struct iscsi_cmnd *cmnd)
944 struct iscsi_session *session = cmnd->conn->session;
945 struct iscsi_cmnd *tmp;
947 spin_lock(&session->cmnd_hash_lock);
949 tmp = __cmnd_find_hash(session, cmnd->pdu.bhs.itt, ISCSI_RESERVED_TAG);
951 if (likely(tmp && tmp == cmnd)) {
952 list_del(&cmnd->hash_list_entry);
955 PRINT_ERROR("%p:%x not found", cmnd, cmnd_itt(cmnd));
958 spin_unlock(&session->cmnd_hash_lock);
/*
 * Set up the connection's read iovecs so the data segment of a rejected
 * command is received and discarded.  If the command has no buffer of
 * its own, the shared dummy_sg/dummy_page sink is used.
 */
962 static void cmnd_prepare_get_rejected_cmd_data(struct iscsi_cmnd *cmnd)
964 struct iscsi_conn *conn = cmnd->conn;
965 struct scatterlist *sg = cmnd->sg;
970 TRACE_MGMT_DBG("Skipping (%p, %x %x %x %u, %p, scst state %d)", cmnd,
971 cmnd_itt(cmnd), cmnd_opcode(cmnd), cmnd_hdr(cmnd)->scb[0],
972 cmnd->pdu.datasize, cmnd->scst_cmd, cmnd->scst_state);
974 iscsi_extracheck_is_rd_thread(conn);
976 size = cmnd->pdu.datasize;
982 * There are no problems with the safety from concurrent
983 * accesses to dummy_page in dummy_sg, since the data
984 * will only be read and then discarded.
986 sg = cmnd->sg = &dummy_sg;
987 cmnd->bufflen = PAGE_SIZE;
991 addr = (char __force __user *)(page_address(sg_page(&sg[0])));
992 sBUG_ON(addr == NULL);
993 conn->read_size = size;
/* Fill iovecs in bufflen-sized pieces; the last one takes the rest. */
994 for (i = 0; size > PAGE_SIZE; i++, size -= cmnd->bufflen) {
995 /* We already checked pdu.datasize in check_segment_length() */
996 sBUG_ON(i >= ISCSI_CONN_IOV_MAX);
997 conn->read_iov[i].iov_base = addr;
998 conn->read_iov[i].iov_len = cmnd->bufflen;
1000 conn->read_iov[i].iov_base = addr;
1001 conn->read_iov[i].iov_len = size;
1002 conn->read_msg.msg_iov = conn->read_iov;
1003 conn->read_msg.msg_iovlen = ++i;
/*
 * Set residual over/underflow flags and counts in the SCSI Response
 * header of @rsp, comparing the expected transfer lengths from @req
 * against what was actually transferred.  Bidirectional commands get
 * both the regular (write) and the bidirectional-read residual fields.
 */
1008 static void iscsi_set_resid(struct iscsi_cmnd *req, struct iscsi_cmnd *rsp,
1011 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
1012 struct iscsi_scsi_rsp_hdr *rsp_hdr;
1013 int resid, resp_len, in_resp_len;
1015 if ((req_hdr->flags & ISCSI_CMD_READ) &&
1016 (req_hdr->flags & ISCSI_CMD_WRITE)) {
1017 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
1020 resp_len = req->bufflen;
1021 if (req->scst_cmd != NULL)
1022 in_resp_len = scst_cmd_get_in_bufflen(req->scst_cmd);
/* Write-direction residual for the bidirectional command. */
1030 resid = be32_to_cpu(req_hdr->data_length) - in_resp_len;
1032 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
1033 rsp_hdr->residual_count = cpu_to_be32(resid);
1034 } else if (resid < 0) {
1036 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
1037 rsp_hdr->residual_count = cpu_to_be32(resid);
/* Read-direction (bidirectional) residual. */
1040 resid = req->read_size - resp_len;
1042 rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
1043 rsp_hdr->bi_residual_count = cpu_to_be32(resid);
1044 } else if (resid < 0) {
1046 rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_OVERFLOW;
1047 rsp_hdr->bi_residual_count = cpu_to_be32(resid);
/* Unidirectional command: single residual against read_size. */
1051 resp_len = req->bufflen;
1055 resid = req->read_size - resp_len;
1057 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
1058 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
1059 rsp_hdr->residual_count = cpu_to_be32(resid);
1060 } else if (resid < 0) {
1061 rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
1063 rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
1064 rsp_hdr->residual_count = cpu_to_be32(resid);
/*
 * Reject a SCSI command after a response has already been created:
 * mark the request rejected, finalize residuals on the existing SCSI
 * Response, queue it, and discard the request's remaining data.
 */
1070 static void cmnd_reject_scsi_cmd(struct iscsi_cmnd *req)
1072 struct iscsi_cmnd *rsp;
1074 TRACE_DBG("%p", req);
1076 sBUG_ON(req->rejected);
1078 req->reject_reason = ISCSI_REJECT_SCSI_CMD;
1080 rsp = get_rsp_cmnd(req);
1082 /* That can be true for aborted commands */
1086 sBUG_ON(cmnd_opcode(rsp) != ISCSI_OP_SCSI_RSP);
1088 iscsi_set_resid(req, rsp, false);
1090 iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH |
1091 ISCSI_INIT_WRITE_WAKE);
1094 cmnd_prepare_get_rejected_cmd_data(req);
/*
 * Map @size bytes at @offset of @cmd's scatterlist into the connection's
 * read iovec array so the incoming data segment lands directly in the
 * command buffer.  Closes the connection on an out-of-bounds request or
 * when the initiator would need more than ISCSI_CONN_IOV_MAX iovecs.
 */
1098 static int cmnd_prepare_recv_pdu(struct iscsi_conn *conn,
1099 struct iscsi_cmnd *cmd, u32 offset, u32 size)
1101 struct scatterlist *sg = cmd->sg;
1102 unsigned int bufflen = cmd->bufflen;
1103 unsigned int idx, i;
1107 TRACE_DBG("%p %u,%u", cmd->sg, offset, size);
1109 iscsi_extracheck_is_rd_thread(conn);
1111 if (unlikely((offset >= bufflen) ||
1112 (offset + size > bufflen))) {
1113 PRINT_ERROR("Wrong ltn (%u %u %u)", offset, size, bufflen);
1114 mark_conn_closed(conn);
/* Translate the buffer offset into a page index + in-page offset. */
1119 offset += sg[0].offset;
1120 idx = offset >> PAGE_SHIFT;
1121 offset &= ~PAGE_MASK;
1123 conn->read_msg.msg_iov = conn->read_iov;
1124 conn->read_size = size;
1128 addr = (char __force __user *)(page_address(sg_page(&sg[idx])));
1129 sBUG_ON(addr == NULL);
1130 conn->read_iov[i].iov_base = addr + offset;
1131 if (offset + size <= PAGE_SIZE) {
1132 TRACE_DBG("idx=%d, offset=%u, size=%d, addr=%p",
1133 idx, offset, size, addr);
1134 conn->read_iov[i].iov_len = size;
1135 conn->read_msg.msg_iovlen = ++i;
1138 conn->read_iov[i].iov_len = PAGE_SIZE - offset;
1139 TRACE_DBG("idx=%d, offset=%u, size=%d, iov_len=%zd, addr=%p",
1140 idx, offset, size, conn->read_iov[i].iov_len, addr);
1141 size -= conn->read_iov[i].iov_len;
1142 if (unlikely(++i >= ISCSI_CONN_IOV_MAX)) {
1143 PRINT_ERROR("Initiator %s violated negotiated "
1144 "parameters by sending too much data (size "
1145 "left %d)", conn->session->initiator_name,
1147 mark_conn_closed(conn);
1152 offset = sg[idx].offset;
1154 TRACE_DBG("msg_iov=%p, msg_iovlen=%zd",
1155 conn->read_msg.msg_iov, conn->read_msg.msg_iovlen);
/*
 * Build and queue R2T PDUs requesting the remaining write data of @req,
 * each bounded by the negotiated MaxBurstLength, until either all data
 * has been solicited or MaxOutstandingR2T is reached.  An aborted
 * request with no outstanding R2Ts is failed instead.
 */
1161 static void send_r2t(struct iscsi_cmnd *req)
1163 struct iscsi_session *session = req->conn->session;
1164 struct iscsi_cmnd *rsp;
1165 struct iscsi_r2t_hdr *rsp_hdr;
1169 if (unlikely(req->tm_aborted)) {
1170 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted on R2T "
1171 "(r2t_length %d, outstanding_r2t %d)", req,
1172 req->scst_cmd, req->r2t_length, req->outstanding_r2t);
1173 if (req->outstanding_r2t == 0)
1174 iscsi_fail_waiting_cmnd(req);
1179 * There is no race with data_out_start() and conn_abort(), since
1180 * all functions are called from the single read thread
1182 iscsi_extracheck_is_rd_thread(req->conn);
1184 burst = session->sess_param.max_burst_length;
/* Offset of the first still-unsolicited byte. */
1185 offset = be32_to_cpu(cmnd_hdr(req)->data_length) - req->r2t_length;
1188 rsp = iscsi_cmnd_create_rsp_cmnd(req);
1189 rsp->pdu.bhs.ttt = req->target_task_tag;
1190 rsp_hdr = (struct iscsi_r2t_hdr *)&rsp->pdu.bhs;
1191 rsp_hdr->opcode = ISCSI_OP_R2T;
1192 rsp_hdr->flags = ISCSI_FLG_FINAL;
1193 rsp_hdr->lun = cmnd_hdr(req)->lun;
1194 rsp_hdr->itt = cmnd_hdr(req)->itt;
1195 rsp_hdr->r2t_sn = cpu_to_be32(req->r2t_sn++);
1196 rsp_hdr->buffer_offset = cpu_to_be32(offset);
1197 if (req->r2t_length > burst) {
1198 rsp_hdr->data_length = cpu_to_be32(burst);
1199 req->r2t_length -= burst;
1202 rsp_hdr->data_length = cpu_to_be32(req->r2t_length);
1203 req->r2t_length = 0;
1206 TRACE_WRITE("%x %u %u %u %u", cmnd_itt(req),
1207 be32_to_cpu(rsp_hdr->data_length),
1208 be32_to_cpu(rsp_hdr->buffer_offset),
1209 be32_to_cpu(rsp_hdr->r2t_sn), req->outstanding_r2t);
1211 list_add_tail(&rsp->write_list_entry, &send);
1213 if (++req->outstanding_r2t >= session->sess_param.max_outstanding_r2t)
1216 } while (req->r2t_length != 0);
1218 iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_WAKE);
/*
 * SCST pre-exec callback: verify the data digests of all received
 * Data-Out PDUs queued on the request.  On a CRC mismatch, set an iSCSI
 * CRC-error sense on the SCST command and return an error status; the
 * checked entries are removed from the rx_ddigest list as they pass.
 */
1224 static int iscsi_pre_exec(struct scst_cmd *scst_cmd)
1226 int res = SCST_PREPROCESS_STATUS_SUCCESS;
1227 struct iscsi_cmnd *req = (struct iscsi_cmnd *)
1228 scst_cmd_get_tgt_priv(scst_cmd);
1229 struct iscsi_cmnd *c, *t;
1233 EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
1235 /* If data digest isn't used this list will be empty */
1236 list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
1237 rx_ddigest_cmd_list_entry) {
1238 TRACE_DBG("Checking digest of RX ddigest cmd %p", c);
1239 if (digest_rx_data(c) != 0) {
1240 scst_set_cmd_error(scst_cmd,
1241 SCST_LOAD_SENSE(iscsi_sense_crc_error));
1242 res = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
1244 * The rest of rx_ddigest_cmd_list will be freed
1245 * in req_cmnd_release()
1249 cmd_del_from_rx_ddigest_list(c);
1254 TRACE_EXIT_RES(res);
/*
 * Begin receiving a NOP-Out PDU: validate its TTT/ITT and sequence
 * numbers, hash non-reserved-ITT requests, then set up the read iovecs
 * for the ping payload — into a freshly allocated buffer when a NOP-In
 * echo will be needed, or into the shared dummy_page sink when the data
 * is to be discarded (reserved ITT).
 */
1258 static int noop_out_start(struct iscsi_cmnd *cmnd)
1260 struct iscsi_conn *conn = cmnd->conn;
1264 TRACE_DBG("%p", cmnd);
1266 iscsi_extracheck_is_rd_thread(conn);
1268 if (unlikely(cmnd_ttt(cmnd) != cpu_to_be32(ISCSI_RESERVED_TAG))) {
1270 * We don't request a NOP-Out by sending a NOP-In.
1271 * See 10.18.2 in the draft 20.
1273 PRINT_ERROR("Initiator sent command with not RESERVED tag and "
1274 "TTT %x", cmnd_itt(cmnd));
1275 err = -ISCSI_REASON_PROTOCOL_ERROR;
1279 if (cmnd_itt(cmnd) == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1280 if (unlikely(!(cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE)))
1281 PRINT_ERROR("%s", "Initiator sent RESERVED tag for "
1282 "non-immediate command");
1283 spin_lock(&conn->session->sn_lock);
1284 __update_stat_sn(cmnd);
1285 err = check_cmd_sn(cmnd);
1286 spin_unlock(&conn->session->sn_lock);
1290 err = cmnd_insert_hash(cmnd);
1291 if (unlikely(err < 0)) {
1292 PRINT_ERROR("Can't insert in hash: ignore this "
1293 "request %x", cmnd_itt(cmnd));
1298 size = cmnd->pdu.datasize;
1301 conn->read_msg.msg_iov = conn->read_iov;
1302 if (cmnd->pdu.bhs.itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
1303 struct scatterlist *sg;
1305 cmnd->sg = sg = scst_alloc(size, GFP_KERNEL,
1308 TRACE(TRACE_OUT_OF_MEM, "Allocating buffer for"
1309 " %d NOP-Out payload failed", size);
1310 err = -ISCSI_REASON_OUT_OF_RESOURCES;
1314 /* We already checked it in check_segment_length() */
1315 sBUG_ON(cmnd->sg_cnt > (signed)ISCSI_CONN_IOV_MAX);
1318 cmnd->bufflen = size;
1320 for (i = 0; i < cmnd->sg_cnt; i++) {
1321 conn->read_iov[i].iov_base =
1322 (void __force __user *)(page_address(sg_page(&sg[i])));
1323 tmp = min_t(u32, size, PAGE_SIZE);
1324 conn->read_iov[i].iov_len = tmp;
1325 conn->read_size += tmp;
1331 * There are no problems with the safety from concurrent
1332 * accesses to dummy_page, since for ISCSI_RESERVED_TAG
1333 * the data is only read and then discarded.
1335 for (i = 0; i < (signed)ISCSI_CONN_IOV_MAX; i++) {
1336 conn->read_iov[i].iov_base =
1337 (void __force __user *)(page_address(dummy_page));
1338 tmp = min_t(u32, size, PAGE_SIZE);
1339 conn->read_iov[i].iov_len = tmp;
1340 conn->read_size += tmp;
1344 /* We already checked size in check_segment_length() */
1348 conn->read_msg.msg_iovlen = i;
1349 TRACE_DBG("msg_iov=%p, msg_iovlen=%zd", conn->read_msg.msg_iov,
1350 conn->read_msg.msg_iovlen);
/*
 * get_next_ttt() - allocate the next Target Transfer Tag for the session,
 * skipping the ISCSI_RESERVED_TAG value, and return it in wire (big-endian)
 * order. Runs only on the read thread (extracheck below), so next_ttt needs
 * no extra locking here.
 * NOTE(review): local declaration lines are missing from this listing.
 */
1357 static inline u32 get_next_ttt(struct iscsi_conn *conn)
1360 struct iscsi_session *session = conn->session;
1362 iscsi_extracheck_is_rd_thread(conn);
/* RESERVED_TAG must never be handed out as a real TTT */
1364 if (session->next_ttt == ISCSI_RESERVED_TAG)
1365 session->next_ttt++;
1366 ttt = session->next_ttt++;
1368 return cpu_to_be32(ttt);
/*
 * scsi_cmnd_start() - RX-path entry for a SCSI Command PDU.
 * Builds the SCST command (scst_rx_cmd), derives the data direction and
 * expected transfer lengths from the iSCSI flags/AHS, runs SCST stage-1
 * preprocessing synchronously (wait_event below), then sets up buffers and
 * unsolicited-data/R2T state and, if immediate data is present, prepares
 * the receive PDU.
 * NOTE(review): numbered listing with gaps — error/exit paths and some
 * closing braces are not visible; comments are hedged accordingly.
 */
1371 static int scsi_cmnd_start(struct iscsi_cmnd *req)
1373 struct iscsi_conn *conn = req->conn;
1374 struct iscsi_session *session = conn->session;
1375 struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
1376 struct scst_cmd *scst_cmd;
1377 scst_data_direction dir;
1378 struct iscsi_ahs_hdr *ahdr;
1383 TRACE_DBG("scsi command: %02x", req_hdr->scb[0]);
1385 TRACE_DBG("Incrementing active_cmds (cmd %p, sess %p, "
1386 "new value %d)", req, session,
1387 atomic_read(&session->active_cmds)+1);
1388 atomic_inc(&session->active_cmds);
/* Remember to decrement active_cmds when this cmnd is released */
1389 req->dec_active_cmnds = 1;
1391 scst_cmd = scst_rx_cmd(session->scst_sess,
1392 (uint8_t *)&req_hdr->lun, sizeof(req_hdr->lun),
1393 req_hdr->scb, sizeof(req_hdr->scb), SCST_NON_ATOMIC);
1394 if (scst_cmd == NULL) {
/* Allocation failure: answer BUSY and reject instead of dropping */
1395 create_status_rsp(req, SAM_STAT_BUSY, NULL, 0);
1396 cmnd_reject_scsi_cmd(req);
1400 req->scst_cmd = scst_cmd;
1401 scst_cmd_set_tag(scst_cmd, req_hdr->itt);
1402 scst_cmd_set_tgt_priv(scst_cmd, req);
/* Both R and W flags set => bidirectional command; read length comes
 * from the Bidirectional Read Expected Data Transfer Length AHS */
1404 if ((req_hdr->flags & ISCSI_CMD_READ) &&
1405 (req_hdr->flags & ISCSI_CMD_WRITE)) {
1406 int sz = cmnd_read_size(req);
1407 if (unlikely(sz < 0)) {
1408 PRINT_ERROR("%s", "BIDI data transfer, but initiator "
1409 "not supplied Bidirectional Read Expected Data "
1410 "Transfer Length AHS");
1411 scst_set_cmd_error(scst_cmd,
1412 SCST_LOAD_SENSE(scst_sense_parameter_value_invalid));
1414 * scst_cmd_init_done() will handle commands with
1415 * set status as preliminary completed
1418 req->read_size = sz;
1419 dir = SCST_DATA_BIDI;
1420 scst_cmd_set_expected(scst_cmd, dir, sz);
1421 scst_cmd_set_expected_in_transfer_len(scst_cmd,
1422 be32_to_cpu(req_hdr->data_length));
1423 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
/* Without zero-copy completion notification the buffer must be
 * target-allocated (no SGV cache), see iscsi_alloc_data_buf() */
1424 scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
1427 } else if (req_hdr->flags & ISCSI_CMD_READ) {
1428 req->read_size = be32_to_cpu(req_hdr->data_length);
1429 dir = SCST_DATA_READ;
1430 scst_cmd_set_expected(scst_cmd, dir, req->read_size);
1431 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
1432 scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
1434 } else if (req_hdr->flags & ISCSI_CMD_WRITE) {
1435 dir = SCST_DATA_WRITE;
1436 scst_cmd_set_expected(scst_cmd, dir,
1437 be32_to_cpu(req_hdr->data_length));
1439 dir = SCST_DATA_NONE;
1440 scst_cmd_set_expected(scst_cmd, dir, 0);
/* Map iSCSI task attributes to SCST queue types */
1443 switch (req_hdr->flags & ISCSI_CMD_ATTR_MASK) {
1444 case ISCSI_CMD_SIMPLE:
1445 scst_cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1447 case ISCSI_CMD_HEAD_OF_QUEUE:
1448 scst_cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1450 case ISCSI_CMD_ORDERED:
1451 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1454 scst_cmd->queue_type = SCST_CMD_QUEUE_ACA;
1456 case ISCSI_CMD_UNTAGGED:
1457 scst_cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
1460 PRINT_ERROR("Unknown task code %x, use ORDERED instead",
1461 req_hdr->flags & ISCSI_CMD_ATTR_MASK);
1462 scst_cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
1466 /* cmd_sn is already in CPU format converted in check_cmd_sn() */
1467 scst_cmd_set_tgt_sn(scst_cmd, req_hdr->cmd_sn);
/* Walk the AHS chain looking for an extended-CDB AHS */
1469 ahdr = (struct iscsi_ahs_hdr *)req->pdu.ahs;
1471 uint8_t *p = (uint8_t *)ahdr;
1476 ahdr = (struct iscsi_ahs_hdr *)p;
1478 if (ahdr->ahstype == ISCSI_AHSTYPE_CDB) {
1479 struct iscsi_cdb_ahdr *eca =
1480 (struct iscsi_cdb_ahdr *)ahdr;
1481 scst_cmd_set_ext_cdb(scst_cmd, eca->cdb,
1482 be16_to_cpu(ahdr->ahslength) - 1);
/* AHS entries are padded; advance by header (3) + length, rounding
 * presumably handled on the elided lines — TODO confirm */
1485 s = 3 + be16_to_cpu(ahdr->ahslength);
1489 } while (size < req->pdu.ahssize);
1492 TRACE_DBG("START Command (tag %d, queue_type %d)",
1493 req_hdr->itt, scst_cmd->queue_type);
1494 req->scst_state = ISCSI_CMD_STATE_RX_CMD;
1495 scst_cmd_init_stage1_done(scst_cmd, SCST_CONTEXT_DIRECT, 0);
/* Block the read thread until SCST preprocessing moves us past RX_CMD;
 * the wake-up comes from iscsi_preprocessing_done() */
1497 wait_event(req->scst_waitQ, req->scst_state != ISCSI_CMD_STATE_RX_CMD);
1499 if (unlikely(req->scst_state != ISCSI_CMD_STATE_AFTER_PREPROC)) {
1500 TRACE_DBG("req %p is in %x state", req, req->scst_state);
1501 if (req->scst_state == ISCSI_CMD_STATE_PROCESSED) {
1502 cmnd_reject_scsi_cmd(req);
1505 if (unlikely(req->tm_aborted)) {
1506 TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
1508 cmnd_prepare_get_rejected_cmd_data(req);
1514 dir = scst_cmd_get_data_direction(scst_cmd);
1515 if (dir & SCST_DATA_WRITE) {
/* FINAL not set => more unsolicited Data-Out PDUs will follow */
1516 req->is_unsolicited_data = !(req_hdr->flags & ISCSI_CMD_FINAL);
1517 req->r2t_length = be32_to_cpu(req_hdr->data_length) -
1519 if (req->r2t_length > 0)
1520 req->data_waiting = 1;
/* Non-WRITE command must not carry data nor clear FINAL */
1522 if (unlikely(!(req_hdr->flags & ISCSI_CMD_FINAL) ||
1523 req->pdu.datasize)) {
1524 PRINT_ERROR("Unexpected unsolicited data (ITT %x "
1525 "CDB %x", cmnd_itt(req), req_hdr->scb[0]);
1526 scst_set_cmd_error(scst_cmd,
1527 SCST_LOAD_SENSE(iscsi_sense_unexpected_unsolicited_data));
1528 if (scst_cmd_get_sense_buffer(scst_cmd) != NULL)
1529 create_status_rsp(req, SAM_STAT_CHECK_CONDITION,
1530 scst_cmd_get_sense_buffer(scst_cmd),
1531 scst_cmd_get_sense_buffer_len(scst_cmd));
1533 create_status_rsp(req, SAM_STAT_BUSY, NULL, 0);
1534 cmnd_reject_scsi_cmd(req);
1539 req->target_task_tag = get_next_ttt(conn);
/* BIDI uses the separate "in" SG list for the read leg */
1540 if (dir != SCST_DATA_BIDI) {
1541 req->sg = scst_cmd_get_sg(scst_cmd);
1542 req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
1543 req->bufflen = scst_cmd_get_bufflen(scst_cmd);
1545 req->sg = scst_cmd_get_in_sg(scst_cmd);
1546 req->sg_cnt = scst_cmd_get_in_sg_cnt(scst_cmd);
1547 req->bufflen = scst_cmd_get_in_bufflen(scst_cmd);
/* Clamp a too-large R2T length rather than overrunning the buffer */
1549 if (unlikely(req->r2t_length > req->bufflen)) {
1550 PRINT_ERROR("req->r2t_length %d > req->bufflen %d",
1551 req->r2t_length, req->bufflen);
1552 req->r2t_length = req->bufflen;
1555 TRACE_DBG("req=%p, dir=%d, is_unsolicited_data=%d, "
1556 "r2t_length=%d, bufflen=%d", req, dir,
1557 req->is_unsolicited_data, req->r2t_length, req->bufflen);
/* Enforce negotiated ImmediateData=No and InitialR2T=Yes */
1559 if (unlikely(!session->sess_param.immediate_data &&
1560 req->pdu.datasize)) {
1561 PRINT_ERROR("Initiator %s violated negotiated paremeters: "
1562 "forbidden immediate data sent (ITT %x, op %x)",
1563 session->initiator_name, cmnd_itt(req),
1569 if (unlikely(session->sess_param.initial_r2t &&
1570 !(req_hdr->flags & ISCSI_CMD_FINAL))) {
1571 PRINT_ERROR("Initiator %s violated negotiated paremeters: "
1572 "initial R2T is required (ITT %x, op %x)",
1573 session->initiator_name, cmnd_itt(req),
1579 if (req->pdu.datasize)
1580 res = cmnd_prepare_recv_pdu(conn, req, 0, req->pdu.datasize);
1582 /* Aborted commands will be freed in cmnd_rx_end() */
1583 TRACE_EXIT_RES(res);
/*
 * data_out_start() - RX-path entry for a Data-Out PDU.
 * Looks up the originating WRITE request by ITT/TTT, validates that the
 * data fits the outstanding unsolicited/R2T window, and prepares the
 * receive PDU at the PDU's buffer offset. On protocol violations the
 * connection is closed and the data is drained via the reject path.
 * NOTE(review): numbered listing with gaps — some returns/labels elided.
 */
1587 static int data_out_start(struct iscsi_conn *conn, struct iscsi_cmnd *cmnd)
1589 struct iscsi_data_out_hdr *req_hdr =
1590 (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1591 struct iscsi_cmnd *orig_req = NULL;
1592 u32 offset = be32_to_cpu(req_hdr->buffer_offset);
1598 * There is no race with send_r2t() and conn_abort(), since
1599 * all functions called from single read thread
1601 iscsi_extracheck_is_rd_thread(cmnd->conn);
1603 update_stat_sn(cmnd);
1605 cmnd->cmd_req = orig_req = cmnd_find_hash(conn->session, req_hdr->itt,
1607 if (unlikely(orig_req == NULL)) {
1608 /* It might happen if req was aborted and then freed */
1609 TRACE(TRACE_MGMT_MINOR, "Unable to find scsi task %x %x",
1610 cmnd_itt(cmnd), cmnd_ttt(cmnd));
1614 if (orig_req->is_unsolicited_data) {
/* Unsolicited data must not exceed the remaining R2T length */
1615 if (unlikely(orig_req->r2t_length < cmnd->pdu.datasize)) {
1616 PRINT_ERROR("Data size (%d) > R2T length (%d)",
1617 cmnd->pdu.datasize, orig_req->r2t_length);
1618 mark_conn_closed(conn);
1622 orig_req->r2t_length -= cmnd->pdu.datasize;
1625 /* Check unsolicited burst data */
1626 if (unlikely((req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) &&
1627 (orig_req->pdu.bhs.flags & ISCSI_FLG_FINAL))) {
1628 PRINT_ERROR("Unexpected data from %x %x",
1629 cmnd_itt(cmnd), cmnd_ttt(cmnd));
1630 mark_conn_closed(conn);
1635 TRACE_WRITE("%u %p %p %u %u", req_hdr->ttt, cmnd, orig_req,
1636 offset, cmnd->pdu.datasize);
1638 res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
1641 TRACE_EXIT_RES(res);
/* Reject path (label elided in listing): mark data rejected and set up
 * to drain/discard the incoming payload */
1645 sBUG_ON(cmnd->rejected);
1647 cmnd->reject_reason = ISCSI_REJECT_DATA;
1648 cmnd_prepare_get_rejected_cmd_data(cmnd);
/*
 * data_out_end() - RX-path completion for a Data-Out PDU.
 * Queues data-digest verification if needed, then, depending on whether
 * the PDU belongs to an unsolicited burst (RESERVED TTT) or an R2T,
 * updates the originating request's unsolicited/outstanding-R2T state and
 * restarts the waiting command when the write data is complete.
 * NOTE(review): numbered listing with gaps — else branches/returns elided.
 */
1652 static void data_out_end(struct iscsi_cmnd *cmnd)
1654 struct iscsi_data_out_hdr *req_hdr =
1655 (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
1656 struct iscsi_cmnd *req;
1658 sBUG_ON(cmnd == NULL);
1659 req = cmnd->cmd_req;
1660 sBUG_ON(req == NULL);
1662 TRACE_DBG("cmnd %p, req %p", cmnd, req);
1664 iscsi_extracheck_is_rd_thread(cmnd->conn);
/* Defer release until the data digest has been verified */
1666 if (!(cmnd->conn->ddigest_type & DIGEST_NONE) &&
1667 !cmnd->ddigest_checked) {
1668 cmd_add_on_rx_ddigest_list(req, cmnd);
1672 if (req_hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1673 TRACE_DBG("ISCSI_RESERVED_TAG, FINAL %x",
1674 req_hdr->flags & ISCSI_FLG_FINAL);
/* FINAL on the unsolicited burst ends the unsolicited phase */
1676 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1677 req->is_unsolicited_data = 0;
1683 TRACE_DBG("FINAL %x, outstanding_r2t %d, r2t_length %d",
1684 req_hdr->flags & ISCSI_FLG_FINAL,
1685 req->outstanding_r2t, req->r2t_length);
1687 if (req_hdr->flags & ISCSI_FLG_FINAL) {
1688 if (unlikely(req->is_unsolicited_data)) {
1689 PRINT_ERROR("Unexpected unsolicited data "
1690 "(r2t_length %u, outstanding_r2t %d)",
1692 req->is_unsolicited_data)
1693 mark_conn_closed(req->conn);
1696 req->outstanding_r2t--;
/* All write data received: hand the command back for execution */
1701 if (req->r2t_length != 0) {
1702 if (!req->is_unsolicited_data)
1705 iscsi_restart_waiting_cmnd(req);
/*
 * __cmnd_abort() - mark a single command as TM-aborted.
 * Only sets cmnd->tm_aborted and logs extensive state; it deliberately
 * does NOT fail a data_waiting command here (see the original comment
 * below on why that is currently unsafe from arbitrary threads).
 */
1712 static void __cmnd_abort(struct iscsi_cmnd *cmnd)
1715 * Here, if cmnd is data_waiting, we should iscsi_fail_waiting_cmnd()
1716 * it. But, since this function can be called from any thread, not only
1717 * from the read one, we at the moment can't do that, because of
1718 * absence of appropriate locking protection. But this isn't a stuff
1719 * for 1.0.0. So, currently a misbehaving initiator, not sending
1720 * data in R2T state for a sharing between targets device, for which
1721 * for some reason an aborting TM command, e.g. TARGET RESET, from
1722 * another initiator is issued, can block response for this TM command
1723 * virtually forever and by this make the issuing initiator eventually
1724 * put the device offline.
1726 * ToDo in the next version, possibly a simple connection mutex, taken
1727 * by the read thread before starting any processing and by this
1728 * function, should be sufficient.
1731 TRACE_MGMT_DBG("Aborting cmd %p, scst_cmd %p (scst state %x, "
1732 "ref_cnt %d, itt %x, sn %u, op %x, r2t_len %x, CDB op %x, "
1733 "size to write %u, is_unsolicited_data %d, "
1734 "outstanding_r2t %d, data_waiting %d, sess->exp_cmd_sn %u, "
1735 "conn %p, rd_task %p)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
1736 atomic_read(&cmnd->ref_cnt), cmnd_itt(cmnd), cmnd->pdu.bhs.sn,
1737 cmnd_opcode(cmnd), cmnd->r2t_length, cmnd_scsicode(cmnd),
1738 cmnd_write_size(cmnd), cmnd->is_unsolicited_data,
1739 cmnd->outstanding_r2t, cmnd->data_waiting,
1740 cmnd->conn->session->exp_cmd_sn, cmnd->conn,
1741 cmnd->conn->rd_task);
1743 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
1744 TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
/* The flag is checked on the processing paths (e.g. iscsi_cmnd_exec) */
1747 cmnd->tm_aborted = 1;
1752 /* Must be called from the read or conn close thread */
/*
 * cmnd_abort() - handle an ABORT TASK TM request.
 * Validates RefCmdSN/CmdSN ordering per RFC 3720, finds the referenced
 * command by RTT, cross-checks its LUN and CmdSN, and marks it aborted.
 * Returns an ISCSI_RESPONSE_* code for the TM response.
 * NOTE(review): numbered listing with gaps — some braces/exits elided.
 */
1753 static int cmnd_abort(struct iscsi_cmnd *req)
1755 struct iscsi_session *session = req->conn->session;
1756 struct iscsi_task_mgt_hdr *req_hdr =
1757 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1758 struct iscsi_cmnd *cmnd;
1761 req_hdr->ref_cmd_sn = be32_to_cpu(req_hdr->ref_cmd_sn);
1763 if (after(req_hdr->ref_cmd_sn, req_hdr->cmd_sn)) {
1764 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) > CmdSN(%u)",
1765 req_hdr->ref_cmd_sn, req_hdr->cmd_sn);
1766 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1770 cmnd = cmnd_find_hash_get(session, req_hdr->rtt, ISCSI_RESERVED_TAG);
1772 struct iscsi_conn *conn = cmnd->conn;
1773 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1775 if (req_hdr->lun != hdr->lun) {
1776 PRINT_ERROR("ABORT TASK: LUN mismatch: req LUN "
1777 "%llx, cmd LUN %llx, rtt %u",
1778 (long long unsigned int)req_hdr->lun,
1779 (long long unsigned int)hdr->lun,
1781 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1785 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
/* For an immediate task, RefCmdSN must equal the TM CmdSN */
1786 if (req_hdr->ref_cmd_sn != req_hdr->cmd_sn) {
1787 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != TM "
1788 "cmd CmdSN(%u) for immediate command "
1789 "%p", req_hdr->ref_cmd_sn,
1790 req_hdr->cmd_sn, cmnd);
1791 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1795 if (req_hdr->ref_cmd_sn != hdr->cmd_sn) {
1796 PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != "
1797 "CmdSN(%u) for command %p",
/* FIX: print the aborted command's own CmdSN (hdr->cmd_sn), which is
 * what the comparison above tested — not the TM request's CmdSN */
1798 req_hdr->ref_cmd_sn, hdr->cmd_sn,
1800 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
/* The referenced command must have been issued before the TM request */
1805 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1806 (req_hdr->cmd_sn == hdr->cmd_sn)) {
1807 PRINT_ERROR("ABORT TASK: SN mismatch: req SN %x, "
1808 "cmd SN %x, rtt %u", req_hdr->cmd_sn,
1809 hdr->cmd_sn, req_hdr->rtt);
1810 err = ISCSI_RESPONSE_FUNCTION_REJECTED;
1814 spin_lock_bh(&conn->cmd_list_lock);
1816 spin_unlock_bh(&conn->cmd_list_lock);
1821 TRACE_MGMT_DBG("cmd RTT %x not found", req_hdr->rtt);
1822 err = ISCSI_RESPONSE_UNKNOWN_TASK;
1833 /* Must be called from the read or conn close thread */
/*
 * target_abort() - mark commands aborted across the whole target.
 * With all != 0 every command on every session/connection is aborted
 * (target reset); otherwise only commands matching the TM request's LUN
 * (LU reset). Walks under target_mutex and per-conn cmd_list_lock.
 * NOTE(review): the __cmnd_abort() call lines are elided in this listing.
 */
1834 static int target_abort(struct iscsi_cmnd *req, int all)
1836 struct iscsi_target *target = req->conn->session->target;
1837 struct iscsi_task_mgt_hdr *req_hdr =
1838 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1839 struct iscsi_session *session;
1840 struct iscsi_conn *conn;
1841 struct iscsi_cmnd *cmnd;
1843 mutex_lock(&target->target_mutex);
1845 list_for_each_entry(session, &target->session_list,
1846 session_list_entry) {
1847 list_for_each_entry(conn, &session->conn_list,
1849 spin_lock_bh(&conn->cmd_list_lock);
1850 list_for_each_entry(cmnd, &conn->cmd_list,
1856 else if (req_hdr->lun == cmnd_hdr(cmnd)->lun)
1859 spin_unlock_bh(&conn->cmd_list_lock);
1863 mutex_unlock(&target->target_mutex);
1867 /* Must be called from the read or conn close thread */
/*
 * task_set_abort() - abort the task set for one LUN on this session.
 * Marks aborted every command on the session's connections whose LUN
 * matches the TM request and whose CmdSN precedes the TM CmdSN
 * (commands at/after the TM CmdSN are skipped, as is the TM cmd itself).
 * NOTE(review): the __cmnd_abort() call lines are elided in this listing.
 */
1868 static void task_set_abort(struct iscsi_cmnd *req)
1870 struct iscsi_session *session = req->conn->session;
1871 struct iscsi_task_mgt_hdr *req_hdr =
1872 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1873 struct iscsi_target *target = session->target;
1874 struct iscsi_conn *conn;
1875 struct iscsi_cmnd *cmnd;
1877 mutex_lock(&target->target_mutex);
1879 list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
1880 spin_lock_bh(&conn->cmd_list_lock);
1881 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1882 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
1885 if (req_hdr->lun != hdr->lun)
1887 if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
1888 req_hdr->cmd_sn == hdr->cmd_sn)
1892 spin_unlock_bh(&conn->cmd_list_lock);
1895 mutex_unlock(&target->target_mutex);
1899 /* Must be called from the read or conn close thread */
/*
 * conn_abort() - abort every command queued on a connection.
 * For a command stuck waiting for write data, the cmd_list_lock is
 * dropped (after taking a reference) so iscsi_fail_waiting_cmnd() can be
 * called safely, then the scan restarts under the lock.
 * NOTE(review): __cmnd_abort()/restart lines are elided in this listing.
 */
1900 void conn_abort(struct iscsi_conn *conn)
1902 struct iscsi_cmnd *cmnd;
1904 TRACE_MGMT_DBG("Aborting conn %p", conn);
1906 iscsi_extracheck_is_rd_thread(conn);
1908 spin_lock_bh(&conn->cmd_list_lock);
1910 list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
1912 if (cmnd->data_waiting) {
/* cmnd_get_check() == 0 means we got a reference; then the lock
 * must be dropped before failing the waiting command */
1913 if (!cmnd_get_check(cmnd)) {
1914 spin_unlock_bh(&conn->cmd_list_lock);
1916 /* ToDo: this is racy for MC/S */
1917 TRACE_MGMT_DBG("Restarting data waiting cmd "
1919 iscsi_fail_waiting_cmnd(cmnd);
1924 * We are in the read thread, so we may not
1925 * worry that after cmnd release conn gets
1928 spin_lock_bh(&conn->cmd_list_lock);
1933 spin_unlock_bh(&conn->cmd_list_lock);
/*
 * execute_task_management() - dispatch an incoming TM request.
 * Records tm_sn/tm_active (dropping any stale delayed TM response), then
 * performs the driver-side abort bookkeeping and forwards the function to
 * SCST via scst_rx_mgmt_fn(). On any local rejection the TM response is
 * sent directly at the end.
 * NOTE(review): numbered listing with gaps — break statements and some
 * error-path lines are elided.
 */
1938 static void execute_task_management(struct iscsi_cmnd *req)
1940 struct iscsi_conn *conn = req->conn;
1941 struct iscsi_session *sess = conn->session;
1942 struct iscsi_task_mgt_hdr *req_hdr =
1943 (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
1944 int rc, status, function = req_hdr->function & ISCSI_FUNCTION_MASK;
1945 struct scst_rx_mgmt_params params;
1947 TRACE((function == ISCSI_FUNCTION_ABORT_TASK) ?
1948 TRACE_MGMT_MINOR : TRACE_MGMT,
1949 "TM fn %d", function);
1951 TRACE_MGMT_DBG("TM req %p, itt %x, rtt %x, sn %u, con %p", req,
1952 cmnd_itt(req), req_hdr->rtt, req_hdr->cmd_sn, conn);
1954 iscsi_extracheck_is_rd_thread(conn);
1956 spin_lock(&sess->sn_lock);
1958 sess->tm_sn = req_hdr->cmd_sn;
/* A new TM request supersedes any still-delayed TM response */
1959 if (sess->tm_rsp != NULL) {
1960 struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
1962 TRACE(TRACE_MGMT_MINOR, "Dropping delayed TM rsp %p", tm_rsp);
1964 sess->tm_rsp = NULL;
1967 spin_unlock(&sess->sn_lock);
1969 sBUG_ON(sess->tm_active < 0);
1971 rsp_cmnd_release(tm_rsp);
1973 spin_unlock(&sess->sn_lock);
/* FIX: "&params" was mojibake-corrupted to "&para;ms" in this listing;
 * restore the address-of operator so the struct is zeroed */
1975 memset(&params, 0, sizeof(params));
1976 params.atomic = SCST_NON_ATOMIC;
1977 params.tgt_priv = req;
/* Only ABORT TASK carries a meaningful Referenced Task Tag */
1979 if ((function != ISCSI_FUNCTION_ABORT_TASK) &&
1980 (req_hdr->rtt != ISCSI_RESERVED_TAG)) {
1981 PRINT_ERROR("Invalid RTT %x (TM fn %x)", req_hdr->rtt,
1984 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
1988 /* cmd_sn is already in CPU format converted in check_cmd_sn() */
1991 case ISCSI_FUNCTION_ABORT_TASK:
1993 status = cmnd_abort(req);
1995 params.fn = SCST_ABORT_TASK;
1996 params.tag = req_hdr->rtt;
1998 params.lun = (uint8_t *)&req_hdr->lun;
1999 params.lun_len = sizeof(req_hdr->lun);
2001 params.cmd_sn = req_hdr->cmd_sn;
2002 params.cmd_sn_set = 1;
2003 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2005 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2008 case ISCSI_FUNCTION_ABORT_TASK_SET:
2009 task_set_abort(req);
2010 params.fn = SCST_ABORT_TASK_SET;
2011 params.lun = (uint8_t *)&req_hdr->lun;
2012 params.lun_len = sizeof(req_hdr->lun);
2014 params.cmd_sn = req_hdr->cmd_sn;
2015 params.cmd_sn_set = 1;
2016 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2018 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2020 case ISCSI_FUNCTION_CLEAR_TASK_SET:
2021 task_set_abort(req);
2022 params.fn = SCST_CLEAR_TASK_SET;
2023 params.lun = (uint8_t *)&req_hdr->lun;
2024 params.lun_len = sizeof(req_hdr->lun);
2026 params.cmd_sn = req_hdr->cmd_sn;
2027 params.cmd_sn_set = 1;
2028 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2030 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2032 case ISCSI_FUNCTION_CLEAR_ACA:
2033 params.fn = SCST_CLEAR_ACA;
2034 params.lun = (uint8_t *)&req_hdr->lun;
2035 params.lun_len = sizeof(req_hdr->lun);
2037 params.cmd_sn = req_hdr->cmd_sn;
2038 params.cmd_sn_set = 1;
2039 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2041 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2043 case ISCSI_FUNCTION_TARGET_COLD_RESET:
2044 case ISCSI_FUNCTION_TARGET_WARM_RESET:
2045 target_abort(req, 1);
2046 params.fn = SCST_TARGET_RESET;
2047 params.cmd_sn = req_hdr->cmd_sn;
2048 params.cmd_sn_set = 1;
2049 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2051 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2053 case ISCSI_FUNCTION_LOGICAL_UNIT_RESET:
2054 target_abort(req, 0);
2055 params.fn = SCST_LUN_RESET;
2056 params.lun = (uint8_t *)&req_hdr->lun;
2057 params.lun_len = sizeof(req_hdr->lun);
2059 params.cmd_sn = req_hdr->cmd_sn;
2060 params.cmd_sn_set = 1;
2061 rc = scst_rx_mgmt_fn(conn->session->scst_sess,
2063 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2065 case ISCSI_FUNCTION_TASK_REASSIGN:
2067 status = ISCSI_RESPONSE_ALLEGIANCE_REASSIGNMENT_UNSUPPORTED;
2070 PRINT_ERROR("Unknown TM function %d", function);
2072 status = ISCSI_RESPONSE_FUNCTION_REJECTED;
2078 iscsi_send_task_mgmt_resp(req, status);
/*
 * noop_out_exec() - execute a NOP-Out after CmdSN ordering.
 * For a non-reserved ITT, builds and queues a NOP-In response that echoes
 * the request's ITT and payload (shares the request's SG buffer); a
 * RESERVED ITT (unanswered ping) path is on elided lines.
 * NOTE(review): numbered listing with gaps — braces/else branch elided.
 */
2083 static void noop_out_exec(struct iscsi_cmnd *req)
2085 struct iscsi_cmnd *rsp;
2086 struct iscsi_nop_in_hdr *rsp_hdr;
2088 TRACE_DBG("%p", req);
2090 if (cmnd_itt(req) != cpu_to_be32(ISCSI_RESERVED_TAG)) {
2091 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2093 rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
2094 rsp_hdr->opcode = ISCSI_OP_NOOP_IN;
2095 rsp_hdr->flags = ISCSI_FLG_FINAL;
2096 rsp_hdr->itt = req->pdu.bhs.itt;
/* NOP-In not soliciting a NOP-Out carries the RESERVED TTT */
2097 rsp_hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
2099 if (req->pdu.datasize)
2100 sBUG_ON(req->sg == NULL);
2102 sBUG_ON(req->sg != NULL);
/* Response echoes the ping payload by sharing the request's buffer */
2106 rsp->sg_cnt = req->sg_cnt;
2107 rsp->bufflen = req->bufflen;
2110 /* We already checked it in check_segment_length() */
2111 sBUG_ON(get_pgcnt(req->pdu.datasize, 0) > ISCSI_CONN_IOV_MAX);
2113 rsp->pdu.datasize = req->pdu.datasize;
2114 iscsi_cmnd_init_write(rsp,
2115 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2116 req_cmnd_release(req);
/*
 * logout_exec() - execute a Logout request after CmdSN ordering.
 * Builds a Logout response echoing the request ITT, flags the response so
 * the connection is closed once it has been fully sent
 * (should_close_conn, consumed in cmnd_tx_end()), and queues it.
 */
2122 static void logout_exec(struct iscsi_cmnd *req)
2124 struct iscsi_logout_req_hdr *req_hdr;
2125 struct iscsi_cmnd *rsp;
2126 struct iscsi_logout_rsp_hdr *rsp_hdr;
2128 PRINT_INFO("Logout received from initiator %s",
2129 req->conn->session->initiator_name);
2130 TRACE_DBG("%p", req);
2132 req_hdr = (struct iscsi_logout_req_hdr *)&req->pdu.bhs;
2133 rsp = iscsi_cmnd_create_rsp_cmnd(req);
2134 rsp_hdr = (struct iscsi_logout_rsp_hdr *)&rsp->pdu.bhs;
2135 rsp_hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2136 rsp_hdr->flags = ISCSI_FLG_FINAL;
2137 rsp_hdr->itt = req_hdr->itt;
2138 rsp->should_close_conn = 1;
2139 iscsi_cmnd_init_write(rsp,
2140 ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2141 req_cmnd_release(req);
/*
 * iscsi_cmnd_exec() - execute a request once its CmdSN order is reached.
 * Aborted/rejected commands are released or handled via the reject path;
 * otherwise dispatch by opcode (SCSI command, NOP-Out, TM, Logout).
 * NOTE(review): numbered listing with gaps — breaks and some branches of
 * both switch statements are elided.
 */
2145 static void iscsi_cmnd_exec(struct iscsi_cmnd *cmnd)
2149 TRACE_DBG("%p,%x,%u", cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn);
2151 iscsi_extracheck_is_rd_thread(cmnd->conn);
2153 if (unlikely(cmnd->tm_aborted)) {
2154 TRACE_MGMT_DBG("cmnd %p (scst_cmd %p) aborted", cmnd,
2156 req_cmnd_release_force(cmnd, ISCSI_FORCE_RELEASE_WRITE);
2160 if (unlikely(cmnd->rejected))
2163 switch (cmnd_opcode(cmnd)) {
2164 case ISCSI_OP_SCSI_CMD:
/* Still waiting for write data => don't restart yet */
2165 if (cmnd->r2t_length != 0) {
2166 if (!cmnd->is_unsolicited_data) {
2171 iscsi_restart_cmnd(cmnd);
2173 case ISCSI_OP_NOOP_OUT:
2174 noop_out_exec(cmnd);
2176 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2177 execute_task_management(cmnd);
2179 case ISCSI_OP_LOGOUT_CMD:
2183 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2184 req_cmnd_release(cmnd);
/* Reject path (label elided in listing) */
2192 TRACE_MGMT_DBG("Rejected cmd %p (reason %d)", cmnd,
2193 cmnd->reject_reason);
2194 switch (cmnd->reject_reason) {
2196 PRINT_ERROR("Unexpected reject reason %d",
2197 cmnd->reject_reason);
2199 case ISCSI_REJECT_SCSI_CMD:
2200 req_cmnd_release(cmnd);
2207 * Note: the code belows passes a kernel space pointer (&opt) to setsockopt()
2208 * while the declaration of setsockopt specifies that it expects a user space
2209 * pointer. This seems to work fine, and this approach is also used in some
2210 * other parts of the Linux kernel (see e.g. fs/ocfs2/cluster/tcp.c).
/*
 * set_cork() - toggle TCP_CORK on the connection socket so a full PDU
 * (header + data) is coalesced into as few segments as possible.
 * NOTE(review): the local `opt` setup lines are elided in this listing.
 */
2212 static void set_cork(struct socket *sock, int on)
2219 sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK,
2220 (void __force __user *)&opt, sizeof(opt));
/*
 * cmnd_tx_start() - begin transmitting a response PDU on the write thread.
 * Corks the socket, points the write iovec at the BHS, and per opcode
 * assigns the StatSN/DataSN fields (cmnd_set_sn). For Data-In the write
 * offset into the command buffer is taken from the PDU's BufferOffset.
 * NOTE(review): numbered listing with gaps — breaks and braces elided.
 */
2225 void cmnd_tx_start(struct iscsi_cmnd *cmnd)
2227 struct iscsi_conn *conn = cmnd->conn;
2229 TRACE_DBG("conn %p, cmnd %p, opcode %x", conn, cmnd, cmnd_opcode(cmnd));
2230 iscsi_cmnd_set_length(&cmnd->pdu);
2232 iscsi_extracheck_is_wr_thread(conn);
2234 set_cork(conn->sock, 1);
2236 conn->write_iop = conn->write_iov;
2237 conn->write_iop->iov_base = (void __force __user *)(&cmnd->pdu.bhs);
2238 conn->write_iop->iov_len = sizeof(cmnd->pdu.bhs);
2239 conn->write_iop_used = 1;
2240 conn->write_size = sizeof(cmnd->pdu.bhs) + cmnd->pdu.datasize;
2241 conn->write_offset = 0;
2243 switch (cmnd_opcode(cmnd)) {
2244 case ISCSI_OP_NOOP_IN:
2245 cmnd_set_sn(cmnd, 1);
2247 case ISCSI_OP_SCSI_RSP:
2248 cmnd_set_sn(cmnd, 1);
2250 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2251 cmnd_set_sn(cmnd, 1);
2253 case ISCSI_OP_TEXT_RSP:
2254 cmnd_set_sn(cmnd, 1);
2256 case ISCSI_OP_SCSI_DATA_IN:
2258 struct iscsi_data_in_hdr *rsp =
2259 (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
/* FIX: BufferOffset is stored big-endian in the BHS, so the correct
 * conversion to host order is be32_to_cpu(); cpu_to_be32() performs
 * the same byteswap on every arch, so behavior is unchanged, but the
 * direction (and sparse __be32 annotation) was wrong */
2260 u32 offset = be32_to_cpu(rsp->buffer_offset);
2262 TRACE_DBG("cmnd %p, offset %u, datasize %u, bufflen %u", cmnd,
2263 offset, cmnd->pdu.datasize, cmnd->bufflen);
2265 sBUG_ON(offset > cmnd->bufflen);
2266 sBUG_ON(offset + cmnd->pdu.datasize > cmnd->bufflen);
2268 conn->write_offset = offset;
/* StatSN advances only on the final Data-In PDU of the sequence */
2270 cmnd_set_sn(cmnd, (rsp->flags & ISCSI_FLG_FINAL) ? 1 : 0);
2273 case ISCSI_OP_LOGOUT_RSP:
2274 cmnd_set_sn(cmnd, 1);
2277 cmnd->pdu.bhs.sn = cmnd_set_sn(cmnd, 0);
2279 case ISCSI_OP_ASYNC_MSG:
2280 cmnd_set_sn(cmnd, 1);
2282 case ISCSI_OP_REJECT:
2283 cmnd_set_sn(cmnd, 1);
2286 PRINT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
2290 iscsi_dump_pdu(&cmnd->pdu);
/*
 * cmnd_tx_end() - finish transmitting a response PDU.
 * Sanity-checks the opcode, honors should_close_conn /
 * should_close_all_conn (set e.g. by logout_exec()), and uncorks the
 * socket so the coalesced PDU is flushed.
 * NOTE(review): numbered listing with gaps — break statements elided.
 */
2294 void cmnd_tx_end(struct iscsi_cmnd *cmnd)
2296 struct iscsi_conn *conn = cmnd->conn;
2298 TRACE_DBG("%p:%x (should_close_conn %d, should_close_all_conn %d)",
2299 cmnd, cmnd_opcode(cmnd), cmnd->should_close_conn,
2300 cmnd->should_close_all_conn);
2302 switch (cmnd_opcode(cmnd)) {
2303 case ISCSI_OP_NOOP_IN:
2304 case ISCSI_OP_SCSI_RSP:
2305 case ISCSI_OP_SCSI_TASK_MGT_RSP:
2306 case ISCSI_OP_TEXT_RSP:
2308 case ISCSI_OP_ASYNC_MSG:
2309 case ISCSI_OP_REJECT:
2310 case ISCSI_OP_SCSI_DATA_IN:
2311 case ISCSI_OP_LOGOUT_RSP:
2314 PRINT_CRIT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2319 if (unlikely(cmnd->should_close_conn)) {
2320 if (cmnd->should_close_all_conn) {
2321 PRINT_INFO("Closing all connections for target %x at "
2322 "initiator's %s request",
2323 cmnd->conn->session->target->tid,
2324 conn->session->initiator_name);
2325 target_del_all_sess(cmnd->conn->session->target, 0);
2327 PRINT_INFO("Closing connection at initiator's %s "
2328 "request", conn->session->initiator_name);
2329 mark_conn_closed(conn);
/* Uncork: push out everything buffered for this PDU */
2333 set_cork(cmnd->conn->sock, 0);
2338 * Push the command for execution. This functions reorders the commands.
2339 * Called from the read thread.
/*
 * iscsi_session_push_cmnd() - enforce CmdSN ordering before execution.
 * Immediate commands run at once. An in-order command executes and then
 * drains any now-in-order entries from the session pending_list; an
 * out-of-order command is validated against the [ExpCmdSN, allowed]
 * window and inserted into pending_list sorted by CmdSN. Commands behind
 * an active TM SN are aborted before execution.
 * NOTE(review): numbered listing with gaps — loop braces, some
 * assignments (e.g. `drop`) and gotos are elided.
 */
2341 static void iscsi_session_push_cmnd(struct iscsi_cmnd *cmnd)
2343 struct iscsi_session *session = cmnd->conn->session;
2344 struct list_head *entry;
2347 TRACE_DBG("%p:%x %u,%u",
2348 cmnd, cmnd_opcode(cmnd), cmnd->pdu.bhs.sn,
2349 session->exp_cmd_sn);
2351 iscsi_extracheck_is_rd_thread(cmnd->conn);
2353 sBUG_ON(cmnd->parent_req != NULL);
/* Immediate commands bypass CmdSN ordering entirely */
2355 if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
2356 TRACE_DBG("Immediate cmd %p (cmd_sn %u)", cmnd,
2358 iscsi_cmnd_exec(cmnd);
2362 spin_lock(&session->sn_lock);
2364 cmd_sn = cmnd->pdu.bhs.sn;
2365 if (cmd_sn == session->exp_cmd_sn) {
2367 session->exp_cmd_sn = ++cmd_sn;
/* Command ordered before an active TM request: abort it first */
2369 if (unlikely(session->tm_active > 0)) {
2370 if (before(cmd_sn, session->tm_sn)) {
2371 struct iscsi_conn *conn = cmnd->conn;
2373 spin_unlock(&session->sn_lock);
2375 spin_lock_bh(&conn->cmd_list_lock);
2377 spin_unlock_bh(&conn->cmd_list_lock);
2379 spin_lock(&session->sn_lock);
2381 iscsi_check_send_delayed_tm_resp(session);
2384 spin_unlock(&session->sn_lock);
2386 iscsi_cmnd_exec(cmnd);
/* Drain pending commands that are now in order */
2388 spin_lock(&session->sn_lock);
2390 if (list_empty(&session->pending_list))
2392 cmnd = list_entry(session->pending_list.next,
2394 pending_list_entry);
2395 if (cmnd->pdu.bhs.sn != cmd_sn)
2398 list_del(&cmnd->pending_list_entry);
2401 TRACE_DBG("Processing pending cmd %p (cmd_sn %u)",
2407 TRACE_DBG("Pending cmd %p (cmd_sn %u, exp_cmd_sn %u)",
2408 cmnd, cmd_sn, session->exp_cmd_sn);
2411 * iSCSI RFC 3720: "The target MUST silently ignore any
2412 * non-immediate command outside of [from ExpCmdSN to MaxCmdSN
2413 * inclusive] range". But we won't honor the MaxCmdSN
2414 * requirement, because, since we adjust MaxCmdSN from the
2415 * separate write thread, rarery it is possible that initiator
2416 * can legally send command with CmdSN>MaxSN. But it won't
2417 * hurt anything, in the worst case it will lead to
2418 * additional QUEUE FULL status.
2421 if (unlikely(before(cmd_sn, session->exp_cmd_sn))) {
2422 PRINT_ERROR("Unexpected cmd_sn (%u,%u)", cmd_sn,
2423 session->exp_cmd_sn);
2428 if (unlikely(after(cmd_sn, session->exp_cmd_sn +
2429 iscsi_get_allowed_cmds(session)))) {
2430 TRACE_MGMT_DBG("Too large cmd_sn %u (exp_cmd_sn %u, "
2431 "max_sn %u)", cmd_sn, session->exp_cmd_sn,
2432 iscsi_get_allowed_cmds(session));
2436 spin_unlock(&session->sn_lock);
2438 if (unlikely(drop)) {
2439 req_cmnd_release_force(cmnd,
2440 ISCSI_FORCE_RELEASE_WRITE);
/* Aborted while pending: queue a lightweight clone so the TM logic
 * still sees the PDU, and execute it immediately */
2444 if (unlikely(cmnd->tm_aborted)) {
2445 struct iscsi_cmnd *tm_clone;
2447 TRACE_MGMT_DBG("Pending aborted cmnd %p, creating TM "
2448 "clone (scst cmd %p, state %d)", cmnd,
2449 cmnd->scst_cmd, cmnd->scst_state);
2451 tm_clone = cmnd_alloc(cmnd->conn, NULL);
2452 if (tm_clone != NULL) {
2453 tm_clone->tm_aborted = 1;
2454 tm_clone->pdu = cmnd->pdu;
2456 TRACE_MGMT_DBG("TM clone %p created",
2459 iscsi_cmnd_exec(cmnd);
2462 PRINT_ERROR("%s", "Unable to create TM clone");
/* Insert into pending_list keeping it sorted by CmdSN */
2465 spin_lock(&session->sn_lock);
2466 list_for_each(entry, &session->pending_list) {
2467 struct iscsi_cmnd *tmp =
2468 list_entry(entry, struct iscsi_cmnd,
2469 pending_list_entry);
2470 if (before(cmd_sn, tmp->pdu.bhs.sn))
2473 list_add_tail(&cmnd->pending_list_entry, entry);
2477 spin_unlock(&session->sn_lock);
/*
 * check_segment_length() - validate the PDU data segment against the
 * negotiated MaxRecvDataSegmentLength; on violation log, close the
 * connection. Return value lines are elided in this listing.
 */
2482 static int check_segment_length(struct iscsi_cmnd *cmnd)
2484 struct iscsi_conn *conn = cmnd->conn;
2485 struct iscsi_session *session = conn->session;
2487 if (unlikely(cmnd->pdu.datasize > session->sess_param.max_recv_data_length)) {
2488 PRINT_ERROR("Initiator %s violated negotiated parameters: "
2489 "data too long (ITT %x, datasize %u, "
/* FIX: close the parenthesis opened after "data too long (" so the
 * logged message is well-formed */
2490 "max_recv_data_length %u)", session->initiator_name,
2491 cmnd_itt(cmnd), cmnd->pdu.datasize,
2492 session->sess_param.max_recv_data_length);
2493 mark_conn_closed(conn);
/*
 * cmnd_rx_start() - RX dispatch after the BHS of a new PDU is read.
 * Validates segment length, routes by opcode to the per-type start
 * handler, and converts negative -ISCSI_REASON_* codes into a Reject PDU.
 * NOTE(review): numbered listing with gaps — breaks/gotos elided.
 */
2499 int cmnd_rx_start(struct iscsi_cmnd *cmnd)
2501 struct iscsi_conn *conn = cmnd->conn;
2504 iscsi_dump_pdu(&cmnd->pdu);
2506 res = check_segment_length(cmnd);
2510 switch (cmnd_opcode(cmnd)) {
2511 case ISCSI_OP_NOOP_OUT:
2512 rc = noop_out_start(cmnd);
2514 case ISCSI_OP_SCSI_CMD:
/* Hash insert must precede start so the ITT is findable for TM/Data-Out */
2515 rc = cmnd_insert_hash(cmnd);
2516 if (likely(rc == 0)) {
2517 res = scsi_cmnd_start(cmnd);
2518 if (unlikely(res != 0))
2522 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2523 rc = cmnd_insert_hash(cmnd);
2525 case ISCSI_OP_SCSI_DATA_OUT:
2526 res = data_out_start(conn, cmnd);
2527 rc = 0; /* to avoid compiler warning */
2528 if (unlikely(res != 0))
2531 case ISCSI_OP_LOGOUT_CMD:
2532 rc = cmnd_insert_hash(cmnd);
2534 case ISCSI_OP_TEXT_CMD:
2535 case ISCSI_OP_SNACK_CMD:
2536 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2539 rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
2543 if (unlikely(rc < 0)) {
2544 struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
2545 PRINT_ERROR("Error %d (iSCSI opcode %x, ITT %x, op %x)", rc,
2546 cmnd_opcode(cmnd), cmnd_itt(cmnd),
2547 (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD ?
/* Reject reason is the positive ISCSI_REASON_* value */
2549 iscsi_cmnd_reject(cmnd, -rc);
2553 TRACE_EXIT_RES(res);
/*
 * cmnd_rx_end() - RX completion for a fully received PDU.
 * Non-rejected commands are pushed through CmdSN ordering
 * (iscsi_session_push_cmnd); Data-Out completion is handled on elided
 * lines; rejected commands are released per reject_reason.
 * NOTE(review): numbered listing with gaps — breaks/labels elided.
 */
2557 void cmnd_rx_end(struct iscsi_cmnd *cmnd)
2561 TRACE_DBG("%p:%x", cmnd, cmnd_opcode(cmnd));
2563 if (unlikely(cmnd->rejected))
2567 switch (cmnd_opcode(cmnd)) {
2568 case ISCSI_OP_SCSI_CMD:
2569 case ISCSI_OP_NOOP_OUT:
2570 case ISCSI_OP_SCSI_TASK_MGT_MSG:
2571 case ISCSI_OP_LOGOUT_CMD:
2572 iscsi_session_push_cmnd(cmnd);
2574 case ISCSI_OP_SCSI_DATA_OUT:
2578 PRINT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
2579 req_cmnd_release(cmnd);
/* Reject path (label elided in listing) */
2588 switch (cmnd->reject_reason) {
2590 PRINT_ERROR("Unexpected reject reason %d",
2591 cmnd->reject_reason);
2593 case ISCSI_REJECT_CMD:
2594 case ISCSI_REJECT_DATA:
2595 req_cmnd_release(cmnd);
2597 case ISCSI_REJECT_SCSI_CMD:
2603 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
/*
 * iscsi_alloc_data_buf() - SCST callback: force non-SGV buffer allocation
 * for READ data when zero-copy completion notification is unavailable,
 * so the buffer cannot be recycled while sendpage() transmission is
 * still in flight (see the original comment below).
 */
2604 static int iscsi_alloc_data_buf(struct scst_cmd *cmd)
2607 * sock->ops->sendpage() is async zero copy operation,
2608 * so we must be sure not to free and reuse
2609 * the command's buffer before the sending was completed
2610 * by the network layers. It is possible only if we
2611 * don't use SGV cache.
2613 EXTRACHECKS_BUG_ON(!(scst_cmd_get_data_direction(cmd) & SCST_DATA_READ));
2614 scst_cmd_set_no_sgv(cmd);
/*
 * iscsi_set_state_wake_up() - set req->scst_state and wake the thread
 * blocked in wait_event() on scst_waitQ (see scsi_cmnd_start()).
 * cmnd_get_ordered() pins the command so the waiter cannot free it
 * between the state store and the wake_up(), as explained below.
 */
2619 static inline void iscsi_set_state_wake_up(struct iscsi_cmnd *req,
2623 * We use wait_event() to wait for the state change, but it checks its
2624 * condition without any protection, so without cmnd_get() it is
2625 * possible that req will die "immediately" after the state assignment
2626 * and wake_up() will operate on dead data. We use the ordered version
2627 * of cmnd_get(), because "get" must be done before the state
2630 cmnd_get_ordered(req);
2631 req->scst_state = new_state;
2632 wake_up(&req->scst_waitQ);
/*
 * iscsi_preprocessing_done() - SCST preprocessing_done callback.
 *
 * Moves the iSCSI request out of the RX_CMD state and wakes the thread
 * waiting for SCST's parse/preprocessing phase to finish.
 */
2637 static void iscsi_preprocessing_done(struct scst_cmd *scst_cmd)
2639 	struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2640 				scst_cmd_get_tgt_priv(scst_cmd);
2642 	TRACE_DBG("req %p", req);
/* Callback is only legal while the command is still in RX_CMD. */
2644 	EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_RX_CMD);
2646 	iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_AFTER_PREPROC);
/*
 * iscsi_try_local_processing() - opportunistically run the write path in
 * the caller's context instead of waking an iscsiwr thread.
 *
 * Claims the connection's write state under iscsi_wr_lock, sends while
 * the socket is writable, then either re-queues the connection on
 * iscsi_wr_list (more to send / send error) or returns it to IDLE.
 *
 * NOTE(review): some lines (default case, unlock before send, loop
 * structure) are elided in this view; comments cover visible code only.
 */
2653  * IMPORTANT! Connection conn must be protected by additional conn_get()
2654  * upon entrance in this function, because otherwise it could be destroyed
2655  * inside as a result of iscsi_send(), which releases sent commands.
2657 static void iscsi_try_local_processing(struct iscsi_conn *conn)
2663 	spin_lock_bh(&iscsi_wr_lock);
2664 	switch (conn->wr_state) {
2665 	case ISCSI_CONN_WR_STATE_IN_LIST:
		/* Steal the conn from the wr threads' queue. */
2666 		list_del(&conn->wr_list_entry);
2668 	case ISCSI_CONN_WR_STATE_IDLE:
2669 #ifdef CONFIG_SCST_EXTRACHECKS
		/* Record owner for debug-only exclusivity checks. */
2670 		conn->wr_task = current;
2672 		conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
2673 		conn->wr_space_ready = 0;
2680 	spin_unlock_bh(&iscsi_wr_lock);
/* Send outside the lock while the socket has space. */
2685 		if (test_write_ready(conn))
2686 			rc = iscsi_send(conn);
2688 	spin_lock_bh(&iscsi_wr_lock);
2689 #ifdef CONFIG_SCST_EXTRACHECKS
2690 	conn->wr_task = NULL;
/* Work remains (or send failed): hand the conn back to the wr threads. */
2692 	if ((rc <= 0) || test_write_ready(conn)) {
2693 		list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
2694 		conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
2695 		wake_up(&iscsi_wr_waitQ);
2697 		conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
2698 	spin_unlock_bh(&iscsi_wr_lock);
/*
 * iscsi_xmit_response() - SCST xmit_response callback: build and queue
 * the iSCSI response PDU(s) for a completed SCSI command.
 *
 * Handles abort delivery, Data-In + status vs. status-only responses,
 * and residual reporting, then kicks the write path locally via
 * iscsi_try_local_processing().
 *
 * Returns SCST_TGT_RES_SUCCESS, or SCST_TGT_RES_NEED_THREAD_CTX when
 * called in atomic context (this path may sleep).
 *
 * NOTE(review): several lines (braces, breaks, goto labels) are elided
 * in this view; comments describe only the visible code.
 */
2705 static int iscsi_xmit_response(struct scst_cmd *scst_cmd)
2707 	int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
2708 	struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2709 				scst_cmd_get_tgt_priv(scst_cmd);
2710 	struct iscsi_conn *conn = req->conn;
2711 	int status = scst_cmd_get_status(scst_cmd);
2712 	u8 *sense = scst_cmd_get_sense_buffer(scst_cmd);
2713 	int sense_len = scst_cmd_get_sense_buffer_len(scst_cmd);
2714 	int old_state = req->scst_state;
/* This function may sleep; ask SCST to retry in thread context. */
2716 	if (scst_cmd_atomic(scst_cmd))
2717 		return SCST_TGT_RES_NEED_THREAD_CTX;
/* From here on the scst_cmd is ours; detach it from the iSCSI req. */
2719 	scst_cmd_set_tgt_priv(scst_cmd, NULL);
2721 	req->tm_aborted |= scst_cmd_aborted(scst_cmd) ? 1 : 0;
2722 	if (unlikely(req->tm_aborted)) {
2723 		TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
2726 		scst_set_delivery_status(req->scst_cmd,
2727 			SCST_CMD_DELIVERY_ABORTED);
2729 		if (old_state == ISCSI_CMD_STATE_RESTARTED) {
2730 			req->scst_state = ISCSI_CMD_STATE_PROCESSED;
			/* Drop any pending R2T/data-out state as well. */
2731 			req_cmnd_release_force(req, ISCSI_FORCE_RELEASE_WRITE);
2733 			iscsi_set_state_wake_up(req,
2734 				ISCSI_CMD_STATE_PROCESSED);
/* Response arriving before RESTARTED means a preliminary completion. */
2739 	if (unlikely(old_state != ISCSI_CMD_STATE_RESTARTED)) {
2740 		TRACE_DBG("req %p on %d state", req, old_state);
2743 		 * We could preliminary have finished req before we knew its
2744 		 * device, so check if we return correct sense format.
2746 		scst_check_convert_sense(scst_cmd);
2748 		create_status_rsp(req, status, sense, sense_len);
2750 		switch (old_state) {
2751 		case ISCSI_CMD_STATE_RX_CMD:
2752 		case ISCSI_CMD_STATE_AFTER_PREPROC:
2758 		iscsi_set_state_wake_up(req, ISCSI_CMD_STATE_PROCESSED);
2762 	req->scst_state = ISCSI_CMD_STATE_PROCESSED;
/* Adopt SCST's response buffer for Data-In transfer. */
2764 	req->bufflen = scst_cmd_get_resp_data_len(scst_cmd);
2765 	req->sg = scst_cmd_get_sg(scst_cmd);
2766 	req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
2768 	TRACE_DBG("req %p, is_send_status=%x, req->bufflen=%d, req->sg=%p, "
2769 		"req->sg_cnt %d", req, is_send_status, req->bufflen, req->sg,
/* iSCSI can't send data without status; force a hardware-error sense. */
2772 	if (unlikely((req->bufflen != 0) && !is_send_status)) {
2773 		PRINT_CRIT_ERROR("%s", "Sending DATA without STATUS is "
2775 		scst_set_cmd_error(scst_cmd,
2776 			SCST_LOAD_SENSE(scst_sense_hardw_error));
2780 	if (req->bufflen != 0) {
2782 		 * Check above makes sure that is_send_status is set,
2783 		 * so status is valid here, but in future that could change.
		/*
		 * Piggy-back status on the final Data-In PDU when allowed
		 * (no CHECK CONDITION and not a bidirectional command).
		 */
2786 		if ((status != SAM_STAT_CHECK_CONDITION) &&
2787 		    ((cmnd_hdr(req)->flags & (ISCSI_CMD_WRITE|ISCSI_CMD_READ)) !=
2788 				(ISCSI_CMD_WRITE|ISCSI_CMD_READ))) {
2789 			send_data_rsp(req, status, is_send_status);
2791 			struct iscsi_cmnd *rsp;
			/* Otherwise send data, then a separate status RSP. */
2792 			send_data_rsp(req, 0, 0);
2793 			if (is_send_status) {
2794 				rsp = create_status_rsp(req, status, sense,
2796 				iscsi_set_resid(req, rsp, true);
2797 				iscsi_cmnd_init_write(rsp,
2798 					ISCSI_INIT_WRITE_REMOVE_HASH);
2801 	} else if (is_send_status) {
2802 		struct iscsi_cmnd *rsp;
2803 		rsp = create_status_rsp(req, status, sense, sense_len);
2804 		iscsi_set_resid(req, rsp, false);
2805 		iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_REMOVE_HASH);
2807 #ifdef CONFIG_SCST_EXTRACHECKS
2813 	 * "_ordered" here to protect from reorder, which can lead to
2814 	 * preliminary connection destroy in req_cmnd_release(). Just in
2815 	 * case, actually, because reordering shouldn't go so far, but who
	/* Hold the conn across release + local processing (see comment). */
2818 	conn_get_ordered(conn);
2819 	req_cmnd_release(req);
2820 	iscsi_try_local_processing(conn);
2824 	return SCST_TGT_RES_SUCCESS;
2827 /* Called under sn_lock */
/*
 * iscsi_is_delay_tm_resp() - decide whether a TM response must be held
 * back until all commands affected by the TM function have arrived,
 * i.e. until the session's exp_cmd_sn catches up with the TM CmdSN.
 *
 * Returns true if the response should be delayed.
 * NOTE(review): some lines (res init, immediate-cmd check) are elided.
 */
2828 static bool iscsi_is_delay_tm_resp(struct iscsi_cmnd *rsp)
2831 	struct iscsi_task_mgt_hdr *req_hdr =
2832 		(struct iscsi_task_mgt_hdr *)&rsp->parent_req->pdu.bhs;
2833 	int function = req_hdr->function & ISCSI_FUNCTION_MASK;
2834 	struct iscsi_session *sess = rsp->conn->session;
2838 	/* This should be checked for immediate TM commands as well */
	/* Delay while commands ordered before the TM CmdSN are outstanding. */
2842 		if (before(sess->exp_cmd_sn, req_hdr->cmd_sn))
2847 	TRACE_EXIT_RES(res);
2851 /* Called under sn_lock, but might drop it inside, then reaquire */
/*
 * iscsi_check_send_delayed_tm_resp() - if a previously delayed TM
 * response can now be sent (all affected commands received), clear
 * sess->tm_rsp, drop sn_lock, queue the response for write, and retake
 * sn_lock before returning.
 */
2852 static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess)
2854 	struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
/* Still waiting on affected commands: keep it parked. */
2861 	if (iscsi_is_delay_tm_resp(tm_rsp))
2864 	TRACE(TRACE_MGMT_MINOR, "Sending delayed rsp %p", tm_rsp);
2866 	sess->tm_rsp = NULL;
/* iscsi_cmnd_init_write() may sleep/queue; must not hold sn_lock. */
2869 	spin_unlock(&sess->sn_lock);
2871 	sBUG_ON(sess->tm_active < 0);
2873 	iscsi_cmnd_init_write(tm_rsp,
2874 		ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2876 	spin_lock(&sess->sn_lock);
/*
 * iscsi_send_task_mgmt_resp() - build the Task Management Function
 * Response PDU for @req with iSCSI response code @status, sending it
 * immediately or parking it in sess->tm_rsp if it must be delayed
 * until all affected commands have been received.
 *
 * NOTE(review): lines between the delayed and immediate paths (storing
 * rsp into sess->tm_rsp, out label) are elided in this view.
 */
2883 static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
2885 	struct iscsi_cmnd *rsp;
2886 	struct iscsi_task_mgt_hdr *req_hdr =
2887 		(struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
2888 	struct iscsi_task_rsp_hdr *rsp_hdr;
2889 	struct iscsi_session *sess = req->conn->session;
2890 	int fn = req_hdr->function & ISCSI_FUNCTION_MASK;
2894 	TRACE_MGMT_DBG("TM req %p finished", req);
2895 	TRACE((req_hdr->function == ISCSI_FUNCTION_ABORT_TASK) ?
2896 			 TRACE_MGMT_MINOR : TRACE_MGMT,
2897 		"TM fn %d finished, status %d", fn, status);
/* Build the TM response header. */
2899 	rsp = iscsi_cmnd_create_rsp_cmnd(req);
2900 	rsp_hdr = (struct iscsi_task_rsp_hdr *)&rsp->pdu.bhs;
2902 	rsp_hdr->opcode = ISCSI_OP_SCSI_TASK_MGT_RSP;
2903 	rsp_hdr->flags = ISCSI_FLG_FINAL;
2904 	rsp_hdr->itt = req_hdr->itt;
2905 	rsp_hdr->response = status;
/* Cold reset tears down every connection after the response is sent. */
2907 	if (fn == ISCSI_FUNCTION_TARGET_COLD_RESET) {
2908 		rsp->should_close_conn = 1;
2909 		rsp->should_close_all_conn = 1;
/* Only one TM response may be pending per session. */
2912 	sBUG_ON(sess->tm_rsp != NULL);
2914 	spin_lock(&sess->sn_lock);
2915 	if (iscsi_is_delay_tm_resp(rsp)) {
2916 		TRACE(TRACE_MGMT_MINOR, "Delaying TM fn %x response %p "
2917 			"(req %p), because not all affected commands received "
2918 			"(TM cmd sn %u, exp sn %u)",
2919 			req_hdr->function & ISCSI_FUNCTION_MASK, rsp, req,
2920 			req_hdr->cmd_sn, sess->exp_cmd_sn);
2922 		spin_unlock(&sess->sn_lock);
2926 	spin_unlock(&sess->sn_lock);
2928 	sBUG_ON(sess->tm_active < 0);
/* Immediate path: queue the response and wake the write machinery. */
2930 	iscsi_cmnd_init_write(rsp,
2931 		ISCSI_INIT_WRITE_REMOVE_HASH | ISCSI_INIT_WRITE_WAKE);
2934 	req_cmnd_release(req);
/*
 * iscsi_get_mgmt_response() - map an SCST management status code to the
 * corresponding iSCSI Task Management response code (RFC 3720 §10.6.1).
 */
2940 static inline int iscsi_get_mgmt_response(int status)
2943 	case SCST_MGMT_STATUS_SUCCESS:
2944 		return ISCSI_RESPONSE_FUNCTION_COMPLETE;
2946 	case SCST_MGMT_STATUS_TASK_NOT_EXIST:
2947 		return ISCSI_RESPONSE_UNKNOWN_TASK;
2949 	case SCST_MGMT_STATUS_LUN_NOT_EXIST:
2950 		return ISCSI_RESPONSE_UNKNOWN_LUN;
2952 	case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
2953 		return ISCSI_RESPONSE_FUNCTION_UNSUPPORTED;
/* Rejected, failed and any other status all map to "rejected". */
2955 	case SCST_MGMT_STATUS_REJECTED:
2956 	case SCST_MGMT_STATUS_FAILED:
2958 		return ISCSI_RESPONSE_FUNCTION_REJECTED;
/*
 * iscsi_task_mgmt_fn_done() - SCST task_mgmt_fn_done callback: translate
 * the SCST status and send the iSCSI TM response, except for internal
 * functions that have no initiator-visible response.
 */
2962 static void iscsi_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
2964 	int fn = scst_mgmt_cmd_get_fn(scst_mcmd);
2965 	struct iscsi_cmnd *req = (struct iscsi_cmnd *)
2966 				scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
2968 		iscsi_get_mgmt_response(scst_mgmt_cmd_get_status(scst_mcmd));
2970 	TRACE_MGMT_DBG("req %p, scst_mcmd %p, fn %d, scst status %d",
2971 		req, scst_mcmd, fn, scst_mgmt_cmd_get_status(scst_mcmd));
2974 	case SCST_NEXUS_LOSS_SESS:
2975 	case SCST_ABORT_ALL_TASKS_SESS:
2976 		/* They are internal */
/* Initiator-originated TM: send the response and detach the priv. */
2979 		iscsi_send_task_mgmt_resp(req, status);
2980 		scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
/*
 * iscsi_scsi_aen() - deliver a SCSI asynchronous event (e.g. unit
 * attention) to the initiator as an iSCSI Async Message PDU.
 *
 * Finds a live connection for the session, builds a fake request to
 * anchor the response, and attaches the sense data via a two-entry
 * scatterlist (sense header + sense bytes).
 *
 * Returns SCST_AEN_RES_SUCCESS, or SCST_AEN_RES_FAILED when no usable
 * connection or memory is available.
 *
 * NOTE(review): error-path labels and some braces are elided in this
 * view; comments describe only the visible code.
 */
2986 static int iscsi_scsi_aen(struct scst_aen *aen)
2988 	int res = SCST_AEN_RES_SUCCESS;
2989 	uint64_t lun = scst_aen_get_lun(aen);
2990 	const uint8_t *sense = scst_aen_get_sense(aen);
2991 	int sense_len = scst_aen_get_sense_len(aen);
2992 	struct iscsi_session *sess = scst_sess_get_tgt_priv(
2993 					scst_aen_get_sess(aen));
2994 	struct iscsi_conn *conn;
2996 	struct iscsi_cmnd *fake_req, *rsp;
2997 	struct iscsi_async_msg_hdr *rsp_hdr;
2998 	struct scatterlist *sg;
3002 	TRACE_MGMT_DBG("SCSI AEN to sess %p (initiator %s)", sess,
3003 		sess->initiator_name);
/* target_mutex guards the session's connection list. */
3005 	mutex_lock(&sess->target->target_mutex);
/* Prefer the newest connection that is alive and not being replaced. */
3008 	list_for_each_entry_reverse(conn, &sess->conn_list, conn_list_entry) {
3009 		if (!test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags) &&
3010 		    (conn->conn_reinst_successor == NULL)) {
3016 		TRACE_MGMT_DBG("Unable to find alive conn for sess %p", sess);
3020 	/* Create a fake request */
3021 	fake_req = cmnd_alloc(conn, NULL);
3022 	if (fake_req == NULL) {
3023 		PRINT_ERROR("%s", "Unable to alloc fake AEN request");
3027 	mutex_unlock(&sess->target->target_mutex);
3029 	rsp = iscsi_cmnd_create_rsp_cmnd(fake_req);
3031 		PRINT_ERROR("%s", "Unable to alloc AEN rsp");
3032 		goto out_err_free_req;
/* Parent req carries the AEN so its release can ack it to SCST. */
3035 	fake_req->scst_state = ISCSI_CMD_STATE_AEN;
3036 	fake_req->scst_aen = aen;
3038 	rsp_hdr = (struct iscsi_async_msg_hdr *)&rsp->pdu.bhs;
3040 	rsp_hdr->opcode = ISCSI_OP_ASYNC_MSG;
3041 	rsp_hdr->flags = ISCSI_FLG_FINAL;
3042 	rsp_hdr->lun = lun; /* it's already in SCSI form */
3043 	rsp_hdr->ffffffff = 0xffffffff;
3044 	rsp_hdr->async_event = ISCSI_ASYNC_SCSI;
/* Payload = 2-byte sense length header followed by the sense data. */
3046 	sg = rsp->sg = rsp->rsp_sg;
3050 	sg_init_table(sg, 2);
3051 	sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
3052 	sg_set_buf(&sg[1], sense, sense_len);
3054 	rsp->sense_hdr.length = cpu_to_be16(sense_len);
3055 	rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
3056 	rsp->bufflen = rsp->pdu.datasize;
3058 	iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_WAKE);
3060 	req_cmnd_release(fake_req);
3063 	TRACE_EXIT_RES(res);
/* Error paths: drop the fake request / unlock and report failure. */
3067 	req_cmnd_release(fake_req);
3070 	mutex_unlock(&sess->target->target_mutex);
3071 	res = SCST_AEN_RES_FAILED;
/*
 * iscsi_report_aen() - SCST report_aen callback: dispatch by AEN type.
 * Only SCSI AENs are handled; everything else is reported unsupported.
 */
3075 static int iscsi_report_aen(struct scst_aen *aen)
3078 	int event_fn = scst_aen_get_event_fn(aen);
3084 		res = iscsi_scsi_aen(aen);
3087 		TRACE_MGMT_DBG("Unsupported AEN %d", event_fn);
3088 		res = SCST_AEN_RES_NOT_SUPPORTED;
3092 	TRACE_EXIT_RES(res);
/* Mandatory SCST template hooks; bodies elided in this view —
 * presumably trivial stubs, TODO confirm against the full file. */
3096 static int iscsi_target_detect(struct scst_tgt_template *templ)
3102 static int iscsi_target_release(struct scst_tgt *scst_tgt)
/*
 * SCST target template for the iSCSI driver: wires the callbacks defined
 * above into the SCST core. xmit_response is non-atomic (it can sleep),
 * and alloc_data_buf is only needed when zero-copy completion
 * notification is unavailable (see iscsi_alloc_data_buf()).
 */
3108 struct scst_tgt_template iscsi_template = {
3110 	.sg_tablesize = 0xFFFF /* no limit */,
3113 	.xmit_response_atomic = 0,
3114 	.detect = iscsi_target_detect,
3115 	.release = iscsi_target_release,
3116 	.xmit_response = iscsi_xmit_response,
3117 #if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3118 	.alloc_data_buf = iscsi_alloc_data_buf,
3120 	.preprocessing_done = iscsi_preprocessing_done,
3121 	.pre_exec = iscsi_pre_exec,
3122 	.task_mgmt_affected_cmds_done = iscsi_task_mgmt_affected_cmds_done,
3123 	.task_mgmt_fn_done = iscsi_task_mgmt_fn_done,
3124 	.report_aen = iscsi_report_aen,
/*
 * iscsi_run_threads() - spawn @count kernel threads named "<name>N"
 * running @fn, tracking each on iscsi_threads_list so that
 * iscsi_stop_threads() can stop them later.
 *
 * Returns 0 on success or a negative errno (elided error-exit lines
 * presumably unwind via iscsi_stop_threads() — TODO confirm).
 */
3127 static __init int iscsi_run_threads(int count, char *name, int (*fn)(void *))
3131 	struct iscsi_thread_t *thr;
3133 	for (i = 0; i < count; i++) {
3134 		thr = kmalloc(sizeof(*thr), GFP_KERNEL);
3137 			PRINT_ERROR("Failed to allocate thr %d", res);
3140 		thr->thr = kthread_run(fn, NULL, "%s%d", name, i);
3141 		if (IS_ERR(thr->thr)) {
3142 			res = PTR_ERR(thr->thr);
3143 			PRINT_ERROR("kthread_create() failed: %d", res);
/* Track the thread so it can be stopped on module unload. */
3147 		list_add_tail(&thr->threads_list_entry, &iscsi_threads_list);
/*
 * iscsi_stop_threads() - stop and unlink every thread previously started
 * by iscsi_run_threads(); kthread_stop() blocks until each thread exits.
 */
3154 static void iscsi_stop_threads(void)
3156 	struct iscsi_thread_t *t, *tmp;
3158 	list_for_each_entry_safe(t, tmp, &iscsi_threads_list,
3159 				threads_list_entry) {
3160 		int rc = kthread_stop(t->thr);
3162 			TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
3163 		list_del(&t->threads_list_entry);
/*
 * iscsi_init() - module init: allocate the dummy page/SG entry, install
 * zero-copy page callbacks (when available), register the control char
 * device, create the command slab cache, register the SCST target
 * template, set up procfs, and start the reader/writer thread pools.
 *
 * Unwinds in reverse order via the goto-cleanup chain at the bottom.
 * NOTE(review): several labels and intermediate lines are elided in
 * this view; comments describe only the visible code.
 */
3169 static int __init iscsi_init(void)
3174 	PRINT_INFO("iSCSI SCST Target - version %s", ISCSI_VERSION_STRING);
/* Dummy page/SG used as a substitute buffer elsewhere in the driver. */
3176 	dummy_page = alloc_pages(GFP_KERNEL, 0);
3177 	if (dummy_page == NULL) {
3178 		PRINT_ERROR("%s", "Dummy page allocation failed");
3182 	sg_init_table(&dummy_sg, 1);
3183 	sg_set_page(&dummy_sg, dummy_page, PAGE_SIZE, 0);
3185 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3186 	err = net_set_get_put_page_callbacks(iscsi_get_page_callback,
3187 			iscsi_put_page_callback);
3189 		PRINT_INFO("Unable to set page callbackes: %d", err);
3190 		goto out_free_dummy;
3194 		"CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION "
3195 		"not enabled in your kernel. ISCSI-SCST will be working with "
3196 		"not the best performance. Refer README file for details.");
/* Control device used by the user-space iscsi-scstd daemon. */
3199 	ctr_major = register_chrdev(0, ctr_name, &ctr_fops);
3200 	if (ctr_major < 0) {
3201 		PRINT_ERROR("failed to register the control device %d",
3211 	iscsi_cmnd_cache = KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS);
3212 	if (!iscsi_cmnd_cache) {
3217 	err = scst_register_target_template(&iscsi_template);
3221 	iscsi_template_registered = 1;
3223 	err = iscsi_procfs_init();
/* At least two threads per pool, scaled with online CPUs. */
3227 	num = max((int)num_online_cpus(), 2);
3229 	err = iscsi_run_threads(num, "iscsird", istrd);
3233 	err = iscsi_run_threads(num, "iscsiwr", istwr);
/* Error unwinding, reverse order of the setup above. */
3241 	iscsi_procfs_exit();
3242 	iscsi_stop_threads();
3245 	scst_unregister_target_template(&iscsi_template);
3248 	kmem_cache_destroy(iscsi_cmnd_cache);
3254 	unregister_chrdev(ctr_major, ctr_name);
3257 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3258 	net_set_get_put_page_callbacks(NULL, NULL);
3262 	__free_pages(dummy_page, 0);
/*
 * iscsi_exit() - module exit: tear down everything iscsi_init() set up,
 * in reverse order of initialization.
 */
3266 static void __exit iscsi_exit(void)
3268 	iscsi_stop_threads();
3270 	unregister_chrdev(ctr_major, ctr_name);
3272 	iscsi_procfs_exit();
3275 	kmem_cache_destroy(iscsi_cmnd_cache);
3277 	scst_unregister_target_template(&iscsi_template);
3279 #if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
3280 	net_set_get_put_page_callbacks(NULL, NULL);
3283 	__free_pages(dummy_page, 0);
/* Module registration: entry/exit points and license declaration. */
3287 module_init(iscsi_init);
3288 module_exit(iscsi_exit);
3290 MODULE_LICENSE("GPL");