 *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "scst.h"
#include "scst_priv.h"
static void scst_cmd_set_sn(struct scst_cmd *cmd);
static int __scst_init_cmd(struct scst_cmd *cmd);
static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
	uint64_t tag);
static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries);

static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
{
	struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
	unsigned long flags;

	spin_lock_irqsave(&t->tasklet_lock, flags);
	TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
		smp_processor_id());
	list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
	spin_unlock_irqrestore(&t->tasklet_lock, flags);

	tasklet_schedule(&t->tasklet);
}
/*
 * Must not be called in parallel with scst_unregister_session_ex() for the
 * same session.
 */
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
	const uint8_t *lun, int lun_len,
	const uint8_t *cdb, int cdb_len, int atomic)
{
	struct scst_cmd *cmd;

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
		PRINT_CRIT_ERROR("%s",
			"New cmd while shutting down the session");
		sBUG();
	}
#endif

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(cmd == NULL))
		goto out;

	cmd->tgtt = sess->tgt->tgtt;

	/*
	 * For both a wrong LUN and a wrong CDB, defer the error reporting
	 * to scst_cmd_init_done().
	 */

	cmd->lun = scst_unpack_lun(lun, lun_len);

	if (cdb_len <= SCST_MAX_CDB_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;
	}

	TRACE_DBG("cmd %p, sess %p", cmd, sess);

out:
	return cmd;
}
EXPORT_SYMBOL(scst_rx_cmd);
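/*
 * Editor's illustrative sketch (not part of the original file): a target
 * driver typically pairs scst_rx_cmd() with scst_cmd_init_done() roughly
 * as below; my_tgt_rx_pdu() and its parameters are hypothetical.
 *
 *	static void my_tgt_rx_pdu(struct scst_session *sess, uint64_t tag,
 *		const uint8_t *lun_buf, int lun_len,
 *		const uint8_t *cdb, int cdb_len)
 *	{
 *		struct scst_cmd *cmd;
 *
 *		cmd = scst_rx_cmd(sess, lun_buf, lun_len, cdb, cdb_len,
 *			in_irq());
 *		if (cmd == NULL)
 *			return;	/* out of memory: drop or requeue the PDU */
 *		scst_cmd_set_tag(cmd, tag);
 *		scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
 *	}
 */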
/*
 * No locks, but might be called in IRQ context. Returns 0 on success, <0 if
 * processing of this command should be stopped.
 */
static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
{
	int rc, res = 0;

	/* See the comment in scst_do_job_init() */
	if (unlikely(!list_empty(&scst_init_cmd_list))) {
		TRACE_MGMT_DBG("%s", "init cmd list busy");
		goto out_redirect;
	}
	/*
	 * A memory barrier isn't necessary here, because the CPU appears to
	 * be self-consistent and we don't care about the race described in
	 * the comment in scst_do_job_init().
	 */

	rc = __scst_init_cmd(cmd);
	if (unlikely(rc > 0))
		goto out_redirect;
	else if (unlikely(rc != 0))
		goto out;

	/* Small context optimization */
	if (((*context == SCST_CONTEXT_TASKLET) ||
	     (*context == SCST_CONTEXT_DIRECT_ATOMIC) ||
	     ((*context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd))) &&
	    scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_data_direction == SCST_DATA_WRITE) {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		} else {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		}
	}

out:
	return res;

out_redirect:
	if (cmd->preprocessing_only) {
		/*
		 * Poor man's solution for single-threaded targets, where
		 * blocking the receiver at least sometimes means blocking
		 * everything.
		 */
		sBUG_ON(*context != SCST_CONTEXT_DIRECT);
		scst_set_busy(cmd);
		scst_set_cmd_abnormal_done_state(cmd);
		/* Keep the initiator away from too many BUSY commands */
		msleep(50);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&scst_init_lock, flags);
		TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
			"%d)", cmd, atomic_read(&scst_cmd_count));
		list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
		if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			scst_init_poll_cnt++;
		spin_unlock_irqrestore(&scst_init_lock, flags);
		wake_up(&scst_init_cmd_list_waitQ);
		res = -1;
	}
	goto out;
}
#ifdef CONFIG_SCST_MEASURE_LATENCY
static inline uint64_t scst_sec_to_nsec(time_t sec)
{
	return (uint64_t)sec * 1000000000;
}
#endif
void scst_cmd_init_done(struct scst_cmd *cmd,
	enum scst_exec_context pref_context)
{
	unsigned long flags;
	struct scst_session *sess = cmd->sess;
	int rc;

#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;

		getnstimeofday(&ts);
		cmd->start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, sess, cmd->start, ts.tv_sec,
			ts.tv_nsec);
	}
#endif

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
		"(cmd %p)", (long long unsigned int)cmd->tag,
		(long long unsigned int)cmd->lun, cmd->cdb_len,
		cmd->queue_type, cmd);
	PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely((in_irq() || irqs_disabled())) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif
	atomic_inc(&sess->sess_cmd_count);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		/*
		 * We must always keep commands in the search list from the
		 * very beginning, because otherwise they can be missed during
		 * TM processing. This check is needed because there might be
		 * old (i.e. deferred) commands and new (i.e. just arriving)
		 * ones.
		 */
		if (cmd->search_cmd_list_entry.next == NULL)
			list_add_tail(&cmd->search_cmd_list_entry,
				&sess->search_cmd_list);
		switch (sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
			break;
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list",
				cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			goto out;
		case SCST_SESS_IPH_FAILED:
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			scst_set_busy(cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			goto active;
		default:
			sBUG();
		}
	} else
		list_add_tail(&cmd->search_cmd_list_entry,
			&sess->search_cmd_list);

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);

	if (unlikely(cmd->lun == NO_SUCH_LUN)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
		PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_message));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}
	cmd->state = SCST_CMD_STATE_INIT;
	/* Cmd must be initialized here to preserve the processing order */
	rc = scst_init_cmd(cmd, &pref_context);
	if (unlikely(rc < 0))
		goto out;

active:
	/* Here cmd must not be in any cmd list, no locks */
	switch (pref_context) {
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet(cmd);
		break;

	case SCST_CONTEXT_DIRECT:
		scst_process_active_cmd(cmd, false);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	default:
		PRINT_ERROR("Context %x is undefined, using the thread one",
			pref_context);
		/* fall through */
	case SCST_CONTEXT_THREAD:
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;
	}

out:
	return;
}
EXPORT_SYMBOL(scst_cmd_init_done);
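/*
 * Editor's note on the preferred contexts dispatched above:
 * SCST_CONTEXT_DIRECT processes the command on the caller's stack in
 * process context, SCST_CONTEXT_DIRECT_ATOMIC does the same in atomic
 * context, SCST_CONTEXT_TASKLET defers to a per-CPU tasklet,
 * SCST_CONTEXT_THREAD queues the command to the active cmd list for the
 * SCST threads, and SCST_CONTEXT_SAME keeps the caller's current context.
 */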
static int scst_pre_parse(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_device *dev = cmd->dev;
	int rc;

	cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
	    (!dev->has_own_order_mgmt &&
	     (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
	      cmd->queue_type == SCST_CMD_QUEUE_ORDERED));

	/*
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from the
	 * CDB. Additionally, not all transports support supplying the
	 * expected transfer data.
	 */

	rc = scst_get_cdb_info(cmd);
	if (unlikely(rc != 0)) {
		if (rc > 0) {
			PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
			goto out_xmit;
		}
		PRINT_ERROR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_SCSI, "Using initiator supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;
			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore the (likely) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_opcode));
				goto out_xmit;
			}
		} else {
			PRINT_ERROR("Unknown opcode 0x%02x for %s, and "
				"target %s did not supply expected values",
				cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_invalid_opcode));
			goto out_xmit;
		}
#else
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		goto out_xmit;
#endif
	}

	TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
		"(expected %d, set %s), transfer_len=%d (expected "
		"len %d), flags=%d", cmd->op_name, cmd,
		cmd->data_direction, cmd->expected_data_direction,
		scst_cmd_is_expected_set(cmd) ? "yes" : "no",
		cmd->bufflen, cmd->expected_transfer_len,
		cmd->op_flags);

	if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
		if (scst_cmd_is_expected_set(cmd)) {
			/*
			 * The command data length can't be easily determined
			 * from the CDB. ToDo: processing of all such commands
			 * should be fixed. Until then, get the length from
			 * the supplied expected value, but limit it to some
			 * reasonable value (15MB).
			 */
			cmd->bufflen = min(cmd->expected_transfer_len,
					   15*1024*1024);
			cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
		}
	}
	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR("NACA bit in the CDB control byte is not "
			"supported (opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	cmd->state = SCST_CMD_STATE_DEV_PARSE;

out:
	TRACE_EXIT_HRES(res);
	return res;

out_xmit:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
{
	bool res = false;

	switch (cmd->cdb[0]) {
	case TEST_UNIT_READY:
		/* Crazy VMware people sometimes do TUR with READ direction */
		res = true;
		break;
	case VERIFY:
	case VERIFY_6:
	case VERIFY_12:
	case VERIFY_16:
		/* VERIFY commands with BYTCHK unset shouldn't fail here */
		if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
		    (cmd->cdb[1] & BYTCHK) == 0)
			res = true;
		break;
	}

	return res;
}
#endif
static int scst_parse_cmd(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int state;
	struct scst_device *dev = cmd->dev;
	int orig_bufflen = cmd->bufflen;

	if (likely(!scst_is_cmd_local(cmd))) {
		if (unlikely(!dev->handler->parse_atomic &&
			     scst_cmd_atomic(cmd))) {
			/*
			 * It shouldn't be, because of the
			 * SCST_TGT_DEV_AFTER_* optimization.
			 */
			TRACE_DBG("Dev handler %s parse() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
			cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd);
		/* Caution: cmd can be already dead here */
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

		switch (state) {
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			TRACE_DBG("Dev handler %s parse() requested thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		case SCST_CMD_STATE_STOP:
			TRACE_DBG("Dev handler %s parse() requested stop "
				"processing", dev->handler->name);
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			goto out;
		}

		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;
	} else
		state = SCST_CMD_STATE_PREPARE_SPACE;
	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;

	if (cmd->bufflen == 0) {
		/*
		 * According to SPC, bufflen 0 for data transfer commands
		 * isn't an error, so we need to fix the transfer direction.
		 */
		cmd->data_direction = SCST_DATA_NONE;
	}

	if (cmd->dh_data_buf_alloced &&
	    unlikely((orig_bufflen > cmd->bufflen))) {
		PRINT_ERROR("Dev handler supplied data buffer (size %d) is "
			"less than required (size %d)", cmd->bufflen,
			orig_bufflen);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}

	if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
		goto out;

	if (unlikely((cmd->bufflen == 0) &&
		     (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
		PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
			"(handler %s, target %s)", cmd->cdb[0],
			dev->handler->name, cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((cmd->bufflen != 0) &&
	    ((cmd->data_direction == SCST_DATA_NONE) ||
	     ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
		PRINT_ERROR("Dev handler %s parse() returned "
			"invalid cmd data_direction %d, bufflen %d, state %d "
			"or sg %p (opcode 0x%x)", dev->handler->name,
			cmd->data_direction, cmd->bufflen, state, cmd->sg,
			cmd->cdb[0]);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}
#endif
	if (scst_cmd_is_expected_set(cmd)) {
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
# ifdef CONFIG_SCST_EXTRACHECKS
		if ((cmd->data_direction != cmd->expected_data_direction) ||
		    (cmd->bufflen != cmd->expected_transfer_len)) {
			PRINT_WARNING("Expected values don't match decoded "
				"ones: data_direction %d, "
				"expected_data_direction %d, "
				"bufflen %d, expected_transfer_len %d",
				cmd->data_direction,
				cmd->expected_data_direction,
				cmd->bufflen, cmd->expected_transfer_len);
			PRINT_BUFFER("Suspicious CDB", cmd->cdb, cmd->cdb_len);
		}
# endif
		cmd->data_direction = cmd->expected_data_direction;
		cmd->bufflen = cmd->expected_transfer_len;
#else
		if (unlikely(cmd->data_direction !=
				cmd->expected_data_direction)) {
			if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
			     (cmd->bufflen != 0)) &&
			    !scst_is_allowed_to_mismatch_cmd(cmd)) {
				PRINT_ERROR("Expected data direction %d for "
					"opcode 0x%02x (handler %s, target %s) "
					"doesn't match decoded value %d",
					cmd->expected_data_direction,
					cmd->cdb[0], dev->handler->name,
					cmd->tgtt->name, cmd->data_direction);
				PRINT_BUFFER("Failed CDB",
					cmd->cdb, cmd->cdb_len);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_message));
				goto out_dev_done;
			}
		}
		if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
			TRACE(TRACE_MGMT_MINOR, "Warning: expected "
				"transfer length %d for opcode 0x%02x "
				"(handler %s, target %s) doesn't match "
				"decoded value %d. Faulty initiator "
				"(e.g. VMware is known to be one), or "
				"should scst_scsi_op_table be updated?",
				cmd->expected_transfer_len, cmd->cdb[0],
				dev->handler->name, cmd->tgtt->name,
				cmd->bufflen);
			PRINT_BUFF_FLAG(TRACE_MGMT_MINOR, "Suspicious CDB",
				cmd->cdb, cmd->cdb_len);
		}
#endif
	}

	if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
		PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
			"target %s", cmd->cdb[0], dev->handler->name,
			cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}

	switch (state) {
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		cmd->state = state;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s parse() returned "
				"invalid cmd state %d (opcode %d)",
				dev->handler->name, state, cmd->cdb[0]);
		} else {
			PRINT_ERROR("Dev handler %s parse() returned "
				"error %d (opcode %d)", dev->handler->name,
				state, cmd->cdb[0]);
		}
		goto out_error;
	}

	if (cmd->resp_data_len == -1) {
		if (cmd->data_direction == SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
		else
			cmd->resp_data_len = 0;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;
out_error:
	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
out_dev_done:
#endif
	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
static int scst_prepare_space(struct scst_cmd *cmd)
{
	int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;

	if (cmd->data_direction == SCST_DATA_NONE)
		goto prep_done;

	if (cmd->tgt_need_alloc_data_buf) {
		int orig_bufflen = cmd->bufflen;

		TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
			cmd);

		r = cmd->tgtt->alloc_data_buf(cmd);
		if (r > 0)
			goto alloc;
		else if (r == 0) {
			if (unlikely(cmd->bufflen == 0)) {
				/* See comment in scst_alloc_space() */
				if (cmd->sg == NULL)
					goto alloc;
			}

			cmd->tgt_data_buf_alloced = 1;

			if (unlikely(orig_bufflen < cmd->bufflen)) {
				PRINT_ERROR("Target driver allocated data "
					"buffer (size %d) is less than "
					"required (size %d)", orig_bufflen,
					cmd->bufflen);
				goto out_error;
			}
			TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
		} else
			goto check;
	}

alloc:
	if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		r = scst_alloc_space(cmd);
		cmd->tgt_sg = cmd->sg;
		cmd->tgt_sg_cnt = cmd->sg_cnt;
	} else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
		TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
		cmd->tgt_sg = cmd->sg;
		cmd->tgt_sg_cnt = cmd->sg_cnt;
	} else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
		cmd->sg = cmd->tgt_sg;
		cmd->sg_cnt = cmd->tgt_sg_cnt;
	} else {
		TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
			"sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
			cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
	}
check:
	if (r != 0) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MEM("%s", "Atomic memory allocation failed, "
				"rescheduling to the thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		} else
			goto out_no_space;
	}

prep_done:
	if (cmd->preprocessing_only) {
		if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
			TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
				"cmd %p", cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			res = SCST_CMD_STATE_RES_CONT_SAME;
			goto out;
		}

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;

		TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
		cmd->tgtt->preprocessing_done(cmd);
		TRACE_DBG("%s", "preprocessing_done() returned");
		goto out;
	}

	switch (cmd->data_direction) {
	case SCST_DATA_WRITE:
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
		break;
	default:
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		break;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_no_space:
	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
	scst_set_busy(cmd);
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
void scst_restart_cmd(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE_DBG("tag=%llu, status=%#x",
		(long long unsigned int)scst_cmd_get_tag(cmd),
		status);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_PREPROCESS_STATUS_SUCCESS:
		switch (cmd->data_direction) {
		case SCST_DATA_WRITE:
			cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
			break;
		default:
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
			break;
		}
		if (cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (cmd->data_direction == SCST_DATA_WRITE) {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			} else {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			}
		}
		break;

	case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_PREPROCESS_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* fall through */
	case SCST_PREPROCESS_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("%s() received unknown status %x", __func__,
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_proccess_redirect_cmd(cmd, pref_context, 1);
	return;
}
EXPORT_SYMBOL(scst_restart_cmd);
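/*
 * Editor's illustrative sketch (not part of the original file): a target
 * driver that set cmd->preprocessing_only resumes the command from its
 * driver-specific completion path with scst_restart_cmd(); the my_tgt_*
 * name is hypothetical:
 *
 *	static void my_tgt_preprocess_done(struct scst_cmd *cmd, int err)
 *	{
 *		scst_restart_cmd(cmd, err == 0 ?
 *			SCST_PREPROCESS_STATUS_SUCCESS :
 *			SCST_PREPROCESS_STATUS_ERROR,
 *			SCST_CONTEXT_THREAD);
 *	}
 */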
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
	struct scst_tgt *tgt = cmd->sess->tgt;
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_cmds++;
	/*
	 * A memory barrier is needed here, because we need the exact
	 * ordering of the reads and writes of retry_cmds and finished_cmds,
	 * so we do not miss the case when a command finishes while we are
	 * queuing it for retry after the finished_cmds check.
	 */
	smp_mb();
	TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
		tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		tgt->retry_cmds--;
		TRACE_RETRY("Some command(s) finished, direct retry "
			"(finished_cmds=%d, tgt->finished_cmds=%d, "
			"retry_cmds=%d)", finished_cmds,
			atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
		res = -1;
		goto out_unlock_tgt;
	}

	TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	}

out_unlock_tgt:
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
	return res;
}
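/*
 * Editor's note: the retry machinery above parks a cmd on
 * tgt->retry_cmd_list when the target reports QUEUE FULL. The cmd is
 * retried either immediately, if another command finished between the
 * caller's finished_cmds snapshot and the check here, or later, when the
 * retry timer fires after SCST_TGT_RETRY_TIMEOUT jiffies.
 */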
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
	int res, rc;

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_dev_done;
	}

	if ((cmd->tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	if (unlikely(!cmd->tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
		/*
		 * It shouldn't be, because of the SCST_TGT_DEV_AFTER_*
		 * optimization.
		 */
		TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	while (1) {
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_DATA_WAIT;

		TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef CONFIG_SCST_DEBUG_RETRY
		if ((scst_random() % 100) == 75)
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->rdy_to_xfer(cmd);
		TRACE_DBG("rdy_to_xfer() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				goto out;
			else
				break;

		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s "
				"rdy_to_xfer() requested thread "
				"context, rescheduling", cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		default:
			goto out_error_rc;
		}
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_error_rc:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

out_dev_done:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
/* No locks, but might be called in IRQ context */
static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries)
{
	unsigned long flags;

	TRACE_DBG("Context: %x", context);

	if (context == SCST_CONTEXT_SAME)
		context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
						 SCST_CONTEXT_DIRECT;

	switch (context) {
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		break;

	case SCST_CONTEXT_DIRECT:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		scst_process_active_cmd(cmd, false);
		break;

	default:
		PRINT_ERROR("Context %x is unknown, using the thread one",
			context);
		/* fall through */
	case SCST_CONTEXT_THREAD:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;

	case SCST_CONTEXT_TASKLET:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		scst_schedule_tasklet(cmd);
		break;
	}
}
void scst_rx_data(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_RX_STATUS_SUCCESS:
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
		if (trace_flag & TRACE_RCV_BOT) {
			int i;
			struct scatterlist *sg = cmd->tgt_sg;
			TRACE_RECV_BOT("RX data for cmd %p "
				"(sg_cnt %d, sg %p, sg[0].page %p)", cmd,
				cmd->tgt_sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
				PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
					sg_virt(&sg[i]), sg[i].length);
			}
		}
#endif
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				pref_context = SCST_CONTEXT_THREAD;
		}
		break;

	case SCST_RX_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* fall through */
	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("scst_rx_data() received unknown status %x",
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_proccess_redirect_cmd(cmd, pref_context, 1);
	return;
}
EXPORT_SYMBOL(scst_rx_data);
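/*
 * Editor's illustrative sketch (not part of the original file): a target
 * driver's rdy_to_xfer() starts the WRITE data transfer and, once the
 * data has arrived, hands it back with scst_rx_data();
 * my_tgt_write_done() is a hypothetical completion callback:
 *
 *	static void my_tgt_write_done(struct scst_cmd *cmd, int err)
 *	{
 *		scst_rx_data(cmd, err ? SCST_RX_STATUS_ERROR :
 *			SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_THREAD);
 *	}
 */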
static int scst_tgt_pre_exec(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME, rc;

	cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;

	if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
		goto out;

	TRACE_DBG("Calling pre_exec(%p)", cmd);
	rc = cmd->tgtt->pre_exec(cmd);
	TRACE_DBG("pre_exec() returned %d", rc);

	if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
		switch (rc) {
		case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_ERROR_FATAL:
			set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
			/* fall through */
		case SCST_PREPROCESS_STATUS_ERROR:
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_NEED_THREAD:
			TRACE_DBG("Target driver's %s pre_exec() requested "
				"thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
			break;
		default:
			sBUG();
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int resid)
{
#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;

		getnstimeofday(&ts);
		cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	cmd->status = result & 0xff;
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	if (unlikely(resid != 0)) {
#ifdef CONFIG_SCST_EXTRACHECKS
		if ((resid < 0) || (resid > cmd->resp_data_len)) {
			PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
				"op %x)", resid, cmd->resp_data_len,
				cmd->cdb[0]);
		} else
#endif
			scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
	}

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
		/* We might have a double reset UA here */
		cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
		cmd->dbl_ua_orig_data_direction = cmd->data_direction;

		scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
	}

	TRACE(TRACE_SCSI, "cmd %p, result=%x, cmd->status=%x, resid=%d, "
		"cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x (cmd %p)", cmd, result, cmd->status, resid,
		cmd->msg_status, cmd->host_status, cmd->driver_status, cmd);

	cmd->completed = 1;
}
/* For small context optimization */
static inline enum scst_exec_context scst_optimize_post_exec_context(
	struct scst_cmd *cmd, enum scst_exec_context context)
{
	if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
	    (context == SCST_CONTEXT_TASKLET) ||
	    (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
		if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
				&cmd->tgt_dev->tgt_dev_flags))
			context = SCST_CONTEXT_THREAD;
	}
	return context;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
	struct scsi_request **req)
{
	struct scst_cmd *cmd = NULL;

	if (scsi_cmd && (*req = scsi_cmd->sc_request))
		cmd = (struct scst_cmd *)(*req)->upper_private_data;

	if (cmd == NULL) {
		PRINT_ERROR("%s", "Request with NULL cmd");
		if (*req)
			scsi_release_request(*req);
	}

	return cmd;
}

static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;

	cmd = scst_get_cmd(scsi_cmd, &req);
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), scsi_cmd->resid);

	/* Clear out the request structure */
	req->sr_use_sg = 0;
	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown the request blk */

	scst_release_request(cmd);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()),
		0);

out:
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_cmd_done(void *data, char *sense, int result, int resid)
{
	struct scst_cmd *cmd;

	cmd = (struct scst_cmd *)data;
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, result, sense, SCST_SENSE_BUFFERSIZE, resid);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()),
		0);

out:
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
	enum scst_exec_context pref_context)
{
#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;

		getnstimeofday(&ts);
		cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_PRE_DEV_DONE;

#if defined(CONFIG_SCST_DEBUG)
	if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
		if (trace_flag & TRACE_RCV_TOP) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
				"%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_RCV_TOP,
					"Exec'd sg", sg_virt(&sg[i]),
					sg[i].length);
			}
		}
	}
#endif

	cmd->state = next_state;

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED)) {
		PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
			__func__, next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
	}
#endif

	pref_context = scst_optimize_post_exec_context(cmd, pref_context);
	scst_proccess_redirect_cmd(cmd, pref_context, 0);
}
static int scst_report_luns_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_COMPLETED, rc;
	int dev_cnt = 0;
	int buffer_size;
	int i;
	struct scst_tgt_dev *tgt_dev = NULL;
	uint8_t *buffer;
	int offs, overflow = 0;

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	cmd->status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
		PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
			"LUNS command", cmd->cdb[2]);
		goto out_err;
	}

	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	if (buffer_size < 16)
		goto out_put_err;

	memset(buffer, 0, buffer_size);
	offs = 8;

	/* sess->sess_tgt_dev_list_hash is protected by suspended activity */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (!overflow) {
				if (offs >= buffer_size) {
					scst_put_buf(cmd, buffer);
					buffer_size = scst_get_buf_next(cmd,
						&buffer);
					if (buffer_size > 0) {
						memset(buffer, 0, buffer_size);
						offs = 0;
					} else {
						overflow = 1;
						goto inc_dev_cnt;
					}
				}
				if ((buffer_size - offs) < 8) {
					PRINT_ERROR("Buffer allocated for "
						"REPORT LUNS command doesn't "
						"have room for an 8-byte "
						"entry (buffer_size=%d)",
						buffer_size);
					goto out_put_hw_err;
				}
				buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
				buffer[offs+1] = tgt_dev->lun & 0xff;
				offs += 8;
			}
inc_dev_cnt:
			dev_cnt++;
		}
	}
	if (!overflow)
		scst_put_buf(cmd, buffer);

	/* Set the response header */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	dev_cnt *= 8;
	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;

	scst_put_buf(cmd, buffer);

	dev_cnt += 8;
	if (dev_cnt < cmd->resp_data_len)
		scst_set_resp_data_len(cmd, dev_cnt);

out_compl:
	cmd->completed = 1;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);

	TRACE_EXIT_RES(res);
	return SCST_EXEC_COMPLETED;

out_put_err:
	scst_put_buf(cmd, buffer);

out_err:
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	goto out_compl;

out_put_hw_err:
	scst_put_buf(cmd, buffer);

out_hw_err:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_compl;
}
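/*
 * Editor's note on the REPORT LUNS parameter data built above (per SPC):
 * bytes 0-3 carry the LUN list length in bytes (dev_cnt here), bytes 4-7
 * are reserved, and each LUN follows as an 8-byte entry starting at
 * offset 8, of which this code fills the two address bytes:
 *
 *	buffer[offs]   = (lun >> 8) & 0xff;
 *	buffer[offs+1] = lun & 0xff;
 *	// bytes 2-7 of the entry stay zero (cleared by the memset)
 */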
static int scst_pre_select(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	scst_block_dev_cmd(cmd, 1);

	/* The check for local events will be done when the cmd is executed */

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_reserve_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%lld)", (long long unsigned int)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_done;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		spin_unlock_bh(&dev->dev_lock);
		scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
		goto out_done;
	}

	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
	}
	dev->dev_reserved = 1;

	spin_unlock_bh(&dev->dev_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_release_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	/*
	 * The device could be RELEASED behind us, if the RESERVING session
	 * is closed (see scst_free_tgt_dev()), but this actually doesn't
	 * matter, so take the lock and don't retest the DEV_RESERVED bits
	 * again.
	 */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;
		cmd->status = 0;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;
	} else {
		list_for_each_entry(tgt_dev_tmp,
				&dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
	}

	spin_unlock_bh(&dev->dev_lock);

	if (res == SCST_EXEC_COMPLETED)
		goto out_done;

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out;
}
/* No locks; no IRQ or IRQ-safe context allowed */
int scst_check_local_events(struct scst_cmd *cmd)
{
	int res, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;

	/*
	 * There's no race here, because we need to trace commands sent
	 * *after* the dev_double_ua_possible flag was set.
	 */
	if (unlikely(dev->dev_double_ua_possible))
		cmd->double_ua_possible = 1;

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_uncomplete;
	}

	/* The reservation check comes before the Unit Attention check */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
			&tgt_dev->tgt_dev_flags))) {
		if (cmd->cdb[0] != INQUIRY &&
		    cmd->cdb[0] != REPORT_LUNS &&
		    cmd->cdb[0] != RELEASE &&
		    cmd->cdb[0] != RELEASE_10 &&
		    cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER &&
		    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL ||
		     (cmd->cdb[4] & 3)) &&
		    cmd->cdb[0] != LOG_SENSE &&
		    cmd->cdb[0] != REQUEST_SENSE) {
			scst_set_cmd_error_status(cmd,
				SAM_STAT_RESERVATION_CONFLICT);
			goto out_complete;
		}
	}

	/* If we had an internal bus reset, set the command error unit attention */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(dev->scsi_dev->was_reset)) {
		if (scst_is_ua_command(cmd)) {
			int done = 0;
			/*
			 * Prevent more than one cmd from being triggered by
			 * was_reset.
			 */
			spin_lock_bh(&dev->dev_lock);
			if (dev->scsi_dev->was_reset) {
				TRACE(TRACE_MGMT, "was_reset is %d", 1);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_reset_UA));
				/*
				 * It looks like it is safe to clear was_reset
				 * here.
				 */
				dev->scsi_dev->was_reset = 0;
				done = 1;
			}
			spin_unlock_bh(&dev->dev_lock);

			if (done)
				goto out_complete;
		}
	}

	if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
			&cmd->tgt_dev->tgt_dev_flags))) {
		if (scst_is_ua_command(cmd)) {
			rc = scst_set_pending_UA(cmd);
			if (rc == 0)
				goto out_complete;
		}
	}

	res = 0;

out:
	TRACE_EXIT_RES(res);
	return res;

out_complete:
	res = 1;
	sBUG_ON(!cmd->completed);
	goto out;

out_uncomplete:
	res = -1;
	goto out;
}
EXPORT_SYMBOL(scst_check_local_events);
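/*
 * Editor's illustrative sketch (not part of the original file): a device
 * handler's exec() is expected to run scst_check_local_events() before
 * doing any real work, mirroring the local handlers above:
 *
 *	static int my_dh_exec(struct scst_cmd *cmd)	// hypothetical
 *	{
 *		if (unlikely(scst_check_local_events(cmd) != 0)) {
 *			// report the (already set) result and stop
 *			cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
 *				SCST_CONTEXT_SAME);
 *			return SCST_EXEC_COMPLETED;
 *		}
 *		// ... execute the command ...
 *		return SCST_EXEC_COMPLETED;
 *	}
 */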
/* No locks */
void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
{
	if (slot == NULL)
		goto inc;

	/* Optimized for lockless fast path */

	TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
		atomic_read(slot));

	if (!atomic_dec_and_test(slot))
		goto out;

	TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
		tgt_dev->num_free_sn_slots);
	if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
		spin_lock_irq(&tgt_dev->sn_lock);
		if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
			if (tgt_dev->num_free_sn_slots < 0)
				tgt_dev->cur_sn_slot = slot;
			/*
			 * To stay in sync with the SIMPLE case in
			 * scst_cmd_set_sn().
			 */
			smp_mb();
			tgt_dev->num_free_sn_slots++;
			TRACE_SN("Incremented num_free_sn_slots (%d)",
				tgt_dev->num_free_sn_slots);
		}
		spin_unlock_irq(&tgt_dev->sn_lock);
	}

inc:
	/*
	 * No protection of expected_sn is needed, because only one thread
	 * at a time can be here (serialized by sn). Also it is supposed
	 * that there could not be half-incremented halves.
	 */
	tgt_dev->expected_sn++;
	/*
	 * The write must be before the def_cmd_count read to stay in sync
	 * with scst_post_exec_sn(). See the comment in scst_send_for_exec().
	 */
	smp_mb();
	TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);

out:
	return;
}
static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
	bool make_active)
{
	/* For HQ commands SN is not set */
	bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
			       cmd->sn_set && !cmd->retry;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_cmd *res;

	if (inc_expected_sn)
		scst_inc_expected_sn(tgt_dev, cmd->sn_slot);

	if (make_active) {
		scst_make_deferred_commands_active(tgt_dev);
		res = NULL;
	} else
		res = scst_check_deferred_commands(tgt_dev);

	TRACE_EXIT_HRES(res);
	return res;
}
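/*
 * Editor's worked example of the SN machinery above: suppose expected_sn
 * is 5 and commands with sn 5, 6 and 7 are in flight. When the sn 5
 * command passes through here, scst_inc_expected_sn() advances
 * expected_sn to 6, and scst_check_deferred_commands() (or
 * scst_make_deferred_commands_active()) releases the deferred sn 6
 * command, which in turn releases sn 7, preserving ordered delivery
 * without taking sn_lock on the fast path.
 */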
/* cmd must be additionally referenced so it does not die inside */
static int scst_do_real_exec(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
	int rc;
#endif
	struct scst_device *dev = cmd->dev;
	struct scst_dev_type *handler = dev->handler;

	cmd->state = SCST_CMD_STATE_REAL_EXECUTING;

	if (handler->exec) {
		if (unlikely(!dev->handler->exec_atomic &&
			     scst_cmd_atomic(cmd))) {
			/*
			 * It shouldn't be, because of the
			 * SCST_TGT_DEV_AFTER_* optimization.
			 */
			TRACE_DBG("Dev handler %s exec() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		}

		TRACE_DBG("Calling dev handler %s exec(%p)",
			handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
			cmd->cdb_len);
		res = handler->exec(cmd);
		TRACE_DBG("Dev handler %s exec() returned %d",
			handler->name, res);

		if (res == SCST_EXEC_COMPLETED)
			goto out;
		else if (res == SCST_EXEC_NEED_THREAD)
			goto out_restore;

		sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
	}

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);

	if (unlikely(dev->scsi_dev == NULL)) {
		PRINT_ERROR("Command for virtual device must be "
			"processed by device handler (lun %lld)!",
			(long long unsigned int)cmd->lun);
		goto out_error;
	}

	res = scst_check_local_events(cmd);
	if (unlikely(res != 0))
		goto out_done;

#ifndef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
	if (unlikely(scst_cmd_atomic(cmd))) {
		TRACE_DBG("Pass-through exec() cannot be called in atomic "
			"context, rescheduling to the thread (handler %s)",
			handler->name);
		res = SCST_EXEC_NEED_THREAD;
		goto out_restore;
	}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
	if (unlikely(scst_alloc_request(cmd) != 0)) {
		if (scst_cmd_atomic(cmd)) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_INFO("%s", "Unable to allocate request, "
				"sending BUSY status");
			goto out_busy;
		}
	}

	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
		cmd->retries);
#else
	rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
		cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
		cmd->timeout, cmd->retries, cmd, scst_cmd_done,
		scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(rc != 0)) {
		if (scst_cmd_atomic(cmd)) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_ERROR("scst_exec_req() failed: %d", rc);
			goto out_error;
		}
	}
#endif

	res = SCST_EXEC_COMPLETED;

out:
	TRACE_EXIT_RES(res);
	return res;

out_restore:
	/* Restore the state */
	cmd->state = SCST_CMD_STATE_REAL_EXEC;
	goto out;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_done;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
out_busy:
	scst_set_busy(cmd);
	/* fall through */
#endif

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out;
}
static inline int scst_real_exec(struct scst_cmd *cmd)
{
	int res;

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_real_exec(cmd);

	if (likely(res == SCST_EXEC_COMPLETED)) {
		scst_post_exec_sn(cmd, true);
		if (cmd->dev->scsi_dev != NULL)
			generic_unplug_device(
				cmd->dev->scsi_dev->request_queue);
	} else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */

	TRACE_EXIT_RES(res);
	return res;
}
static int scst_do_local_exec(struct scst_cmd *cmd)
{
	int res;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	/* Check the READ_ONLY device status */
	if (((tgt_dev->acg_dev->rd_only_flag) || cmd->dev->swp) &&
	    (cmd->cdb[0] == WRITE_6 ||	/* ToDo: full list of the modify cmds */
	     cmd->cdb[0] == WRITE_10 ||
	     cmd->cdb[0] == WRITE_12 ||
	     cmd->cdb[0] == WRITE_16 ||
	     cmd->cdb[0] == WRITE_VERIFY ||
	     cmd->cdb[0] == WRITE_VERIFY_12 ||
	     cmd->cdb[0] == WRITE_VERIFY_16 ||
	     (cmd->dev->handler->type == TYPE_TAPE &&
	      (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS)))) {
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));
		goto out_done;
	}

	/*
	 * When adding new commands here, don't forget to update
	 * scst_is_cmd_local() in scst.h, if necessary.
	 */
	switch (cmd->cdb[0]) {
	case MODE_SELECT:
	case MODE_SELECT_10:
	case LOG_SELECT:
		res = scst_pre_select(cmd);
		break;
	case RESERVE:
	case RESERVE_10:
		res = scst_reserve_local(cmd);
		break;
	case RELEASE:
	case RELEASE_10:
		res = scst_release_local(cmd);
		break;
	case REPORT_LUNS:
		res = scst_report_luns_local(cmd);
		break;
	default:
		res = SCST_EXEC_NOT_COMPLETED;
		break;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_local_exec(struct scst_cmd *cmd)
{
	int res;

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_local_exec(cmd);
	if (likely(res == SCST_EXEC_NOT_COMPLETED))
		cmd->state = SCST_CMD_STATE_REAL_EXEC;
	else if (res == SCST_EXEC_COMPLETED)
		scst_post_exec_sn(cmd, true);
	else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_exec(struct scst_cmd **active_cmd)
{
	struct scst_cmd *cmd = *active_cmd;
	struct scst_cmd *ref_cmd;
	struct scst_device *dev = cmd->dev;
	int res = SCST_CMD_STATE_RES_CONT_NEXT, count;

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
		goto out;

	/* To protect tgt_dev */
	ref_cmd = cmd;
	__scst_cmd_get(ref_cmd);

	count = 0;
	while (1) {
		int rc;

		cmd->sent_for_exec = 1;
		cmd->scst_cmd_done = scst_cmd_done_local;
		cmd->state = SCST_CMD_STATE_LOCAL_EXEC;

		if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
		    (cmd->data_direction == SCST_DATA_WRITE))
			scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);

		rc = scst_do_local_exec(cmd);
		if (likely(rc == SCST_EXEC_NOT_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("%s", "scst_do_local_exec() requested "
				"thread context, rescheduling");
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else {
			sBUG_ON(rc != SCST_EXEC_COMPLETED);
			goto done;
		}

		cmd->state = SCST_CMD_STATE_REAL_EXEC;

		rc = scst_do_real_exec(cmd);
		if (likely(rc == SCST_EXEC_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("scst_real_exec() requested thread "
				"context, rescheduling (cmd %p)", cmd);
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else
			sBUG();

done:
		count++;

		cmd = scst_post_exec_sn(cmd, false);
		if (cmd == NULL)
			break;

		if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
			break;

		__scst_cmd_put(ref_cmd);
		ref_cmd = cmd;
		__scst_cmd_get(ref_cmd);
	}

	*active_cmd = cmd;

	if (count == 0)
		goto out_put;

	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);

out_put:
	__scst_cmd_put(ref_cmd);
	/* !! At this point sess, dev and tgt_dev can be already freed !! */

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_send_for_exec(struct scst_cmd **active_cmd)
{
	int res;
	struct scst_cmd *cmd = *active_cmd;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	typeof(tgt_dev->expected_sn) expected_sn;

#ifdef CONFIG_SCST_MEASURE_LATENCY
	if (cmd->pre_exec_finish == 0) {
		struct timespec ts;

		getnstimeofday(&ts);
		cmd->pre_exec_finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): pre_exec_finish %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->pre_exec_finish,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	if (unlikely(cmd->internal))
		goto exec;

	if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
		goto exec;

	sBUG_ON(!cmd->sn_set);

	expected_sn = tgt_dev->expected_sn;
	/* Optimized for lockless fast path */
	if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
		spin_lock_irq(&tgt_dev->sn_lock);

		tgt_dev->def_cmd_count++;
		/*
		 * A memory barrier is needed here to implement the lockless
		 * fast path. We need the exact ordering of the reads and
		 * writes of def_cmd_count and expected_sn. Otherwise, we
		 * could miss the case when expected_sn was changed to be
		 * equal to cmd->sn while we are queuing cmd on the deferred
		 * list after the expected_sn check below; that would lead to
		 * a forever-stuck command. With the barrier, in such a case
		 * __scst_check_deferred_commands() will be called, and it
		 * will take sn_lock, so we will be synchronized.
		 */
		smp_mb();

		expected_sn = tgt_dev->expected_sn;
		if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
			if (unlikely(test_bit(SCST_CMD_ABORTED,
					&cmd->cmd_flags))) {
				/* Necessary to allow aborting out-of-sn cmds */
				TRACE_MGMT_DBG("Aborting out of sn cmd %p "
					"(tag %llu, sn %lu)", cmd,
					(long long unsigned)cmd->tag, cmd->sn);
				tgt_dev->def_cmd_count--;
				scst_set_cmd_abnormal_done_state(cmd);
				res = SCST_CMD_STATE_RES_CONT_SAME;
			} else {
				TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
					"expected_sn=%ld)", cmd, cmd->sn,
					cmd->sn_set, expected_sn);
				list_add_tail(&cmd->sn_cmd_list_entry,
					&tgt_dev->deferred_cmd_list);
				res = SCST_CMD_STATE_RES_CONT_NEXT;
			}
			spin_unlock_irq(&tgt_dev->sn_lock);
			goto out;
		} else {
			TRACE_SN("Somebody incremented expected_sn %ld, "
				"continuing", expected_sn);
			tgt_dev->def_cmd_count--;
			spin_unlock_irq(&tgt_dev->sn_lock);
		}
	}

exec:
	res = scst_exec(active_cmd);

out:
	TRACE_EXIT_HRES(res);
	return res;
}
/* No locks supposed to be held */
static int scst_check_sense(struct scst_cmd *cmd)
{
	int res = 0;
	struct scst_device *dev = cmd->dev;

	if (unlikely(cmd->ua_ignore))
		goto out;

	/* If we had an internal bus reset behind us, set the command error UA */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(cmd->host_status == DID_RESET) &&
	    scst_is_ua_command(cmd)) {
		TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
			dev->scsi_dev->was_reset, cmd->host_status);
		scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
		/* It looks like it is safe to clear was_reset here */
		dev->scsi_dev->was_reset = 0;
	}

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    SCST_SENSE_VALID(cmd->sense)) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
			SCST_SENSE_BUFFERSIZE);

		/* Check the Unit Attention sense key */
		if (scst_is_ua_sense(cmd->sense)) {
			if (cmd->sense[12] == SCST_SENSE_ASC_UA_RESET) {
				if (cmd->double_ua_possible) {
					TRACE(TRACE_MGMT_MINOR, "Double UA "
						"detected for device %p", dev);
					TRACE(TRACE_MGMT_MINOR, "Retrying cmd"
						" %p (tag %llu)", cmd,
						(long long unsigned)cmd->tag);

					cmd->status = 0;
					cmd->msg_status = 0;
					cmd->host_status = DID_OK;
					cmd->driver_status = 0;

					mempool_free(cmd->sense,
						scst_sense_mempool);
					cmd->sense = NULL;

					scst_check_restore_sg_buff(cmd);

					sBUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
					cmd->data_direction =
						cmd->dbl_ua_orig_data_direction;
					cmd->resp_data_len =
						cmd->dbl_ua_orig_resp_data_len;

					cmd->state = SCST_CMD_STATE_REAL_EXEC;
					cmd->retry = 1;
					res = 1;
					goto out;
				}
			}
			scst_dev_check_set_UA(dev, cmd, cmd->sense,
				SCST_SENSE_BUFFERSIZE);
		}
	}

	if (unlikely(cmd->double_ua_possible)) {
		if (scst_is_ua_command(cmd)) {
			TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
				"cmd %p)", dev, cmd);
			/*
			 * The lock is used to protect the other flags in the
			 * bitfield (just in case, actually). Those flags
			 * can't be changed in parallel, because the device is
			 * blocked.
			 */
			spin_lock_bh(&dev->dev_lock);
			dev->dev_double_ua_possible = 0;
			spin_unlock_bh(&dev->dev_lock);
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_check_auto_sense(struct scst_cmd *cmd)
{
	int res = 0;

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    (!SCST_SENSE_VALID(cmd->sense) ||
	     SCST_NO_SENSE(cmd->sense))) {
		TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
			"cmd->status=%x, cmd->msg_status=%x, "
			"cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
			cmd->status, cmd->msg_status, cmd->host_status,
			cmd->driver_status, cmd);
		res = 1;
	} else if (unlikely(cmd->host_status)) {
		if ((cmd->host_status == DID_REQUEUE) ||
		    (cmd->host_status == DID_IMM_RETRY) ||
		    (cmd->host_status == DID_SOFT_ERROR) ||
		    (cmd->host_status == DID_ABORT)) {
			scst_set_busy(cmd);
		} else {
			TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
				"received, returning HARDWARE ERROR instead "
				"(cmd %p)", cmd->host_status, cmd);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		}
	}

	TRACE_EXIT_RES(res);
	return res;
}
static int scst_pre_dev_done(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME, rc;

	if (unlikely(scst_check_auto_sense(cmd))) {
		PRINT_INFO("Command finished with CHECK CONDITION, but "
			"without sense data (opcode 0x%x), issuing "
			"REQUEST SENSE", cmd->cdb[0]);
		rc = scst_prepare_request_sense(cmd);
		if (rc == 0)
			res = SCST_CMD_STATE_RES_CONT_NEXT;
		else {
			PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
				"returning HARDWARE ERROR");
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		}
		goto out;
	} else if (unlikely(scst_check_sense(cmd)))
		goto out;

	if (likely(scsi_status_is_good(cmd->status))) {
		unsigned char type = cmd->dev->handler->type;
		if (unlikely((cmd->cdb[0] == MODE_SENSE ||
			      cmd->cdb[0] == MODE_SENSE_10)) &&
		    cmd->tgt_dev->acg_dev->rd_only_flag &&
		    (type == TYPE_DISK ||
		     type == TYPE_WORM ||
		     type == TYPE_MOD ||
		     type == TYPE_TAPE)) {
			int32_t length;
			uint8_t *address;

			length = scst_get_buf_first(cmd, &address);
			if (length < 0) {
				PRINT_ERROR("%s", "Unable to get "
					"MODE_SENSE buffer");
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(
						scst_sense_hardw_error));
				goto out;
			} else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
				address[2] |= 0x80;	/* Write Protect */
			else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
				address[3] |= 0x80;	/* Write Protect */
			scst_put_buf(cmd, address);
		}
		/*
		 * Check and clear the NormACA option for the device, if
		 * necessary, since we don't support ACA.
		 */
		if (unlikely((cmd->cdb[0] == INQUIRY)) &&
		    /* Std INQUIRY data (no EVPD) */
		    !(cmd->cdb[1] & SCST_INQ_EVPD) &&
		    (cmd->resp_data_len > SCST_INQ_BYTE3)) {
			uint8_t *buffer;
			int buflen;

			/* ToDo: all pages ?? */
			buflen = scst_get_buf_first(cmd, &buffer);
			if (buflen > SCST_INQ_BYTE3) {
#ifdef CONFIG_SCST_EXTRACHECKS
				if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
					PRINT_INFO("NormACA set for device: "
						"lun=%lld, type 0x%02x. Clear it, "
						"since it's unsupported.",
						(long long unsigned int)cmd->lun,
						buffer[0]);
				}
#endif
				buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
			} else if (buflen != 0) {
				PRINT_ERROR("%s", "Unable to get INQUIRY "
					"buffer");
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_hardw_error));
			}
			if (buflen > 0)
				scst_put_buf(cmd, buffer);
		}
2452 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2453 (cmd->cdb[0] == MODE_SELECT_10) ||
2454 (cmd->cdb[0] == LOG_SELECT))) {
2456 "MODE/LOG SELECT succeeded (LUN %lld)",
2457 (long long unsigned int)cmd->lun);
2458 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2462 if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
2463 if (!test_bit(SCST_TGT_DEV_RESERVED,
2464 &cmd->tgt_dev->tgt_dev_flags)) {
2465 struct scst_tgt_dev *tgt_dev_tmp;
2466 struct scst_device *dev = cmd->dev;
2469 "Real RESERVE failed lun=%lld, "
2471 (long long unsigned int)cmd->lun,
2473 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2474 SCST_SENSE_BUFFERSIZE);
2476 /* Clearing the reservation */
2477 spin_lock_bh(&dev->dev_lock);
2478 list_for_each_entry(tgt_dev_tmp,
2479 &dev->dev_tgt_dev_list,
2480 dev_tgt_dev_list_entry) {
2481 clear_bit(SCST_TGT_DEV_RESERVED,
2482 &tgt_dev_tmp->tgt_dev_flags);
2484 dev->dev_reserved = 0;
2485 spin_unlock_bh(&dev->dev_lock);
2489 /* Check for MODE PARAMETERS CHANGED UA */
2490 if ((cmd->dev->scsi_dev != NULL) &&
2491 (cmd->status == SAM_STAT_CHECK_CONDITION) &&
2492 SCST_SENSE_VALID(cmd->sense) &&
2493 scst_is_ua_sense(cmd->sense) &&
2494 (cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) {
2496 "MODE PARAMETERS CHANGED UA (lun %lld)",
2497 (long long unsigned int)cmd->lun);
2498 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2503 cmd->state = SCST_CMD_STATE_DEV_DONE;
2506 TRACE_EXIT_RES(res);
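
/*
 * Handles the consequences of a MODE/LOG SELECT: on success a SELECT UA is
 * set for the other initiators and, for pass-through devices, the changed
 * parameters are re-read; the same re-read happens when a "parameters
 * changed"-style UA comes back. Requires thread context for the re-read,
 * since obtaining device parameters can sleep.
 */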
static int scst_mode_select_checks(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int atomic = scst_cmd_atomic(cmd);

	TRACE_ENTRY();

	if (likely(scsi_status_is_good(cmd->status))) {
		if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
		    (cmd->cdb[0] == MODE_SELECT_10) ||
		    (cmd->cdb[0] == LOG_SELECT))) {
			struct scst_device *dev = cmd->dev;
			if (atomic && (dev->scsi_dev != NULL)) {
				TRACE_DBG("%s", "MODE/LOG SELECT: thread "
					"context required");
				res = SCST_CMD_STATE_RES_NEED_THREAD;
				goto out;
			}

			TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
				"setting the SELECT UA (lun=%lld)",
				(long long unsigned int)cmd->lun);

			spin_lock_bh(&dev->dev_lock);
			spin_lock(&scst_temp_UA_lock);
			if (cmd->cdb[0] == LOG_SELECT) {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x02);
			} else {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x01);
			}
			scst_dev_check_set_local_UA(dev, cmd, scst_temp_UA,
				sizeof(scst_temp_UA));
			spin_unlock(&scst_temp_UA_lock);
			spin_unlock_bh(&dev->dev_lock);

			if (dev->scsi_dev != NULL)
				scst_obtain_device_parameters(dev);
		}
	} else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
		   SCST_SENSE_VALID(cmd->sense) &&
		   scst_is_ua_sense(cmd->sense) &&
		   (((cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) ||
		    (cmd->sense[12] == 0x29) /* reset */ ||
		    (cmd->sense[12] == 0x28) /* medium changed */ ||
		    /* cleared by another ini (just in case) */
		    (cmd->sense[12] == 0x2F))) {
		if (atomic) {
			TRACE_DBG("Possible parameters changed UA %x: "
				"thread context required", cmd->sense[12]);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
			"(lun %lld): getting new parameters", cmd->sense[12],
			(long long unsigned int)cmd->lun);

		scst_obtain_device_parameters(cmd->dev);
	}

	cmd->state = SCST_CMD_STATE_DEV_DONE;

out:
	TRACE_EXIT_HRES(res);
	return res;
}

static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
{
	if (likely(cmd->sn_set))
		scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);

	scst_make_deferred_commands_active(cmd->tgt_dev);
}
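
/*
 * Delivers the command to the dev handler's dev_done() callback, then
 * releases what was taken on the way to execution: device blocking, the
 * on-device command reference and, if the command was sent for execution,
 * its SN slot, making deferred commands active again.
 */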
static int scst_dev_done(struct scst_cmd **pcmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_cmd *cmd = *pcmd;
	int state;
	struct scst_device *dev = cmd->dev;

	TRACE_ENTRY();

	state = SCST_CMD_STATE_PRE_XMIT_RESP;

	if (likely(!scst_is_cmd_local(cmd)) &&
	    likely(dev->handler->dev_done != NULL)) {
		int rc;

		if (unlikely(!dev->handler->dev_done_atomic &&
			     scst_cmd_atomic(cmd))) {
			/*
			 * It shouldn't be because of SCST_TGT_DEV_AFTER_*
			 * optimization.
			 */
			TRACE_DBG("Dev handler %s dev_done() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE_DBG("Calling dev handler %s dev_done(%p)",
			dev->handler->name, cmd);
		rc = dev->handler->dev_done(cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
			dev->handler->name, rc);
		if (rc != SCST_CMD_STATE_DEFAULT)
			state = rc;
	}

	switch (state) {
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_MODE_SELECT_CHECKS:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		cmd->state = state;
		break;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s dev_done() requested "
			"thread context, rescheduling",
			dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"invalid cmd state %d",
				dev->handler->name, state);
		} else {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"error %d", dev->handler->name,
				state);
		}
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	if (cmd->needs_unblocking)
		scst_unblock_dev_cmd(cmd);

	if (likely(cmd->dec_on_dev_needed))
		scst_dec_on_dev_cmd(cmd);

	if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
		scst_inc_check_expected_sn(cmd);

	if (unlikely(cmd->cdb[0] == REQUEST_SENSE) && (cmd->internal))
		*pcmd = scst_complete_request_sense(cmd);

out:
	TRACE_EXIT_HRES(res);
	return res;
}
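
/*
 * Final per-command bookkeeping before the response is transmitted: drops
 * the per-tgt_dev/device command counters, settles HQ and SN accounting,
 * removes the command from the session's search list and, when both target
 * and dev handler buffers exist, copies READ data to the target's buffer.
 */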
static int scst_pre_xmit_response(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_DEBUG_TM
	if (cmd->tm_dbg_delayed &&
	    !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MGMT_DBG("%s",
				"DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			return res;
		}
		TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
			cmd, cmd->tag);
		schedule_timeout_uninterruptible(HZ);
	}
#endif

	if (likely(cmd->tgt_dev != NULL)) {
		atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
		atomic_dec(&cmd->dev->dev_cmd_count);
		/* If expected values not set, expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_dec(&cmd->dev->write_cmd_count);

		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			scst_on_hq_cmd_response(cmd);

		if (unlikely(!cmd->sent_for_exec)) {
			TRACE_SN("cmd %p was not sent to mid-lev"
				" (sn %ld, set %d)",
				cmd, cmd->sn, cmd->sn_set);
			scst_unblock_deferred(cmd->tgt_dev, cmd);
			cmd->sent_for_exec = 1;
		}
	}

	/*
	 * If we don't remove cmd from the search list here, before
	 * submitting it for transmission, we will have a race, when for
	 * some reason cmd's release is delayed after transmission and
	 * initiator sends cmd with the same tag => it is possible that
	 * a wrong cmd will be found by find() functions.
	 */
	spin_lock_irq(&cmd->sess->sess_list_lock);
	list_del(&cmd->search_cmd_list_entry);
	spin_unlock_irq(&cmd->sess->sess_list_lock);
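
	/*
	 * Illustrative note: the race above matters to target drivers that
	 * look commands up by tag on their receive path (the find()
	 * functions mentioned above). Once the initiator may legally reuse
	 * the tag, such a lookup must not be able to return this
	 * almost-finished command.
	 */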

	cmd->done = 1;
	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
		scst_xmit_process_aborted_cmd(cmd);

	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu),"
			" skipping",
			cmd, (long long unsigned int)cmd->tag);
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
	    (cmd->data_direction == SCST_DATA_READ))
		scst_copy_sg(cmd, SCST_SG_COPY_TO_TARGET);

	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;

out:
#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		uint64_t finish, scst_time, proc_time;
		struct scst_session *sess = cmd->sess;

		getnstimeofday(&ts);
		finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;

		spin_lock_bh(&sess->meas_lock);

		scst_time = cmd->pre_exec_finish - cmd->start;
		scst_time += finish - cmd->post_exec_start;
		proc_time = finish - cmd->start;

		sess->scst_time += scst_time;
		sess->processing_time += proc_time;
		sess->processed_cmds++;

		spin_unlock_bh(&sess->meas_lock);

		TRACE_DBG("cmd %p (sess %p): finish %lld (tv_sec %ld, "
			"tv_nsec %ld), scst_time %lld, proc_time %lld",
			cmd, sess, finish, ts.tv_sec, ts.tv_nsec, scst_time,
			proc_time);
	}
#endif

	TRACE_EXIT_HRES(res);
	return res;
}
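
/*
 * Hands the response to the target driver's xmit_response() callback and
 * interprets its return code: QUEUE_FULL goes through the retry machinery,
 * NEED_THREAD_CTX reschedules to a thread, everything else becomes a
 * locally generated HARDWARE ERROR.
 */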
static int scst_xmit_response(struct scst_cmd *cmd)
{
	int res, rc;

	TRACE_ENTRY();

	if (unlikely(!cmd->tgtt->xmit_response_atomic &&
		     scst_cmd_atomic(cmd))) {
		/*
		 * It shouldn't be because of SCST_TGT_DEV_AFTER_*
		 * optimization.
		 */
		TRACE_DBG("Target driver %s xmit_response() needs thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	while (1) {
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_XMIT_WAIT;

		TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
		if (trace_flag & TRACE_SND_BOT) {
			int i;
			struct scatterlist *sg = cmd->tgt_sg;
			TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
				"(sg_cnt %d, sg %p, sg[0].page %p)", cmd,
				cmd->tgt_sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
				PRINT_BUFF_FLAG(TRACE_SND_BOT, "Xmitting sg",
					sg_virt(&sg[i]), sg[i].length);
			}
		}
#endif

#ifdef CONFIG_SCST_DEBUG_RETRY
		if (((scst_random() % 100) == 77))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->xmit_response(cmd);
		TRACE_DBG("xmit_response() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_XMIT_RESP;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				break;
			else
				continue;

		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s xmit_response() "
				"requested thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		default:
			goto out_error;
		}
		break;
	}

out:
	/* Caution: cmd can be already dead here */
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
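
/*
 * scst_tgt_cmd_done() - the target driver finished transmitting the
 * response. Moves the command to the FINISHED state and redirects further
 * processing to the context preferred by the caller.
 */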
void scst_tgt_cmd_done(struct scst_cmd *cmd,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

	sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

	cmd->state = SCST_CMD_STATE_FINISHED;
	scst_proccess_redirect_cmd(cmd, pref_context, 1);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_tgt_cmd_done);
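
#if 0
/*
 * Illustrative usage sketch (not compiled): a typical completion callback
 * of a target driver, run from its HW IRQ once the response has actually
 * gone out on the wire. "struct my_drv_request", "my_drv_xmit_complete()"
 * and the "scst_cmd" back-pointer are hypothetical; only
 * scst_tgt_cmd_done() and SCST_CONTEXT_TASKLET are part of the real API.
 */
struct my_drv_request {
	struct scst_cmd *scst_cmd;
	/* ... HW-specific fields ... */
};

static void my_drv_xmit_complete(struct my_drv_request *req)
{
	/* We are in IRQ context, so ask SCST to continue in its tasklet */
	scst_tgt_cmd_done(req->scst_cmd, SCST_CONTEXT_TASKLET);
}
#endif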

static int scst_finish_cmd(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	atomic_dec(&cmd->sess->sess_cmd_count);

	cmd->finished = 1;
	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
			"scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
			atomic_read(&scst_cmd_count));

		scst_finish_cmd_mgmt(cmd);
	}

	if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
		if ((cmd->tgt_dev != NULL) &&
		    scst_is_ua_sense(cmd->sense)) {
			/* This UA delivery failed, so requeue it */
			TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd "
				"%p", cmd);
			scst_check_set_UA(cmd->tgt_dev, cmd->sense,
					SCST_SENSE_BUFFERSIZE, 1);
		}
	}

	__scst_cmd_put(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
	return res;
}

/*
 * No locks, but it must be externally serialized (see comment for
 * scst_cmd_init_done() in scst.h)
 */
static void scst_cmd_set_sn(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	unsigned long flags;

	TRACE_ENTRY();

	if (scst_is_implicit_hq(cmd)) {
		TRACE_SN("Implicit HQ cmd %p", cmd);
		cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	}

	EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);

	/* Optimized for lockless fast path */

	scst_check_debug_sn(cmd);

	if (cmd->dev->queue_alg ==
			SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
		/*
		 * Not the best way, but well enough until there will be a
		 * possibility to specify queue type during pass-through
		 * commands submission.
		 */
		cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
	}

	switch (cmd->queue_type) {
	case SCST_CMD_QUEUE_SIMPLE:
	case SCST_CMD_QUEUE_UNTAGGED:
#if 0 /* left for future performance investigations */
		if (scst_cmd_is_expected_set(cmd)) {
			if ((cmd->expected_data_direction == SCST_DATA_READ) &&
			    (atomic_read(&cmd->dev->write_cmd_count) == 0))
				goto ordered;
		} else
			goto ordered;
#endif
		if (likely(tgt_dev->num_free_sn_slots >= 0)) {
			/*
			 * atomic_inc_return() implies memory barrier to sync
			 * with scst_inc_expected_sn()
			 */
			if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
				tgt_dev->curr_sn++;
				TRACE_SN("Incremented curr_sn %ld",
					tgt_dev->curr_sn);
			}
			cmd->sn_slot = tgt_dev->cur_sn_slot;
			cmd->sn = tgt_dev->curr_sn;

			tgt_dev->prev_cmd_ordered = 0;
		} else {
			TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
				"%zd", ARRAY_SIZE(tgt_dev->sn_slots));
			goto ordered;
		}
		break;

	case SCST_CMD_QUEUE_ORDERED:
		TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
ordered:
		if (!tgt_dev->prev_cmd_ordered) {
			spin_lock_irqsave(&tgt_dev->sn_lock, flags);
			if (tgt_dev->num_free_sn_slots >= 0) {
				tgt_dev->num_free_sn_slots--;
				if (tgt_dev->num_free_sn_slots >= 0) {
					int i = 0;
					/*
					 * Commands can finish in any order, so
					 * we don't know which slot is empty.
					 */
					while (1) {
						tgt_dev->cur_sn_slot++;
						if (tgt_dev->cur_sn_slot ==
						    tgt_dev->sn_slots +
						    ARRAY_SIZE(tgt_dev->sn_slots))
							tgt_dev->cur_sn_slot =
								tgt_dev->sn_slots;

						if (atomic_read(tgt_dev->cur_sn_slot) == 0)
							break;

						i++;
						sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
					}
					TRACE_SN("New cur SN slot %zd",
						tgt_dev->cur_sn_slot -
						tgt_dev->sn_slots);
				}
			}
			spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		}
		tgt_dev->prev_cmd_ordered = 1;
		tgt_dev->curr_sn++;
		cmd->sn = tgt_dev->curr_sn;
		break;

	case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
		TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
		spin_lock_irqsave(&tgt_dev->sn_lock, flags);
		tgt_dev->hq_cmd_count++;
		spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		cmd->hq_cmd_inced = 1;
		goto out;

	default:
		sBUG();
	}

	TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
		"num_free_sn_slots %d, prev_cmd_ordered %ld, "
		"cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
		atomic_read(tgt_dev->cur_sn_slot),
		tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
		tgt_dev->cur_sn_slot - tgt_dev->sn_slots);

	cmd->sn_set = 1;

out:
	TRACE_EXIT();
	return;
}
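
/*
 * A descriptive note on the slot accounting above (illustrative, not
 * authoritative): back-to-back SIMPLE commands all join the current slot -
 * each one bumps the slot's counter and is tagged with the same curr_sn -
 * so they may execute and complete in any order. An ORDERED command closes
 * that slot, advances curr_sn and moves cur_sn_slot to an empty slot
 * (counter == 0), so its SN cannot become the expected one until every
 * command counted in the previous slot has completed (see
 * scst_inc_expected_sn()).
 */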

/*
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 *
 * No locks, but might be on IRQ, protection is done by the
 * suspended activity.
 */
static int scst_translate_lun(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	int res;

	TRACE_ENTRY();

	/* See comment about smp_mb() in scst_suspend_activity() */
	__scst_get(1);

	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
		TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
			(long long unsigned int)cmd->lun);
		res = -1;
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (tgt_dev->lun == cmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);

				if (unlikely(tgt_dev->dev->handler ==
						&scst_null_devtype)) {
					PRINT_INFO("Dev handler for device "
						"%lld is NULL, the device will not "
						"be visible remotely",
						(long long unsigned int)cmd->lun);
					break;
				}

				cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
				cmd->tgt_dev = tgt_dev;
				cmd->dev = tgt_dev->dev;

				res = 0;
				break;
			}
		}
		if (res != 0) {
			TRACE(TRACE_MINOR,
				"tgt_dev for lun %lld not found, command to "
				"unexisting LU?",
				(long long unsigned int)cmd->lun);
			__scst_put();
		}
	} else {
		TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
		__scst_put();
		res = 1;
	}

	TRACE_EXIT_RES(res);
	return res;
}

/*
 * No locks, but might be on IRQ.
 *
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 */
static int __scst_init_cmd(struct scst_cmd *cmd)
{
	int res = 0;

	TRACE_ENTRY();

	res = scst_translate_lun(cmd);
	if (likely(res == 0)) {
		int cnt;
		bool failure = false;

		cmd->state = SCST_CMD_STATE_PRE_PARSE;

		cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
			TRACE(TRACE_MGMT_MINOR,
				"Too many pending commands (%d) in "
				"session, returning BUSY to initiator \"%s\"",
				cnt, (cmd->sess->initiator_name[0] == '\0') ?
					"Anonymous" : cmd->sess->initiator_name);
			failure = true;
		}

		cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
			if (!failure) {
				TRACE(TRACE_MGMT_MINOR,
					"Too many pending device "
					"commands (%d), returning BUSY to "
					"initiator \"%s\"", cnt,
					(cmd->sess->initiator_name[0] == '\0') ?
						"Anonymous" :
						cmd->sess->initiator_name);
				failure = true;
			}
		}

		/* If expected values not set, expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_inc(&cmd->dev->write_cmd_count);

		if (unlikely(failure))
			goto out_busy;

		if (!cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
	} else if (res < 0) {
		TRACE_DBG("Finishing cmd %p", cmd);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		scst_set_cmd_abnormal_done_state(cmd);
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_busy:
	scst_set_busy(cmd);
	scst_set_cmd_abnormal_done_state(cmd);
	goto out;
}
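
/*
 * Descriptive note (derived from the code around): commands that could not
 * be initialized in the caller's context - e.g. because the activity was
 * suspended or the init cmd list was busy - sit on scst_init_cmd_list and
 * are replayed in submission order by the functions below.
 */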

/* Called under scst_init_lock and IRQs disabled */
static void scst_do_job_init(void)
	__releases(&scst_init_lock)
	__acquires(&scst_init_lock)
{
	struct scst_cmd *cmd;
	int susp;

	TRACE_ENTRY();

restart:
	/*
	 * There is no need for read barrier here, because we don't care where
	 * this check will be done.
	 */
	susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	if (scst_init_poll_cnt > 0)
		scst_init_poll_cnt--;

	list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
		int rc;
		if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			continue;
		if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			spin_unlock_irq(&scst_init_lock);
			rc = __scst_init_cmd(cmd);
			spin_lock_irq(&scst_init_lock);
			if (rc > 0) {
				TRACE_MGMT_DBG("%s",
					"FLAG SUSPENDED set, restarting");
				goto restart;
			}
		} else {
			TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
				cmd, (long long unsigned int)cmd->tag);
			scst_set_cmd_abnormal_done_state(cmd);
		}

		/*
		 * Deleting cmd from init cmd list after __scst_init_cmd()
		 * is necessary to keep the check in scst_init_cmd() correct
		 * to preserve the commands order.
		 *
		 * We don't care about the race, when init cmd list is empty
		 * and one command detected that it just was not empty, so
		 * it's inserting to it, but another command at the same time
		 * seeing init cmd list empty and goes directly, because it
		 * could affect only commands from the same initiator to the
		 * same tgt_dev, but scst_cmd_init_done*() doesn't guarantee
		 * the order in case of simultaneous such calls anyway.
		 */
		TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
		smp_wmb(); /* enforce the required order */
		list_del(&cmd->cmd_list_entry);
		spin_unlock(&scst_init_lock);

		spin_lock(&cmd->cmd_lists->cmd_list_lock);
		TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock(&cmd->cmd_lists->cmd_list_lock);

		spin_lock(&scst_init_lock);
		goto restart;
	}

	/* It isn't really needed, but let's keep it */
	if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
		goto restart;

	TRACE_EXIT();
	return;
}

static inline int test_init_cmd_list(void)
{
	int res = (!list_empty(&scst_init_cmd_list) &&
		   !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
		  unlikely(kthread_should_stop()) ||
		  (scst_init_poll_cnt > 0);
	return res;
}
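
/*
 * The init thread below sleeps on scst_init_cmd_list_waitQ until
 * test_init_cmd_list() reports work: a queued command while activity is not
 * suspended, a pending poll request, or a stop request on module unload.
 */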
int scst_init_cmd_thread(void *arg)
{
	TRACE_ENTRY();

	PRINT_INFO("Init thread started, PID %d", current->pid);

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -10);

	spin_lock_irq(&scst_init_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_init_cmd_list()) {
			add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
						 &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_init_cmd_list())
					break;
				spin_unlock_irq(&scst_init_lock);
				schedule();
				spin_lock_irq(&scst_init_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
		}
		scst_do_job_init();
	}
	spin_unlock_irq(&scst_init_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be
	 * on the module unload, so scst_init_cmd_list must be empty.
	 */
	sBUG_ON(!list_empty(&scst_init_cmd_list));

	PRINT_INFO("Init thread PID %d finished", current->pid);

	TRACE_EXIT();
	return 0;
}

/* Called with no locks held */
void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic)
{
	int res;

	TRACE_ENTRY();

	EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());

	cmd->atomic = atomic;

	TRACE_DBG("cmd %p, atomic %d", cmd, atomic);

	do {
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
			res = scst_pre_parse(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_DEV_PARSE:
			res = scst_parse_cmd(cmd);
			break;

		case SCST_CMD_STATE_PREPARE_SPACE:
			res = scst_prepare_space(cmd);
			break;

		case SCST_CMD_STATE_RDY_TO_XFER:
			res = scst_rdy_to_xfer(cmd);
			break;

		case SCST_CMD_STATE_TGT_PRE_EXEC:
			res = scst_tgt_pre_exec(cmd);
			break;

		case SCST_CMD_STATE_SEND_FOR_EXEC:
			if (tm_dbg_check_cmd(cmd) != 0) {
				res = SCST_CMD_STATE_RES_CONT_NEXT;
				TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
					"because of TM DBG delay", cmd,
					(long long unsigned int)cmd->tag);
				break;
			}
			res = scst_send_for_exec(&cmd);
			/*
			 * !! At this point cmd, sess & tgt_dev can already be
			 * freed !!
			 */
			break;

		case SCST_CMD_STATE_LOCAL_EXEC:
			res = scst_local_exec(cmd);
			/*
			 * !! At this point cmd, sess & tgt_dev can already be
			 * freed !!
			 */
			break;

		case SCST_CMD_STATE_REAL_EXEC:
			res = scst_real_exec(cmd);
			/*
			 * !! At this point cmd, sess & tgt_dev can already be
			 * freed !!
			 */
			break;

		case SCST_CMD_STATE_PRE_DEV_DONE:
			res = scst_pre_dev_done(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
			res = scst_mode_select_checks(cmd);
			break;

		case SCST_CMD_STATE_DEV_DONE:
			res = scst_dev_done(&cmd);
			break;

		case SCST_CMD_STATE_PRE_XMIT_RESP:
			res = scst_pre_xmit_response(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_XMIT_RESP:
			res = scst_xmit_response(cmd);
			break;

		case SCST_CMD_STATE_FINISHED:
			res = scst_finish_cmd(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		default:
			PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
				"be", cmd, cmd->state);
			sBUG();
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			break;
		}
	} while (res == SCST_CMD_STATE_RES_CONT_SAME);

	if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		/* none */
	} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
		case SCST_CMD_STATE_DEV_PARSE:
		case SCST_CMD_STATE_PREPARE_SPACE:
		case SCST_CMD_STATE_RDY_TO_XFER:
		case SCST_CMD_STATE_TGT_PRE_EXEC:
		case SCST_CMD_STATE_SEND_FOR_EXEC:
		case SCST_CMD_STATE_LOCAL_EXEC:
		case SCST_CMD_STATE_REAL_EXEC:
		case SCST_CMD_STATE_PRE_DEV_DONE:
		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
		case SCST_CMD_STATE_DEV_DONE:
		case SCST_CMD_STATE_PRE_XMIT_RESP:
		case SCST_CMD_STATE_XMIT_RESP:
		case SCST_CMD_STATE_FINISHED:
			TRACE_DBG("Adding cmd %p to head of active cmd list",
				cmd);
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
			break;
#ifdef CONFIG_SCST_EXTRACHECKS
		/* not very valid commands */
		case SCST_CMD_STATE_DEFAULT:
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			PRINT_CRIT_ERROR("cmd %p is in invalid state %d)", cmd,
				cmd->state);
			spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
			sBUG();
			spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
			break;
#endif
		default:
			sBUG();
		}
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
	} else
		sBUG();

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_process_active_cmd);
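
#if 0
/*
 * Illustrative usage sketch (not compiled), mirroring scst_do_job_active()
 * below: a target driver with its own processing thread could drain a
 * private queue of commands this way. "struct my_drv" and its fields are
 * hypothetical; only scst_process_active_cmd() is part of the real API.
 */
static void my_drv_do_job(struct my_drv *drv)
{
	spin_lock_irq(&drv->cmd_list_lock);
	while (!list_empty(&drv->cmd_list)) {
		struct scst_cmd *cmd = list_entry(drv->cmd_list.next,
					typeof(*cmd), cmd_list_entry);
		list_del(&cmd->cmd_list_entry);
		spin_unlock_irq(&drv->cmd_list_lock);
		/* thread context, so non-atomic processing is allowed */
		scst_process_active_cmd(cmd, false);
		spin_lock_irq(&drv->cmd_list_lock);
	}
	spin_unlock_irq(&drv->cmd_list_lock);
}
#endif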

/* Called under cmd_list_lock and IRQs disabled */
static void scst_do_job_active(struct list_head *cmd_list,
	spinlock_t *cmd_list_lock, bool atomic)
	__releases(cmd_list_lock)
	__acquires(cmd_list_lock)
{
	TRACE_ENTRY();

	while (!list_empty(cmd_list)) {
		struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
					cmd_list_entry);
		TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
		list_del(&cmd->cmd_list_entry);
		spin_unlock_irq(cmd_list_lock);
		scst_process_active_cmd(cmd, atomic);
		spin_lock_irq(cmd_list_lock);