/*
 *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2007 - 2009 ID7 Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "scst_priv.h"
static void scst_cmd_set_sn(struct scst_cmd *cmd);
static int __scst_init_cmd(struct scst_cmd *cmd);
static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
	uint64_t tag);
static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries);
static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
{
	struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
	unsigned long flags;

	spin_lock_irqsave(&t->tasklet_lock, flags);
	TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
		smp_processor_id());
	list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
	spin_unlock_irqrestore(&t->tasklet_lock, flags);

	tasklet_schedule(&t->tasklet);
}
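/*
 * Note: tasklet_schedule() queues the tasklet on the local CPU, so each
 * entry of the per-CPU scst_tasklets[] array is only ever fed from its own
 * CPU; tasklet_lock exists to serialize against the tasklet handler itself.
 */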
/*
 * Must not be called in parallel with scst_unregister_session() for the
 * same session.
 */
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
			     const uint8_t *lun, int lun_len,
			     const uint8_t *cdb, int cdb_len, int atomic)
{
	struct scst_cmd *cmd;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
		PRINT_CRIT_ERROR("%s",
			"New cmd while shutting down the session");
		sBUG();
	}
#endif

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (cmd == NULL)
		goto out;

	cmd->sess = sess;
	cmd->tgt = sess->tgt;
	cmd->tgtt = sess->tgt->tgtt;

	/*
	 * For both a wrong LUN and a wrong CDB, defer the error reporting
	 * to scst_cmd_init_done().
	 */
	cmd->lun = scst_unpack_lun(lun, lun_len);

	if (cdb_len <= SCST_MAX_CDB_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;
	}

	TRACE_DBG("cmd %p, sess %p", cmd, sess);

out:
	TRACE_EXIT();
	return cmd;
}
EXPORT_SYMBOL(scst_rx_cmd);
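/*
 * Typical call sequence from a target driver's receive path (an
 * illustrative sketch only; all "pdu_*" names are hypothetical driver
 * state, not SCST API):
 *
 *	cmd = scst_rx_cmd(sess, pdu_lun, pdu_lun_len,
 *			  pdu_cdb, pdu_cdb_len, in_interrupt());
 *	if (cmd == NULL)
 *		return -ENOMEM;
 *	scst_cmd_set_tag(cmd, pdu_tag);
 *	scst_cmd_init_done(cmd, SCST_CONTEXT_DIRECT);
 */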
/*
 * No locks, but might be on IRQ. Returns 0 on success, <0 if processing of
 * this command should be stopped.
 */
static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
{
	int rc, res = 0;
	unsigned long flags;

	TRACE_ENTRY();

	/* See the comment in scst_do_job_init() */
	if (unlikely(!list_empty(&scst_init_cmd_list))) {
		TRACE_MGMT_DBG("%s", "init cmd list busy");
		goto out_redirect;
	}
	/*
	 * A memory barrier isn't necessary here, because the CPU appears to
	 * be self-consistent and we don't care about the race described
	 * in the comment in scst_do_job_init().
	 */

	rc = __scst_init_cmd(cmd);
	if (unlikely(rc > 0))
		goto out_redirect;
	else if (unlikely(rc != 0)) {
		res = rc;
		goto out;
	}

	/* Small context optimization */
	if (((*context == SCST_CONTEXT_TASKLET) ||
	     (*context == SCST_CONTEXT_DIRECT_ATOMIC) ||
	     ((*context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd))) &&
	    scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_data_direction & SCST_DATA_WRITE) {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		} else {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_redirect:
	if (cmd->preprocessing_only) {
		/*
		 * Poor man's solution for single-threaded targets, where
		 * blocking the receiver at least sometimes means blocking
		 * them all.
		 */
		sBUG_ON(*context != SCST_CONTEXT_DIRECT);
		scst_set_busy(cmd);
		scst_set_cmd_abnormal_done_state(cmd);
		res = 1;
		/* Keep initiator away from too many BUSY commands */
		msleep(100);
	} else {
		spin_lock_irqsave(&scst_init_lock, flags);
		TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
			"%d)", cmd, atomic_read(&scst_cmd_count));
		list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
		if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			scst_init_poll_cnt++;
		spin_unlock_irqrestore(&scst_init_lock, flags);
		wake_up(&scst_init_cmd_list_waitQ);
		res = -1;
	}
	goto out;
}
#ifdef CONFIG_SCST_MEASURE_LATENCY
static inline uint64_t scst_sec_to_nsec(time_t sec)
{
	return (uint64_t)sec * 1000000000;
}
#endif
void scst_cmd_init_done(struct scst_cmd *cmd,
	enum scst_exec_context pref_context)
{
	int rc;
	struct scst_session *sess = cmd->sess;
	unsigned long flags;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, sess, cmd->start, ts.tv_sec,
			ts.tv_nsec);
	}
#endif

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
		"(cmd %p)", (long long unsigned int)cmd->tag,
		(long long unsigned int)cmd->lun, cmd->cdb_len,
		cmd->queue_type, cmd);
	PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely((in_irq() || irqs_disabled())) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	atomic_inc(&sess->sess_cmd_count);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		/*
		 * We always have to keep the command in the search list from
		 * the very beginning, because otherwise it can be missed
		 * during TM processing. This check is needed because there
		 * might be old, i.e. deferred, commands and new, i.e. just
		 * coming, ones.
		 */
		if (cmd->search_cmd_list_entry.next == NULL)
			list_add_tail(&cmd->search_cmd_list_entry,
				&sess->search_cmd_list);
		switch (sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
			break;
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list",
				  cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			goto out;
		case SCST_SESS_IPH_FAILED:
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			scst_set_busy(cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			goto active;
		default:
			sBUG();
		}
	} else
		list_add_tail(&cmd->search_cmd_list_entry,
			      &sess->search_cmd_list);

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);

	if (unlikely(cmd->lun == NO_SUCH_LUN)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("%s", "Wrong CDB len, finishing cmd");
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
		PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_message));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	/*
	 * The cmd must be inited here to preserve the order. In case the cmd
	 * was already preliminarily completed by the target driver, we still
	 * need to init it to find out in which format we should return sense.
	 */
	cmd->state = SCST_CMD_STATE_INIT;
	rc = scst_init_cmd(cmd, &pref_context);
	if (unlikely(rc < 0))
		goto out;
	else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
		/* Target driver preliminarily completed the cmd */
		scst_set_cmd_abnormal_done_state(cmd);
	}

active:
	/* Here cmd must not be in any cmd list, no locks */
	switch (pref_context) {
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet(cmd);
		break;

	case SCST_CONTEXT_DIRECT:
		scst_process_active_cmd(cmd, false);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	default:
		PRINT_ERROR("Context %x is undefined, using the thread one",
			pref_context);
		/* go through */
	case SCST_CONTEXT_THREAD:
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;
	}

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_cmd_init_done);
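/*
 * Rough meaning of the pref_context values dispatched above:
 *
 *	SCST_CONTEXT_DIRECT		- process in the caller, may sleep
 *	SCST_CONTEXT_DIRECT_ATOMIC	- process in the caller, atomic
 *	SCST_CONTEXT_TASKLET		- defer to the per-CPU tasklet
 *	SCST_CONTEXT_THREAD		- defer to an SCST kernel thread
 *	SCST_CONTEXT_SAME		- keep the context of the previous step
 */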
static int scst_pre_parse(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_device *dev = cmd->dev;
	int rc;

	TRACE_ENTRY();

	cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
	     (!dev->has_own_order_mgmt &&
	      (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
	       cmd->queue_type == SCST_CMD_QUEUE_ORDERED));

	/*
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from the
	 * CDB. Additionally, not all transports support supplying the
	 * expected transfer length.
	 */

	rc = scst_get_cdb_info(cmd);
	if (unlikely(rc != 0)) {
		if (rc > 0) {
			PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
			goto out_xmit;
		}
		PRINT_ERROR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_SCSI, "Using initiator supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;

			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore (possibly) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
				    SCST_LOAD_SENSE(scst_sense_invalid_opcode));
				goto out_xmit;
			}
		} else {
			PRINT_ERROR("Unknown opcode 0x%02x for %s and "
				"target %s not supplied expected values",
				cmd->cdb[0], dev->handler->name,
				cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_invalid_opcode));
			goto out_xmit;
		}
#else
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		goto out_xmit;
#endif
	} else {
		TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
			"(expected %d, set %s), transfer_len=%d (expected "
			"len %d), flags=%d", cmd->op_name, cmd,
			cmd->data_direction, cmd->expected_data_direction,
			scst_cmd_is_expected_set(cmd) ? "yes" : "no",
			cmd->bufflen, cmd->expected_transfer_len,
			cmd->op_flags);

		if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
			if (scst_cmd_is_expected_set(cmd)) {
				/*
				 * Command data length can't be easily
				 * determined from the CDB. ToDo: processing
				 * of all such commands should be fixed. Until
				 * it's done, get the length from the supplied
				 * expected value, but limit it to some
				 * reasonable value (15MB).
				 */
				cmd->bufflen = min(cmd->expected_transfer_len,
							15*1024*1024);
				cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
			} else
				cmd->bufflen = 0;
		}
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR("NACA bit in the CDB control byte is not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	cmd->state = SCST_CMD_STATE_DEV_PARSE;

out:
	TRACE_EXIT_RES(res);
	return res;

out_xmit:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
{
	bool res = false;

	switch (cmd->cdb[0]) {
	case TEST_UNIT_READY:
		/* Crazy VMware people sometimes do TUR with READ direction */
		res = true;
		break;
	case VERIFY:
	case VERIFY_6:
	case VERIFY_12:
	case VERIFY_16:
		/* VERIFY commands with BYTCHK unset shouldn't fail here */
		if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
		    (cmd->cdb[1] & BYTCHK) == 0)
			res = true;
		break;
	}

	return res;
}
#endif
static int scst_parse_cmd(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int state;
	struct scst_device *dev = cmd->dev;
	int orig_bufflen = cmd->bufflen;

	TRACE_ENTRY();

	if (likely(!scst_is_cmd_local(cmd))) {
		if (unlikely(!dev->handler->parse_atomic &&
			     scst_cmd_atomic(cmd))) {
			/*
			 * It shouldn't happen because of the
			 * SCST_TGT_DEV_AFTER_* optimization.
			 */
			TRACE_DBG("Dev handler %s parse() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
			cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd);
		/* Caution: cmd can be already dead here */
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

		switch (state) {
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			TRACE_DBG("Dev handler %s parse() requested thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		case SCST_CMD_STATE_STOP:
			TRACE_DBG("Dev handler %s parse() requested stop "
				"processing", dev->handler->name);
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			goto out;
		}

		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;
	} else
		state = SCST_CMD_STATE_PREPARE_SPACE;

	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;

	if (cmd->bufflen == 0) {
		/*
		 * According to SPC, bufflen 0 for data transfer commands isn't
		 * an error, so we need to fix the transfer direction.
		 */
		cmd->data_direction = SCST_DATA_NONE;
	}

	if (cmd->dh_data_buf_alloced &&
	    unlikely((orig_bufflen > cmd->bufflen))) {
		PRINT_ERROR("Dev handler supplied a data buffer (size %d) "
			"smaller than required (size %d)", cmd->bufflen,
			orig_bufflen);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}

	if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
		goto set_res;

	if (unlikely((cmd->bufflen == 0) &&
		     (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
		PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
			"(handler %s, target %s)", cmd->cdb[0],
			dev->handler->name, cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((cmd->bufflen != 0) &&
	    ((cmd->data_direction == SCST_DATA_NONE) ||
	     ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
		PRINT_ERROR("Dev handler %s parse() returned "
			"invalid cmd data_direction %d, bufflen %d, state %d "
			"or sg %p (opcode 0x%x)", dev->handler->name,
			cmd->data_direction, cmd->bufflen, state, cmd->sg,
			cmd->cdb[0]);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}
#endif

	if (scst_cmd_is_expected_set(cmd)) {
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
#	ifdef CONFIG_SCST_EXTRACHECKS
		if ((cmd->data_direction != cmd->expected_data_direction) ||
		    (cmd->bufflen != cmd->expected_transfer_len)) {
			PRINT_WARNING("Expected values don't match decoded "
				"ones: data_direction %d, "
				"expected_data_direction %d, "
				"bufflen %d, expected_transfer_len %d",
				cmd->data_direction,
				cmd->expected_data_direction,
				cmd->bufflen, cmd->expected_transfer_len);
			PRINT_BUFFER("Suspicious CDB", cmd->cdb, cmd->cdb_len);
		}
#	endif
		cmd->data_direction = cmd->expected_data_direction;
		cmd->bufflen = cmd->expected_transfer_len;
#else
		if (unlikely(cmd->data_direction !=
				cmd->expected_data_direction)) {
			if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
			     (cmd->bufflen != 0)) &&
			    !scst_is_allowed_to_mismatch_cmd(cmd)) {
				PRINT_ERROR("Expected data direction %d for "
					"opcode 0x%02x (handler %s, target %s) "
					"doesn't match decoded value %d",
					cmd->expected_data_direction,
					cmd->cdb[0], dev->handler->name,
					cmd->tgtt->name, cmd->data_direction);
				PRINT_BUFFER("Failed CDB",
					cmd->cdb, cmd->cdb_len);
				scst_set_cmd_error(cmd,
				   SCST_LOAD_SENSE(scst_sense_invalid_message));
				goto out_dev_done;
			}
		}
		if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
			TRACE(TRACE_MGMT_MINOR, "Warning: expected "
				"transfer length %d for opcode 0x%02x "
				"(handler %s, target %s) doesn't match "
				"decoded value %d. Faulty initiator "
				"(e.g. VMware is known to be such) or "
				"scst_scsi_op_table should be updated?",
				cmd->expected_transfer_len, cmd->cdb[0],
				dev->handler->name, cmd->tgtt->name,
				cmd->bufflen);
			PRINT_BUFF_FLAG(TRACE_MGMT_MINOR, "Suspicious CDB",
				cmd->cdb, cmd->cdb_len);
		}
#endif
	}

	if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
		PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
			"target %s", cmd->cdb[0], dev->handler->name,
			cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_error;
	}

set_res:
	switch (state) {
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_FINISHED_INTERNAL:
		cmd->state = state;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s parse() returned "
				"invalid cmd state %d (opcode %d)",
				dev->handler->name, state, cmd->cdb[0]);
		} else {
			PRINT_ERROR("Dev handler %s parse() returned "
				"error %d (opcode %d)", dev->handler->name,
				state, cmd->cdb[0]);
		}
		goto out_error;
	}

	if (cmd->resp_data_len == -1) {
		if (cmd->data_direction & SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
		else
			cmd->resp_data_len = 0;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
out_dev_done:
#endif
	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
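/*
 * For reference, the smallest possible dev handler parse() callback looks
 * roughly like the sketch below (hypothetical handler; real handlers
 * normally call a library helper such as scst_sbc_generic_parse() instead):
 *
 *	static int my_parse(struct scst_cmd *cmd)
 *	{
 *		// Accept what scst_get_cdb_info() decoded in scst_pre_parse()
 *		// and let the state machine continue with its default route.
 *		return SCST_CMD_STATE_DEFAULT;
 *	}
 */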
static int scst_prepare_space(struct scst_cmd *cmd)
{
	int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;

	TRACE_ENTRY();

	if (cmd->data_direction == SCST_DATA_NONE)
		goto prep_done;

	if (cmd->tgt_need_alloc_data_buf) {
		int orig_bufflen = cmd->bufflen;

		TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
			cmd);

		r = cmd->tgtt->alloc_data_buf(cmd);
		if (r != 0)
			goto check;

		if (unlikely(cmd->bufflen == 0)) {
			/* See comment in scst_alloc_space() */
			if (cmd->sg == NULL)
				goto check;
		}

		cmd->tgt_data_buf_alloced = 1;

		if (unlikely(orig_bufflen < cmd->bufflen)) {
			PRINT_ERROR("Target driver allocated a data "
				"buffer (size %d) smaller than "
				"required (size %d)", orig_bufflen,
				cmd->bufflen);
			goto out_error;
		}
		TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
	}

check:
	if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		r = scst_alloc_space(cmd);
	} else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
		TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
		r = 0;
	} else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
		cmd->sg = cmd->tgt_sg;
		cmd->sg_cnt = cmd->tgt_sg_cnt;
		cmd->in_sg = cmd->tgt_in_sg;
		cmd->in_sg_cnt = cmd->tgt_in_sg_cnt;
		r = 0;
	} else {
		TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
			"sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
			cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
		r = 0;
	}

	if (r != 0) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MEM("%s", "Atomic memory allocation failed, "
				"rescheduling to the thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		} else
			goto out_no_space;
	}

prep_done:
	if (cmd->preprocessing_only) {
		cmd->preprocessing_only = 0;

		if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
			TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
				"cmd %p", cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			res = SCST_CMD_STATE_RES_CONT_SAME;
			goto out;
		}

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;

		TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
		cmd->tgtt->preprocessing_done(cmd);
		TRACE_DBG("%s", "preprocessing_done() returned");
		goto out;
	}

	if (cmd->data_direction & SCST_DATA_WRITE)
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
	else
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;

out:
	TRACE_EXIT_HRES(res);
	return res;

out_no_space:
	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
	scst_set_busy(cmd);
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
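/*
 * Buffer ownership after scst_prepare_space(): tgt_data_buf_alloced and
 * dh_data_buf_alloced may be set independently or together. When both are
 * set, the dev handler's buffer stays in cmd->sg and the target driver's
 * in cmd->tgt_sg, and the core copies between them (see the
 * scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET) call in scst_exec() below).
 */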
void scst_restart_cmd(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE_DBG("tag=%llu, status=%#x",
		(long long unsigned int)scst_cmd_get_tag(cmd),
		status);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_PREPROCESS_STATUS_SUCCESS:
		if (cmd->data_direction & SCST_DATA_WRITE)
			cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
		else
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		if (cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (cmd->data_direction & SCST_DATA_WRITE) {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			} else {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			}
		}
		break;

	case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_PREPROCESS_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_PREPROCESS_STATUS_ERROR:
		if (cmd->sense != NULL)
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("%s() received unknown status %x", __func__,
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_proccess_redirect_cmd(cmd, pref_context, 1);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_restart_cmd);
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
	struct scst_tgt *tgt = cmd->sess->tgt;
	int res = 0;
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_cmds++;
	/*
	 * A memory barrier is needed here, because we need the exact order
	 * of reads and writes between retry_cmds and finished_cmds, to not
	 * miss the case when a command finished while we were queuing it
	 * for retry after the finished_cmds check.
	 */
	smp_mb();
	TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
		tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		tgt->retry_cmds--;
		TRACE_RETRY("Some command(s) finished, direct retry "
			"(finished_cmds=%d, tgt->finished_cmds=%d, "
			"retry_cmds=%d)", finished_cmds,
			atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
		res = -1;
		goto out_unlock_tgt;
	}

	TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	}

out_unlock_tgt:
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	TRACE_EXIT_RES(res);
	return res;
}
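/*
 * The smp_mb() above pairs with the path that increments
 * tgt->finished_cmds when a response completes (outside this excerpt):
 * either this CPU re-reads finished_cmds and retries directly, or the
 * completing CPU observes retry_cmds != 0 and processes the retry list,
 * so a QUEUE FULL retry can never be lost in the window between the check
 * and the list insertion.
 */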
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
	int res, rc;

	TRACE_ENTRY();

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_dev_done;
	}

	if ((cmd->tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	if (unlikely(!cmd->tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
		/*
		 * It shouldn't happen because of the
		 * SCST_TGT_DEV_AFTER_* optimization.
		 */
		TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	while (1) {
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_DATA_WAIT;

		TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef CONFIG_SCST_DEBUG_RETRY
		if (((scst_random() % 100) == 75))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->rdy_to_xfer(cmd);
		TRACE_DBG("rdy_to_xfer() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				break;
			else
				continue;

		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s "
				"rdy_to_xfer() requested thread "
				"context, rescheduling", cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			break;

		default:
			goto out_error_rc;
		}
		break;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_error_rc:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

out_dev_done:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
/* No locks, but might be in IRQ */
static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries)
{
	unsigned long flags;

	TRACE_ENTRY();

	TRACE_DBG("Context: %x", context);

	if (context == SCST_CONTEXT_SAME)
		context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
						 SCST_CONTEXT_DIRECT;

	switch (context) {
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		break;

	case SCST_CONTEXT_DIRECT:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		scst_process_active_cmd(cmd, false);
		break;

	default:
		PRINT_ERROR("Context %x is unknown, using the thread one",
			context);
		/* go through */
	case SCST_CONTEXT_THREAD:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;

	case SCST_CONTEXT_TASKLET:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		scst_schedule_tasklet(cmd);
		break;
	}

	TRACE_EXIT();
	return;
}
void scst_rx_data(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_RX_STATUS_SUCCESS:
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
		if (trace_flag & TRACE_RCV_BOT) {
			int i;
			struct scatterlist *sg;
			if (cmd->in_sg != NULL)
				sg = cmd->in_sg;
			else if (cmd->tgt_in_sg != NULL)
				sg = cmd->tgt_in_sg;
			else if (cmd->tgt_sg != NULL)
				sg = cmd->tgt_sg;
			else
				sg = cmd->sg;
			if (sg != NULL) {
				TRACE_RECV_BOT("RX data for cmd %p "
					"(sg_cnt %d, sg %p, sg[0].page %p)",
					cmd, cmd->tgt_sg_cnt, sg,
					(void *)sg_page(&sg[0]));
				for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
					PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
						sg_virt(&sg[i]), sg[i].length);
				}
			}
		}
#endif
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				pref_context = SCST_CONTEXT_THREAD;
		}
		break;

	case SCST_RX_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("scst_rx_data() received unknown status %x",
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_proccess_redirect_cmd(cmd, pref_context, 1);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_rx_data);
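/*
 * Write-direction flow from the target driver's perspective (a sketch):
 * SCST calls tgtt->rdy_to_xfer(cmd), the driver transfers data-out into
 * scst_cmd_get_sg(cmd), and when the transfer finishes it calls
 * scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS, <context>), which moves the
 * command to TGT_PRE_EXEC above. On a transport error the driver reports
 * one of the SCST_RX_STATUS_ERROR* codes instead and the command
 * completes abnormally.
 */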
static int scst_tgt_pre_exec(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME, rc;

	TRACE_ENTRY();

	cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;

	if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
		goto out;

	TRACE_DBG("Calling pre_exec(%p)", cmd);
	rc = cmd->tgtt->pre_exec(cmd);
	TRACE_DBG("pre_exec() returned %d", rc);

	if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
		switch (rc) {
		case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_ERROR_FATAL:
			set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
			/* go through */
		case SCST_PREPROCESS_STATUS_ERROR:
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_NEED_THREAD:
			TRACE_DBG("Target driver's %s pre_exec() requested "
				"thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
			break;
		default:
			sBUG();
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int resid)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	cmd->status = result & 0xff;
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	if (unlikely(resid != 0)) {
#ifdef CONFIG_SCST_EXTRACHECKS
		if ((resid < 0) || (resid > cmd->resp_data_len)) {
			PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
				"op %x)", resid, cmd->resp_data_len,
				cmd->cdb[0]);
		} else
#endif
			scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
	}

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
		/* We might have a double reset UA here */
		cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
		cmd->dbl_ua_orig_data_direction = cmd->data_direction;

		scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
	}

	TRACE(TRACE_SCSI, "cmd %p, result=%x, cmd->status=%x, resid=%d, "
		"cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x (cmd %p)", cmd, result, cmd->status,
		resid, cmd->msg_status, cmd->host_status, cmd->driver_status,
		cmd);

	cmd->completed = 1;

	TRACE_EXIT();
	return;
}
/* For small context optimization */
static inline enum scst_exec_context scst_optimize_post_exec_context(
	struct scst_cmd *cmd, enum scst_exec_context context)
{
	if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
	    (context == SCST_CONTEXT_TASKLET) ||
	    (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
		if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
				&cmd->tgt_dev->tgt_dev_flags))
			context = SCST_CONTEXT_THREAD;
	}
	return context;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
					    struct scsi_request **req)
{
	struct scst_cmd *cmd = NULL;

	if (scsi_cmd && (*req = scsi_cmd->sc_request))
		cmd = (struct scst_cmd *)(*req)->upper_private_data;

	if (cmd == NULL) {
		PRINT_ERROR("%s", "Request with NULL cmd");
		if (*req)
			scsi_release_request(*req);
	}

	return cmd;
}

static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;

	TRACE_ENTRY();

	cmd = scst_get_cmd(scsi_cmd, &req);
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), scsi_cmd->resid);

	/* Clear out the request structure */
	req->sr_use_sg = 0;
	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown request blk */

	scst_release_request(cmd);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()),
		0);

out:
	TRACE_EXIT();
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_cmd_done(void *data, char *sense, int result, int resid)
{
	struct scst_cmd *cmd;

	TRACE_ENTRY();

	cmd = (struct scst_cmd *)data;
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);

out:
	TRACE_EXIT();
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_PRE_DEV_DONE;

#if defined(CONFIG_SCST_DEBUG)
	if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
		if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
				"%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_RCV_TOP,
					"Exec'd sg", sg_virt(&sg[i]),
					sg[i].length);
			}
		}
	}
#endif

	cmd->state = next_state;

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED) &&
	    (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
		PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
			__func__, next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
	}
#endif
	pref_context = scst_optimize_post_exec_context(cmd, pref_context);
	scst_proccess_redirect_cmd(cmd, pref_context, 0);

	TRACE_EXIT();
	return;
}
static int scst_report_luns_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_COMPLETED, rc;
	int dev_cnt = 0;
	int buffer_size;
	int i;
	struct scst_tgt_dev *tgt_dev = NULL;
	uint8_t *buffer;
	int offs, overflow = 0;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	cmd->status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
		PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
			"LUNS command", cmd->cdb[2]);
		goto out_err;
	}

	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	if (buffer_size < 16)
		goto out_put_err;

	memset(buffer, 0, buffer_size);
	offs = 8;

	/* sess->sess_tgt_dev_list_hash is protected by suspended activity */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (!overflow) {
				if (offs >= buffer_size) {
					scst_put_buf(cmd, buffer);
					buffer_size = scst_get_buf_next(cmd,
								&buffer);
					if (buffer_size > 0) {
						memset(buffer, 0, buffer_size);
						offs = 0;
					} else {
						overflow = 1;
						goto inc_dev_cnt;
					}
				}
				if ((buffer_size - offs) < 8) {
					PRINT_ERROR("Buffer allocated for "
						"the REPORT LUNS command "
						"doesn't have room for an "
						"8-byte entry "
						"(buffer_size=%d)",
						buffer_size);
					goto out_put_hw_err;
				}
				buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
				buffer[offs+1] = tgt_dev->lun & 0xff;
				offs += 8;
			}
inc_dev_cnt:
			dev_cnt++;
		}
	}
	if (!overflow)
		scst_put_buf(cmd, buffer);

	/* Set the response header */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	dev_cnt *= 8;
	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;

	scst_put_buf(cmd, buffer);

	dev_cnt += 8;
	if (dev_cnt < cmd->resp_data_len)
		scst_set_resp_data_len(cmd, dev_cnt);

out_compl:
	cmd->completed = 1;

	/* Clear any leftover REPORTED LUNS DATA CHANGED UA */

	mutex_lock(&scst_mutex);	/* protect sess_tgt_dev_list_hash */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			struct scst_tgt_dev_UA *ua;

			spin_lock_bh(&tgt_dev->tgt_dev_lock);
			list_for_each_entry(ua, &tgt_dev->UA_list,
						UA_list_entry) {
				if (scst_analyze_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						SCST_SENSE_ALL_VALID,
						SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
					TRACE_MGMT_DBG("Freeing not needed "
						"REPORTED LUNS DATA CHANGED UA "
						"%p", ua);
					list_del(&ua->UA_list_entry);
					mempool_free(ua, scst_ua_mempool);
					break;
				}
			}
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}
	mutex_unlock(&scst_mutex);

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);

out:
	TRACE_EXIT_RES(res);
	return res;

out_put_err:
	scst_put_buf(cmd, buffer);

out_err:
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	goto out_compl;

out_put_hw_err:
	scst_put_buf(cmd, buffer);

out_hw_err:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_compl;
}
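/*
 * Format note: the response built above is an 8-byte header whose first
 * four bytes carry the LUN list length in bytes (dev_cnt * 8), followed by
 * one 8-byte entry per LUN with the LUN number in the first two bytes of
 * the entry - the same flat addressing that scst_unpack_lun() parses on
 * the inbound side.
 */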
static int scst_request_sense_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	uint8_t *buffer;
	int buffer_size = 0;

	TRACE_ENTRY();

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	cmd->status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	spin_lock_bh(&tgt_dev->tgt_dev_lock);

	if (tgt_dev->tgt_dev_valid_sense_len == 0)
		goto out_not_completed;

	TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);

	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	memset(buffer, 0, buffer_size);

	if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
	     (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
		PRINT_WARNING("%s: Fixed format of the saved sense, but "
			"descriptor format requested. The conversion will "
			"truncate data", cmd->op_name);
		PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
			tgt_dev->tgt_dev_valid_sense_len);

		buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
		scst_set_sense(buffer, buffer_size, true,
			tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
			tgt_dev->tgt_dev_sense[13]);
	} else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
		    (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
		PRINT_WARNING("%s: Descriptor format of the "
			"saved sense, but fixed format requested. The "
			"conversion will truncate data", cmd->op_name);
		PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
			tgt_dev->tgt_dev_valid_sense_len);

		buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
		scst_set_sense(buffer, buffer_size, false,
			tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
			tgt_dev->tgt_dev_sense[3]);
	} else {
		if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
			buffer_size = tgt_dev->tgt_dev_valid_sense_len;
		else {
			PRINT_WARNING("%s: Returned sense truncated to "
				"size %d (needed %d)", cmd->op_name,
				buffer_size, tgt_dev->tgt_dev_valid_sense_len);
		}
		memcpy(buffer, tgt_dev->tgt_dev_sense, buffer_size);
	}

	scst_put_buf(cmd, buffer);

out_compl:
	tgt_dev->tgt_dev_valid_sense_len = 0;
	scst_set_resp_data_len(cmd, buffer_size);

	spin_unlock_bh(&tgt_dev->tgt_dev_lock);

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);

out:
	TRACE_EXIT_RES(res);
	return res;

out_hw_err:
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_done;

out_not_completed:
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);
	res = SCST_EXEC_NOT_COMPLETED;
	goto out;
}
static int scst_pre_select(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	scst_block_dev_cmd(cmd, 1);

	/* Checking for local events will be done when the cmd is executed */

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_reserve_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%lld)", (long long unsigned int)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_done;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		spin_unlock_bh(&dev->dev_lock);
		scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
		goto out_done;
	}

	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
	}
	dev->dev_reserved = 1;

	spin_unlock_bh(&dev->dev_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_release_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	/*
	 * The device could be RELEASED behind us, if the RESERVING session
	 * is closed (see scst_free_tgt_dev()), but this actually doesn't
	 * matter, so take the lock and don't retest the DEV_RESERVED bits.
	 */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;
		cmd->status = 0;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;
		cmd->completed = 1;
	} else {
		list_for_each_entry(tgt_dev_tmp,
				&dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
	}

	spin_unlock_bh(&dev->dev_lock);

	if (res == SCST_EXEC_COMPLETED)
		goto out_done;

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out;
}
/* No locks, no IRQ or IRQ-safe context allowed */
int scst_check_local_events(struct scst_cmd *cmd)
{
	int res, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;

	TRACE_ENTRY();

	/*
	 * There's no race here, because we need to trace commands sent
	 * *after* the dev_double_ua_possible flag was set.
	 */
	if (unlikely(dev->dev_double_ua_possible))
		cmd->double_ua_possible = 1;

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_uncomplete;
	}

	/* Reservation check before Unit Attention */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
			&tgt_dev->tgt_dev_flags))) {
		if (cmd->cdb[0] != INQUIRY &&
		    cmd->cdb[0] != REPORT_LUNS &&
		    cmd->cdb[0] != RELEASE &&
		    cmd->cdb[0] != RELEASE_10 &&
		    cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER &&
		    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL ||
		     (cmd->cdb[4] & 3)) &&
		    cmd->cdb[0] != LOG_SENSE &&
		    cmd->cdb[0] != REQUEST_SENSE) {
			scst_set_cmd_error_status(cmd,
				SAM_STAT_RESERVATION_CONFLICT);
			goto out_complete;
		}
	}

	/* If we had an internal bus reset, set the command error UA */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(dev->scsi_dev->was_reset)) {
		if (scst_is_ua_command(cmd)) {
			/*
			 * Prevent more than one cmd from being triggered by
			 * was_reset.
			 */
			spin_lock_bh(&dev->dev_lock);
			if (dev->scsi_dev->was_reset) {
				TRACE(TRACE_MGMT, "was_reset is %d", 1);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_reset_UA));
				/*
				 * It looks like it is safe to clear was_reset
				 * here.
				 */
				dev->scsi_dev->was_reset = 0;
			}
			spin_unlock_bh(&dev->dev_lock);

			if (cmd->status != 0)
				goto out_complete;
		}
	}

	if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
			&cmd->tgt_dev->tgt_dev_flags))) {
		if (scst_is_ua_command(cmd)) {
			rc = scst_set_pending_UA(cmd);
			if (rc == 0)
				goto out_complete;
		}
	}

	res = 0;

out:
	TRACE_EXIT_RES(res);
	return res;

out_complete:
	res = 1;
	sBUG_ON(!cmd->completed);
	goto out;

out_uncomplete:
	res = -1;
	goto out;
}
EXPORT_SYMBOL(scst_check_local_events);
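/*
 * The order of checks above is deliberate: abort first, then reservation
 * conflict, then reset UA, then other pending UAs, so an aborted command
 * never consumes a pending Unit Attention, and reservation conflicts are
 * reported even while UAs are queued.
 */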
void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
{
	if (slot == NULL)
		goto inc;

	/* Optimized for lockless fast path */

	TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
		atomic_read(slot));

	if (!atomic_dec_and_test(slot))
		goto out;

	TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
		tgt_dev->num_free_sn_slots);
	if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
		spin_lock_irq(&tgt_dev->sn_lock);
		if (likely(tgt_dev->num_free_sn_slots <
				(int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
			if (tgt_dev->num_free_sn_slots < 0)
				tgt_dev->cur_sn_slot = slot;
			/*
			 * To be in sync with the SIMPLE case in
			 * scst_cmd_set_sn()
			 */
			smp_mb();
			tgt_dev->num_free_sn_slots++;
			TRACE_SN("Incremented num_free_sn_slots (%d)",
				tgt_dev->num_free_sn_slots);
		}
		spin_unlock_irq(&tgt_dev->sn_lock);
	}

inc:
	/*
	 * No protection of expected_sn is needed, because only one thread
	 * at a time can be here (serialized by sn). Also it is supposed that
	 * there could not be half-incremented halves.
	 */
	tgt_dev->expected_sn++;
	/*
	 * The write must be before the def_cmd_count read to be in sync
	 * with scst_post_exec_sn(). See the comment in scst_send_for_exec().
	 */
	smp_mb();
	TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);

out:
	return;
}
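/*
 * sn_slots[] batches consecutive SIMPLE commands onto one atomic slot:
 * each command in the batch holds one reference, and expected_sn only
 * advances once atomic_dec_and_test() drains the slot, while ORDERED
 * commands get an SN of their own. scst_inc_expected_sn() is therefore
 * called at most once per SN value, which is what makes the unlocked
 * expected_sn++ above safe.
 */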
static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
	bool make_active)
{
	/* For HQ commands SN is not set */
	bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
			       cmd->sn_set && !cmd->retry;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_cmd *res;

	TRACE_ENTRY();

	if (inc_expected_sn)
		scst_inc_expected_sn(tgt_dev, cmd->sn_slot);

	if (make_active) {
		scst_make_deferred_commands_active(tgt_dev);
		res = NULL;
	} else
		res = scst_check_deferred_commands(tgt_dev);

	TRACE_EXIT_HRES(res);
	return res;
}
/* cmd must be additionally referenced to not die inside */
static int scst_do_real_exec(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
	int rc;
#endif
	bool atomic = scst_cmd_atomic(cmd);
	struct scst_device *dev = cmd->dev;
	struct scst_dev_type *handler = dev->handler;
	struct io_context *old_ctx = NULL;
	bool ctx_changed = false;

	TRACE_ENTRY();

	if (!atomic)
		ctx_changed = scst_set_io_context(cmd, &old_ctx);

	cmd->state = SCST_CMD_STATE_REAL_EXECUTING;

	if (handler->exec) {
		if (unlikely(!dev->handler->exec_atomic && atomic)) {
			/*
			 * It shouldn't happen because of the
			 * SCST_TGT_DEV_AFTER_* optimization.
			 */
			TRACE_DBG("Dev handler %s exec() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		}

		TRACE_DBG("Calling dev handler %s exec(%p)",
			handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
			cmd->cdb_len);
		res = handler->exec(cmd);
		TRACE_DBG("Dev handler %s exec() returned %d",
			handler->name, res);

		if (res == SCST_EXEC_COMPLETED)
			goto out_reset_ctx;
		else if (res == SCST_EXEC_NEED_THREAD)
			goto out_restore;

		sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
	}

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);

	if (unlikely(dev->scsi_dev == NULL)) {
		PRINT_ERROR("Command for virtual device must be "
			"processed by device handler (LUN %lld)!",
			(long long unsigned int)cmd->lun);
		goto out_error;
	}

	res = scst_check_local_events(cmd);
	if (unlikely(res != 0))
		goto out_done;

#ifndef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
	if (unlikely(atomic)) {
		TRACE_DBG("Pass-through exec() can not be called in atomic "
			"context, rescheduling to the thread (handler %s)",
			handler->name);
		res = SCST_EXEC_NEED_THREAD;
		goto out_restore;
	}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
	if (unlikely(scst_alloc_request(cmd) != 0)) {
		if (atomic) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_INFO("%s", "Unable to allocate request, "
				"sending BUSY status");
			goto out_busy;
		}
	}

	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
		cmd->retries);
#else
	rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
		cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
		cmd->timeout, cmd->retries, cmd, scst_cmd_done,
		atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(rc != 0)) {
		if (atomic) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_ERROR("scst_exec_req() failed: %d", rc);
			goto out_error;
		}
	}
#endif

	res = SCST_EXEC_COMPLETED;

out_reset_ctx:
	if (ctx_changed)
		scst_reset_io_context(cmd->tgt_dev, old_ctx);

out:
	TRACE_EXIT_RES(res);
	return res;

out_restore:
	/* Restore the state */
	cmd->state = SCST_CMD_STATE_REAL_EXEC;
	goto out_reset_ctx;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_done;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
out_busy:
	scst_set_busy(cmd);
	/* go through */
#endif

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out_reset_ctx;
}
static inline int scst_real_exec(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_real_exec(cmd);

	if (likely(res == SCST_EXEC_COMPLETED)) {
		scst_post_exec_sn(cmd, true);
		if (cmd->dev->scsi_dev != NULL)
			generic_unplug_device(
				cmd->dev->scsi_dev->request_queue);
	} else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */

	TRACE_EXIT_RES(res);
	return res;
}
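/*
 * The three BUILD_BUG_ON()s above document the contract that makes this
 * wrapper cheap: the SCST_EXEC_* codes are numerically identical to the
 * SCST_CMD_STATE_RES_* codes, so an exec result can be returned directly
 * as a state machine result without any translation.
 */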
static int scst_do_local_exec(struct scst_cmd *cmd)
{
	int res;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	TRACE_ENTRY();

	/* Check the READ_ONLY device status */
	if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
	    (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
	     cmd->dev->rd_only)) {
		PRINT_WARNING("Attempted write access to a read-only device: "
			"initiator %s, LUN %lld, op %x",
			cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));
		goto out_done;
	}

	/*
	 * When adding new commands here, don't forget to update
	 * scst_is_cmd_local() in scst.h, if necessary.
	 */

	if (!(cmd->op_flags & SCST_LOCAL_EXEC_NEEDED)) {
		res = SCST_EXEC_NOT_COMPLETED;
		goto out;
	}

	switch (cmd->cdb[0]) {
	case MODE_SELECT:
	case MODE_SELECT_10:
	case LOG_SELECT:
		res = scst_pre_select(cmd);
		break;
	case RESERVE:
	case RESERVE_10:
		res = scst_reserve_local(cmd);
		break;
	case RELEASE:
	case RELEASE_10:
		res = scst_release_local(cmd);
		break;
	case REPORT_LUNS:
		res = scst_report_luns_local(cmd);
		break;
	case REQUEST_SENSE:
		res = scst_request_sense_local(cmd);
		break;
	default:
		res = SCST_EXEC_NOT_COMPLETED;
		break;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_local_exec(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_local_exec(cmd);
	if (likely(res == SCST_EXEC_NOT_COMPLETED))
		cmd->state = SCST_CMD_STATE_REAL_EXEC;
	else if (res == SCST_EXEC_COMPLETED)
		scst_post_exec_sn(cmd, true);
	else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_exec(struct scst_cmd **active_cmd)
{
	struct scst_cmd *cmd = *active_cmd;
	struct scst_cmd *ref_cmd;
	struct scst_device *dev = cmd->dev;
	int res = SCST_CMD_STATE_RES_CONT_NEXT, count;

	TRACE_ENTRY();

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
		goto out;

	/* To protect tgt_dev */
	ref_cmd = cmd;
	__scst_cmd_get(ref_cmd);

	count = 0;
	while (1) {
		int rc;

		cmd->sent_for_exec = 1;
		/*
		 * To sync with scst_abort_cmd(). The above assignment must
		 * be before the SCST_CMD_ABORTED test, done later in
		 * scst_check_local_events(). It's far from here, so the order
		 * is virtually guaranteed, but let's have it just in case.
		 */
		smp_mb();

		cmd->scst_cmd_done = scst_cmd_done_local;
		cmd->state = SCST_CMD_STATE_LOCAL_EXEC;

		if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
		    (cmd->data_direction & SCST_DATA_WRITE))
			scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);

		rc = scst_do_local_exec(cmd);
		if (likely(rc == SCST_EXEC_NOT_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("%s", "scst_do_local_exec() requested "
				"thread context, rescheduling");
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else {
			sBUG_ON(rc != SCST_EXEC_COMPLETED);
			goto done;
		}

		cmd->state = SCST_CMD_STATE_REAL_EXEC;

		rc = scst_do_real_exec(cmd);
		if (likely(rc == SCST_EXEC_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("scst_real_exec() requested thread "
				"context, rescheduling (cmd %p)", cmd);
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else
			sBUG();

done:
		count++;

		cmd = scst_post_exec_sn(cmd, false);
		if (cmd == NULL)
			break;

		if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
			break;

		__scst_cmd_put(ref_cmd);
		ref_cmd = cmd;
		__scst_cmd_get(ref_cmd);
	}

	*active_cmd = cmd;

	if (count == 0)
		goto out_put;

	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);

out_put:
	__scst_cmd_put(ref_cmd);
	/* !! At this point sess, dev and tgt_dev can be already freed !! */

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_send_for_exec(struct scst_cmd **active_cmd)
{
	int res;
	struct scst_cmd *cmd = *active_cmd;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	typeof(tgt_dev->expected_sn) expected_sn;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	if (cmd->pre_exec_finish == 0) {
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->pre_exec_finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): pre_exec_finish %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->pre_exec_finish,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	if (unlikely(cmd->internal))
		goto exec;

	if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
		goto exec;

	sBUG_ON(!cmd->sn_set);

	expected_sn = tgt_dev->expected_sn;
	/* Optimized for lockless fast path */
	if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
		spin_lock_irq(&tgt_dev->sn_lock);

		tgt_dev->def_cmd_count++;
		/*
		 * A memory barrier is needed here to implement the lockless
		 * fast path. We need the exact order of reads and writes
		 * between def_cmd_count and expected_sn. Otherwise, we could
		 * miss the case when expected_sn was changed to be equal to
		 * cmd->sn while we were queuing cmd on the deferred list
		 * after the expected_sn check below; that would lead to a
		 * forever stuck command. With the barrier, in such a case
		 * __scst_check_deferred_commands() will be called and it will
		 * take sn_lock, so we will be synchronized.
		 */
		smp_mb();

		expected_sn = tgt_dev->expected_sn;
		if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
			if (unlikely(test_bit(SCST_CMD_ABORTED,
					&cmd->cmd_flags))) {
				/* Necessary to allow aborting out of sn cmds */
				TRACE_MGMT_DBG("Aborting out of sn cmd %p "
					"(tag %llu, sn %lu)", cmd,
					(long long unsigned)cmd->tag, cmd->sn);
				tgt_dev->def_cmd_count--;
				scst_set_cmd_abnormal_done_state(cmd);
				res = SCST_CMD_STATE_RES_CONT_SAME;
			} else {
				TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
					"expected_sn=%ld)", cmd, cmd->sn,
					cmd->sn_set, expected_sn);
				list_add_tail(&cmd->sn_cmd_list_entry,
					&tgt_dev->deferred_cmd_list);
				res = SCST_CMD_STATE_RES_CONT_NEXT;
			}
			spin_unlock_irq(&tgt_dev->sn_lock);
			goto out;
		} else {
			TRACE_SN("Somebody incremented expected_sn %ld, "
				"continuing", expected_sn);
			tgt_dev->def_cmd_count--;
			spin_unlock_irq(&tgt_dev->sn_lock);
		}
	}

exec:
	res = scst_exec(active_cmd);

out:
	TRACE_EXIT_HRES(res);
	return res;
}
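/*
 * The smp_mb() above pairs with the one in scst_inc_expected_sn(): this
 * side orders "def_cmd_count++ before the expected_sn re-read", while the
 * completing side orders "expected_sn++ before its def_cmd_count read",
 * so at least one of the two always observes the other's update and a
 * deferred command cannot get stuck.
 */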
2411 /* No locks supposed to be held */
2412 static int scst_check_sense(struct scst_cmd *cmd)
2415 struct scst_device *dev = cmd->dev;
2419 if (unlikely(cmd->ua_ignore))
2422 /* If we had internal bus reset behind us, set the command error UA */
2423 if ((dev->scsi_dev != NULL) &&
2424 unlikely(cmd->host_status == DID_RESET) &&
2425 scst_is_ua_command(cmd)) {
2426 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
2427 dev->scsi_dev->was_reset, cmd->host_status);
2428 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
2429 /* It looks like it is safe to clear was_reset here */
2430 dev->scsi_dev->was_reset = 0;
2433 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2434 SCST_SENSE_VALID(cmd->sense)) {
2435 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2436 cmd->sense_bufflen);
2438 /* Check Unit Attention Sense Key */
2439 if (scst_is_ua_sense(cmd->sense)) {
2440 if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
2441 SCST_SENSE_ASC_VALID,
2442 0, SCST_SENSE_ASC_UA_RESET, 0)) {
2443 if (cmd->double_ua_possible) {
2444 TRACE(TRACE_MGMT_MINOR, "Double UA "
2445 "detected for device %p", dev);
2446 TRACE(TRACE_MGMT_MINOR, "Retrying cmd"
2447 " %p (tag %llu)", cmd,
2448 (long long unsigned)cmd->tag);
2451 cmd->msg_status = 0;
2452 cmd->host_status = DID_OK;
2453 cmd->driver_status = 0;
2455 mempool_free(cmd->sense,
2456 scst_sense_mempool);
2459 scst_check_restore_sg_buff(cmd);
2461 sBUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
2462 cmd->data_direction =
2463 cmd->dbl_ua_orig_data_direction;
2464 cmd->resp_data_len =
2465 cmd->dbl_ua_orig_resp_data_len;
2467 cmd->state = SCST_CMD_STATE_REAL_EXEC;
2473 scst_dev_check_set_UA(dev, cmd, cmd->sense,
2474 cmd->sense_bufflen);
2478 if (unlikely(cmd->double_ua_possible)) {
2479 if (scst_is_ua_command(cmd)) {
2480 TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
2481 "cmd %p)", dev, cmd);
/*
 * The lock is used to protect the other flags in the bitfield
 * (just in case, actually). Those flags can't be changed in
 * parallel, because access to the device is serialized.
 */
2488 spin_lock_bh(&dev->dev_lock);
2489 dev->dev_double_ua_possible = 0;
2490 spin_unlock_bh(&dev->dev_lock);
2495 TRACE_EXIT_RES(res);
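/*
 * For reference, scst_analyze_sense() as called above compares only
 * the sense fields whose valid flags are passed. E.g. the reset UA
 * test used earlier in this function is:
 *
 *	scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
 *		SCST_SENSE_ASC_VALID, 0, SCST_SENSE_ASC_UA_RESET, 0);
 *
 * Only the ASC is matched here; the key and ASCQ arguments are
 * ignored, since their valid flags are not set.
 */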
2499 static int scst_check_auto_sense(struct scst_cmd *cmd)
2505 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2506 (!SCST_SENSE_VALID(cmd->sense) ||
2507 SCST_NO_SENSE(cmd->sense))) {
2508 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2509 "cmd->status=%x, cmd->msg_status=%x, "
2510 "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
2511 cmd->status, cmd->msg_status, cmd->host_status,
2512 cmd->driver_status, cmd);
2514 } else if (unlikely(cmd->host_status)) {
2515 if ((cmd->host_status == DID_REQUEUE) ||
2516 (cmd->host_status == DID_IMM_RETRY) ||
2517 (cmd->host_status == DID_SOFT_ERROR) ||
2518 (cmd->host_status == DID_ABORT)) {
2521 TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2522 "received, returning HARDWARE ERROR instead "
2523 "(cmd %p)", cmd->host_status, cmd);
2524 scst_set_cmd_error(cmd,
2525 SCST_LOAD_SENSE(scst_sense_hardw_error));
2529 TRACE_EXIT_RES(res);
2533 static int scst_pre_dev_done(struct scst_cmd *cmd)
2535 int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
2539 if (unlikely(scst_check_auto_sense(cmd))) {
2540 PRINT_INFO("Command finished with CHECK CONDITION, but "
2541 "without sense data (opcode 0x%x), issuing "
2542 "REQUEST SENSE", cmd->cdb[0]);
2543 rc = scst_prepare_request_sense(cmd);
2545 res = SCST_CMD_STATE_RES_CONT_NEXT;
2547 PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
2548 "returning HARDWARE ERROR");
2549 scst_set_cmd_error(cmd,
2550 SCST_LOAD_SENSE(scst_sense_hardw_error));
2553 } else if (unlikely(scst_check_sense(cmd)))
2556 if (likely(scsi_status_is_good(cmd->status))) {
2557 unsigned char type = cmd->dev->type;
2558 if (unlikely((cmd->cdb[0] == MODE_SENSE ||
2559 cmd->cdb[0] == MODE_SENSE_10)) &&
2560 (cmd->tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
2561 cmd->dev->rd_only) &&
2562 (type == TYPE_DISK ||
2563 type == TYPE_WORM ||
2565 type == TYPE_TAPE)) {
int length;
uint8_t *address;

length = scst_get_buf_first(cmd, &address);
if (unlikely(length <= 0)) {
2572 PRINT_ERROR("%s", "Unable to get "
2573 "MODE_SENSE buffer");
2574 scst_set_cmd_error(cmd,
SCST_LOAD_SENSE(scst_sense_hardw_error));
2578 } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
address[2] |= 0x80; /* Write Protect */
2580 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
address[3] |= 0x80; /* Write Protect */
2582 scst_put_buf(cmd, address);
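/*
 * Note on the write-protect patching above: bit 7 of the
 * DEVICE-SPECIFIC PARAMETER byte is the WP bit. That byte is at
 * offset 2 of the 6-byte MODE SENSE parameter header and at offset 3
 * of the 10-byte MODE SENSE(10) header, hence address[2] vs
 * address[3].
 */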
/*
 * Check and clear the NormACA option for the device, if necessary,
 * since we don't support ACA.
 */
2592 if (unlikely((cmd->cdb[0] == INQUIRY)) &&
2593 /* Std INQUIRY data (no EVPD) */
2594 !(cmd->cdb[1] & SCST_INQ_EVPD) &&
2595 (cmd->resp_data_len > SCST_INQ_BYTE3)) {
int buflen;
uint8_t *buffer;

/* ToDo: all pages ?? */
buflen = scst_get_buf_first(cmd, &buffer);
2602 if (buflen > SCST_INQ_BYTE3) {
2603 #ifdef CONFIG_SCST_EXTRACHECKS
2604 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2605 PRINT_INFO("NormACA set for device: "
2606 "lun=%lld, type 0x%02x. Clear it, "
2607 "since it's unsupported.",
2608 (long long unsigned int)cmd->lun,
2612 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2613 } else if (buflen != 0) {
PRINT_ERROR("%s", "Unable to get INQUIRY buffer");
2616 scst_set_cmd_error(cmd,
2617 SCST_LOAD_SENSE(scst_sense_hardw_error));
2621 scst_put_buf(cmd, buffer);
2627 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2628 (cmd->cdb[0] == MODE_SELECT_10) ||
2629 (cmd->cdb[0] == LOG_SELECT))) {
2631 "MODE/LOG SELECT succeeded (LUN %lld)",
2632 (long long unsigned int)cmd->lun);
2633 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2637 if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
2638 if (!test_bit(SCST_TGT_DEV_RESERVED,
2639 &cmd->tgt_dev->tgt_dev_flags)) {
2640 struct scst_tgt_dev *tgt_dev_tmp;
2641 struct scst_device *dev = cmd->dev;
2644 "Real RESERVE failed lun=%lld, "
2646 (long long unsigned int)cmd->lun,
2648 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2649 cmd->sense_bufflen);
2651 /* Clearing the reservation */
2652 spin_lock_bh(&dev->dev_lock);
2653 list_for_each_entry(tgt_dev_tmp,
2654 &dev->dev_tgt_dev_list,
2655 dev_tgt_dev_list_entry) {
2656 clear_bit(SCST_TGT_DEV_RESERVED,
2657 &tgt_dev_tmp->tgt_dev_flags);
2659 dev->dev_reserved = 0;
2660 spin_unlock_bh(&dev->dev_lock);
2664 /* Check for MODE PARAMETERS CHANGED UA */
2665 if ((cmd->dev->scsi_dev != NULL) &&
2666 (cmd->status == SAM_STAT_CHECK_CONDITION) &&
2667 SCST_SENSE_VALID(cmd->sense) &&
2668 scst_is_ua_sense(cmd->sense) &&
2669 scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
2670 SCST_SENSE_ASCx_VALID,
2672 TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
2673 "%lld)", (long long unsigned int)cmd->lun);
2674 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2679 cmd->state = SCST_CMD_STATE_DEV_DONE;
2682 TRACE_EXIT_RES(res);
2686 static int scst_mode_select_checks(struct scst_cmd *cmd)
2688 int res = SCST_CMD_STATE_RES_CONT_SAME;
2689 int atomic = scst_cmd_atomic(cmd);
2693 if (likely(scsi_status_is_good(cmd->status))) {
2694 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2695 (cmd->cdb[0] == MODE_SELECT_10) ||
2696 (cmd->cdb[0] == LOG_SELECT))) {
2697 struct scst_device *dev = cmd->dev;
2698 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
2700 if (atomic && (dev->scsi_dev != NULL)) {
2701 TRACE_DBG("%s", "MODE/LOG SELECT: thread "
2702 "context required");
2703 res = SCST_CMD_STATE_RES_NEED_THREAD;
2707 TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2708 "setting the SELECT UA (lun=%lld)",
2709 (long long unsigned int)cmd->lun);
2711 spin_lock_bh(&dev->dev_lock);
2712 if (cmd->cdb[0] == LOG_SELECT) {
2713 scst_set_sense(sense_buffer,
2714 sizeof(sense_buffer),
2716 UNIT_ATTENTION, 0x2a, 0x02);
2718 scst_set_sense(sense_buffer,
2719 sizeof(sense_buffer),
2721 UNIT_ATTENTION, 0x2a, 0x01);
2723 scst_dev_check_set_local_UA(dev, cmd, sense_buffer,
2724 sizeof(sense_buffer));
2725 spin_unlock_bh(&dev->dev_lock);
2727 if (dev->scsi_dev != NULL)
2728 scst_obtain_device_parameters(dev);
2730 } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
2731 SCST_SENSE_VALID(cmd->sense) &&
2732 scst_is_ua_sense(cmd->sense) &&
2733 /* mode parameters changed */
2734 (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
2735 SCST_SENSE_ASCx_VALID,
2737 scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
2738 SCST_SENSE_ASC_VALID,
2739 0, 0x29, 0) /* reset */ ||
2740 scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
2741 SCST_SENSE_ASC_VALID,
2742 0, 0x28, 0) /* medium changed */ ||
/* cleared by another initiator (just in case) */
2744 scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
2745 SCST_SENSE_ASC_VALID,
2748 TRACE_DBG("Possible parameters changed UA %x: "
2749 "thread context required", cmd->sense[12]);
2750 res = SCST_CMD_STATE_RES_NEED_THREAD;
2754 TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
2755 "(LUN %lld): getting new parameters", cmd->sense[12],
2756 (long long unsigned int)cmd->lun);
2758 scst_obtain_device_parameters(cmd->dev);
2762 cmd->state = SCST_CMD_STATE_DEV_DONE;
2765 TRACE_EXIT_HRES(res);
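/*
 * For reference, the standard SPC sense codes matched above:
 * ASC/ASCQ 0x2a/0x01 - MODE PARAMETERS CHANGED, 0x2a/0x02 - LOG
 * PARAMETERS CHANGED, ASC 0x29 - POWER ON, RESET, OR BUS DEVICE
 * RESET OCCURRED, ASC 0x28 - NOT READY TO READY CHANGE / medium may
 * have changed.
 */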
2769 static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
2771 if (likely(cmd->sn_set))
2772 scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
2774 scst_make_deferred_commands_active(cmd->tgt_dev);
2777 static int scst_dev_done(struct scst_cmd *cmd)
2779 int res = SCST_CMD_STATE_RES_CONT_SAME;
2781 struct scst_device *dev = cmd->dev;
2785 state = SCST_CMD_STATE_PRE_XMIT_RESP;
2787 if (likely(!scst_is_cmd_local(cmd)) &&
2788 likely(dev->handler->dev_done != NULL)) {
2791 if (unlikely(!dev->handler->dev_done_atomic &&
2792 scst_cmd_atomic(cmd))) {
/*
 * It shouldn't happen, because of the SCST_TGT_DEV_AFTER_*
 * optimization.
 */
2797 TRACE_DBG("Dev handler %s dev_done() needs thread "
2798 "context, rescheduling", dev->handler->name);
2799 res = SCST_CMD_STATE_RES_NEED_THREAD;
2803 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2804 dev->handler->name, cmd);
2805 rc = dev->handler->dev_done(cmd);
2806 TRACE_DBG("Dev handler %s dev_done() returned %d",
2807 dev->handler->name, rc);
2808 if (rc != SCST_CMD_STATE_DEFAULT)
state = rc;
}

switch (state) {
case SCST_CMD_STATE_PRE_XMIT_RESP:
2814 case SCST_CMD_STATE_DEV_PARSE:
2815 case SCST_CMD_STATE_PRE_PARSE:
2816 case SCST_CMD_STATE_PREPARE_SPACE:
2817 case SCST_CMD_STATE_RDY_TO_XFER:
2818 case SCST_CMD_STATE_TGT_PRE_EXEC:
2819 case SCST_CMD_STATE_SEND_FOR_EXEC:
2820 case SCST_CMD_STATE_LOCAL_EXEC:
2821 case SCST_CMD_STATE_REAL_EXEC:
2822 case SCST_CMD_STATE_PRE_DEV_DONE:
2823 case SCST_CMD_STATE_MODE_SELECT_CHECKS:
2824 case SCST_CMD_STATE_DEV_DONE:
2825 case SCST_CMD_STATE_XMIT_RESP:
2826 case SCST_CMD_STATE_FINISHED:
2827 case SCST_CMD_STATE_FINISHED_INTERNAL:
cmd->state = state;
break;

case SCST_CMD_STATE_NEED_THREAD_CTX:
2832 TRACE_DBG("Dev handler %s dev_done() requested "
2833 "thread context, rescheduling",
2834 dev->handler->name);
2835 res = SCST_CMD_STATE_RES_NEED_THREAD;
2840 PRINT_ERROR("Dev handler %s dev_done() returned "
2841 "invalid cmd state %d",
2842 dev->handler->name, state);
2844 PRINT_ERROR("Dev handler %s dev_done() returned "
2845 "error %d", dev->handler->name,
2848 scst_set_cmd_error(cmd,
2849 SCST_LOAD_SENSE(scst_sense_hardw_error));
2850 scst_set_cmd_abnormal_done_state(cmd);
2854 if (cmd->needs_unblocking)
2855 scst_unblock_dev_cmd(cmd);
2857 if (likely(cmd->dec_on_dev_needed))
2858 scst_dec_on_dev_cmd(cmd);
2860 if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
2861 scst_inc_check_expected_sn(cmd);
2863 if (unlikely(cmd->internal))
2864 cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
2867 TRACE_EXIT_HRES(res);
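/*
 * For illustration, a minimal dev handler dev_done() consistent with
 * the contract enforced above (a sketch only, not a real handler):
 *
 *	static int my_dev_done(struct scst_cmd *cmd)
 *	{
 *		// inspect or patch cmd->status / response data here
 *		return SCST_CMD_STATE_DEFAULT; // let SCST pick the state
 *	}
 *
 * Any other return value must be one of the SCST_CMD_STATE_* values
 * accepted by the switch above, or a negative error.
 */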
2871 static int scst_pre_xmit_response(struct scst_cmd *cmd)
2877 EXTRACHECKS_BUG_ON(cmd->internal);
2879 #ifdef CONFIG_SCST_DEBUG_TM
2880 if (cmd->tm_dbg_delayed &&
2881 !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2882 if (scst_cmd_atomic(cmd)) {
2883 TRACE_MGMT_DBG("%s",
2884 "DEBUG_TM delayed cmd needs a thread");
2885 res = SCST_CMD_STATE_RES_NEED_THREAD;
2888 TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
2890 schedule_timeout_uninterruptible(HZ);
2894 if (likely(cmd->tgt_dev != NULL)) {
2895 atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
2896 atomic_dec(&cmd->dev->dev_cmd_count);
/* If the expected values are not set, the expected direction is UNKNOWN */
2898 if (cmd->expected_data_direction & SCST_DATA_WRITE)
2899 atomic_dec(&cmd->dev->write_cmd_count);
2901 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2902 scst_on_hq_cmd_response(cmd);
2904 if (unlikely(!cmd->sent_for_exec)) {
2905 TRACE_SN("cmd %p was not sent to mid-lev"
2906 " (sn %ld, set %d)",
2907 cmd, cmd->sn, cmd->sn_set);
2908 scst_unblock_deferred(cmd->tgt_dev, cmd);
2909 cmd->sent_for_exec = 1;
/*
 * If we don't remove cmd from the search list here, before
 * submitting it for transmission, we will have a race: if for some
 * reason the cmd's release is delayed after transmission and the
 * initiator sends a cmd with the same tag, it is possible that
 * the wrong cmd will be returned by the find() functions.
 */
2920 spin_lock_irq(&cmd->sess->sess_list_lock);
2921 list_del(&cmd->search_cmd_list_entry);
2922 spin_unlock_irq(&cmd->sess->sess_list_lock);
2925 smp_mb(); /* to sync with scst_abort_cmd() */
2927 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2928 scst_xmit_process_aborted_cmd(cmd);
2929 else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION))
2930 scst_store_sense(cmd);
2932 if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2933 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu),"
2935 cmd, (long long unsigned int)cmd->tag);
2936 cmd->state = SCST_CMD_STATE_FINISHED;
2937 res = SCST_CMD_STATE_RES_CONT_SAME;
2941 if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
2942 (cmd->data_direction & SCST_DATA_READ))
2943 scst_copy_sg(cmd, SCST_SG_COPY_TO_TARGET);
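/*
 * When both the target driver and the dev handler allocated their own
 * data buffers, READ data must be copied into the target driver's SG
 * here, before the response is transmitted; the WRITE direction was
 * copied the other way (SCST_SG_COPY_FROM_TARGET) before execution.
 */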
2945 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2946 res = SCST_CMD_STATE_RES_CONT_SAME;
2949 #ifdef CONFIG_SCST_MEASURE_LATENCY
2952 uint64_t finish, scst_time, proc_time;
2953 struct scst_session *sess = cmd->sess;
2955 getnstimeofday(&ts);
2956 finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
2958 spin_lock_bh(&sess->meas_lock);
2960 scst_time = cmd->pre_exec_finish - cmd->start;
2961 scst_time += finish - cmd->post_exec_start;
2962 proc_time = finish - cmd->start;
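/*
 * scst_time accounts only the time spent inside SCST itself: from
 * cmd->start to pre_exec_finish, plus from post_exec_start to now;
 * the backend execution window in between is excluded. proc_time is
 * the full start-to-finish processing time.
 */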
2964 sess->scst_time += scst_time;
2965 sess->processing_time += proc_time;
2966 sess->processed_cmds++;
2968 spin_unlock_bh(&sess->meas_lock);
2970 TRACE_DBG("cmd %p (sess %p): finish %lld (tv_sec %ld, "
2971 "tv_nsec %ld), scst_time %lld, proc_time %lld",
2972 cmd, sess, finish, ts.tv_sec, ts.tv_nsec, scst_time,
2976 TRACE_EXIT_HRES(res);
2980 static int scst_xmit_response(struct scst_cmd *cmd)
2986 EXTRACHECKS_BUG_ON(cmd->internal);
2988 if (unlikely(!cmd->tgtt->xmit_response_atomic &&
2989 scst_cmd_atomic(cmd))) {
/*
 * It shouldn't happen, because of the SCST_TGT_DEV_AFTER_*
 * optimization.
 */
2994 TRACE_DBG("Target driver %s xmit_response() needs thread "
2995 "context, rescheduling", cmd->tgtt->name);
2996 res = SCST_CMD_STATE_RES_NEED_THREAD;
3001 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
3003 res = SCST_CMD_STATE_RES_CONT_NEXT;
3004 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
3006 TRACE_DBG("Calling xmit_response(%p)", cmd);
3008 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
3009 if (trace_flag & TRACE_SND_BOT) {
int i;
struct scatterlist *sg;
if (cmd->tgt_sg != NULL)
sg = cmd->tgt_sg;
else
sg = cmd->sg;
if (sg != NULL) {
3017 TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
3018 "(sg_cnt %d, sg %p, sg[0].page %p)",
3019 cmd, cmd->tgt_sg_cnt, sg,
3020 (void *)sg_page(&sg[0]));
3021 for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
3022 PRINT_BUFF_FLAG(TRACE_SND_BOT,
3023 "Xmitting sg", sg_virt(&sg[i]),
3030 #ifdef CONFIG_SCST_DEBUG_RETRY
if ((scst_random() % 100) == 77)
3032 rc = SCST_TGT_RES_QUEUE_FULL;
3035 rc = cmd->tgtt->xmit_response(cmd);
3036 TRACE_DBG("xmit_response() returned %d", rc);
3038 if (likely(rc == SCST_TGT_RES_SUCCESS))
3041 /* Restore the previous state */
3042 cmd->state = SCST_CMD_STATE_XMIT_RESP;
switch (rc) {
case SCST_TGT_RES_QUEUE_FULL:
3046 if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
3051 case SCST_TGT_RES_NEED_THREAD_CTX:
3052 TRACE_DBG("Target driver %s xmit_response() "
3053 "requested thread context, rescheduling",
3055 res = SCST_CMD_STATE_RES_NEED_THREAD;
3065 /* Caution: cmd can be already dead here */
3066 TRACE_EXIT_HRES(res);
3070 if (rc == SCST_TGT_RES_FATAL_ERROR) {
3071 PRINT_ERROR("Target driver %s xmit_response() returned "
3072 "fatal error", cmd->tgtt->name);
3074 PRINT_ERROR("Target driver %s xmit_response() returned "
3075 "invalid value %d", cmd->tgtt->name, rc);
3077 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
3078 cmd->state = SCST_CMD_STATE_FINISHED;
3079 res = SCST_CMD_STATE_RES_CONT_SAME;
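/*
 * For illustration, the xmit_response() contract exercised above,
 * seen from a target driver (a sketch; my_hw_*() are hypothetical):
 *
 *	static int my_xmit_response(struct scst_cmd *cmd)
 *	{
 *		if (my_hw_queue_full())			// retried above
 *			return SCST_TGT_RES_QUEUE_FULL;
 *		my_hw_send_response(cmd);		// async transmit
 *		return SCST_TGT_RES_SUCCESS;
 *	}
 *
 * On transmit completion the driver calls scst_tgt_cmd_done(), defined
 * below, which moves the command to SCST_CMD_STATE_FINISHED.
 */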
3083 void scst_tgt_cmd_done(struct scst_cmd *cmd,
3084 enum scst_exec_context pref_context)
3088 sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
3090 cmd->state = SCST_CMD_STATE_FINISHED;
3091 scst_proccess_redirect_cmd(cmd, pref_context, 1);
3096 EXPORT_SYMBOL(scst_tgt_cmd_done);
3098 static int scst_finish_cmd(struct scst_cmd *cmd)
3104 atomic_dec(&cmd->sess->sess_cmd_count);
3107 smp_mb(); /* to sync with scst_abort_cmd() */
3109 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
3110 TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
3111 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3112 atomic_read(&scst_cmd_count));
3114 scst_finish_cmd_mgmt(cmd);
3117 if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
3118 if ((cmd->tgt_dev != NULL) &&
3119 scst_is_ua_sense(cmd->sense)) {
3120 /* This UA delivery failed, so requeue it */
3121 TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd "
3123 scst_check_set_UA(cmd->tgt_dev, cmd->sense,
3124 cmd->sense_bufflen, SCST_SET_UA_FLAG_AT_HEAD);
3128 __scst_cmd_put(cmd);
3130 res = SCST_CMD_STATE_RES_CONT_NEXT;
3132 TRACE_EXIT_HRES(res);
/*
 * No locks, but it must be externally serialized (see the comment for
 * scst_cmd_init_done() in scst.h).
 */
3140 static void scst_cmd_set_sn(struct scst_cmd *cmd)
3142 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3143 unsigned long flags;
3147 if (scst_is_implicit_hq(cmd)) {
3148 TRACE_SN("Implicit HQ cmd %p", cmd);
3149 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3152 EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
3154 /* Optimized for lockless fast path */
3156 scst_check_debug_sn(cmd);
3158 if (cmd->dev->queue_alg ==
3159 SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
/*
 * Not the best way, but good enough until there is a way to
 * specify the queue type during pass-through command submission.
 */
3165 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3168 switch (cmd->queue_type) {
3169 case SCST_CMD_QUEUE_SIMPLE:
3170 case SCST_CMD_QUEUE_UNTAGGED:
3171 #if 0 /* left for future performance investigations */
3172 if (scst_cmd_is_expected_set(cmd)) {
3173 if ((cmd->expected_data_direction == SCST_DATA_READ) &&
3174 (atomic_read(&cmd->dev->write_cmd_count) == 0))
3179 if (likely(tgt_dev->num_free_sn_slots >= 0)) {
/*
 * atomic_inc_return() implies a memory barrier, to sync with
 * scst_inc_expected_sn().
 */
3184 if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
3186 TRACE_SN("Incremented curr_sn %ld",
3189 cmd->sn_slot = tgt_dev->cur_sn_slot;
3190 cmd->sn = tgt_dev->curr_sn;
3192 tgt_dev->prev_cmd_ordered = 0;
3194 TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
3195 "%zd", ARRAY_SIZE(tgt_dev->sn_slots));
3200 case SCST_CMD_QUEUE_ORDERED:
3201 TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
3203 if (!tgt_dev->prev_cmd_ordered) {
3204 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3205 if (tgt_dev->num_free_sn_slots >= 0) {
3206 tgt_dev->num_free_sn_slots--;
3207 if (tgt_dev->num_free_sn_slots >= 0) {
/*
 * Commands can finish in any order, so we don't know which
 * slot is empty.
 */
3213 tgt_dev->cur_sn_slot++;
3214 if (tgt_dev->cur_sn_slot ==
3215 tgt_dev->sn_slots + ARRAY_SIZE(tgt_dev->sn_slots))
3216 tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
3218 if (atomic_read(tgt_dev->cur_sn_slot) == 0)
3222 sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
3224 TRACE_SN("New cur SN slot %zd",
3225 tgt_dev->cur_sn_slot -
3229 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3231 tgt_dev->prev_cmd_ordered = 1;
3233 cmd->sn = tgt_dev->curr_sn;
3236 case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
3237 TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
3238 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3239 tgt_dev->hq_cmd_count++;
3240 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3241 cmd->hq_cmd_inced = 1;
3248 TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
3249 "num_free_sn_slots %d, prev_cmd_ordered %ld, "
3250 "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
3251 atomic_read(tgt_dev->cur_sn_slot),
3252 tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
tgt_dev->cur_sn_slot - tgt_dev->sn_slots);
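/*
 * In summary: SIMPLE/UNTAGGED commands share the current SN slot and
 * the current curr_sn value; an ORDERED command switches to the next
 * free slot and gets a fresh SN, so it cannot be sent for execution
 * until everything queued under the previous slots has drained; HEAD
 * OF QUEUE commands bypass SN ordering entirely via hq_cmd_count.
 */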
/*
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (LUN) or device type handler.
 *
 * No locks, but might be on IRQ; protection is provided by the
 * suspended activity.
 */
3269 static int scst_translate_lun(struct scst_cmd *cmd)
3271 struct scst_tgt_dev *tgt_dev = NULL;
3276 /* See comment about smp_mb() in scst_suspend_activity() */
3279 if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
3280 struct list_head *sess_tgt_dev_list_head =
3281 &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
3282 TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
3283 (long long unsigned int)cmd->lun);
3285 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3286 sess_tgt_dev_list_entry) {
3287 if (tgt_dev->lun == cmd->lun) {
3288 TRACE_DBG("tgt_dev %p found", tgt_dev);
3290 if (unlikely(tgt_dev->dev->handler ==
3291 &scst_null_devtype)) {
3292 PRINT_INFO("Dev handler for device "
3293 "%lld is NULL, the device will not "
3294 "be visible remotely",
3295 (long long unsigned int)cmd->lun);
3299 cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
3300 cmd->tgt_dev = tgt_dev;
3301 cmd->dev = tgt_dev->dev;
3309 "tgt_dev for LUN %lld not found, command to "
3311 (long long unsigned int)cmd->lun);
3315 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
3320 TRACE_EXIT_RES(res);
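/*
 * The lookup above hashes the LUN into sess_tgt_dev_list_hash and
 * walks only that bucket, so LUN translation is a short list scan
 * rather than a walk over all LUNs of the session.
 */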
/*
 * No locks, but might be on IRQ.
 *
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (LUN) or device type handler.
 */
3330 static int __scst_init_cmd(struct scst_cmd *cmd)
3336 res = scst_translate_lun(cmd);
3337 if (likely(res == 0)) {
3339 bool failure = false;
3341 cmd->state = SCST_CMD_STATE_PRE_PARSE;
3343 cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
3344 if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
3345 TRACE(TRACE_MGMT_MINOR,
3346 "Too many pending commands (%d) in "
3347 "session, returning BUSY to initiator \"%s\"",
3348 cnt, (cmd->sess->initiator_name[0] == '\0') ?
3349 "Anonymous" : cmd->sess->initiator_name);
3353 cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
3354 if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
3356 TRACE(TRACE_MGMT_MINOR,
3357 "Too many pending device "
3358 "commands (%d), returning BUSY to "
3359 "initiator \"%s\"", cnt,
3360 (cmd->sess->initiator_name[0] == '\0') ?
3362 cmd->sess->initiator_name);
/* If the expected values are not set, the expected direction is UNKNOWN */
3368 if (cmd->expected_data_direction & SCST_DATA_WRITE)
3369 atomic_inc(&cmd->dev->write_cmd_count);
3371 if (unlikely(failure))
3374 if (!cmd->set_sn_on_restart_cmd)
3375 scst_cmd_set_sn(cmd);
3376 } else if (res < 0) {
3377 TRACE_DBG("Finishing cmd %p", cmd);
3378 scst_set_cmd_error(cmd,
3379 SCST_LOAD_SENSE(scst_sense_lun_not_supported));
3380 scst_set_cmd_abnormal_done_state(cmd);
3385 TRACE_EXIT_RES(res);
3390 scst_set_cmd_abnormal_done_state(cmd);
/* Called under scst_init_lock with IRQs disabled */
3395 static void scst_do_job_init(void)
3396 __releases(&scst_init_lock)
3397 __acquires(&scst_init_lock)
3399 struct scst_cmd *cmd;
/*
 * There is no need for a read barrier here, because we don't care
 * where this check is done.
 */
3409 susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
3410 if (scst_init_poll_cnt > 0)
3411 scst_init_poll_cnt--;
3413 list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
continue;
3417 if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3418 spin_unlock_irq(&scst_init_lock);
3419 rc = __scst_init_cmd(cmd);
3420 spin_lock_irq(&scst_init_lock);
3422 TRACE_MGMT_DBG("%s",
3423 "FLAG SUSPENDED set, restarting");
3427 TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
3428 cmd, (long long unsigned int)cmd->tag);
3429 scst_set_cmd_abnormal_done_state(cmd);
/*
 * Deleting cmd from the init cmd list after __scst_init_cmd()
 * is necessary to keep the check in scst_init_cmd() correct
 * and so preserve the order of commands.
 *
 * We don't care about the race where the init cmd list is empty
 * and one command has detected that it was just non-empty, so
 * it is being inserted into it, while another command at the same
 * time sees the init cmd list as empty and proceeds directly,
 * because that could affect only commands from the same initiator
 * to the same tgt_dev, and scst_cmd_init_done*() doesn't
 * guarantee the order of simultaneous calls anyway.
 */
3445 TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
3446 smp_wmb(); /* enforce the required order */
3447 list_del(&cmd->cmd_list_entry);
3448 spin_unlock(&scst_init_lock);
3450 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3451 TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
3452 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3453 list_add(&cmd->cmd_list_entry,
3454 &cmd->cmd_lists->active_cmd_list);
3456 list_add_tail(&cmd->cmd_list_entry,
3457 &cmd->cmd_lists->active_cmd_list);
3458 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3459 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3461 spin_lock(&scst_init_lock);
3465 /* It isn't really needed, but let's keep it */
3466 if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3473 static inline int test_init_cmd_list(void)
3475 int res = (!list_empty(&scst_init_cmd_list) &&
3476 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3477 unlikely(kthread_should_stop()) ||
3478 (scst_init_poll_cnt > 0);
3482 int scst_init_thread(void *arg)
3486 PRINT_INFO("Init thread started, PID %d", current->pid);
3488 current->flags |= PF_NOFREEZE;
3490 set_user_nice(current, -10);
3492 spin_lock_irq(&scst_init_lock);
3493 while (!kthread_should_stop()) {
3495 init_waitqueue_entry(&wait, current);