/*
 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2007 - 2009 ID7 Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include "scst_priv.h"
static void scst_cmd_set_sn(struct scst_cmd *cmd);
static int __scst_init_cmd(struct scst_cmd *cmd);
static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
	uint64_t tag);
static void scst_process_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries);
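/*
 * Queues a command for deferred processing in the per-CPU tasklet
 * (softirq) context; this is what backs the SCST_CONTEXT_TASKLET
 * preferred context used throughout this file.
 */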
static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
{
	struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
	unsigned long flags;

	spin_lock_irqsave(&t->tasklet_lock, flags);
	TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
		smp_processor_id());
	list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
	spin_unlock_irqrestore(&t->tasklet_lock, flags);

	tasklet_schedule(&t->tasklet);
}
/*
 * Must not be called in parallel with scst_unregister_session() for the
 * same session.
 */
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
	const uint8_t *lun, int lun_len,
	const uint8_t *cdb, int cdb_len, int atomic)
{
	struct scst_cmd *cmd;

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
		PRINT_CRIT_ERROR("%s",
			"New cmd while shutting down the session");
		sBUG();
	}
#endif

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(cmd == NULL))
		goto out;

	cmd->sess = sess;
	cmd->tgt = sess->tgt;
	cmd->tgtt = sess->tgt->tgtt;

	/*
	 * For both a wrong LUN and a wrong CDB, defer the error reporting
	 * to scst_cmd_init_done().
	 */
	cmd->lun = scst_unpack_lun(lun, lun_len);

	if (cdb_len <= SCST_MAX_CDB_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;
	}

	TRACE_DBG("cmd %p, sess %p", cmd, sess);

out:
	return cmd;
}
EXPORT_SYMBOL(scst_rx_cmd);
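/*
 * Example RX path of a target driver (a minimal sketch, not part of
 * SCST itself; my_tgt_get_tag and pkt are hypothetical driver names):
 *
 *	cmd = scst_rx_cmd(sess, lun_buf, lun_len, cdb_buf, cdb_len,
 *		in_interrupt());
 *	if (cmd == NULL)
 *		return -ENOMEM;
 *	scst_cmd_set_tag(cmd, my_tgt_get_tag(pkt));
 *	scst_cmd_init_done(cmd, SCST_CONTEXT_DIRECT_ATOMIC);
 */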
/*
 * No locks, but might be on IRQ. Returns 0 on success, <0 if processing of
 * this command should be stopped.
 */
static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
{
	int rc, res = 0;

	/* See the comment in scst_do_job_init() */
	if (unlikely(!list_empty(&scst_init_cmd_list))) {
		TRACE_MGMT_DBG("%s", "init cmd list busy");
		goto out_redirect;
	}
	/*
	 * A memory barrier isn't necessary here, because the CPU appears to
	 * be self-consistent and we don't care about the race described
	 * in the comment in scst_do_job_init().
	 */

	rc = __scst_init_cmd(cmd);
	if (unlikely(rc > 0))
		goto out_redirect;
	else if (unlikely(rc != 0)) {
		goto out;
	}

	/* Small context optimization */
	if (((*context == SCST_CONTEXT_TASKLET) ||
	    (*context == SCST_CONTEXT_DIRECT_ATOMIC) ||
	    ((*context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd))) &&
	    scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_data_direction & SCST_DATA_WRITE) {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		} else {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		}
	}

out:
	return res;

out_redirect:
	if (cmd->preprocessing_only) {
		/*
		 * Poor man's solution for single-threaded targets, where
		 * blocking the receiver at least sometimes means blocking all.
		 */
		sBUG_ON(*context != SCST_CONTEXT_DIRECT);
		scst_set_busy(cmd);
		scst_set_cmd_abnormal_done_state(cmd);
		res = 1;
		/* Keep initiator away from too many BUSY commands */
		msleep(50);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&scst_init_lock, flags);
		TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
			"%d)", cmd, atomic_read(&scst_cmd_count));
		list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
		if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			scst_init_poll_cnt++;
		spin_unlock_irqrestore(&scst_init_lock, flags);
		wake_up(&scst_init_cmd_list_waitQ);
		res = -1;
	}
	goto out;
}
void scst_cmd_init_done(struct scst_cmd *cmd,
	enum scst_exec_context pref_context)
{
	unsigned long flags;
	struct scst_session *sess = cmd->sess;
	int rc;

	scst_set_start_time(cmd);

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
		"(cmd %p)", (long long unsigned int)cmd->tag,
		(long long unsigned int)cmd->lun, cmd->cdb_len,
		cmd->queue_type, cmd);
	PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely((in_irq() || irqs_disabled())) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	atomic_inc(&sess->sess_cmd_count);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		/*
		 * We always have to keep the command in the search list from
		 * the very beginning, because otherwise it can be missed
		 * during TM processing. This check is needed because there
		 * might be old, i.e. deferred, commands and new, i.e. just
		 * coming, ones.
		 */
		if (cmd->sess_cmd_list_entry.next == NULL)
			list_add_tail(&cmd->sess_cmd_list_entry,
				&sess->search_cmd_list);
		switch (sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
			break;
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list",
				cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			goto out;
		case SCST_SESS_IPH_FAILED:
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			scst_set_busy(cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			goto active;
		default:
			sBUG();
		}
	} else
		list_add_tail(&cmd->sess_cmd_list_entry,
			&sess->search_cmd_list);

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);
	if (unlikely(cmd->lun == NO_SUCH_LUN)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("%s", "Wrong CDB len, finishing cmd");
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
		PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_message));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	/*
	 * The cmd must be initialized here to preserve the order. Even if the
	 * cmd was already preliminarily completed by the target driver, we
	 * still need to initialize it to find out in which format we should
	 * return the sense.
	 */
	cmd->state = SCST_CMD_STATE_INIT;
	rc = scst_init_cmd(cmd, &pref_context);
	if (unlikely(rc < 0))
		goto out;
	else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
		/* Target driver preliminarily completed cmd */
		scst_set_cmd_abnormal_done_state(cmd);
	}

active:
	/* Here cmd must not be in any cmd list, no locks */
	switch (pref_context) {
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet(cmd);
		break;

	case SCST_CONTEXT_DIRECT:
		scst_process_active_cmd(cmd, false);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	default:
		PRINT_ERROR("Context %x is undefined, using the thread one",
			pref_context);
		/* go through */
	case SCST_CONTEXT_THREAD:
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;
	}

out:
	return;
}
EXPORT_SYMBOL(scst_cmd_init_done);
static int scst_pre_parse(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_device *dev = cmd->dev;
	int rc;

	cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
	    (!dev->has_own_order_mgmt &&
	     (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
	      cmd->queue_type == SCST_CMD_QUEUE_ORDERED));

	/*
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from the
	 * CDB. Additionally, not all transports support supplying the
	 * expected values.
	 */
	rc = scst_get_cdb_info(cmd);
	if (unlikely(rc != 0)) {
		if (rc > 0) {
			PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
			goto out_xmit;
		}
		PRINT_ERROR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_SCSI, "Using initiator-supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;
			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore (possibly) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_opcode));
				goto out_xmit;
			}
		} else {
			PRINT_ERROR("Unknown opcode 0x%02x for %s and "
				"target %s not supplied expected values",
				cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_invalid_opcode));
			goto out_xmit;
		}
#else
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		goto out_xmit;
#endif
	} else {
		TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
			"(expected %d, set %s), transfer_len=%d (expected "
			"len %d), flags=%d", cmd->op_name, cmd,
			cmd->data_direction, cmd->expected_data_direction,
			scst_cmd_is_expected_set(cmd) ? "yes" : "no",
			cmd->bufflen, cmd->expected_transfer_len,
			cmd->op_flags);

		if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
			if (scst_cmd_is_expected_set(cmd)) {
				/*
				 * The command data length can't be easily
				 * determined from the CDB. ToDo: processing of
				 * all such commands should be fixed. Until
				 * then, get the length from the supplied
				 * expected value, but limit it to some
				 * reasonable value (15MB).
				 */
				cmd->bufflen = min(cmd->expected_transfer_len,
					15*1024*1024);
				cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
			}
		}
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR("NACA bit in the CDB control byte is not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	cmd->state = SCST_CMD_STATE_DEV_PARSE;

out:
	return res;

out_xmit:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
{
	bool res = false;

	switch (cmd->cdb[0]) {
	case TEST_UNIT_READY:
		/* Crazy VMware people sometimes do TUR with READ direction */
		res = true;
		break;
	case VERIFY:
	case VERIFY_6:
	case VERIFY_12:
	case VERIFY_16:
		/* VERIFY commands with BYTCHK unset shouldn't fail here */
		if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
		    (cmd->cdb[1] & BYTCHK) == 0)
			res = true;
		break;
	}

	return res;
}
#endif
static int scst_parse_cmd(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int state;
	struct scst_device *dev = cmd->dev;
	int orig_bufflen = cmd->bufflen;

	if (likely(!scst_is_cmd_fully_local(cmd))) {
		if (unlikely(!dev->handler->parse_atomic &&
			     scst_cmd_atomic(cmd))) {
			/*
			 * It shouldn't happen because of the
			 * SCST_TGT_DEV_AFTER_* optimization.
			 */
			TRACE_DBG("Dev handler %s parse() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
			cmd->cdb, cmd->cdb_len);
		scst_set_cur_start(cmd);
		state = dev->handler->parse(cmd);
		/* Caution: cmd can already be dead here */
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

		switch (state) {
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			scst_set_parse_time(cmd);
			TRACE_DBG("Dev handler %s parse() requested thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		case SCST_CMD_STATE_STOP:
			TRACE_DBG("Dev handler %s parse() requested stop "
				"processing", dev->handler->name);
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			goto out;
		}

		scst_set_parse_time(cmd);

		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;
	} else
		state = SCST_CMD_STATE_PREPARE_SPACE;

	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;

	if (cmd->bufflen == 0) {
		/*
		 * According to SPC, bufflen 0 for data transfer commands isn't
		 * an error, so we need to fix the transfer direction.
		 */
		cmd->data_direction = SCST_DATA_NONE;
	}

	if (cmd->dh_data_buf_alloced &&
	    unlikely((orig_bufflen > cmd->bufflen))) {
		PRINT_ERROR("Dev handler supplied data buffer (size %d), "
			"which is less than required (size %d)", cmd->bufflen,
			orig_bufflen);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}

	if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
		goto set_res;

	if (unlikely((cmd->bufflen == 0) &&
		     (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
		PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
			"(handler %s, target %s)", cmd->cdb[0],
			dev->handler->name, cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((cmd->bufflen != 0) &&
	    ((cmd->data_direction == SCST_DATA_NONE) ||
	     ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
		PRINT_ERROR("Dev handler %s parse() returned "
			"invalid cmd data_direction %d, bufflen %d, state %d "
			"or sg %p (opcode 0x%x)", dev->handler->name,
			cmd->data_direction, cmd->bufflen, state, cmd->sg,
			cmd->cdb[0]);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}
#endif

	if (scst_cmd_is_expected_set(cmd)) {
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
# ifdef CONFIG_SCST_EXTRACHECKS
		if ((cmd->data_direction != cmd->expected_data_direction) ||
		    (cmd->bufflen != cmd->expected_transfer_len)) {
			PRINT_WARNING("Expected values don't match decoded "
				"ones: data_direction %d, "
				"expected_data_direction %d, "
				"bufflen %d, expected_transfer_len %d",
				cmd->data_direction,
				cmd->expected_data_direction,
				cmd->bufflen, cmd->expected_transfer_len);
			PRINT_BUFFER("Suspicious CDB", cmd->cdb, cmd->cdb_len);
		}
# endif
		cmd->data_direction = cmd->expected_data_direction;
		cmd->bufflen = cmd->expected_transfer_len;
#else
		if (unlikely(cmd->data_direction !=
			     cmd->expected_data_direction)) {
			if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
			     (cmd->bufflen != 0)) &&
			    !scst_is_allowed_to_mismatch_cmd(cmd)) {
				PRINT_ERROR("Expected data direction %d for "
					"opcode 0x%02x (handler %s, target %s) "
					"doesn't match decoded value %d",
					cmd->expected_data_direction,
					cmd->cdb[0], dev->handler->name,
					cmd->tgtt->name, cmd->data_direction);
				PRINT_BUFFER("Failed CDB",
					cmd->cdb, cmd->cdb_len);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_message));
				goto out_dev_done;
			}
		}
		if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
			TRACE(TRACE_MGMT_MINOR, "Warning: expected "
				"transfer length %d for opcode 0x%02x "
				"(handler %s, target %s) doesn't match "
				"decoded value %d. Faulty initiator "
				"(e.g. VMware is known to be such) or "
				"scst_scsi_op_table should be updated?",
				cmd->expected_transfer_len, cmd->cdb[0],
				dev->handler->name, cmd->tgtt->name,
				cmd->bufflen);
			PRINT_BUFF_FLAG(TRACE_MGMT_MINOR, "Suspicious CDB",
				cmd->cdb, cmd->cdb_len);
		}
#endif
	}

	if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
		PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
			"target %s", cmd->cdb[0], dev->handler->name,
			cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}

set_res:
	switch (state) {
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_FINISHED_INTERNAL:
		cmd->state = state;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s parse() returned "
				"invalid cmd state %d (opcode %d)",
				dev->handler->name, state, cmd->cdb[0]);
		} else {
			PRINT_ERROR("Dev handler %s parse() returned "
				"error %d (opcode %d)", dev->handler->name,
				state, cmd->cdb[0]);
		}
		goto out_hw_error;
	}

	if (cmd->resp_data_len == -1) {
		if (cmd->data_direction & SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
		else
			cmd->resp_data_len = 0;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_hw_error:
	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
out_dev_done:
#endif
	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
static int scst_prepare_space(struct scst_cmd *cmd)
{
	int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;

	if (cmd->data_direction == SCST_DATA_NONE)
		goto prep_done;

	if (cmd->tgt_need_alloc_data_buf) {
		int orig_bufflen = cmd->bufflen;

		TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
			cmd);

		scst_set_cur_start(cmd);
		r = cmd->tgtt->alloc_data_buf(cmd);
		scst_set_alloc_buf_time(cmd);

		if (r > 0)
			goto alloc;
		else if (r == 0) {
			if (unlikely(cmd->bufflen == 0)) {
				/* See comment in scst_alloc_space() */
				if (cmd->sg == NULL)
					goto alloc;
			}

			cmd->tgt_data_buf_alloced = 1;

			if (unlikely(orig_bufflen < cmd->bufflen)) {
				PRINT_ERROR("Target driver allocated data "
					"buffer (size %d), which is less than "
					"required (size %d)", orig_bufflen,
					cmd->bufflen);
				goto out_error;
			}
			TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
		}
	}

alloc:
	if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		r = scst_alloc_space(cmd);
	} else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
		TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
	} else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
		cmd->sg = cmd->tgt_sg;
		cmd->sg_cnt = cmd->tgt_sg_cnt;
		cmd->in_sg = cmd->tgt_in_sg;
		cmd->in_sg_cnt = cmd->tgt_in_sg_cnt;
	} else {
		TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
			"sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
			cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
	}

	if (r != 0) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MEM("%s", "Atomic memory allocation failed, "
				"rescheduling to the thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		} else
			goto out_no_space;
	}

prep_done:
	if (cmd->preprocessing_only) {
		cmd->preprocessing_only = 0;

		if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
			TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
				"cmd %p", cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			res = SCST_CMD_STATE_RES_CONT_SAME;
			goto out;
		}

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;

		TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
		scst_set_cur_start(cmd);
		cmd->tgtt->preprocessing_done(cmd);
		TRACE_DBG("%s", "preprocessing_done() returned");
		goto out;
	}

	if (cmd->data_direction & SCST_DATA_WRITE)
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
	else
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;

out:
	TRACE_EXIT_HRES(res);
	return res;

out_no_space:
	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
	scst_set_busy(cmd);
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
void scst_restart_cmd(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	scst_set_restart_waiting_time(cmd);

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE_DBG("tag=%llu, status=%#x",
		(long long unsigned int)scst_cmd_get_tag(cmd),
		status);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_PREPROCESS_STATUS_SUCCESS:
		if (cmd->data_direction & SCST_DATA_WRITE)
			cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
		else
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		if (cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (cmd->data_direction & SCST_DATA_WRITE) {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			} else {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			}
		}
		break;

	case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_PREPROCESS_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_PREPROCESS_STATUS_ERROR:
		if (cmd->sense != NULL)
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("%s() received unknown status %x", __func__,
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_process_redirect_cmd(cmd, pref_context, 1);
}
EXPORT_SYMBOL(scst_restart_cmd);
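/*
 * Example: a target driver using the preprocessing_done() mechanism
 * resumes a command by calling scst_restart_cmd() once its own
 * preprocessing finishes (a minimal sketch; my_tgt_preprocessing_done
 * is a hypothetical driver callback, not part of SCST):
 *
 *	static void my_tgt_preprocessing_done(struct scst_cmd *cmd)
 *	{
 *		... driver-specific setup for this cmd ...
 *		scst_restart_cmd(cmd, SCST_PREPROCESS_STATUS_SUCCESS,
 *			SCST_CONTEXT_THREAD);
 *	}
 */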
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
	int res, rc;
	struct scst_tgt_template *tgtt = cmd->tgtt;

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_dev_done;
	}

	if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
		/*
		 * It shouldn't happen because of the SCST_TGT_DEV_AFTER_*
		 * optimization.
		 */
		TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
			"context, rescheduling", tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	while (1) {
		int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_DATA_WAIT;

		if (tgtt->on_hw_pending_cmd_timeout != NULL) {
			struct scst_session *sess = cmd->sess;
			cmd->hw_pending_start = jiffies;
			cmd->cmd_hw_pending = 1;
			if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
					&sess->sess_aflags)) {
				TRACE_DBG("Sched HW pending work for sess %p "
					"(max time %d)", sess,
					tgtt->max_hw_pending_time);
				set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
					&sess->sess_aflags);
				schedule_delayed_work(&sess->hw_pending_work,
					tgtt->max_hw_pending_time * HZ);
			}
		}

		scst_set_cur_start(cmd);

		TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef CONFIG_SCST_DEBUG_RETRY
		if (((scst_random() % 100) == 75))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = tgtt->rdy_to_xfer(cmd);
		TRACE_DBG("rdy_to_xfer() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		scst_set_rdy_to_xfer_time(cmd);

		cmd->cmd_hw_pending = 0;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				continue;
			else
				goto out;

		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s "
				"rdy_to_xfer() requested thread "
				"context, rescheduling", tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		default:
			goto out_error;
		}
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
			"fatal error", tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

out_dev_done:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
/* No locks, but might be in IRQ */
static void scst_process_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries)
{
	struct scst_tgt *tgt = cmd->tgt;
	unsigned long flags;

	TRACE_DBG("Context: %x", context);

	if (context == SCST_CONTEXT_SAME)
		context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
			SCST_CONTEXT_DIRECT;

	switch (context) {
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		break;

	case SCST_CONTEXT_DIRECT:
		if (check_retries)
			scst_check_retries(tgt);
		scst_process_active_cmd(cmd, false);
		break;

	default:
		PRINT_ERROR("Context %x is unknown, using the thread one",
			context);
		/* go through */
	case SCST_CONTEXT_THREAD:
		if (check_retries)
			scst_check_retries(tgt);
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;

	case SCST_CONTEXT_TASKLET:
		if (check_retries)
			scst_check_retries(tgt);
		scst_schedule_tasklet(cmd);
		break;
	}
}
void scst_rx_data(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	scst_set_rdy_to_xfer_time(cmd);

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);

	cmd->cmd_hw_pending = 0;

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_RX_STATUS_SUCCESS:
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
		if (trace_flag & TRACE_RCV_BOT) {
			int i;
			struct scatterlist *sg;
			if (cmd->in_sg != NULL)
				sg = cmd->in_sg;
			else if (cmd->tgt_in_sg != NULL)
				sg = cmd->tgt_in_sg;
			else if (cmd->tgt_sg != NULL)
				sg = cmd->tgt_sg;
			else
				sg = cmd->sg;
			TRACE_RECV_BOT("RX data for cmd %p "
				"(sg_cnt %d, sg %p, sg[0].page %p)",
				cmd, cmd->tgt_sg_cnt, sg,
				(void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
				PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
					sg_virt(&sg[i]), sg[i].length);
			}
		}
#endif
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				pref_context = SCST_CONTEXT_THREAD;
		}
		break;

	case SCST_RX_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("scst_rx_data() received unknown status %x",
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_process_redirect_cmd(cmd, pref_context, 1);
}
EXPORT_SYMBOL(scst_rx_data);
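/*
 * Example WRITE data flow from the target driver's side (a minimal
 * sketch; my_tgt_dma_done is a hypothetical driver completion handler):
 * SCST calls the driver's rdy_to_xfer(), the driver DMAs the data into
 * the command's buffers, then reports completion:
 *
 *	static void my_tgt_dma_done(struct scst_cmd *cmd, int error)
 *	{
 *		scst_rx_data(cmd, error ? SCST_RX_STATUS_ERROR :
 *			SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_TASKLET);
 *	}
 */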
static int scst_tgt_pre_exec(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME, rc;

	cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;

	if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
		goto out;

	TRACE_DBG("Calling pre_exec(%p)", cmd);
	scst_set_cur_start(cmd);
	rc = cmd->tgtt->pre_exec(cmd);
	scst_set_pre_exec_time(cmd);
	TRACE_DBG("pre_exec() returned %d", rc);

	if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
		switch (rc) {
		case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_ERROR_FATAL:
			set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
			/* go through */
		case SCST_PREPROCESS_STATUS_ERROR:
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_NEED_THREAD:
			TRACE_DBG("Target driver's %s pre_exec() requested "
				"thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
			break;
		default:
			sBUG();
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int resid)
{
	scst_set_exec_time(cmd);

	cmd->status = result & 0xff;
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	if (unlikely(resid != 0)) {
#ifdef CONFIG_SCST_EXTRACHECKS
		if ((resid < 0) || (resid > cmd->resp_data_len)) {
			PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
				"op %x)", resid, cmd->resp_data_len,
				cmd->cdb[0]);
		} else
#endif
			scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
	}

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
		/* We might have a double reset UA here */
		cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
		cmd->dbl_ua_orig_data_direction = cmd->data_direction;

		scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
	}

	TRACE(TRACE_SCSI, "cmd %p, result=%x, cmd->status=%x, resid=%d, "
		"cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x (cmd %p)", cmd, result, cmd->status, resid,
		cmd->msg_status, cmd->host_status, cmd->driver_status, cmd);

	cmd->completed = 1;
}
/* For small context optimization */
static inline enum scst_exec_context scst_optimize_post_exec_context(
	struct scst_cmd *cmd, enum scst_exec_context context)
{
	if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
	    (context == SCST_CONTEXT_TASKLET) ||
	    (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
		if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
				&cmd->tgt_dev->tgt_dev_flags))
			context = SCST_CONTEXT_THREAD;
	}
	return context;
}
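/*
 * The SCST_TGT_DEV_AFTER_EXEC_ATOMIC flag consulted above caches whether
 * the post-exec path for this tgt_dev can run in atomic context; if it
 * can't, the command is demoted to SCST_CONTEXT_THREAD here instead of
 * failing deeper in the state machine.
 */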
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
	struct scsi_request **req)
{
	struct scst_cmd *cmd = NULL;

	*req = scsi_cmd->sc_request;
	if (*req != NULL)
		cmd = (struct scst_cmd *)(*req)->upper_private_data;

	if (cmd == NULL) {
		PRINT_ERROR("%s", "Request with NULL cmd");
		if (*req != NULL)
			scsi_release_request(*req);
	}

	return cmd;
}

static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;

	cmd = scst_get_cmd(scsi_cmd, &req);
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), scsi_cmd->resid);

	/* Clear out the request structure */
	req->sr_use_sg = 0;
	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown request blk */

	scst_release_request(cmd);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_process_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()),
		0);

out:
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_cmd_done(void *data, char *sense, int result, int resid)
{
	struct scst_cmd *cmd;

	cmd = (struct scst_cmd *)data;
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_process_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);

out:
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
	enum scst_exec_context pref_context)
{
	scst_set_exec_time(cmd);

	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_PRE_DEV_DONE;

#if defined(CONFIG_SCST_DEBUG)
	if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
		if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
				"%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_RCV_TOP,
					"Exec'd sg", sg_virt(&sg[i]),
					sg[i].length);
			}
		}
	}
#endif

	cmd->state = next_state;

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED) &&
	    (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
		PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
			__func__, next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
	}
#endif
	pref_context = scst_optimize_post_exec_context(cmd, pref_context);
	scst_process_redirect_cmd(cmd, pref_context, 0);
}
static int scst_report_luns_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_COMPLETED, rc;
	int dev_cnt = 0;
	int buffer_size;
	int i;
	struct scst_tgt_dev *tgt_dev = NULL;
	uint8_t *buffer;
	int offs, overflow = 0;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	cmd->status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
		PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
			"LUNS command", cmd->cdb[2]);
		goto out_err;
	}

	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	if (buffer_size < 16)
		goto out_put_err;

	memset(buffer, 0, buffer_size);
	offs = 8;

	/*
	 * The cmd doesn't allow suspending activities, so we can access
	 * sess->sess_tgt_dev_list_hash without any additional protection.
	 */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (!overflow) {
				if (offs >= buffer_size) {
					scst_put_buf(cmd, buffer);
					buffer_size = scst_get_buf_next(cmd,
						&buffer);
					if (buffer_size > 0) {
						memset(buffer, 0, buffer_size);
						offs = 0;
					} else
						overflow = 1;
				}
				if (!overflow) {
					if ((buffer_size - offs) < 8) {
						PRINT_ERROR("Buffer allocated "
							"for REPORT LUNS "
							"command is too small "
							"to fit an 8-byte "
							"entry (buffer_size="
							"%d)", buffer_size);
						goto out_put_hw_err;
					}
					buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
					buffer[offs+1] = tgt_dev->lun & 0xff;
					offs += 8;
				}
			}
			dev_cnt++;
		}
	}
	if (!overflow)
		scst_put_buf(cmd, buffer);

	/* Set the response header */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	dev_cnt *= 8;
	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;

	scst_put_buf(cmd, buffer);

	dev_cnt += 8;
	if (dev_cnt < cmd->resp_data_len)
		scst_set_resp_data_len(cmd, dev_cnt);

out_compl:
	cmd->completed = 1;

	/* Clear the leftover REPORTED LUNS DATA CHANGED UA, if any. */

	/*
	 * The cmd doesn't allow suspending activities, so we can access
	 * sess->sess_tgt_dev_list_hash without any additional protection.
	 */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			struct scst_tgt_dev_UA *ua;

			spin_lock_bh(&tgt_dev->tgt_dev_lock);
			list_for_each_entry(ua, &tgt_dev->UA_list,
					UA_list_entry) {
				if (scst_analyze_sense(ua->UA_sense_buffer,
						ua->UA_valid_sense_len,
						SCST_SENSE_ALL_VALID,
						SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
					TRACE_MGMT_DBG("Freeing not needed "
						"REPORTED LUNS DATA CHANGED UA "
						"%p", ua);
					list_del(&ua->UA_list_entry);
					mempool_free(ua, scst_ua_mempool);
					break;
				}
			}
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);

out:
	TRACE_EXIT_RES(res);
	return res;

out_put_err:
	scst_put_buf(cmd, buffer);

out_err:
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	goto out_compl;

out_put_hw_err:
	scst_put_buf(cmd, buffer);

out_hw_err:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_compl;
}
static int scst_request_sense_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	uint8_t *buffer;
	int buffer_size = 0, sl = 0;

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	cmd->status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	spin_lock_bh(&tgt_dev->tgt_dev_lock);

	if (tgt_dev->tgt_dev_valid_sense_len == 0)
		goto out_not_completed;

	TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);

	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_unlock_compl;
	else if (unlikely(buffer_size < 0))
		goto out_unlock_hw_err;

	memset(buffer, 0, buffer_size);

	if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
	     (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
		PRINT_WARNING("%s: Fixed format of the saved sense, but "
			"descriptor format requested. The conversion will "
			"truncate data", cmd->op_name);
		PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
			tgt_dev->tgt_dev_valid_sense_len);

		buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
		sl = scst_set_sense(buffer, buffer_size, true,
			tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
			tgt_dev->tgt_dev_sense[13]);
	} else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
		    (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
		PRINT_WARNING("%s: Descriptor format of the "
			"saved sense, but fixed format requested. The "
			"conversion will truncate data", cmd->op_name);
		PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
			tgt_dev->tgt_dev_valid_sense_len);

		buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
		sl = scst_set_sense(buffer, buffer_size, false,
			tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
			tgt_dev->tgt_dev_sense[3]);
	} else {
		if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
			sl = tgt_dev->tgt_dev_valid_sense_len;
		else {
			sl = buffer_size;
			PRINT_WARNING("%s: The returned sense is truncated to "
				"size %d (needed %d)", cmd->op_name,
				buffer_size, tgt_dev->tgt_dev_valid_sense_len);
		}
		memcpy(buffer, tgt_dev->tgt_dev_sense, sl);
	}

	scst_put_buf(cmd, buffer);

	tgt_dev->tgt_dev_valid_sense_len = 0;

	spin_unlock_bh(&tgt_dev->tgt_dev_lock);

	scst_set_resp_data_len(cmd, sl);

out_compl:
	cmd->completed = 1;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);

out:
	TRACE_EXIT_RES(res);
	return res;

out_unlock_hw_err:
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_compl;

out_unlock_compl:
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);
	goto out_compl;

out_not_completed:
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);
	res = SCST_EXEC_NOT_COMPLETED;
	goto out;
}
static int scst_pre_select(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	scst_block_dev_cmd(cmd, 1);

	/* The check for local events will be done when the cmd is executed */

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_reserve_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%lld)", (long long unsigned int)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_done;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		spin_unlock_bh(&dev->dev_lock);
		scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
		goto out_done;
	}

	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
	}
	dev->dev_reserved = 1;

	spin_unlock_bh(&dev->dev_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_release_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	/*
	 * The device could be RELEASED behind us, if the RESERVING session
	 * is closed (see scst_free_tgt_dev()), but this actually doesn't
	 * matter, so use the lock and don't retest the DEV_RESERVED bits
	 * again.
	 */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;
		cmd->status = 0;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;
		cmd->completed = 1;
	} else {
		list_for_each_entry(tgt_dev_tmp,
				&dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
	}

	spin_unlock_bh(&dev->dev_lock);

	if (res == SCST_EXEC_COMPLETED)
		goto out_done;

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out;
}
/* No locks, no IRQ or IRQ-disabled context allowed */
int scst_check_local_events(struct scst_cmd *cmd)
{
	int res, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;

	/*
	 * There's no race here, because we need to track commands sent
	 * *after* the dev_double_ua_possible flag was set.
	 */
	if (unlikely(dev->dev_double_ua_possible))
		cmd->double_ua_possible = 1;

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_uncomplete;
	}

	/* Reservation check before Unit Attention */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
			&tgt_dev->tgt_dev_flags))) {
		if (cmd->cdb[0] != INQUIRY &&
		    cmd->cdb[0] != REPORT_LUNS &&
		    cmd->cdb[0] != RELEASE &&
		    cmd->cdb[0] != RELEASE_10 &&
		    cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER &&
		    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL ||
		     (cmd->cdb[4] & 3)) &&
		    cmd->cdb[0] != LOG_SENSE &&
		    cmd->cdb[0] != REQUEST_SENSE) {
			scst_set_cmd_error_status(cmd,
				SAM_STAT_RESERVATION_CONFLICT);
			goto out_complete;
		}
	}

	/* If we had an internal bus reset, set the command error unit attention */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(dev->scsi_dev->was_reset)) {
		if (scst_is_ua_command(cmd)) {
			int done = 0;
			/*
			 * Prevent more than 1 cmd to be triggered by
			 * was_reset
			 */
			spin_lock_bh(&dev->dev_lock);
			if (dev->scsi_dev->was_reset) {
				TRACE(TRACE_MGMT, "was_reset is %d", 1);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_reset_UA));
				/*
				 * It looks like it is safe to clear was_reset
				 * here
				 */
				dev->scsi_dev->was_reset = 0;
				done = 1;
			}
			spin_unlock_bh(&dev->dev_lock);

			if (done)
				goto out_complete;
		}
	}

	if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
			&cmd->tgt_dev->tgt_dev_flags))) {
		if (scst_is_ua_command(cmd)) {
			rc = scst_set_pending_UA(cmd);
			if (rc == 0)
				goto out_complete;
		}
	}

	res = 0;

out:
	TRACE_EXIT_RES(res);
	return res;

out_complete:
	res = 1;
	sBUG_ON(!cmd->completed);
	goto out;

out_uncomplete:
	res = -1;
	goto out;
}
EXPORT_SYMBOL(scst_check_local_events);
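/*
 * Example use from a device handler's exec() callback, mirroring the
 * scst_*_local() helpers above (a minimal sketch; my_devh_exec is a
 * hypothetical handler, not part of SCST):
 *
 *	static int my_devh_exec(struct scst_cmd *cmd)
 *	{
 *		int rc = scst_check_local_events(cmd);
 *		if (unlikely(rc != 0))
 *			goto done;
 *		... execute the command, set status/sense ...
 *	done:
 *		cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
 *			SCST_CONTEXT_SAME);
 *		return SCST_EXEC_COMPLETED;
 *	}
 */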
void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
{
	if (slot == NULL)
		goto inc;

	/* Optimized for the lockless fast path */

	TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
		atomic_read(slot));

	if (!atomic_dec_and_test(slot))
		goto out;

	TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
		tgt_dev->num_free_sn_slots);
	if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
		spin_lock_irq(&tgt_dev->sn_lock);
		if (likely(tgt_dev->num_free_sn_slots <
			   (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
			if (tgt_dev->num_free_sn_slots < 0)
				tgt_dev->cur_sn_slot = slot;
			/*
			 * To be in sync with the SIMPLE case in
			 * scst_cmd_set_sn()
			 */
			smp_mb();
			tgt_dev->num_free_sn_slots++;
			TRACE_SN("Incremented num_free_sn_slots (%d)",
				tgt_dev->num_free_sn_slots);
		}
		spin_unlock_irq(&tgt_dev->sn_lock);
	}

inc:
	/*
	 * No protection of expected_sn is needed, because only one thread
	 * at a time can be here (serialized by sn). Also it is supposed that
	 * there could not be half-incremented halves.
	 */
	tgt_dev->expected_sn++;
	/*
	 * The write must be before the def_cmd_count read to be in sync with
	 * scst_post_exec_sn(). See the comment in scst_send_for_exec().
	 */
	smp_mb();
	TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);

out:
	return;
}
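/*
 * Illustration of the slot accounting above (an informal sketch, not
 * authoritative): SIMPLE commands issued while expected_sn == N can
 * share the current slot, each incrementing its counter at set_sn time.
 * When the last of them completes, atomic_dec_and_test() hits zero, the
 * slot is returned to the free pool and expected_sn advances to N+1,
 * which unblocks any deferred commands waiting in scst_send_for_exec().
 */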
static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
	bool make_active)
{
	/* For HQ commands SN is not set */
	bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
		cmd->sn_set && !cmd->retry;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_cmd *res;

	if (inc_expected_sn)
		scst_inc_expected_sn(tgt_dev, cmd->sn_slot);

	if (make_active) {
		scst_make_deferred_commands_active(tgt_dev);
		res = NULL;
	} else
		res = scst_check_deferred_commands(tgt_dev);

	TRACE_EXIT_HRES(res);
	return res;
}
/* cmd must be additionally referenced to not die inside */
static int scst_do_real_exec(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
	int rc;
#endif
	bool atomic = scst_cmd_atomic(cmd);
	struct scst_device *dev = cmd->dev;
	struct scst_dev_type *handler = dev->handler;
	struct io_context *old_ctx = NULL;
	bool ctx_changed = false;

	if (!atomic)
		ctx_changed = scst_set_io_context(cmd, &old_ctx);

	cmd->state = SCST_CMD_STATE_REAL_EXECUTING;

	if (handler->exec) {
		if (unlikely(!dev->handler->exec_atomic && atomic)) {
			/*
			 * It shouldn't happen because of the
			 * SCST_TGT_DEV_AFTER_* optimization.
			 */
			TRACE_DBG("Dev handler %s exec() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		}

		TRACE_DBG("Calling dev handler %s exec(%p)",
			handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
			cmd->cdb_len);
		scst_set_cur_start(cmd);
		res = handler->exec(cmd);
		TRACE_DBG("Dev handler %s exec() returned %d",
			handler->name, res);

		if (res == SCST_EXEC_COMPLETED)
			goto out_complete;
		else if (res == SCST_EXEC_NEED_THREAD)
			goto out_restore;

		scst_set_exec_time(cmd);

		sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
	}

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);

	if (unlikely(dev->scsi_dev == NULL)) {
		PRINT_ERROR("Command for virtual device must be "
			"processed by device handler (LUN %lld)!",
			(long long unsigned int)cmd->lun);
		goto out_error;
	}

	res = scst_check_local_events(cmd);
	if (unlikely(res != 0))
		goto out_done;

#ifndef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
	if (unlikely(atomic)) {
		TRACE_DBG("Pass-through exec() cannot be called in atomic "
			"context, rescheduling to the thread (handler %s)",
			handler->name);
		res = SCST_EXEC_NEED_THREAD;
		goto out_restore;
	}
#endif

	scst_set_cur_start(cmd);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
	if (unlikely(scst_alloc_request(cmd) != 0)) {
		if (atomic) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_INFO("%s", "Unable to allocate request, "
				"sending BUSY status");
			goto out_busy;
		}
	}

	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
		cmd->retries);
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
		cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
		cmd->timeout, cmd->retries, cmd, scst_cmd_done,
		atomic ? GFP_ATOMIC : GFP_KERNEL);
#else
	rc = scst_scsi_exec_async(cmd, scst_cmd_done);
#endif
	if (unlikely(rc != 0)) {
		if (atomic) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_ERROR("scst pass-through exec failed: %x", rc);
			goto out_error;
		}
	}
#endif

out_complete:
	res = SCST_EXEC_COMPLETED;

out_reset_ctx:
	if (ctx_changed)
		scst_reset_io_context(cmd->tgt_dev, old_ctx);

	TRACE_EXIT_RES(res);
	return res;

out_restore:
	scst_set_exec_time(cmd);
	/* Restore the state */
	cmd->state = SCST_CMD_STATE_REAL_EXEC;
	goto out_reset_ctx;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_done;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
out_busy:
	scst_set_busy(cmd);
	/* go through */
#endif

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out_reset_ctx;
}
static inline int scst_real_exec(struct scst_cmd *cmd)
{
	int res;

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_real_exec(cmd);

	if (likely(res == SCST_EXEC_COMPLETED)) {
		scst_post_exec_sn(cmd, true);
		if (cmd->dev->scsi_dev != NULL)
			generic_unplug_device(
				cmd->dev->scsi_dev->request_queue);
	} else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */

	TRACE_EXIT_RES(res);
	return res;
}
static int scst_do_local_exec(struct scst_cmd *cmd)
{
	int res;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	/* Check READ_ONLY device status */
	if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
	    (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
	     cmd->dev->rd_only)) {
		PRINT_WARNING("Attempt of write access to a read-only device: "
			"initiator %s, LUN %lld, op %x",
			cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));
		goto out_done;
	}

	if (!scst_is_cmd_local(cmd)) {
		res = SCST_EXEC_NOT_COMPLETED;
		goto out;
	}

	switch (cmd->cdb[0]) {
	case MODE_SELECT:
	case MODE_SELECT_10:
	case LOG_SELECT:
		res = scst_pre_select(cmd);
		break;
	case RESERVE:
	case RESERVE_10:
		res = scst_reserve_local(cmd);
		break;
	case RELEASE:
	case RELEASE_10:
		res = scst_release_local(cmd);
		break;
	case REPORT_LUNS:
		res = scst_report_luns_local(cmd);
		break;
	case REQUEST_SENSE:
		res = scst_request_sense_local(cmd);
		break;
	default:
		res = SCST_EXEC_NOT_COMPLETED;
		break;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_local_exec(struct scst_cmd *cmd)
{
	int res;

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_local_exec(cmd);
	if (likely(res == SCST_EXEC_NOT_COMPLETED))
		cmd->state = SCST_CMD_STATE_REAL_EXEC;
	else if (res == SCST_EXEC_COMPLETED)
		scst_post_exec_sn(cmd, true);
	else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_exec(struct scst_cmd **active_cmd)
{
	struct scst_cmd *cmd = *active_cmd;
	struct scst_cmd *ref_cmd;
	struct scst_device *dev = cmd->dev;
	int res = SCST_CMD_STATE_RES_CONT_NEXT, count;

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
		goto out;

	/* To protect tgt_dev */
	ref_cmd = cmd;
	__scst_cmd_get(ref_cmd);

	count = 0;
	while (1) {
		int rc;

		cmd->sent_for_exec = 1;
		/*
		 * To sync with scst_abort_cmd(). The above assignment must
		 * come before the SCST_CMD_ABORTED test, done later in
		 * scst_check_local_events(). It's far from here, so the order
		 * is virtually guaranteed, but let's have it just in case.
		 */
		smp_mb();

		cmd->scst_cmd_done = scst_cmd_done_local;
		cmd->state = SCST_CMD_STATE_LOCAL_EXEC;

		rc = scst_do_local_exec(cmd);
		if (likely(rc == SCST_EXEC_NOT_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("%s", "scst_do_local_exec() requested "
				"thread context, rescheduling");
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else {
			sBUG_ON(rc != SCST_EXEC_COMPLETED);
			goto done;
		}

		cmd->state = SCST_CMD_STATE_REAL_EXEC;

		rc = scst_do_real_exec(cmd);
		if (likely(rc == SCST_EXEC_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("scst_real_exec() requested thread "
				"context, rescheduling (cmd %p)", cmd);
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else
			sBUG();

done:
		count++;

		cmd = scst_post_exec_sn(cmd, false);
		if (cmd == NULL)
			break;

		if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
			break;

		__scst_cmd_put(ref_cmd);
		ref_cmd = cmd;
		__scst_cmd_get(ref_cmd);
	}

	*active_cmd = cmd;

	if (count == 0)
		goto out_put;

	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);

out_put:
	__scst_cmd_put(ref_cmd);
	/* !! At this point sess, dev and tgt_dev can already be freed !! */

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_send_for_exec(struct scst_cmd **active_cmd)
{
	int res;
	struct scst_cmd *cmd = *active_cmd;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	typeof(tgt_dev->expected_sn) expected_sn;

	if (unlikely(cmd->internal))
		goto exec;

	if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
		goto exec;

	sBUG_ON(!cmd->sn_set);

	expected_sn = tgt_dev->expected_sn;
	/* Optimized for the lockless fast path */
	if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
		spin_lock_irq(&tgt_dev->sn_lock);

		tgt_dev->def_cmd_count++;
		/*
		 * A memory barrier is needed here to implement the lockless
		 * fast path. We need the exact order of reads and writes
		 * between def_cmd_count and expected_sn. Otherwise, we can
		 * miss the case where expected_sn was changed to be equal to
		 * cmd->sn while we are queuing cmd to the deferred list after
		 * the expected_sn read below. That would lead to a forever
		 * stuck command. But with the barrier, in such a case
		 * __scst_check_deferred_commands() will be called and it will
		 * take sn_lock, so we will be synchronized.
		 */
		smp_mb();

		expected_sn = tgt_dev->expected_sn;
		if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
			if (unlikely(test_bit(SCST_CMD_ABORTED,
					&cmd->cmd_flags))) {
				/* Necessary to allow aborting out of sn cmds */
				TRACE_MGMT_DBG("Aborting out of sn cmd %p "
					"(tag %llu, sn %lu)", cmd,
					(long long unsigned)cmd->tag, cmd->sn);
				tgt_dev->def_cmd_count--;
				scst_set_cmd_abnormal_done_state(cmd);
				res = SCST_CMD_STATE_RES_CONT_SAME;
			} else {
				TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
					"expected_sn=%ld)", cmd, cmd->sn,
					cmd->sn_set, expected_sn);
				list_add_tail(&cmd->sn_cmd_list_entry,
					&tgt_dev->deferred_cmd_list);
				res = SCST_CMD_STATE_RES_CONT_NEXT;
			}
			spin_unlock_irq(&tgt_dev->sn_lock);
			goto out;
		} else {
			TRACE_SN("Somebody incremented expected_sn %ld, "
				"continuing", expected_sn);
			tgt_dev->def_cmd_count--;
			spin_unlock_irq(&tgt_dev->sn_lock);
		}
	}

exec:
	res = scst_exec(active_cmd);

out:
	TRACE_EXIT_HRES(res);
	return res;
}
2371 /* No locks supposed to be held */
2372 static int scst_check_sense(struct scst_cmd *cmd)
2375 struct scst_device *dev = cmd->dev;
2379 if (unlikely(cmd->ua_ignore))
2382 /* If we had internal bus reset behind us, set the command error UA */
2383 if ((dev->scsi_dev != NULL) &&
2384 unlikely(cmd->host_status == DID_RESET) &&
2385 scst_is_ua_command(cmd)) {
2386 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
2387 dev->scsi_dev->was_reset, cmd->host_status);
2388 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
2389 /* It looks like it is safe to clear was_reset here */
2390 dev->scsi_dev->was_reset = 0;
2393 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2394 SCST_SENSE_VALID(cmd->sense)) {
2395 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2396 cmd->sense_valid_len);
2398 /* Check Unit Attention Sense Key */
2399 if (scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
2400 if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2401 SCST_SENSE_ASC_VALID,
2402 0, SCST_SENSE_ASC_UA_RESET, 0)) {
2403 if (cmd->double_ua_possible) {
2404 TRACE(TRACE_MGMT_MINOR, "Double UA "
2405 "detected for device %p", dev);
2406 TRACE(TRACE_MGMT_MINOR, "Retrying cmd"
2407 " %p (tag %llu)", cmd,
2408 (long long unsigned)cmd->tag);
2411 cmd->msg_status = 0;
2412 cmd->host_status = DID_OK;
2413 cmd->driver_status = 0;
2415 mempool_free(cmd->sense,
2416 scst_sense_mempool);
2419 scst_check_restore_sg_buff(cmd);
2421 sBUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
2422 cmd->data_direction =
2423 cmd->dbl_ua_orig_data_direction;
2424 cmd->resp_data_len =
2425 cmd->dbl_ua_orig_resp_data_len;
2427 cmd->state = SCST_CMD_STATE_REAL_EXEC;
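/*
 * Note: the command is transparently retried from the REAL_EXEC state
 * with its original data direction and response length restored, so
 * the initiator never sees this intermediate reset UA.
 */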
2433 scst_dev_check_set_UA(dev, cmd, cmd->sense,
2434 cmd->sense_valid_len);
2438 if (unlikely(cmd->double_ua_possible)) {
2439 if (scst_is_ua_command(cmd)) {
2440 TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
2441 "cmd %p)", dev, cmd);
2443 * Lock used to protect other flags in the bitfield
2444 * (just in case, actually). Those flags can't be
2445 * changed in parallel, because the device is
* blocked. */
2448 spin_lock_bh(&dev->dev_lock);
2449 dev->dev_double_ua_possible = 0;
2450 spin_unlock_bh(&dev->dev_lock);
2455 TRACE_EXIT_RES(res);
2459 static int scst_check_auto_sense(struct scst_cmd *cmd)
2465 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2466 (!SCST_SENSE_VALID(cmd->sense) ||
2467 SCST_NO_SENSE(cmd->sense))) {
2468 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2469 "cmd->status=%x, cmd->msg_status=%x, "
2470 "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
2471 cmd->status, cmd->msg_status, cmd->host_status,
2472 cmd->driver_status, cmd);
2474 } else if (unlikely(cmd->host_status)) {
2475 if ((cmd->host_status == DID_REQUEUE) ||
2476 (cmd->host_status == DID_IMM_RETRY) ||
2477 (cmd->host_status == DID_SOFT_ERROR) ||
2478 (cmd->host_status == DID_ABORT)) {
2481 TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2482 "received, returning HARDWARE ERROR instead "
2483 "(cmd %p)", cmd->host_status, cmd);
2484 scst_set_cmd_error(cmd,
2485 SCST_LOAD_SENSE(scst_sense_hardw_error));
2489 TRACE_EXIT_RES(res);
2493 static int scst_pre_dev_done(struct scst_cmd *cmd)
2495 int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
2499 if (unlikely(scst_check_auto_sense(cmd))) {
2500 PRINT_INFO("Command finished with CHECK CONDITION, but "
2501 "without sense data (opcode 0x%x), issuing "
2502 "REQUEST SENSE", cmd->cdb[0]);
2503 rc = scst_prepare_request_sense(cmd);
2505 res = SCST_CMD_STATE_RES_CONT_NEXT;
2507 PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
2508 "returning HARDWARE ERROR");
2509 scst_set_cmd_error(cmd,
2510 SCST_LOAD_SENSE(scst_sense_hardw_error));
2513 } else if (unlikely(scst_check_sense(cmd)))
2516 if (likely(scsi_status_is_good(cmd->status))) {
2517 unsigned char type = cmd->dev->type;
2518 if (unlikely((cmd->cdb[0] == MODE_SENSE ||
2519 cmd->cdb[0] == MODE_SENSE_10)) &&
2520 (cmd->tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
2521 cmd->dev->rd_only) &&
2522 (type == TYPE_DISK ||
2523 type == TYPE_WORM ||
2525 type == TYPE_TAPE)) {
2530 length = scst_get_buf_first(cmd, &address);
2532 PRINT_ERROR("%s", "Unable to get "
2533 "MODE_SENSE buffer");
2534 scst_set_cmd_error(cmd,
2536 SCST_LOAD_SENSE(scst_sense_hardw_error));
2538 } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
2539 address[2] |= 0x80; /* Write Protect */
2540 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
2541 address[3] |= 0x80; /* Write Protect */
2542 scst_put_buf(cmd, address);
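/*
 * Per SPC, byte 2 of the MODE SENSE(6) parameter header (byte 3 for
 * MODE SENSE(10)) is the device-specific parameter; for the device
 * types checked above, bit 7 (0x80) is the WP (write protect) flag,
 * which is forced on here for read-only mappings.
 */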
2549 * Check and clear NormACA option for the device, if necessary,
2550 * since we don't support ACA
2552 if (unlikely((cmd->cdb[0] == INQUIRY)) &&
2553 /* Std INQUIRY data (no EVPD) */
2554 !(cmd->cdb[1] & SCST_INQ_EVPD) &&
2555 (cmd->resp_data_len > SCST_INQ_BYTE3)) {
2560 /* TODO: all pages? */
2561 buflen = scst_get_buf_first(cmd, &buffer);
2562 if (buflen > SCST_INQ_BYTE3) {
2563 #ifdef CONFIG_SCST_EXTRACHECKS
2564 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2565 PRINT_INFO("NormACA set for device: "
2566 "lun=%lld, type 0x%02x. Clear it, "
2567 "since it's unsupported.",
2568 (long long unsigned int)cmd->lun,
2572 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2573 } else if (buflen != 0) {
2574 PRINT_ERROR("%s", "Unable to get INQUIRY "
"buffer");
2576 scst_set_cmd_error(cmd,
2577 SCST_LOAD_SENSE(scst_sense_hardw_error));
2581 scst_put_buf(cmd, buffer);
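/*
 * In standard INQUIRY data, NormACA is bit 5 of byte 3 (hence
 * SCST_INQ_BYTE3/SCST_INQ_NORMACA_BIT above); clearing it advertises
 * to initiators that ACA is not supported.
 */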
2587 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2588 (cmd->cdb[0] == MODE_SELECT_10) ||
2589 (cmd->cdb[0] == LOG_SELECT))) {
2591 "MODE/LOG SELECT succeeded (LUN %lld)",
2592 (long long unsigned int)cmd->lun);
2593 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2597 if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
2598 if (!test_bit(SCST_TGT_DEV_RESERVED,
2599 &cmd->tgt_dev->tgt_dev_flags)) {
2600 struct scst_tgt_dev *tgt_dev_tmp;
2601 struct scst_device *dev = cmd->dev;
2604 "Real RESERVE failed lun=%lld, "
2606 (long long unsigned int)cmd->lun,
2608 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2609 cmd->sense_valid_len);
2611 /* Clearing the reservation */
2612 spin_lock_bh(&dev->dev_lock);
2613 list_for_each_entry(tgt_dev_tmp,
2614 &dev->dev_tgt_dev_list,
2615 dev_tgt_dev_list_entry) {
2616 clear_bit(SCST_TGT_DEV_RESERVED,
2617 &tgt_dev_tmp->tgt_dev_flags);
2619 dev->dev_reserved = 0;
2620 spin_unlock_bh(&dev->dev_lock);
2624 /* Check for MODE PARAMETERS CHANGED UA */
2625 if ((cmd->dev->scsi_dev != NULL) &&
2626 (cmd->status == SAM_STAT_CHECK_CONDITION) &&
2627 scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
2628 scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2629 SCST_SENSE_ASCx_VALID,
2631 TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
2632 "%lld)", (long long unsigned int)cmd->lun);
2633 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2638 cmd->state = SCST_CMD_STATE_DEV_DONE;
2641 TRACE_EXIT_RES(res);
2645 static int scst_mode_select_checks(struct scst_cmd *cmd)
2647 int res = SCST_CMD_STATE_RES_CONT_SAME;
2648 int atomic = scst_cmd_atomic(cmd);
2652 if (likely(scsi_status_is_good(cmd->status))) {
2653 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2654 (cmd->cdb[0] == MODE_SELECT_10) ||
2655 (cmd->cdb[0] == LOG_SELECT))) {
2656 struct scst_device *dev = cmd->dev;
2658 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
2660 if (atomic && (dev->scsi_dev != NULL)) {
2661 TRACE_DBG("%s", "MODE/LOG SELECT: thread "
2662 "context required");
2663 res = SCST_CMD_STATE_RES_NEED_THREAD;
2667 TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2668 "setting the SELECT UA (lun=%lld)",
2669 (long long unsigned int)cmd->lun);
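/* ASC/ASCQ 0x2a/0x01 = MODE PARAMETERS CHANGED, 0x2a/0x02 = LOG PARAMETERS CHANGED */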
2671 spin_lock_bh(&dev->dev_lock);
2672 if (cmd->cdb[0] == LOG_SELECT) {
2673 sl = scst_set_sense(sense_buffer,
2674 sizeof(sense_buffer),
2676 UNIT_ATTENTION, 0x2a, 0x02);
2678 sl = scst_set_sense(sense_buffer,
2679 sizeof(sense_buffer),
2681 UNIT_ATTENTION, 0x2a, 0x01);
2683 scst_dev_check_set_local_UA(dev, cmd, sense_buffer, sl);
2684 spin_unlock_bh(&dev->dev_lock);
2686 if (dev->scsi_dev != NULL)
2687 scst_obtain_device_parameters(dev);
2689 } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
2690 scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
2691 /* mode parameters changed */
2692 (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2693 SCST_SENSE_ASCx_VALID,
2695 scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2696 SCST_SENSE_ASC_VALID,
2697 0, 0x29, 0) /* reset */ ||
2698 scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2699 SCST_SENSE_ASC_VALID,
2700 0, 0x28, 0) /* medium changed */ ||
2701 /* cleared by another ini (just in case) */
2702 scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
2703 SCST_SENSE_ASC_VALID,
2706 TRACE_DBG("Possible parameters changed UA %x: "
2707 "thread context required", cmd->sense[12]);
2708 res = SCST_CMD_STATE_RES_NEED_THREAD;
2712 TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
2713 "(LUN %lld): getting new parameters", cmd->sense[12],
2714 (long long unsigned int)cmd->lun);
2716 scst_obtain_device_parameters(cmd->dev);
2720 cmd->state = SCST_CMD_STATE_DEV_DONE;
2723 TRACE_EXIT_HRES(res);
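/*
 * Releases the command's SN slot, advancing expected_sn as appropriate,
 * and then re-activates any deferred commands that became eligible.
 */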
2727 static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
2729 if (likely(cmd->sn_set))
2730 scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
2732 scst_make_deferred_commands_active(cmd->tgt_dev);
2735 static int scst_dev_done(struct scst_cmd *cmd)
2737 int res = SCST_CMD_STATE_RES_CONT_SAME;
2739 struct scst_device *dev = cmd->dev;
2743 state = SCST_CMD_STATE_PRE_XMIT_RESP;
2745 if (likely(!scst_is_cmd_fully_local(cmd)) &&
2746 likely(dev->handler->dev_done != NULL)) {
2749 if (unlikely(!dev->handler->dev_done_atomic &&
2750 scst_cmd_atomic(cmd))) {
2752 * It shouldn't happen because of the SCST_TGT_DEV_AFTER_*
* optimization. */
2755 TRACE_DBG("Dev handler %s dev_done() needs thread "
2756 "context, rescheduling", dev->handler->name);
2757 res = SCST_CMD_STATE_RES_NEED_THREAD;
2761 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2762 dev->handler->name, cmd);
2763 scst_set_cur_start(cmd);
2764 rc = dev->handler->dev_done(cmd);
2765 scst_set_dev_done_time(cmd);
2766 TRACE_DBG("Dev handler %s dev_done() returned %d",
2767 dev->handler->name, rc);
2768 if (rc != SCST_CMD_STATE_DEFAULT)
2773 case SCST_CMD_STATE_PRE_XMIT_RESP:
2774 case SCST_CMD_STATE_DEV_PARSE:
2775 case SCST_CMD_STATE_PRE_PARSE:
2776 case SCST_CMD_STATE_PREPARE_SPACE:
2777 case SCST_CMD_STATE_RDY_TO_XFER:
2778 case SCST_CMD_STATE_TGT_PRE_EXEC:
2779 case SCST_CMD_STATE_SEND_FOR_EXEC:
2780 case SCST_CMD_STATE_LOCAL_EXEC:
2781 case SCST_CMD_STATE_REAL_EXEC:
2782 case SCST_CMD_STATE_PRE_DEV_DONE:
2783 case SCST_CMD_STATE_MODE_SELECT_CHECKS:
2784 case SCST_CMD_STATE_DEV_DONE:
2785 case SCST_CMD_STATE_XMIT_RESP:
2786 case SCST_CMD_STATE_FINISHED:
2787 case SCST_CMD_STATE_FINISHED_INTERNAL:
2791 case SCST_CMD_STATE_NEED_THREAD_CTX:
2792 TRACE_DBG("Dev handler %s dev_done() requested "
2793 "thread context, rescheduling",
2794 dev->handler->name);
2795 res = SCST_CMD_STATE_RES_NEED_THREAD;
2800 PRINT_ERROR("Dev handler %s dev_done() returned "
2801 "invalid cmd state %d",
2802 dev->handler->name, state);
2804 PRINT_ERROR("Dev handler %s dev_done() returned "
2805 "error %d", dev->handler->name,
2808 scst_set_cmd_error(cmd,
2809 SCST_LOAD_SENSE(scst_sense_hardw_error));
2810 scst_set_cmd_abnormal_done_state(cmd);
2814 if (cmd->needs_unblocking)
2815 scst_unblock_dev_cmd(cmd);
2817 if (likely(cmd->dec_on_dev_needed))
2818 scst_dec_on_dev_cmd(cmd);
2820 if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
2821 scst_inc_check_expected_sn(cmd);
2823 if (unlikely(cmd->internal))
2824 cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
2827 TRACE_EXIT_HRES(res);
2831 static int scst_pre_xmit_response(struct scst_cmd *cmd)
2834 struct scst_session *sess = cmd->sess;
2838 EXTRACHECKS_BUG_ON(cmd->internal);
2840 #ifdef CONFIG_SCST_DEBUG_TM
2841 if (cmd->tm_dbg_delayed &&
2842 !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2843 if (scst_cmd_atomic(cmd)) {
2844 TRACE_MGMT_DBG("%s",
2845 "DEBUG_TM delayed cmd needs a thread");
2846 res = SCST_CMD_STATE_RES_NEED_THREAD;
2849 TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
2851 schedule_timeout_uninterruptible(HZ);
2855 if (likely(cmd->tgt_dev != NULL)) {
2856 atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
2857 atomic_dec(&cmd->dev->dev_cmd_count);
2858 /* If the expected values were not set, the expected direction is UNKNOWN */
2859 if (cmd->expected_data_direction & SCST_DATA_WRITE)
2860 atomic_dec(&cmd->dev->write_cmd_count);
2862 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2863 scst_on_hq_cmd_response(cmd);
2865 if (unlikely(!cmd->sent_for_exec)) {
2866 TRACE_SN("cmd %p was not sent to mid-level"
2867 " (sn %ld, set %d)",
2868 cmd, cmd->sn, cmd->sn_set);
2869 scst_unblock_deferred(cmd->tgt_dev, cmd);
2870 cmd->sent_for_exec = 1;
2875 * If we don't remove cmd from the search list here, before
2876 * submitting it for transmission, we will have a race when, for
2877 * some reason, cmd's release is delayed after transmission and
2878 * the initiator sends a cmd with the same tag => it is possible
2879 * that a wrong cmd will be found by the find() functions.
2881 spin_lock_irq(&sess->sess_list_lock);
2882 list_move_tail(&cmd->sess_cmd_list_entry,
2883 &sess->after_pre_xmit_cmd_list);
2884 spin_unlock_irq(&sess->sess_list_lock);
2887 smp_mb(); /* to sync with scst_abort_cmd() */
2889 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2890 scst_xmit_process_aborted_cmd(cmd);
2891 else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION))
2892 scst_store_sense(cmd);
2894 if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2895 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu),"
2897 cmd, (long long unsigned int)cmd->tag);
2898 cmd->state = SCST_CMD_STATE_FINISHED;
2899 res = SCST_CMD_STATE_RES_CONT_SAME;
2903 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2904 res = SCST_CMD_STATE_RES_CONT_SAME;
2907 TRACE_EXIT_HRES(res);
2911 static int scst_xmit_response(struct scst_cmd *cmd)
2913 struct scst_tgt_template *tgtt = cmd->tgtt;
2918 EXTRACHECKS_BUG_ON(cmd->internal);
2920 if (unlikely(!tgtt->xmit_response_atomic &&
2921 scst_cmd_atomic(cmd))) {
2923 * It shouldn't happen because of the SCST_TGT_DEV_AFTER_*
* optimization. */
2926 TRACE_DBG("Target driver %s xmit_response() needs thread "
2927 "context, rescheduling", tgtt->name);
2928 res = SCST_CMD_STATE_RES_NEED_THREAD;
2933 int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
2935 res = SCST_CMD_STATE_RES_CONT_NEXT;
2936 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2938 TRACE_DBG("Calling xmit_response(%p)", cmd);
2940 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2941 if (trace_flag & TRACE_SND_BOT) {
2943 struct scatterlist *sg;
2944 if (cmd->tgt_sg != NULL)
2949 TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
2950 "(sg_cnt %d, sg %p, sg[0].page %p)",
2951 cmd, cmd->tgt_sg_cnt, sg,
2952 (void *)sg_page(&sg[0]));
2953 for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
2954 PRINT_BUFF_FLAG(TRACE_SND_BOT,
2955 "Xmitting sg", sg_virt(&sg[i]),
2962 if (tgtt->on_hw_pending_cmd_timeout != NULL) {
2963 struct scst_session *sess = cmd->sess;
2964 cmd->hw_pending_start = jiffies;
2965 cmd->cmd_hw_pending = 1;
2966 if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
2967 TRACE_DBG("Sched HW pending work for sess %p "
2968 "(max time %d)", sess,
2969 tgtt->max_hw_pending_time);
2970 set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
2971 &sess->sess_aflags);
2972 schedule_delayed_work(&sess->hw_pending_work,
2973 tgtt->max_hw_pending_time * HZ);
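/*
 * The delayed work serves as a watchdog: if the command remains
 * hardware-pending for longer than max_hw_pending_time, the target
 * template's on_hw_pending_cmd_timeout() callback is invoked for it.
 */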
2977 scst_set_cur_start(cmd);
2979 #ifdef CONFIG_SCST_DEBUG_RETRY
2980 if (((scst_random() % 100) == 77))
2981 rc = SCST_TGT_RES_QUEUE_FULL;
2984 rc = tgtt->xmit_response(cmd);
2985 TRACE_DBG("xmit_response() returned %d", rc);
2987 if (likely(rc == SCST_TGT_RES_SUCCESS))
2990 scst_set_xmit_time(cmd);
2992 cmd->cmd_hw_pending = 0;
2994 /* Restore the previous state */
2995 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2998 case SCST_TGT_RES_QUEUE_FULL:
2999 if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
3004 case SCST_TGT_RES_NEED_THREAD_CTX:
3005 TRACE_DBG("Target driver %s xmit_response() "
3006 "requested thread context, rescheduling",
3008 res = SCST_CMD_STATE_RES_NEED_THREAD;
3018 /* Caution: cmd may already be dead here */
3019 TRACE_EXIT_HRES(res);
3023 if (rc == SCST_TGT_RES_FATAL_ERROR) {
3024 PRINT_ERROR("Target driver %s xmit_response() returned "
3025 "fatal error", tgtt->name);
3027 PRINT_ERROR("Target driver %s xmit_response() returned "
3028 "invalid value %d", tgtt->name, rc);
3030 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
3031 cmd->state = SCST_CMD_STATE_FINISHED;
3032 res = SCST_CMD_STATE_RES_CONT_SAME;
3036 void scst_tgt_cmd_done(struct scst_cmd *cmd,
3037 enum scst_exec_context pref_context)
3041 sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
3043 scst_set_xmit_time(cmd);
3045 cmd->cmd_hw_pending = 0;
3047 cmd->state = SCST_CMD_STATE_FINISHED;
3048 scst_process_redirect_cmd(cmd, pref_context, 1);
3053 EXPORT_SYMBOL(scst_tgt_cmd_done);
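/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a target driver typically calls scst_tgt_cmd_done() from the
 * completion path of the response it started in xmit_response(), e.g.
 * from its hardware completion handler. my_io and my_tgt_xmit_complete
 * are made-up names.
 */
#if 0
static void my_tgt_xmit_complete(struct my_io *io)
{
	struct scst_cmd *cmd = io->scst_cmd;

	/* The response reached the initiator; let SCST finish the cmd */
	scst_tgt_cmd_done(cmd, SCST_CONTEXT_TASKLET);
}
#endif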
3055 static int scst_finish_cmd(struct scst_cmd *cmd)
3058 struct scst_session *sess = cmd->sess;
3062 scst_update_lat_stats(cmd);
3064 if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
3065 if ((cmd->tgt_dev != NULL) &&
3066 scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
3067 /* This UA delivery failed, so we need to requeue it */
3068 if (scst_cmd_atomic(cmd) &&
3069 scst_is_ua_global(cmd->sense, cmd->sense_valid_len)) {
3070 TRACE_MGMT_DBG("Requeuing of global UA for "
3071 "failed cmd %p needs a thread", cmd);
3072 res = SCST_CMD_STATE_RES_NEED_THREAD;
3075 scst_requeue_ua(cmd);
3079 atomic_dec(&sess->sess_cmd_count);
3081 spin_lock_irq(&sess->sess_list_lock);
3082 list_del(&cmd->sess_cmd_list_entry);
3083 spin_unlock_irq(&sess->sess_list_lock);
3086 smp_mb(); /* to sync with scst_abort_cmd() */
3088 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
3089 TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
3090 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3091 atomic_read(&scst_cmd_count));
3093 scst_finish_cmd_mgmt(cmd);
3096 __scst_cmd_put(cmd);
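/* Drops the initial command reference; if it was the last one, cmd is freed here */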
3098 res = SCST_CMD_STATE_RES_CONT_NEXT;
3101 TRACE_EXIT_HRES(res);
3106 * No locks, but it must be externally serialized (see comment for
3107 * scst_cmd_init_done() in scst.h)
3109 static void scst_cmd_set_sn(struct scst_cmd *cmd)
3111 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3112 unsigned long flags;
3116 if (scst_is_implicit_hq(cmd)) {
3117 TRACE_SN("Implicit HQ cmd %p", cmd);
3118 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3121 EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
3123 /* Optimized for lockless fast path */
3125 scst_check_debug_sn(cmd);
3127 if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
3129 * Not the best way, but good enough until it becomes
3130 * possible to specify the queue type when submitting
3131 * pass-through commands.
3133 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3136 switch (cmd->queue_type) {
3137 case SCST_CMD_QUEUE_SIMPLE:
3138 case SCST_CMD_QUEUE_UNTAGGED:
3139 #if 0 /* left for future performance investigations */
3140 if (scst_cmd_is_expected_set(cmd)) {
3141 if ((cmd->expected_data_direction == SCST_DATA_READ) &&
3142 (atomic_read(&cmd->dev->write_cmd_count) == 0))
3147 if (likely(tgt_dev->num_free_sn_slots >= 0)) {
3149 * atomic_inc_return() implies memory barrier to sync
3150 * with scst_inc_expected_sn()
3152 if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
3154 TRACE_SN("Incremented curr_sn %ld",
3157 cmd->sn_slot = tgt_dev->cur_sn_slot;
3158 cmd->sn = tgt_dev->curr_sn;
3160 tgt_dev->prev_cmd_ordered = 0;
3162 TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
3163 "%zd", ARRAY_SIZE(tgt_dev->sn_slots));
3168 case SCST_CMD_QUEUE_ORDERED:
3169 TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
3171 if (!tgt_dev->prev_cmd_ordered) {
3172 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3173 if (tgt_dev->num_free_sn_slots >= 0) {
3174 tgt_dev->num_free_sn_slots--;
3175 if (tgt_dev->num_free_sn_slots >= 0) {
3177 /* Commands can finish in any order, so
3178 * we don't know which slot is empty.
*/
3181 tgt_dev->cur_sn_slot++;
3182 if (tgt_dev->cur_sn_slot ==
3183 tgt_dev->sn_slots + ARRAY_SIZE(tgt_dev->sn_slots))
3184 tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
3186 if (atomic_read(tgt_dev->cur_sn_slot) == 0)
3190 sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
3192 TRACE_SN("New cur SN slot %zd",
3193 tgt_dev->cur_sn_slot -
3197 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3199 tgt_dev->prev_cmd_ordered = 1;
3201 cmd->sn = tgt_dev->curr_sn;
3204 case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
3205 TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
3206 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3207 tgt_dev->hq_cmd_count++;
3208 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3209 cmd->hq_cmd_inced = 1;
3216 TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
3217 "num_free_sn_slots %d, prev_cmd_ordered %ld, "
3218 "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
3219 atomic_read(tgt_dev->cur_sn_slot),
3220 tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
3221 tgt_dev->cur_sn_slot - tgt_dev->sn_slots);
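/*
 * To summarize the SN machinery above: SIMPLE/UNTAGGED commands share
 * the current SN slot and the current SN; an ORDERED command gets a
 * new SN on a fresh slot, so it stays deferred until all earlier SNs
 * have completed; HEAD OF QUEUE commands bypass SN ordering entirely
 * and are only accounted in hq_cmd_count. Slots drain via
 * scst_inc_expected_sn(), which advances expected_sn and lets deferred
 * commands run.
 */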
3231 * Returns 0 on success, > 0 when we need to wait for unblock,
3232 * < 0 if there is no device (lun) or device type handler.
3234 * No locks, but might be on IRQ; protection is provided by the
3235 * suspended-activity mechanism.
3237 static int scst_translate_lun(struct scst_cmd *cmd)
3239 struct scst_tgt_dev *tgt_dev = NULL;
3244 /* See comment about smp_mb() in scst_suspend_activity() */
3247 if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
3248 struct list_head *sess_tgt_dev_list_head =
3249 &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
3250 TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
3251 (long long unsigned int)cmd->lun);
3253 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3254 sess_tgt_dev_list_entry) {
3255 if (tgt_dev->lun == cmd->lun) {
3256 TRACE_DBG("tgt_dev %p found", tgt_dev);
3258 if (unlikely(tgt_dev->dev->handler ==
3259 &scst_null_devtype)) {
3260 PRINT_INFO("Dev handler for device "
3261 "%lld is NULL, the device will not "
3262 "be visible remotely",
3263 (long long unsigned int)cmd->lun);
3267 cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
3268 cmd->tgt_dev = tgt_dev;
3269 cmd->dev = tgt_dev->dev;
3277 "tgt_dev for LUN %lld not found, command to "
3279 (long long unsigned int)cmd->lun);
3283 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
3288 TRACE_EXIT_RES(res);
3293 * No locks, but might be on IRQ
3295 * Returns 0 on success, > 0 when we need to wait for unblock,
3296 * < 0 if there is no device (lun) or device type handler.
3298 static int __scst_init_cmd(struct scst_cmd *cmd)
3304 res = scst_translate_lun(cmd);
3305 if (likely(res == 0)) {
3307 bool failure = false;
3309 cmd->state = SCST_CMD_STATE_PRE_PARSE;
3311 cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
3312 if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
3313 TRACE(TRACE_MGMT_MINOR,
3314 "Too many pending commands (%d) in "
3315 "session, returning BUSY to initiator \"%s\"",
3316 cnt, (cmd->sess->initiator_name[0] == '\0') ?
3317 "Anonymous" : cmd->sess->initiator_name);
3321 cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
3322 if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
3324 TRACE(TRACE_MGMT_MINOR,
3325 "Too many pending device "
3326 "commands (%d), returning BUSY to "
3327 "initiator \"%s\"", cnt,
3328 (cmd->sess->initiator_name[0] == '\0') ?
"Anonymous" :
3330 cmd->sess->initiator_name);
3335 /* If the expected values were not set, the expected direction is UNKNOWN */
3336 if (cmd->expected_data_direction & SCST_DATA_WRITE)
3337 atomic_inc(&cmd->dev->write_cmd_count);
3339 if (unlikely(failure))
3342 if (!cmd->set_sn_on_restart_cmd)
3343 scst_cmd_set_sn(cmd);
3344 } else if (res < 0) {
3345 TRACE_DBG("Finishing cmd %p", cmd);
3346 scst_set_cmd_error(cmd,
3347 SCST_LOAD_SENSE(scst_sense_lun_not_supported));
3348 scst_set_cmd_abnormal_done_state(cmd);
3353 TRACE_EXIT_RES(res);
3358 scst_set_cmd_abnormal_done_state(cmd);
3362 /* Called under scst_init_lock and IRQs disabled */
3363 static void scst_do_job_init(void)
3364 __releases(&scst_init_lock)
3365 __acquires(&scst_init_lock)
3367 struct scst_cmd *cmd;
3374 * There is no need for a read barrier here, because we don't care
3375 * where this check is done.
3377 susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
3378 if (scst_init_poll_cnt > 0)
3379 scst_init_poll_cnt--;
3381 list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
3383 if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
3385 if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3386 spin_unlock_irq(&scst_init_lock);
3387 rc = __scst_init_cmd(cmd);
3388 spin_lock_irq(&scst_init_lock);
3390 TRACE_MGMT_DBG("%s",
3391 "FLAG SUSPENDED set, restarting");
3395 TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
3396 cmd, (long long unsigned int)cmd->tag);
3397 scst_set_cmd_abnormal_done_state(cmd);
3401 * Deleting cmd from the init cmd list after __scst_init_cmd()
3402 * is necessary to keep the check in scst_init_cmd() correct
3403 * and to preserve the order of commands.
3405 * We don't care about the race where the init cmd list is empty
3406 * and one command sees that it just was not empty, so it is
3407 * inserting itself into it, while another command at the same
3408 * time sees the init cmd list empty and goes directly, because
3409 * this could affect only commands from the same initiator to the
3410 * same tgt_dev, and scst_cmd_init_done*() doesn't guarantee the
3411 * order of such simultaneous calls anyway.
3413 TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
3414 smp_wmb(); /* enforce the required order */
3415 list_del(&cmd->cmd_list_entry);
3416 spin_unlock(&scst_init_lock);
3418 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3419 TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
3420 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3421 list_add(&cmd->cmd_list_entry,
3422 &cmd->cmd_lists->active_cmd_list);
3424 list_add_tail(&cmd->cmd_list_entry,
3425 &cmd->cmd_lists->active_cmd_list);
3426 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3427 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
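/*
 * The now-initialized command goes to its device's active list (HEAD
 * OF QUEUE commands at the head) and one of the processing threads is
 * woken up to handle it.
 */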
3429 spin_lock(&scst_init_lock);
3433 /* It isn't really needed, but let's keep it */
3434 if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3441 static inline int test_init_cmd_list(void)
3443 int res = (!list_empty(&scst_init_cmd_list) &&
3444 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3445 unlikely(kthread_should_stop()) ||
3446 (scst_init_poll_cnt > 0);
3450 int scst_init_thread(void *arg)
3454 PRINT_INFO("Init thread started, PID %d", current->pid);
3456 current->flags |= PF_NOFREEZE;
3458 set_user_nice(current, -10);
3460 spin_lock_irq(&scst_init_lock);
3461 while (!kthread_should_stop()) {
3463 init_waitqueue_entry(&wait, current);
3465 if (!test_init_cmd_list()) {
3466 add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
3469 set_current_state(TASK_INTERRUPTIBLE);
3470 if (test_init_cmd_list())
3472 spin_unlock_irq(&scst_init_lock);
3474 spin_lock_irq(&scst_init_lock);
3476 set_current_state(TASK_RUNNING);
3477 remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
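/*
 * The loop above open-codes an exclusive interruptible wait, because
 * the condition must be re-checked under scst_init_lock, which has to
 * be dropped around schedule(). Ignoring those constraints, it is
 * roughly equivalent to this illustrative call:
 */
#if 0
	wait_event_interruptible(scst_init_cmd_list_waitQ,
				 test_init_cmd_list());
#endif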
3481 spin_unlock_irq(&scst_init_lock);
3484 * If kthread_should_stop() is true, we are guaranteed to be
3485 * in module unload, so scst_init_cmd_list must be empty.