 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "scst.h"
#include "scst_priv.h"

static void scst_cmd_set_sn(struct scst_cmd *cmd);
static int __scst_init_cmd(struct scst_cmd *cmd);
static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
	uint64_t tag);
static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries);
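/*
 * Queues the command on the current CPU's tasklet command list and
 * schedules the tasklet, so the command is processed in softirq (i.e.
 * atomic) context.
 */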
static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
{
	struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
	unsigned long flags;

	spin_lock_irqsave(&t->tasklet_lock, flags);
	TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
		smp_processor_id());
	list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
	spin_unlock_irqrestore(&t->tasklet_lock, flags);

	tasklet_schedule(&t->tasklet);
}
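/*
 * Typical usage from a target driver's receive path (a sketch only; the
 * my_tgt_reject() helper is hypothetical, not part of SCST):
 *
 *	cmd = scst_rx_cmd(sess, lun, lun_len, cdb, cdb_len, in_irq());
 *	if (cmd == NULL) {
 *		my_tgt_reject(...);	/@ out of memory, e.g. report BUSY @/
 *		return;
 *	}
 *	scst_cmd_set_tag(cmd, tag);
 *	scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
 */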
/*
 * Must not be called in parallel with scst_unregister_session_ex() for the
 * same session.
 */
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
	const uint8_t *lun, int lun_len,
	const uint8_t *cdb, int cdb_len, int atomic)
{
	struct scst_cmd *cmd;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
		PRINT_CRIT_ERROR("%s",
			"New cmd while shutting down the session");
	}
#endif

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (cmd == NULL)
		goto out;

	cmd->sess = sess;
	cmd->tgt = sess->tgt;
	cmd->tgtt = sess->tgt->tgtt;

	/*
	 * For both wrong lun and CDB defer the error reporting for
	 * scst_cmd_init_done()
	 */

	cmd->lun = scst_unpack_lun(lun, lun_len);

	if (cdb_len <= SCST_MAX_CDB_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;
	}

	TRACE_DBG("cmd %p, sess %p", cmd, sess);

out:
	TRACE_EXIT();
	return cmd;
}
EXPORT_SYMBOL(scst_rx_cmd);
/*
 * No locks, but might be in IRQ. Returns 0 on success, <0 if processing of
 * this command should be stopped.
 */
static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
{
	int rc, res = 0;

	TRACE_ENTRY();

	/* See the comment in scst_do_job_init() */
	if (unlikely(!list_empty(&scst_init_cmd_list))) {
		TRACE_MGMT_DBG("%s", "init cmd list busy");
		goto out_redirect;
	}
	/*
	 * Memory barrier isn't necessary here, because CPU appears to
	 * be self-consistent and we don't care about the race, described
	 * in comment in scst_do_job_init().
	 */

	rc = __scst_init_cmd(cmd);
	if (unlikely(rc > 0))
		goto out_redirect;
	else if (unlikely(rc != 0))
		goto out;

	/* Small context optimization */
	if (((*context == SCST_CONTEXT_TASKLET) ||
	     (*context == SCST_CONTEXT_DIRECT_ATOMIC) ||
	     ((*context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd))) &&
	    scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_data_direction & SCST_DATA_WRITE) {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		} else {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				*context = SCST_CONTEXT_THREAD;
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_redirect:
	if (cmd->preprocessing_only) {
		/*
		 * Poor man's solution for single-threaded targets, where
		 * blocking the receiver at least sometimes means blocking all.
		 */
		sBUG_ON(*context != SCST_CONTEXT_DIRECT);
		scst_set_busy(cmd);
		scst_set_cmd_abnormal_done_state(cmd);
		/* Keep initiator away from too many BUSY commands */
		msleep(50);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&scst_init_lock, flags);
		TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
			"%d)", cmd, atomic_read(&scst_cmd_count));
		list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
		if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			scst_init_poll_cnt++;
		spin_unlock_irqrestore(&scst_init_lock, flags);
		wake_up(&scst_init_cmd_list_waitQ);
		res = -1;
	}
	goto out;
}
#ifdef CONFIG_SCST_MEASURE_LATENCY
static inline uint64_t scst_sec_to_nsec(time_t sec)
{
	return (uint64_t)sec * 1000000000;
}
#endif
void scst_cmd_init_done(struct scst_cmd *cmd,
	enum scst_exec_context pref_context)
{
	unsigned long flags;
	struct scst_session *sess = cmd->sess;
	int rc;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, sess, cmd->start, ts.tv_sec,
			ts.tv_nsec);
	}
#endif

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
		"(cmd %p)", (long long unsigned int)cmd->tag,
		(long long unsigned int)cmd->lun, cmd->cdb_len,
		cmd->queue_type, cmd);
	PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);

#ifdef CONFIG_SCST_EXTRACHECKS
	if (unlikely((in_irq() || irqs_disabled())) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	atomic_inc(&sess->sess_cmd_count);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		/*
		 * We have to always keep the command in the search list from
		 * the very beginning, because otherwise it can be missed
		 * during TM processing. This check is needed because there
		 * might be old, i.e. deferred, commands and new, i.e. just
		 * coming, ones.
		 */
		if (cmd->search_cmd_list_entry.next == NULL)
			list_add_tail(&cmd->search_cmd_list_entry,
				&sess->search_cmd_list);
		switch (sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
			break;
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list",
				cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			goto out;
		case SCST_SESS_IPH_FAILED:
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			scst_set_busy(cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			goto active;
		default:
			sBUG();
		}
	} else
		list_add_tail(&cmd->search_cmd_list_entry,
			&sess->search_cmd_list);

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);

	if (unlikely(cmd->lun == NO_SUCH_LUN)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("%s", "Wrong CDB len, finishing cmd");
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
		PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_message));
		scst_set_cmd_abnormal_done_state(cmd);
		goto active;
	}

	cmd->state = SCST_CMD_STATE_INIT;
	/* cmd must be inited here to preserve the order */
	rc = scst_init_cmd(cmd, &pref_context);
	if (unlikely(rc < 0))
		goto out;

	if (unlikely(cmd->status != SAM_STAT_GOOD))
		scst_set_cmd_abnormal_done_state(cmd);

active:
	/* Here cmd must not be in any cmd list, no locks */
	switch (pref_context) {
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet(cmd);
		break;

	case SCST_CONTEXT_DIRECT:
		scst_process_active_cmd(cmd, false);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		/* For *NEED_THREAD wake_up() is already done */
		break;

	default:
		PRINT_ERROR("Context %x is undefined, using the thread one",
			pref_context);
		/* go through */
	case SCST_CONTEXT_THREAD:
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;
	}

out:
	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_cmd_init_done);
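/*
 * First, dev handler independent, parsing pass: decodes the CDB via
 * scst_get_cdb_info(), derives the data direction and transfer length,
 * and rejects NACA and linked commands, which SCST doesn't support.
 */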
static int scst_pre_parse(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_device *dev = cmd->dev;
	int rc;

	TRACE_ENTRY();

	cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
	     (!dev->has_own_order_mgmt &&
	      (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
	       cmd->queue_type == SCST_CMD_QUEUE_ORDERED));

	/*
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from CDB.
	 * Additionally, not all transports support supplying the expected
	 * transfer length.
	 */

	rc = scst_get_cdb_info(cmd);
	if (unlikely(rc != 0)) {
		if (rc > 0) {
			PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
			goto out_xmit;
		}
		PRINT_ERROR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_SCSI, "Using initiator supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;

			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore (possibly) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_opcode));
				goto out_xmit;
			}
		} else {
			PRINT_ERROR("Unknown opcode 0x%02x for %s and "
				"target %s not supplied expected values",
				cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_invalid_opcode));
			goto out_xmit;
		}
#else
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		goto out_xmit;
#endif
	}

	TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
		"(expected %d, set %s), transfer_len=%d (expected "
		"len %d), flags=%d", cmd->op_name, cmd,
		cmd->data_direction, cmd->expected_data_direction,
		scst_cmd_is_expected_set(cmd) ? "yes" : "no",
		cmd->bufflen, cmd->expected_transfer_len,
		cmd->op_flags);

	if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
		if (scst_cmd_is_expected_set(cmd)) {
			/*
			 * Command data length can't be easily
			 * determined from the CDB. ToDo, all such
			 * commands processing should be fixed. Until
			 * it's done, get the length from the supplied
			 * expected value, but limit it to some
			 * reasonable value (15MB).
			 */
			cmd->bufflen = min(cmd->expected_transfer_len,
					   15*1024*1024);
			cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
		}
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR("NACA bit in control byte CDB is not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_xmit;
	}

	cmd->state = SCST_CMD_STATE_DEV_PARSE;

out:
	TRACE_EXIT_HRES(res);
	return res;

out_xmit:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
{
	bool res = false;

	switch (cmd->cdb[0]) {
	case TEST_UNIT_READY:
		/* Crazy VMware people sometimes do TUR with READ direction */
		res = true;
		break;
	case VERIFY:
	case VERIFY_6:
	case VERIFY_12:
	case VERIFY_16:
		/* VERIFY commands with BYTCHK unset shouldn't fail here */
		if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
		    (cmd->cdb[1] & BYTCHK) == 0)
			res = true;
		break;
	}

	return res;
}
#endif
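/*
 * Second parsing pass: calls the dev handler's parse() callback and
 * validates the resulting state, data direction and buffer length against
 * the values the initiator said to expect.
 */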
static int scst_parse_cmd(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int state;
	struct scst_device *dev = cmd->dev;
	int orig_bufflen = cmd->bufflen;

	TRACE_ENTRY();

	if (likely(!scst_is_cmd_local(cmd))) {
		if (unlikely(!dev->handler->parse_atomic &&
			     scst_cmd_atomic(cmd))) {
			/*
			 * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
			 * optimization.
			 */
			TRACE_DBG("Dev handler %s parse() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
			cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd);
		/* Caution: cmd can be already dead here */
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

		switch (state) {
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			TRACE_DBG("Dev handler %s parse() requested thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;

		case SCST_CMD_STATE_STOP:
			TRACE_DBG("Dev handler %s parse() requested stop "
				"processing", dev->handler->name);
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			goto out;
		}

		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;
	} else
		state = SCST_CMD_STATE_PREPARE_SPACE;

	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;

	if (cmd->bufflen == 0) {
		/*
		 * According to SPC bufflen 0 for data transfer commands isn't
		 * an error, so we need to fix the transfer direction.
		 */
		cmd->data_direction = SCST_DATA_NONE;
	}

	if (cmd->dh_data_buf_alloced &&
	    unlikely((orig_bufflen > cmd->bufflen))) {
		PRINT_ERROR("Dev handler supplied data buffer (size %d) "
			"is less than required (size %d)", cmd->bufflen,
			orig_bufflen);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}

	if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
		goto out;

	if (unlikely((cmd->bufflen == 0) &&
		     (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
		PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
			"(handler %s, target %s)", cmd->cdb[0],
			dev->handler->name, cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((cmd->bufflen != 0) &&
	    ((cmd->data_direction == SCST_DATA_NONE) ||
	     ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
		PRINT_ERROR("Dev handler %s parse() returned "
			"invalid cmd data_direction %d, bufflen %d, state %d "
			"or sg %p (opcode 0x%x)", dev->handler->name,
			cmd->data_direction, cmd->bufflen, state, cmd->sg,
			cmd->cdb[0]);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}
#endif

	if (scst_cmd_is_expected_set(cmd)) {
#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
#	ifdef CONFIG_SCST_EXTRACHECKS
		if ((cmd->data_direction != cmd->expected_data_direction) ||
		    (cmd->bufflen != cmd->expected_transfer_len)) {
			PRINT_WARNING("Expected values don't match decoded "
				"ones: data_direction %d, "
				"expected_data_direction %d, "
				"bufflen %d, expected_transfer_len %d",
				cmd->data_direction,
				cmd->expected_data_direction,
				cmd->bufflen, cmd->expected_transfer_len);
			PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB", cmd->cdb, cmd->cdb_len);
		}
#	endif
		cmd->data_direction = cmd->expected_data_direction;
		cmd->bufflen = cmd->expected_transfer_len;
#else
		if (unlikely(cmd->data_direction !=
			     cmd->expected_data_direction)) {
			if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
			     (cmd->bufflen != 0)) &&
			    !scst_is_allowed_to_mismatch_cmd(cmd)) {
				PRINT_ERROR("Expected data direction %d for "
					"opcode 0x%02x (handler %s, target %s) "
					"doesn't match decoded value %d",
					cmd->expected_data_direction,
					cmd->cdb[0], dev->handler->name,
					cmd->tgtt->name, cmd->data_direction);
				PRINT_BUFFER("Failed CDB",
					cmd->cdb, cmd->cdb_len);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_message));
				goto out_done;
			}
		}
		if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
			TRACE(TRACE_MGMT_MINOR, "Warning: expected "
				"transfer length %d for opcode 0x%02x "
				"(handler %s, target %s) doesn't match "
				"decoded value %d. Faulty initiator "
				"(e.g. VMware is known to be such) or "
				"scst_scsi_op_table should be updated?",
				cmd->expected_transfer_len, cmd->cdb[0],
				dev->handler->name, cmd->tgtt->name,
				cmd->bufflen);
			PRINT_BUFF_FLAG(TRACE_MGMT_MINOR, "Suspicious CDB",
				cmd->cdb, cmd->cdb_len);
		}
#endif
	}

	if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
		PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
			"target %s", cmd->cdb[0], dev->handler->name,
			cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		goto out_hw_error;
	}

	switch (state) {
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_FINISHED_INTERNAL:
		cmd->state = state;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s parse() returned "
				"invalid cmd state %d (opcode %d)",
				dev->handler->name, state, cmd->cdb[0]);
		} else {
			PRINT_ERROR("Dev handler %s parse() returned "
				"error %d (opcode %d)", dev->handler->name,
				state, cmd->cdb[0]);
		}
		goto out_hw_error;
	}

	if (cmd->resp_data_len == -1) {
		if (cmd->data_direction & SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
		else
			cmd->resp_data_len = 0;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_hw_error:
	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
out_done:
#endif
	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
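/*
 * Allocates the command's data buffer, or adopts one already allocated by
 * the target driver and/or the dev handler, then routes the command either
 * to the data-out transfer (WRITE) or straight to execution.
 */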
static int scst_prepare_space(struct scst_cmd *cmd)
{
	int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;

	TRACE_ENTRY();

	if (cmd->data_direction == SCST_DATA_NONE)
		goto prep_done;

	if (cmd->tgt_need_alloc_data_buf) {
		int orig_bufflen = cmd->bufflen;

		TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
			cmd);

		r = cmd->tgtt->alloc_data_buf(cmd);
		if (r > 0)
			goto alloc;
		else if (r == 0) {
			if (unlikely(cmd->bufflen == 0)) {
				/* See comment in scst_alloc_space() */
				if (cmd->sg == NULL)
					goto alloc;
			}

			cmd->tgt_data_buf_alloced = 1;

			if (unlikely(orig_bufflen < cmd->bufflen)) {
				PRINT_ERROR("Target driver allocated data "
					"buffer (size %d) that is less than "
					"required (size %d)", orig_bufflen,
					cmd->bufflen);
				goto out_error;
			}
			TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
		} else
			goto check;
	}

alloc:
	if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		r = scst_alloc_space(cmd);
	} else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
		TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
		r = 0;
	} else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
		TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
		cmd->sg = cmd->tgt_sg;
		cmd->sg_cnt = cmd->tgt_sg_cnt;
		cmd->in_sg = cmd->tgt_in_sg;
		cmd->in_sg_cnt = cmd->tgt_in_sg_cnt;
		r = 0;
	} else {
		TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
			"sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
			cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
		r = 0;
	}

check:
	if (r != 0) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MEM("%s", "Atomic memory allocation failed, "
				"rescheduling to the thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		} else
			goto out_no_space;
	}

prep_done:
	if (cmd->preprocessing_only) {
		if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
			TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
				"cmd %p", cmd);
			scst_set_cmd_abnormal_done_state(cmd);
			res = SCST_CMD_STATE_RES_CONT_SAME;
			goto out;
		}

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;

		TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
		cmd->tgtt->preprocessing_done(cmd);
		TRACE_DBG("%s", "preprocessing_done() returned");
		goto out;
	}

	if (cmd->data_direction & SCST_DATA_WRITE)
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
	else
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;

out:
	TRACE_EXIT_HRES(res);
	return res;

out_no_space:
	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
	scst_set_busy(cmd);
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
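/*
 * To be called by the target driver when its preprocessing_done() callback
 * has finished; status is one of the SCST_PREPROCESS_STATUS_* constants.
 */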
void scst_restart_cmd(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE_DBG("tag=%llu, status=%#x",
		(long long unsigned int)scst_cmd_get_tag(cmd),
		status);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_PREPROCESS_STATUS_SUCCESS:
		if (cmd->data_direction & SCST_DATA_WRITE)
			cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
		else
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		if (cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (cmd->data_direction & SCST_DATA_WRITE) {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			} else {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
			}
		}
		break;

	case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_PREPROCESS_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_PREPROCESS_STATUS_ERROR:
		if (cmd->sense != NULL)
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("%s() received unknown status %x", __func__,
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_proccess_redirect_cmd(cmd, pref_context, 1);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_restart_cmd);
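/*
 * Called on SCST_TGT_RES_QUEUE_FULL. Returns 0 if the command was queued
 * on tgt->retry_cmd_list for a deferred retry, or -1 if at least one
 * command finished in the meantime and the caller should retry
 * immediately.
 */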
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
{
	struct scst_tgt *tgt = cmd->sess->tgt;
	int res = 0;
	unsigned long flags;

	TRACE_ENTRY();

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_cmds++;
	/*
	 * Memory barrier is needed here, because we need the exact order
	 * of the read and write of retry_cmds and finished_cmds to not miss
	 * the case when a command finished while we were queuing it for
	 * retry after the finished_cmds check.
	 */
	smp_mb();
	TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
		tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		tgt->retry_cmds--;
		TRACE_RETRY("Some command(s) finished, direct retry "
			"(finished_cmds=%d, tgt->finished_cmds=%d, "
			"retry_cmds=%d)", finished_cmds,
			atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
		res = -1;
		goto out_unlock_tgt;
	}

	TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	}

out_unlock_tgt:
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	TRACE_EXIT_RES(res);
	return res;
}
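/*
 * Starts the data-out (WRITE) transfer by calling the target driver's
 * rdy_to_xfer() callback, retrying on QUEUE FULL and rescheduling to a
 * thread when the driver can't work in atomic context.
 */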
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
{
	int res, rc;

	TRACE_ENTRY();

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_dev_done;
	}

	if ((cmd->tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	if (unlikely(!cmd->tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
		/*
		 * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
		 * optimization.
		 */
		TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	while (1) {
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_DATA_WAIT;

		TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
#ifdef CONFIG_SCST_DEBUG_RETRY
		if (((scst_random() % 100) == 75))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->rdy_to_xfer(cmd);
		TRACE_DBG("rdy_to_xfer() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				break;
			else
				continue;

		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s "
				"rdy_to_xfer() requested thread "
				"context, rescheduling", cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			break;

		default:
			goto out_error_rc;
		}
		break;
	}

out:
	TRACE_EXIT_HRES(res);
	return res;

out_error_rc:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

out_dev_done:
	scst_set_cmd_abnormal_done_state(cmd);
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
/* No locks, but might be in IRQ */
static void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
	enum scst_exec_context context, int check_retries)
{
	unsigned long flags;

	TRACE_ENTRY();

	TRACE_DBG("Context: %x", context);

	if (context == SCST_CONTEXT_SAME)
		context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
						 SCST_CONTEXT_DIRECT;

	switch (context) {
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, true);
		break;

	case SCST_CONTEXT_DIRECT:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		scst_process_active_cmd(cmd, false);
		break;

	default:
		PRINT_ERROR("Context %x is unknown, using the thread one",
			context);
		/* go through */
	case SCST_CONTEXT_THREAD:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
		break;

	case SCST_CONTEXT_TASKLET:
		if (check_retries)
			scst_check_retries(cmd->tgt);
		scst_schedule_tasklet(cmd);
		break;
	}

	TRACE_EXIT();
	return;
}
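/*
 * To be called by the target driver once the data-out transfer requested
 * by rdy_to_xfer() has finished, e.g. (sketch):
 *
 *	scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_DIRECT);
 */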
void scst_rx_data(struct scst_cmd *cmd, int status,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((in_irq() || irqs_disabled()) &&
	    ((pref_context == SCST_CONTEXT_DIRECT) ||
	     (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_THREAD instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_THREAD;
	}
#endif

	switch (status) {
	case SCST_RX_STATUS_SUCCESS:
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
		if (trace_flag & TRACE_RCV_BOT) {
			int i;
			struct scatterlist *sg;
			if (cmd->in_sg != NULL)
				sg = cmd->in_sg;
			else if (cmd->tgt_in_sg != NULL)
				sg = cmd->tgt_in_sg;
			else if (cmd->tgt_sg != NULL)
				sg = cmd->tgt_sg;
			else
				sg = cmd->sg;
			if (sg != NULL) {
				TRACE_RECV_BOT("RX data for cmd %p "
					"(sg_cnt %d, sg %p, sg[0].page %p)",
					cmd, cmd->tgt_sg_cnt, sg,
					(void *)sg_page(&sg[0]));
				for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
					PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
						sg_virt(&sg[i]), sg[i].length);
				}
			}
		}
#endif
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
		    ((pref_context == SCST_CONTEXT_SAME) &&
		     scst_cmd_atomic(cmd))) {
			if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				pref_context = SCST_CONTEXT_THREAD;
		}
		break;

	case SCST_RX_STATUS_ERROR_SENSE_SET:
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;

	default:
		PRINT_ERROR("scst_rx_data() received unknown status %x",
			status);
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	scst_proccess_redirect_cmd(cmd, pref_context, 1);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_rx_data);
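/*
 * Gives the target driver a last look at the command before execution via
 * its optional pre_exec() callback.
 */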
static int scst_tgt_pre_exec(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME, rc;

	TRACE_ENTRY();

	cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;

	if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
		goto out;

	TRACE_DBG("Calling pre_exec(%p)", cmd);
	rc = cmd->tgtt->pre_exec(cmd);
	TRACE_DBG("pre_exec() returned %d", rc);

	if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
		switch (rc) {
		case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_ERROR_FATAL:
			set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
			/* go through */
		case SCST_PREPROCESS_STATUS_ERROR:
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
			scst_set_cmd_abnormal_done_state(cmd);
			break;
		case SCST_PREPROCESS_STATUS_NEED_THREAD:
			TRACE_DBG("Target driver's %s pre_exec() requested "
				"thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
			break;
		default:
			sBUG();
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
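/*
 * Common completion path for commands executed by the SCSI mid-level:
 * unpacks the SCSI result word into the separate status bytes, adjusts
 * the response length for a residual and captures auto-sense, watching
 * for a possible double reset UA.
 */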
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int resid)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	cmd->status = result & 0xff;
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	if (unlikely(resid != 0)) {
#ifdef CONFIG_SCST_EXTRACHECKS
		if ((resid < 0) || (resid > cmd->resp_data_len)) {
			PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
				"op %x)", resid, cmd->resp_data_len,
				cmd->cdb[0]);
		} else
#endif
			scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
	}

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
		/* We might have double reset UA here */
		cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
		cmd->dbl_ua_orig_data_direction = cmd->data_direction;

		scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
	}

	TRACE(TRACE_SCSI, "cmd %p, result=%x, cmd->status=%x, resid=%d, "
		"cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x (cmd %p)", cmd, result, cmd->status, resid,
		cmd->msg_status, cmd->host_status, cmd->driver_status, cmd);

	TRACE_EXIT();
	return;
}
/* For small context optimization */
static inline enum scst_exec_context scst_optimize_post_exec_context(
	struct scst_cmd *cmd, enum scst_exec_context context)
{
	if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
	    (context == SCST_CONTEXT_TASKLET) ||
	    (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
		if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
				&cmd->tgt_dev->tgt_dev_flags))
			context = SCST_CONTEXT_THREAD;
	}
	return context;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
					    struct scsi_request **req)
{
	struct scst_cmd *cmd = NULL;

	if (scsi_cmd && (*req = scsi_cmd->sc_request))
		cmd = (struct scst_cmd *)(*req)->upper_private_data;

	if (cmd == NULL) {
		PRINT_ERROR("%s", "Request with NULL cmd");
		if (*req != NULL)
			scsi_release_request(*req);
	}

	return cmd;
}

static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;

	TRACE_ENTRY();

	cmd = scst_get_cmd(scsi_cmd, &req);
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), scsi_cmd->resid);

	/* Clear out request structure */
	req->sr_use_sg = 0;
	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown request blk */

	scst_release_request(cmd);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()),
		0);

out:
	TRACE_EXIT();
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_cmd_done(void *data, char *sense, int result, int resid)
{
	struct scst_cmd *cmd;

	TRACE_ENTRY();

	cmd = (struct scst_cmd *)data;
	if (cmd == NULL)
		goto out;

	scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);

out:
	TRACE_EXIT();
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
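/*
 * Completion callback for internally executed commands: moves the command
 * to next_state (SCST_CMD_STATE_PRE_DEV_DONE by default) and redirects its
 * processing into the preferred context.
 */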
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_PRE_DEV_DONE;

#if defined(CONFIG_SCST_DEBUG)
	if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
		if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
				"%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_RCV_TOP,
					"Exec'd sg", sg_virt(&sg[i]),
					sg[i].length);
			}
		}
	}
#endif

	cmd->state = next_state;

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED) &&
	    (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
		PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
			__func__, next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
	}
#endif

	pref_context = scst_optimize_post_exec_context(cmd, pref_context);
	scst_proccess_redirect_cmd(cmd, pref_context, 0);

	TRACE_EXIT();
	return;
}
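/*
 * Local (virtual) implementation of REPORT LUNS, built from the session's
 * tgt_dev hash. It also drops pending REPORTED LUNS DATA CHANGED UAs,
 * which are presumably no longer needed once the initiator has fetched
 * the current LUN list.
 */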
static int scst_report_luns_local(struct scst_cmd *cmd)
{
	int rc;
	int dev_cnt = 0;
	int buffer_size;
	int i;
	struct scst_tgt_dev *tgt_dev = NULL;
	uint8_t *buffer;
	int offs, overflow = 0;

	TRACE_ENTRY();

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	cmd->status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
		PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
			"LUNS command", cmd->cdb[2]);
		goto out_err;
	}

	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	if (buffer_size < 16)
		goto out_put_err;

	memset(buffer, 0, buffer_size);
	offs = 8;

	/* sess->sess_tgt_dev_list_hash is protected by suspended activity */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (!overflow) {
				if (offs >= buffer_size) {
					scst_put_buf(cmd, buffer);
					buffer_size = scst_get_buf_next(cmd,
						&buffer);
					if (buffer_size > 0) {
						memset(buffer, 0, buffer_size);
						offs = 0;
					} else
						overflow = 1;
				}
				if (!overflow) {
					if ((buffer_size - offs) < 8) {
						PRINT_ERROR("Buffer allocated "
							"for REPORT LUNS "
							"command doesn't allow "
							"fitting an 8 byte "
							"entry (buffer_size="
							"%d)", buffer_size);
						goto out_put_hw_err;
					}
					buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
					buffer[offs+1] = tgt_dev->lun & 0xff;
					offs += 8;
				}
			}
			dev_cnt++;
		}
	}
	scst_put_buf(cmd, buffer);

	/* Set the response header */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size == 0))
		goto out_compl;
	else if (unlikely(buffer_size < 0))
		goto out_hw_err;

	dev_cnt *= 8;
	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;

	scst_put_buf(cmd, buffer);

	dev_cnt += 8;
	if (dev_cnt < cmd->resp_data_len)
		scst_set_resp_data_len(cmd, dev_cnt);

out_compl:
	cmd->completed = 1;

	/* Clear left sense_reported_luns_data_changed UA, if any. */

	mutex_lock(&scst_mutex);	/* protect sess_tgt_dev_list_hash */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			struct scst_tgt_dev_UA *ua;

			spin_lock_bh(&tgt_dev->tgt_dev_lock);
			list_for_each_entry(ua, &tgt_dev->UA_list,
					UA_list_entry) {
				if (scst_analyze_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						SCST_SENSE_ALL_VALID,
						SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
					TRACE_MGMT_DBG("Freeing not needed "
						"REPORTED LUNS DATA CHANGED UA "
						"%p", ua);
					list_del(&ua->UA_list_entry);
					mempool_free(ua, scst_ua_mempool);
					break;
				}
			}
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}
	mutex_unlock(&scst_mutex);

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);

	TRACE_EXIT_RES(SCST_EXEC_COMPLETED);
	return SCST_EXEC_COMPLETED;

out_put_err:
	scst_put_buf(cmd, buffer);

out_err:
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	goto out_compl;

out_put_hw_err:
	scst_put_buf(cmd, buffer);

out_hw_err:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_compl;
}
static int scst_pre_select(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	scst_block_dev_cmd(cmd, 1);

	/* Check for local events will be done when cmd will be executed */

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_reserve_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%lld)", (long long unsigned int)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		goto out_done;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		spin_unlock_bh(&dev->dev_lock);
		scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
		goto out_done;
	}

	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
	}
	dev->dev_reserved = 1;

	spin_unlock_bh(&dev->dev_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_release_local(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;

	TRACE_ENTRY();

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
		goto out;
	}

	dev = cmd->dev;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))
		goto out_done;

	spin_lock_bh(&dev->dev_lock);

	/*
	 * The device could be RELEASED behind us, if the RESERVING session
	 * is closed (see scst_free_tgt_dev()), but this actually doesn't
	 * matter, so take the lock and don't retest the DEV_RESERVED bits.
	 */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;
		cmd->status = 0;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;
		cmd->completed = 1;
	} else {
		list_for_each_entry(tgt_dev_tmp,
				&dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
	}

	spin_unlock_bh(&dev->dev_lock);

	if (res == SCST_EXEC_COMPLETED)
		goto out_done;

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out;
}
/* No locks, no IRQ or IRQ-safe context allowed */
int scst_check_local_events(struct scst_cmd *cmd)
{
	int rc, res;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;

	TRACE_ENTRY();

	/*
	 * There's no race here, because we need to trace commands sent
	 * *after* dev_double_ua_possible flag was set.
	 */
	if (unlikely(dev->dev_double_ua_possible))
		cmd->double_ua_possible = 1;

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_uncomplete;
	}

	/* Reserve check before Unit Attention */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
			&tgt_dev->tgt_dev_flags))) {
		if (cmd->cdb[0] != INQUIRY &&
		    cmd->cdb[0] != REPORT_LUNS &&
		    cmd->cdb[0] != RELEASE &&
		    cmd->cdb[0] != RELEASE_10 &&
		    cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER &&
		    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL ||
		     (cmd->cdb[4] & 3)) &&
		    cmd->cdb[0] != LOG_SENSE &&
		    cmd->cdb[0] != REQUEST_SENSE) {
			scst_set_cmd_error_status(cmd,
				SAM_STAT_RESERVATION_CONFLICT);
			goto out_complete;
		}
	}

	/* If we had internal bus reset, set the command error unit attention */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(dev->scsi_dev->was_reset)) {
		if (scst_is_ua_command(cmd)) {
			int done = 0;
			/*
			 * Prevent more than one cmd from being triggered by
			 * was_reset.
			 */
			spin_lock_bh(&dev->dev_lock);
			if (dev->scsi_dev->was_reset) {
				TRACE(TRACE_MGMT, "was_reset is %d", 1);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_reset_UA));
				/*
				 * It looks like it is safe to clear was_reset
				 * here.
				 */
				dev->scsi_dev->was_reset = 0;
				done = 1;
			}
			spin_unlock_bh(&dev->dev_lock);

			if (done)
				goto out_complete;
		}
	}

	if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
			&cmd->tgt_dev->tgt_dev_flags))) {
		if (scst_is_ua_command(cmd)) {
			rc = scst_set_pending_UA(cmd);
			if (rc == 0)
				goto out_complete;
		}
	}

	res = 0;

out:
	TRACE_EXIT_RES(res);
	return res;

out_complete:
	res = 1;
	sBUG_ON(!cmd->completed);
	goto out;

out_uncomplete:
	res = -1;
	goto out;
}
EXPORT_SYMBOL(scst_check_local_events);
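/*
 * Advances the tgt_dev's expected SN once all commands in the current SN
 * slot have finished. Lockless on the fast path; sn_lock is taken only to
 * return a drained slot to the free pool.
 */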
void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
{
	if (slot == NULL)
		goto inc;

	/* Optimized for lockless fast path */

	TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
		atomic_read(slot));

	if (!atomic_dec_and_test(slot))
		goto out;

	TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
		tgt_dev->num_free_sn_slots);
	if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
		spin_lock_irq(&tgt_dev->sn_lock);
		if (likely(tgt_dev->num_free_sn_slots <
			   (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
			if (tgt_dev->num_free_sn_slots < 0)
				tgt_dev->cur_sn_slot = slot;
			/*
			 * To be in-sync with the SIMPLE case in
			 * scst_cmd_set_sn().
			 */
			smp_mb();
			tgt_dev->num_free_sn_slots++;
			TRACE_SN("Incremented num_free_sn_slots (%d)",
				tgt_dev->num_free_sn_slots);
		}
		spin_unlock_irq(&tgt_dev->sn_lock);
	}

inc:
	/*
	 * No protection of expected_sn is needed, because only one thread
	 * at a time can be here (serialized by sn). Also it is supposed
	 * that there could not be half-incremented values.
	 */
	tgt_dev->expected_sn++;
	/*
	 * Write must be before def_cmd_count read to be in sync with
	 * scst_post_exec_sn(). See comment in scst_send_for_exec().
	 */
	smp_mb();
	TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);

out:
	return;
}
static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
	bool make_active)
{
	/* For HQ commands SN is not set */
	bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
			       cmd->sn_set && !cmd->retry;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_cmd *res;

	TRACE_ENTRY();

	if (inc_expected_sn)
		scst_inc_expected_sn(tgt_dev, cmd->sn_slot);

	if (make_active) {
		scst_make_deferred_commands_active(tgt_dev);
		res = NULL;
	} else
		res = scst_check_deferred_commands(tgt_dev);

	TRACE_EXIT_HRES(res);
	return res;
}
/* cmd must be additionally referenced to not die inside */
static int scst_do_real_exec(struct scst_cmd *cmd)
{
	int res = SCST_EXEC_NOT_COMPLETED;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
	int rc;
#endif
	bool atomic = scst_cmd_atomic(cmd);
	struct scst_device *dev = cmd->dev;
	struct scst_dev_type *handler = dev->handler;
	struct io_context *old_ctx = NULL;
	bool ctx_changed = false;

	TRACE_ENTRY();

	if (!atomic)
		ctx_changed = scst_set_io_context(cmd, &old_ctx);

	cmd->state = SCST_CMD_STATE_REAL_EXECUTING;

	if (handler->exec) {
		if (unlikely(!dev->handler->exec_atomic && atomic)) {
			/*
			 * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
			 * optimization.
			 */
			TRACE_DBG("Dev handler %s exec() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		}

		TRACE_DBG("Calling dev handler %s exec(%p)",
			handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
			cmd->cdb_len);
		res = handler->exec(cmd);
		TRACE_DBG("Dev handler %s exec() returned %d",
			handler->name, res);

		if (res == SCST_EXEC_COMPLETED)
			goto out;
		else if (res == SCST_EXEC_NEED_THREAD)
			goto out_restore;

		sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
	}

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);

	if (unlikely(dev->scsi_dev == NULL)) {
		PRINT_ERROR("Command for virtual device must be "
			"processed by device handler (lun %lld)!",
			(long long unsigned int)cmd->lun);
		goto out_error;
	}

	res = scst_check_local_events(cmd);
	if (unlikely(res != 0))
		goto out_done;

#ifndef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
	if (unlikely(atomic)) {
		TRACE_DBG("Pass-through exec() cannot be called in atomic "
			"context, rescheduling to the thread (handler %s)",
			handler->name);
		res = SCST_EXEC_NEED_THREAD;
		goto out_restore;
	}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
	if (unlikely(scst_alloc_request(cmd) != 0)) {
		if (atomic) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_INFO("%s", "Unable to allocate request, "
				"sending BUSY status");
			goto out_busy;
		}
	}

	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
		cmd->retries);
#else
	rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
		cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
		cmd->timeout, cmd->retries, cmd, scst_cmd_done,
		atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(rc != 0)) {
		if (atomic) {
			res = SCST_EXEC_NEED_THREAD;
			goto out_restore;
		} else {
			PRINT_ERROR("scst_exec_req() failed: %d", rc);
			goto out_error;
		}
	}
#endif

	res = SCST_EXEC_COMPLETED;

out:
	if (ctx_changed)
		scst_reset_io_context(cmd->tgt_dev, old_ctx);

	TRACE_EXIT_RES(res);
	return res;

out_restore:
	/* Restore the state */
	cmd->state = SCST_CMD_STATE_REAL_EXEC;
	goto out;

out_error:
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	goto out_done;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
out_busy:
	scst_set_busy(cmd);
	/* go through */
#endif

out_done:
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	goto out;
}
static inline int scst_real_exec(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_real_exec(cmd);

	if (likely(res == SCST_EXEC_COMPLETED)) {
		scst_post_exec_sn(cmd, true);
		if (cmd->dev->scsi_dev != NULL)
			generic_unplug_device(
				cmd->dev->scsi_dev->request_queue);
	} else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */

	TRACE_EXIT_RES(res);
	return res;
}
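/*
 * Executes the commands SCST handles itself (MODE/LOG SELECT preparation,
 * RESERVE/RELEASE, REPORT LUNS) and enforces the read-only device check
 * for modifying CDBs.
 */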
static int scst_do_local_exec(struct scst_cmd *cmd)
{
	int res;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	TRACE_ENTRY();

	/* Check READ_ONLY device status */
	if (((tgt_dev->acg_dev->rd_only_flag) || cmd->dev->swp) &&
	    (cmd->cdb[0] == WRITE_6 ||	/* ToDo: full list of the modify cmds */
	     cmd->cdb[0] == WRITE_10 ||
	     cmd->cdb[0] == WRITE_12 ||
	     cmd->cdb[0] == WRITE_16 ||
	     cmd->cdb[0] == WRITE_VERIFY ||
	     cmd->cdb[0] == WRITE_VERIFY_12 ||
	     cmd->cdb[0] == WRITE_VERIFY_16 ||
	     (cmd->dev->handler->type == TYPE_TAPE &&
	      (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS)))) {
		PRINT_WARNING("Attempt to write to a read-only device: "
			"initiator %s, LUN %lld, op %x",
			cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));
		goto out_done;
	}

	/*
	 * When adding new commands here, don't forget to update
	 * scst_is_cmd_local() in scst.h, if necessary.
	 */

	switch (cmd->cdb[0]) {
	case MODE_SELECT:
	case MODE_SELECT_10:
	case LOG_SELECT:
		res = scst_pre_select(cmd);
		break;
	case RESERVE:
	case RESERVE_10:
		res = scst_reserve_local(cmd);
		break;
	case RELEASE:
	case RELEASE_10:
		res = scst_release_local(cmd);
		break;
	case REPORT_LUNS:
		res = scst_report_luns_local(cmd);
		break;
	default:
		res = SCST_EXEC_NOT_COMPLETED;
		break;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_done:
	/* Report the result */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
	res = SCST_EXEC_COMPLETED;
	goto out;
}
static int scst_local_exec(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
	BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);

	__scst_cmd_get(cmd);

	res = scst_do_local_exec(cmd);
	if (likely(res == SCST_EXEC_NOT_COMPLETED))
		cmd->state = SCST_CMD_STATE_REAL_EXEC;
	else if (res == SCST_EXEC_COMPLETED)
		scst_post_exec_sn(cmd, true);
	else
		sBUG_ON(res != SCST_EXEC_NEED_THREAD);

	__scst_cmd_put(cmd);

	/* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
	TRACE_EXIT_RES(res);
	return res;
}
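/*
 * The main execution step: serializes per-device execution via
 * scst_inc_on_dev_cmd(), runs the local and then the real (dev handler or
 * pass-through) exec paths, and loops over commands that
 * scst_post_exec_sn() reports as unblocked.
 */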
static int scst_exec(struct scst_cmd **active_cmd)
{
	struct scst_cmd *cmd = *active_cmd;
	struct scst_cmd *ref_cmd;
	struct scst_device *dev = cmd->dev;
	int res = SCST_CMD_STATE_RES_CONT_NEXT, count;

	TRACE_ENTRY();

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
		goto out;

	/* To protect tgt_dev */
	ref_cmd = cmd;
	__scst_cmd_get(ref_cmd);

	count = 0;
	while (1) {
		int rc;

		cmd->sent_for_exec = 1;
		/*
		 * To sync with scst_abort_cmd(). The above assignment must
		 * be before SCST_CMD_ABORTED test, done later in
		 * scst_check_local_events(). It's far from here, so the order
		 * is virtually guaranteed, but let's have it just in case.
		 */
		smp_mb();

		cmd->scst_cmd_done = scst_cmd_done_local;
		cmd->state = SCST_CMD_STATE_LOCAL_EXEC;

		if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
		    (cmd->data_direction & SCST_DATA_WRITE))
			scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);

		rc = scst_do_local_exec(cmd);
		if (likely(rc == SCST_EXEC_NOT_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("%s", "scst_do_local_exec() requested "
				"thread context, rescheduling");
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else {
			sBUG_ON(rc != SCST_EXEC_COMPLETED);
			goto done;
		}

		cmd->state = SCST_CMD_STATE_REAL_EXEC;

		rc = scst_do_real_exec(cmd);
		if (likely(rc == SCST_EXEC_COMPLETED))
			/* Nothing to do */;
		else if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("scst_real_exec() requested thread "
				"context, rescheduling (cmd %p)", cmd);
			scst_dec_on_dev_cmd(cmd);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out_put;
		} else
			sBUG();

done:
		count++;

		cmd = scst_post_exec_sn(cmd, false);
		if (cmd == NULL)
			break;

		if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
			break;

		__scst_cmd_put(ref_cmd);
		ref_cmd = cmd;
		__scst_cmd_get(ref_cmd);
	}

	*active_cmd = cmd;

	if (count == 0)
		goto out_put;

	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);

out_put:
	__scst_cmd_put(ref_cmd);
	/* !! At this point sess, dev and tgt_dev can be already freed !! */

out:
	TRACE_EXIT_RES(res);
	return res;
}
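/*
 * Decides whether the command may be executed now: internal and HEAD OF
 * QUEUE commands bypass SN ordering, everything else is deferred until its
 * SN matches the tgt_dev's expected_sn.
 */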
static int scst_send_for_exec(struct scst_cmd **active_cmd)
{
	int res;
	struct scst_cmd *cmd = *active_cmd;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	typeof(tgt_dev->expected_sn) expected_sn;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_MEASURE_LATENCY
	if (cmd->pre_exec_finish == 0) {
		struct timespec ts;
		getnstimeofday(&ts);
		cmd->pre_exec_finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): pre_exec_finish %lld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, cmd->sess, cmd->pre_exec_finish,
			ts.tv_sec, ts.tv_nsec);
	}
#endif

	if (unlikely(cmd->internal))
		goto exec;

	if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
		goto exec;

	sBUG_ON(!cmd->sn_set);

	expected_sn = tgt_dev->expected_sn;
	/* Optimized for lockless fast path */
	if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
		spin_lock_irq(&tgt_dev->sn_lock);

		tgt_dev->def_cmd_count++;
		/*
		 * Memory barrier is needed here to implement the lockless
		 * fast path. We need the exact order of the read and write
		 * between def_cmd_count and expected_sn. Otherwise, we can
		 * miss the case when expected_sn was changed to be equal to
		 * cmd->sn while we are queuing cmd on the deferred list
		 * after the expected_sn check below. It would lead to a
		 * forever stuck command. But with the barrier in such a
		 * case __scst_check_deferred_commands() will be called and
		 * it will take sn_lock, so we will be synchronized with it.
		 */
		smp_mb();

		expected_sn = tgt_dev->expected_sn;
		if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
			if (unlikely(test_bit(SCST_CMD_ABORTED,
					&cmd->cmd_flags))) {
				/* Necessary to allow aborting out of sn cmds */
				TRACE_MGMT_DBG("Aborting out of sn cmd %p "
					"(tag %llu, sn %lu)", cmd,
					(long long unsigned)cmd->tag, cmd->sn);
				tgt_dev->def_cmd_count--;
				scst_set_cmd_abnormal_done_state(cmd);
				res = SCST_CMD_STATE_RES_CONT_SAME;
			} else {
				TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
					"expected_sn=%ld)", cmd, cmd->sn,
					cmd->sn_set, expected_sn);
				list_add_tail(&cmd->sn_cmd_list_entry,
					&tgt_dev->deferred_cmd_list);
				res = SCST_CMD_STATE_RES_CONT_NEXT;
			}
			spin_unlock_irq(&tgt_dev->sn_lock);
			goto out;
		} else {
			TRACE_SN("Somebody incremented expected_sn %ld, "
				"continuing", expected_sn);
			tgt_dev->def_cmd_count--;
			spin_unlock_irq(&tgt_dev->sn_lock);
		}
	}

exec:
	res = scst_exec(active_cmd);

out:
	TRACE_EXIT_HRES(res);
	return res;
}

/* No locks supposed to be held */
static int scst_check_sense(struct scst_cmd *cmd)
{
	int res = 0;
	struct scst_device *dev = cmd->dev;

	TRACE_ENTRY();

	if (unlikely(cmd->ua_ignore))
		goto out;

	/* If we had internal bus reset behind us, set the command error UA */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(cmd->host_status == DID_RESET) &&
	    scst_is_ua_command(cmd)) {
		TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
			dev->scsi_dev->was_reset, cmd->host_status);
		scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
		/* It looks like it is safe to clear was_reset here */
		dev->scsi_dev->was_reset = 0;
	}

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    SCST_SENSE_VALID(cmd->sense)) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
			SCST_SENSE_BUFFERSIZE);

		/* Check Unit Attention Sense Key */
		if (scst_is_ua_sense(cmd->sense)) {
			if (scst_analyze_sense(cmd->sense,
					SCST_SENSE_BUFFERSIZE,
					SCST_SENSE_ASC_VALID,
					0, SCST_SENSE_ASC_UA_RESET, 0)) {
				if (cmd->double_ua_possible) {
					TRACE(TRACE_MGMT_MINOR, "Double UA "
						"detected for device %p", dev);
					TRACE(TRACE_MGMT_MINOR, "Retrying cmd"
						" %p (tag %llu)", cmd,
						(long long unsigned)cmd->tag);

					cmd->status = 0;
					cmd->msg_status = 0;
					cmd->host_status = DID_OK;
					cmd->driver_status = 0;

					mempool_free(cmd->sense,
						scst_sense_mempool);
					cmd->sense = NULL;

					scst_check_restore_sg_buff(cmd);

					sBUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
					cmd->data_direction =
						cmd->dbl_ua_orig_data_direction;
					cmd->resp_data_len =
						cmd->dbl_ua_orig_resp_data_len;

					cmd->state = SCST_CMD_STATE_REAL_EXEC;
					res = 1;
					goto out;
				}
			}
			scst_dev_check_set_UA(dev, cmd, cmd->sense,
				SCST_SENSE_BUFFERSIZE);
		}
	}

	if (unlikely(cmd->double_ua_possible)) {
		if (scst_is_ua_command(cmd)) {
			TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
				"cmd %p)", dev, cmd);
			/*
			 * Lock used to protect other flags in the bitfield
			 * (just in case, actually). Those flags can't be
			 * changed in parallel, because the device is
			 * blocked.
			 */
			spin_lock_bh(&dev->dev_lock);
			dev->dev_double_ua_possible = 0;
			spin_unlock_bh(&dev->dev_lock);
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int scst_check_auto_sense(struct scst_cmd *cmd)
{
	int res = 0;

	TRACE_ENTRY();

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    (!SCST_SENSE_VALID(cmd->sense) ||
	     SCST_NO_SENSE(cmd->sense))) {
		TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
			"cmd->status=%x, cmd->msg_status=%x, "
			"cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
			cmd->status, cmd->msg_status, cmd->host_status,
			cmd->driver_status, cmd);
		res = 1;
	} else if (unlikely(cmd->host_status)) {
		if ((cmd->host_status == DID_REQUEUE) ||
		    (cmd->host_status == DID_IMM_RETRY) ||
		    (cmd->host_status == DID_SOFT_ERROR) ||
		    (cmd->host_status == DID_ABORT)) {
			scst_set_busy(cmd);
		} else {
			TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
				"received, returning HARDWARE ERROR instead "
				"(cmd %p)", cmd->host_status, cmd);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		}
	}

	TRACE_EXIT_RES(res);
	return res;
}
2417 static int scst_pre_dev_done(struct scst_cmd *cmd)
2419 int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
2423 if (unlikely(scst_check_auto_sense(cmd))) {
2424 PRINT_INFO("Command finished with CHECK CONDITION, but "
2425 "without sense data (opcode 0x%x), issuing "
2426 "REQUEST SENSE", cmd->cdb[0]);
2427 rc = scst_prepare_request_sense(cmd);
2429 res = SCST_CMD_STATE_RES_CONT_NEXT;
2431 PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
2432 "returning HARDWARE ERROR");
2433 scst_set_cmd_error(cmd,
2434 SCST_LOAD_SENSE(scst_sense_hardw_error));
2437 } else if (unlikely(scst_check_sense(cmd)))
2440 if (likely(scsi_status_is_good(cmd->status))) {
2441 unsigned char type = cmd->dev->handler->type;
2442 if (unlikely((cmd->cdb[0] == MODE_SENSE ||
2443 cmd->cdb[0] == MODE_SENSE_10)) &&
2444 cmd->tgt_dev->acg_dev->rd_only_flag &&
2445 (type == TYPE_DISK ||
2446 type == TYPE_WORM ||
2448 type == TYPE_TAPE)) {
2453 length = scst_get_buf_first(cmd, &address);
2455 PRINT_ERROR("%s", "Unable to get "
2456 "MODE_SENSE buffer");
2457 scst_set_cmd_error(cmd,
2459 scst_sense_hardw_error));
2461 } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
2462 address[2] |= 0x80; /* Write Protect */
2463 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
2464 address[3] |= 0x80; /* Write Protect */
2465 scst_put_buf(cmd, address);
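/*
 * Worked layout note for the WP patching above (per the SPC mode
 * parameter header, as we read it): byte 2 of the 6-byte MODE SENSE
 * header and byte 3 of the 10-byte MODE SENSE(10) header hold the
 * device-specific parameter, whose bit 7 is WP, e.g.:
 *
 *   address[2] == 0x00  ->  address[2] |= 0x80  ->  0x80 (WP set)
 */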
2472 * Check and clear NormACA option for the device, if necessary,
2473 * since we don't support ACA
2475 if (unlikely((cmd->cdb[0] == INQUIRY)) &&
2476 /* Std INQUIRY data (no EVPD) */
2477 !(cmd->cdb[1] & SCST_INQ_EVPD) &&
2478 (cmd->resp_data_len > SCST_INQ_BYTE3)) {
2483 /* ToDo: all pages ?? */
2484 buflen = scst_get_buf_first(cmd, &buffer);
2485 if (buflen > SCST_INQ_BYTE3) {
2486 #ifdef CONFIG_SCST_EXTRACHECKS
2487 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2488 PRINT_INFO("NormACA set for device: "
2489 "lun=%lld, type 0x%02x. Clear it, "
2490 "since it's unsupported.",
2491 (long long unsigned int)cmd->lun,
2495 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2496 } else if (buflen != 0) {
2497 PRINT_ERROR("%s", "Unable to get INQUIRY "
2499 scst_set_cmd_error(cmd,
2500 SCST_LOAD_SENSE(scst_sense_hardw_error));
2504 scst_put_buf(cmd, buffer);
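/*
 * Worked example for the NormACA clearing above (assuming
 * SCST_INQ_NORMACA_BIT is the SPC NORMACA bit, bit 5 of standard
 * INQUIRY byte 3; values are illustrative):
 *
 *   buffer[SCST_INQ_BYTE3] == 0x32  ->  &= ~SCST_INQ_NORMACA_BIT  ->  0x12
 *
 * which advertises to the initiator that ACA is not supported.
 */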
2510 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2511 (cmd->cdb[0] == MODE_SELECT_10) ||
2512 (cmd->cdb[0] == LOG_SELECT))) {
2514 "MODE/LOG SELECT succeeded (LUN %lld)",
2515 (long long unsigned int)cmd->lun);
2516 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2520 if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
2521 if (!test_bit(SCST_TGT_DEV_RESERVED,
2522 &cmd->tgt_dev->tgt_dev_flags)) {
2523 struct scst_tgt_dev *tgt_dev_tmp;
2524 struct scst_device *dev = cmd->dev;
2527 "Real RESERVE failed lun=%lld, "
2529 (long long unsigned int)cmd->lun,
2531 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2532 SCST_SENSE_BUFFERSIZE);
2534 /* Clearing the reservation */
2535 spin_lock_bh(&dev->dev_lock);
2536 list_for_each_entry(tgt_dev_tmp,
2537 &dev->dev_tgt_dev_list,
2538 dev_tgt_dev_list_entry) {
2539 clear_bit(SCST_TGT_DEV_RESERVED,
2540 &tgt_dev_tmp->tgt_dev_flags);
2542 dev->dev_reserved = 0;
2543 spin_unlock_bh(&dev->dev_lock);
2547 /* Check for MODE PARAMETERS CHANGED UA */
2548 if ((cmd->dev->scsi_dev != NULL) &&
2549 (cmd->status == SAM_STAT_CHECK_CONDITION) &&
2550 SCST_SENSE_VALID(cmd->sense) &&
2551 scst_is_ua_sense(cmd->sense) &&
2552 scst_analyze_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
2553 SCST_SENSE_ASCx_VALID,
2555 TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
2556 "%lld)", (long long unsigned int)cmd->lun);
2557 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2562 cmd->state = SCST_CMD_STATE_DEV_DONE;
2565 TRACE_EXIT_RES(res);
2569 static int scst_mode_select_checks(struct scst_cmd *cmd)
2571 int res = SCST_CMD_STATE_RES_CONT_SAME;
2572 int atomic = scst_cmd_atomic(cmd);
2576 if (likely(scsi_status_is_good(cmd->status))) {
2577 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2578 (cmd->cdb[0] == MODE_SELECT_10) ||
2579 (cmd->cdb[0] == LOG_SELECT))) {
2580 struct scst_device *dev = cmd->dev;
2581 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
2583 if (atomic && (dev->scsi_dev != NULL)) {
2584 TRACE_DBG("%s", "MODE/LOG SELECT: thread "
2585 "context required");
2586 res = SCST_CMD_STATE_RES_NEED_THREAD;
2590 TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2591 "setting the SELECT UA (lun=%lld)",
2592 (long long unsigned int)cmd->lun);
2594 spin_lock_bh(&dev->dev_lock);
2595 if (cmd->cdb[0] == LOG_SELECT) {
2596 scst_set_sense(sense_buffer,
2597 sizeof(sense_buffer),
2599 UNIT_ATTENTION, 0x2a, 0x02);
2601 scst_set_sense(sense_buffer,
2602 sizeof(sense_buffer),
2604 UNIT_ATTENTION, 0x2a, 0x01);
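/*
 * Per SPC, ASC/ASCQ 0x2a/0x02 is "LOG PARAMETERS CHANGED" and
 * 0x2a/0x01 is "MODE PARAMETERS CHANGED", matching the LOG_SELECT
 * and MODE SELECT branches above.
 */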
2606 scst_dev_check_set_local_UA(dev, cmd, sense_buffer,
2607 sizeof(sense_buffer));
2608 spin_unlock_bh(&dev->dev_lock);
2610 if (dev->scsi_dev != NULL)
2611 scst_obtain_device_parameters(dev);
2613 } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
2614 SCST_SENSE_VALID(cmd->sense) &&
2615 scst_is_ua_sense(cmd->sense) &&
2616 /* mode parameters changed */
2617 (scst_analyze_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
2618 SCST_SENSE_ASCx_VALID,
2620 scst_analyze_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
2621 SCST_SENSE_ASC_VALID,
2622 0, 0x29, 0) /* reset */ ||
2623 scst_analyze_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
2624 SCST_SENSE_ASC_VALID,
2625 0, 0x28, 0) /* medium changed */ ||
2626 /* cleared by another initiator (just in case) */
2627 scst_analyze_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
2628 SCST_SENSE_ASC_VALID,
2631 TRACE_DBG("Possible parameters changed UA %x: "
2632 "thread context required", cmd->sense[12]);
2633 res = SCST_CMD_STATE_RES_NEED_THREAD;
2637 TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
2638 "(lun %lld): getting new parameters", cmd->sense[12],
2639 (long long unsigned int)cmd->lun);
2641 scst_obtain_device_parameters(cmd->dev);
2645 cmd->state = SCST_CMD_STATE_DEV_DONE;
2648 TRACE_EXIT_HRES(res);
2652 static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
2654 if (likely(cmd->sn_set))
2655 scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
2657 scst_make_deferred_commands_active(cmd->tgt_dev);
2660 static int scst_dev_done(struct scst_cmd *cmd)
2662 int res = SCST_CMD_STATE_RES_CONT_SAME;
2664 struct scst_device *dev = cmd->dev;
2668 state = SCST_CMD_STATE_PRE_XMIT_RESP;
2670 if (likely(!scst_is_cmd_local(cmd)) &&
2671 likely(dev->handler->dev_done != NULL)) {
2674 if (unlikely(!dev->handler->dev_done_atomic &&
2675 scst_cmd_atomic(cmd))) {
2677 * This shouldn't happen, thanks to the SCST_TGT_DEV_AFTER_* checks.
2680 TRACE_DBG("Dev handler %s dev_done() needs thread "
2681 "context, rescheduling", dev->handler->name);
2682 res = SCST_CMD_STATE_RES_NEED_THREAD;
2686 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2687 dev->handler->name, cmd);
2688 rc = dev->handler->dev_done(cmd);
2689 TRACE_DBG("Dev handler %s dev_done() returned %d",
2690 dev->handler->name, rc);
2691 if (rc != SCST_CMD_STATE_DEFAULT)
2696 case SCST_CMD_STATE_PRE_XMIT_RESP:
2697 case SCST_CMD_STATE_DEV_PARSE:
2698 case SCST_CMD_STATE_PRE_PARSE:
2699 case SCST_CMD_STATE_PREPARE_SPACE:
2700 case SCST_CMD_STATE_RDY_TO_XFER:
2701 case SCST_CMD_STATE_TGT_PRE_EXEC:
2702 case SCST_CMD_STATE_SEND_FOR_EXEC:
2703 case SCST_CMD_STATE_LOCAL_EXEC:
2704 case SCST_CMD_STATE_REAL_EXEC:
2705 case SCST_CMD_STATE_PRE_DEV_DONE:
2706 case SCST_CMD_STATE_MODE_SELECT_CHECKS:
2707 case SCST_CMD_STATE_DEV_DONE:
2708 case SCST_CMD_STATE_XMIT_RESP:
2709 case SCST_CMD_STATE_FINISHED:
2710 case SCST_CMD_STATE_FINISHED_INTERNAL:
2714 case SCST_CMD_STATE_NEED_THREAD_CTX:
2715 TRACE_DBG("Dev handler %s dev_done() requested "
2716 "thread context, rescheduling",
2717 dev->handler->name);
2718 res = SCST_CMD_STATE_RES_NEED_THREAD;
2723 PRINT_ERROR("Dev handler %s dev_done() returned "
2724 "invalid cmd state %d",
2725 dev->handler->name, state);
2727 PRINT_ERROR("Dev handler %s dev_done() returned "
2728 "error %d", dev->handler->name,
2731 scst_set_cmd_error(cmd,
2732 SCST_LOAD_SENSE(scst_sense_hardw_error));
2733 scst_set_cmd_abnormal_done_state(cmd);
2737 if (cmd->needs_unblocking)
2738 scst_unblock_dev_cmd(cmd);
2740 if (likely(cmd->dec_on_dev_needed))
2741 scst_dec_on_dev_cmd(cmd);
2743 if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
2744 scst_inc_check_expected_sn(cmd);
2746 if (unlikely(cmd->internal))
2747 cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
2750 TRACE_EXIT_HRES(res);
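/*
 * A minimal sketch of a dev handler dev_done() callback honoring the
 * contract enforced above (illustrative only; example_dev_done() is
 * a hypothetical name, real callbacks live in dev handler modules):
 * returning SCST_CMD_STATE_DEFAULT keeps the core's default next
 * state, while returning one of the valid SCST_CMD_STATE_* values
 * listed above overrides it.
 */
#if 0
static int example_dev_done(struct scst_cmd *cmd)
{
	/* Inspect or patch the command's result here */
	return SCST_CMD_STATE_DEFAULT;
}
#endif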
2754 static int scst_pre_xmit_response(struct scst_cmd *cmd)
2760 EXTRACHECKS_BUG_ON(cmd->internal);
2762 #ifdef CONFIG_SCST_DEBUG_TM
2763 if (cmd->tm_dbg_delayed &&
2764 !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2765 if (scst_cmd_atomic(cmd)) {
2766 TRACE_MGMT_DBG("%s",
2767 "DEBUG_TM delayed cmd needs a thread");
2768 res = SCST_CMD_STATE_RES_NEED_THREAD;
2771 TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
2773 schedule_timeout_uninterruptible(HZ);
2777 if (likely(cmd->tgt_dev != NULL)) {
2778 atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
2779 atomic_dec(&cmd->dev->dev_cmd_count);
2780 /* If expected values not set, expected direction is UNKNOWN */
2781 if (cmd->expected_data_direction & SCST_DATA_WRITE)
2782 atomic_dec(&cmd->dev->write_cmd_count);
2784 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2785 scst_on_hq_cmd_response(cmd);
2787 if (unlikely(!cmd->sent_for_exec)) {
2788 TRACE_SN("cmd %p was not sent to mid-level"
2789 " (sn %ld, set %d)",
2790 cmd, cmd->sn, cmd->sn_set);
2791 scst_unblock_deferred(cmd->tgt_dev, cmd);
2792 cmd->sent_for_exec = 1;
2797 * If we don't remove the cmd from the search list here, before
2798 * submitting it for transmission, we will have a race: if for
2799 * some reason the cmd's release is delayed after transmission and
2800 * the initiator sends a new cmd with the same tag, the find()
2801 * functions may return the wrong cmd.
2803 spin_lock_irq(&cmd->sess->sess_list_lock);
2804 list_del(&cmd->search_cmd_list_entry);
2805 spin_unlock_irq(&cmd->sess->sess_list_lock);
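/*
 * Timeline sketch of the race avoided above (illustrative):
 *   t0: response for tag X transmitted, release of the cmd delayed
 *   t1: initiator reuses tag X for a new command
 *   t2: __scst_find_cmd_by_tag(sess, X) would return the stale cmd
 *       if it were still on the search list
 */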
2808 smp_mb(); /* to sync with scst_abort_cmd() */
2810 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2811 scst_xmit_process_aborted_cmd(cmd);
2813 if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2814 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu),"
2816 cmd, (long long unsigned int)cmd->tag);
2817 cmd->state = SCST_CMD_STATE_FINISHED;
2818 res = SCST_CMD_STATE_RES_CONT_SAME;
2822 if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
2823 (cmd->data_direction & SCST_DATA_READ))
2824 scst_copy_sg(cmd, SCST_SG_COPY_TO_TARGET);
2826 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2827 res = SCST_CMD_STATE_RES_CONT_SAME;
2830 #ifdef CONFIG_SCST_MEASURE_LATENCY
2833 uint64_t finish, scst_time, proc_time;
2834 struct scst_session *sess = cmd->sess;
2836 getnstimeofday(&ts);
2837 finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
2839 spin_lock_bh(&sess->meas_lock);
2841 scst_time = cmd->pre_exec_finish - cmd->start;
2842 scst_time += finish - cmd->post_exec_start;
2843 proc_time = finish - cmd->start;
2845 sess->scst_time += scst_time;
2846 sess->processing_time += proc_time;
2847 sess->processed_cmds++;
2849 spin_unlock_bh(&sess->meas_lock);
2851 TRACE_DBG("cmd %p (sess %p): finish %lld (tv_sec %ld, "
2852 "tv_nsec %ld), scst_time %lld, proc_time %lld",
2853 cmd, sess, finish, ts.tv_sec, ts.tv_nsec, scst_time,
2857 TRACE_EXIT_HRES(res);
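/*
 * Worked example of the CONFIG_SCST_MEASURE_LATENCY arithmetic above
 * (illustrative numbers, in ns): with start = 0, pre_exec_finish =
 * 10, post_exec_start = 90 and finish = 100,
 *
 *   scst_time = (10 - 0) + (100 - 90) = 20
 *   proc_time = 100 - 0 = 100
 *
 * i.e. scst_time counts only the time spent inside SCST before and
 * after execution, and proc_time - scst_time (80 here) is the time
 * the command spent in the backend between the two.
 */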
2861 static int scst_xmit_response(struct scst_cmd *cmd)
2867 EXTRACHECKS_BUG_ON(cmd->internal);
2869 if (unlikely(!cmd->tgtt->xmit_response_atomic &&
2870 scst_cmd_atomic(cmd))) {
2872 * This shouldn't happen, thanks to the SCST_TGT_DEV_AFTER_* checks.
2875 TRACE_DBG("Target driver %s xmit_response() needs thread "
2876 "context, rescheduling", cmd->tgtt->name);
2877 res = SCST_CMD_STATE_RES_NEED_THREAD;
2882 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2884 res = SCST_CMD_STATE_RES_CONT_NEXT;
2885 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2887 TRACE_DBG("Calling xmit_response(%p)", cmd);
2889 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2890 if (trace_flag & TRACE_SND_BOT) {
2892 struct scatterlist *sg;
2893 if (cmd->tgt_sg != NULL)
2898 TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
2899 "(sg_cnt %d, sg %p, sg[0].page %p)",
2900 cmd, cmd->tgt_sg_cnt, sg,
2901 (void *)sg_page(&sg[0]));
2902 for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
2903 PRINT_BUFF_FLAG(TRACE_SND_BOT,
2904 "Xmitting sg", sg_virt(&sg[i]),
2911 #ifdef CONFIG_SCST_DEBUG_RETRY
2912 if ((scst_random() % 100) == 77)
2913 rc = SCST_TGT_RES_QUEUE_FULL;
2916 rc = cmd->tgtt->xmit_response(cmd);
2917 TRACE_DBG("xmit_response() returned %d", rc);
2919 if (likely(rc == SCST_TGT_RES_SUCCESS))
2922 /* Restore the previous state */
2923 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2926 case SCST_TGT_RES_QUEUE_FULL:
2927 if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2932 case SCST_TGT_RES_NEED_THREAD_CTX:
2933 TRACE_DBG("Target driver %s xmit_response() "
2934 "requested thread context, rescheduling",
2936 res = SCST_CMD_STATE_RES_NEED_THREAD;
2946 /* Caution: cmd can already be dead here */
2947 TRACE_EXIT_HRES(res);
2951 if (rc == SCST_TGT_RES_FATAL_ERROR) {
2952 PRINT_ERROR("Target driver %s xmit_response() returned "
2953 "fatal error", cmd->tgtt->name);
2955 PRINT_ERROR("Target driver %s xmit_response() returned "
2956 "invalid value %d", cmd->tgtt->name, rc);
2958 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2959 cmd->state = SCST_CMD_STATE_FINISHED;
2960 res = SCST_CMD_STATE_RES_CONT_SAME;
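/*
 * Note on the SCST_TGT_RES_QUEUE_FULL handling above (a sketch of
 * the contract as we read it): finished_cmds is sampled before
 * xmit_response() is called, so scst_queue_retry_cmd() can detect
 * whether any command finished in the meantime and, if so, retry
 * immediately instead of waiting for the next completion to kick
 * the retry machinery.
 */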
2964 void scst_tgt_cmd_done(struct scst_cmd *cmd,
2965 enum scst_exec_context pref_context)
2969 sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2971 cmd->state = SCST_CMD_STATE_FINISHED;
2972 scst_proccess_redirect_cmd(cmd, pref_context, 1);
2977 EXPORT_SYMBOL(scst_tgt_cmd_done);
2979 static int scst_finish_cmd(struct scst_cmd *cmd)
2985 atomic_dec(&cmd->sess->sess_cmd_count);
2988 smp_mb(); /* to sync with scst_abort_cmd() */
2990 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2991 TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
2992 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
2993 atomic_read(&scst_cmd_count));
2995 scst_finish_cmd_mgmt(cmd);
2998 if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
2999 if ((cmd->tgt_dev != NULL) &&
3000 scst_is_ua_sense(cmd->sense)) {
3001 /* This UA delivery failed, so requeue it */
3002 TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd "
3004 scst_check_set_UA(cmd->tgt_dev, cmd->sense,
3005 SCST_SENSE_BUFFERSIZE,
3006 SCST_SET_UA_FLAG_AT_HEAD);
3010 __scst_cmd_put(cmd);
3012 res = SCST_CMD_STATE_RES_CONT_NEXT;
3014 TRACE_EXIT_HRES(res);
3019 * No locks, but it must be externally serialized (see comment for
3020 * scst_cmd_init_done() in scst.h)
3022 static void scst_cmd_set_sn(struct scst_cmd *cmd)
3024 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3025 unsigned long flags;
3029 if (scst_is_implicit_hq(cmd)) {
3030 TRACE_SN("Implicit HQ cmd %p", cmd);
3031 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3034 EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
3036 /* Optimized for lockless fast path */
3038 scst_check_debug_sn(cmd);
3040 if (cmd->dev->queue_alg ==
3041 SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
3043 * Not the best way, but good enough until it becomes
3044 * possible to specify the queue type when submitting
3045 * pass-through commands.
3047 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3050 switch (cmd->queue_type) {
3051 case SCST_CMD_QUEUE_SIMPLE:
3052 case SCST_CMD_QUEUE_UNTAGGED:
3053 #if 0 /* left for future performance investigations */
3054 if (scst_cmd_is_expected_set(cmd)) {
3055 if ((cmd->expected_data_direction == SCST_DATA_READ) &&
3056 (atomic_read(&cmd->dev->write_cmd_count) == 0))
3061 if (likely(tgt_dev->num_free_sn_slots >= 0)) {
3063 * atomic_inc_return() implies a memory barrier to sync
3064 * with scst_inc_expected_sn()
3066 if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
3068 TRACE_SN("Incremented curr_sn %ld",
3071 cmd->sn_slot = tgt_dev->cur_sn_slot;
3072 cmd->sn = tgt_dev->curr_sn;
3074 tgt_dev->prev_cmd_ordered = 0;
3076 TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
3077 "%zd", ARRAY_SIZE(tgt_dev->sn_slots));
3082 case SCST_CMD_QUEUE_ORDERED:
3083 TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
3085 if (!tgt_dev->prev_cmd_ordered) {
3086 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3087 if (tgt_dev->num_free_sn_slots >= 0) {
3088 tgt_dev->num_free_sn_slots--;
3089 if (tgt_dev->num_free_sn_slots >= 0) {
3091 /* Commands can finish in any order, so
3092 * we don't know which slot is empty.
3095 tgt_dev->cur_sn_slot++;
3096 if (tgt_dev->cur_sn_slot ==
3097 tgt_dev->sn_slots + ARRAY_SIZE(tgt_dev->sn_slots))
3098 tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
3100 if (atomic_read(tgt_dev->cur_sn_slot) == 0)
3104 sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
3106 TRACE_SN("New cur SN slot %zd",
3107 tgt_dev->cur_sn_slot -
3111 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3113 tgt_dev->prev_cmd_ordered = 1;
3115 cmd->sn = tgt_dev->curr_sn;
3118 case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
3119 TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
3120 spin_lock_irqsave(&tgt_dev->sn_lock, flags);
3121 tgt_dev->hq_cmd_count++;
3122 spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
3123 cmd->hq_cmd_inced = 1;
3130 TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
3131 "num_free_sn_slots %d, prev_cmd_ordered %ld, "
3132 "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
3133 atomic_read(tgt_dev->cur_sn_slot),
3134 tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
3135 tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
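/*
 * Illustrative model of the SN slot scheme above (a sketch under our
 * reading of this code; the struct below is hypothetical and not
 * compiled, including the slot count). SIMPLE/UNTAGGED cmds share
 * the current slot, which counts in-flight cmds for the current SN;
 * an ORDERED cmd advances curr_sn and moves cur_sn_slot to a drained
 * slot, so it executes only after all older cmds have completed and
 * expected_sn has caught up (see scst_inc_check_expected_sn()).
 */
#if 0
struct example_sn_state {
	atomic_t slots[15];		/* per-SN in-flight counters */
	atomic_t *cur_slot;		/* slot counting the current SN */
	unsigned long curr_sn;		/* SN assigned to new cmds */
	unsigned long expected_sn;	/* lowest SN allowed to execute */
};
#endif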
3145 * Returns 0 on success, > 0 when we need to wait for unblock,
3146 * < 0 if there is no device (lun) or device type handler.
3148 * No locks, but might be on IRQ, protection is done by the
3149 * suspended activity.
3151 static int scst_translate_lun(struct scst_cmd *cmd)
3153 struct scst_tgt_dev *tgt_dev = NULL;
3158 /* See comment about smp_mb() in scst_suspend_activity() */
3161 if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
3162 struct list_head *sess_tgt_dev_list_head =
3163 &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
3164 TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
3165 (long long unsigned int)cmd->lun);
3167 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3168 sess_tgt_dev_list_entry) {
3169 if (tgt_dev->lun == cmd->lun) {
3170 TRACE_DBG("tgt_dev %p found", tgt_dev);
3172 if (unlikely(tgt_dev->dev->handler ==
3173 &scst_null_devtype)) {
3174 PRINT_INFO("Dev handler for device "
3175 "%lld is NULL, the device will not "
3176 "be visible remotely",
3177 (long long unsigned int)cmd->lun);
3181 cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
3182 cmd->tgt_dev = tgt_dev;
3183 cmd->dev = tgt_dev->dev;
3191 "tgt_dev for lun %lld not found, command to "
3193 (long long unsigned int)cmd->lun);
3197 TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
3202 TRACE_EXIT_RES(res);
3207 * No locks, but might be on IRQ
3209 * Returns 0 on success, > 0 when we need to wait for unblock,
3210 * < 0 if there is no device (lun) or device type handler.
3212 static int __scst_init_cmd(struct scst_cmd *cmd)
3218 res = scst_translate_lun(cmd);
3219 if (likely(res == 0)) {
3221 bool failure = false;
3223 cmd->state = SCST_CMD_STATE_PRE_PARSE;
3225 cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
3226 if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
3227 TRACE(TRACE_MGMT_MINOR,
3228 "Too many pending commands (%d) in "
3229 "session, returning BUSY to initiator \"%s\"",
3230 cnt, (cmd->sess->initiator_name[0] == '\0') ?
3231 "Anonymous" : cmd->sess->initiator_name);
3235 cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
3236 if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
3238 TRACE(TRACE_MGMT_MINOR,
3239 "Too many pending device "
3240 "commands (%d), returning BUSY to "
3241 "initiator \"%s\"", cnt,
3242 (cmd->sess->initiator_name[0] == '\0') ?
3244 cmd->sess->initiator_name);
3249 /* If expected values not set, expected direction is UNKNOWN */
3250 if (cmd->expected_data_direction & SCST_DATA_WRITE)
3251 atomic_inc(&cmd->dev->write_cmd_count);
3253 if (unlikely(failure))
3256 if (!cmd->set_sn_on_restart_cmd)
3257 scst_cmd_set_sn(cmd);
3258 } else if (res < 0) {
3259 TRACE_DBG("Finishing cmd %p", cmd);
3260 scst_set_cmd_error(cmd,
3261 SCST_LOAD_SENSE(scst_sense_lun_not_supported));
3262 scst_set_cmd_abnormal_done_state(cmd);
3267 TRACE_EXIT_RES(res);
3272 scst_set_cmd_abnormal_done_state(cmd);
3276 /* Called under scst_init_lock and IRQs disabled */
3277 static void scst_do_job_init(void)
3278 __releases(&scst_init_lock)
3279 __acquires(&scst_init_lock)
3281 struct scst_cmd *cmd;
3288 * There is no need for a read barrier here, because we don't care
3289 * exactly when this check is done.
3291 susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
3292 if (scst_init_poll_cnt > 0)
3293 scst_init_poll_cnt--;
3295 list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
3297 if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
3299 if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3300 spin_unlock_irq(&scst_init_lock);
3301 rc = __scst_init_cmd(cmd);
3302 spin_lock_irq(&scst_init_lock);
3304 TRACE_MGMT_DBG("%s",
3305 "FLAG SUSPENDED set, restarting");
3309 TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
3310 cmd, (long long unsigned int)cmd->tag);
3311 scst_set_cmd_abnormal_done_state(cmd);
3315 * Deleting the cmd from the init cmd list only after
3316 * __scst_init_cmd() is necessary to keep the check in
3317 * scst_init_cmd() correct and so preserve the commands' order.
3319 * There is a benign race: one command may see the init cmd list
3320 * as non-empty and queue itself on it, while another command at
3321 * the same time sees the list as empty and proceeds directly.
3322 * We don't care, because this can only affect commands from the
3323 * same initiator to the same tgt_dev, and scst_cmd_init_done*()
3324 * doesn't guarantee ordering for such simultaneous calls
3325 * anyway.
3327 TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
3328 smp_wmb(); /* enforce the required order */
3329 list_del(&cmd->cmd_list_entry);
3330 spin_unlock(&scst_init_lock);
3332 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3333 TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
3334 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3335 list_add(&cmd->cmd_list_entry,
3336 &cmd->cmd_lists->active_cmd_list);
3338 list_add_tail(&cmd->cmd_list_entry,
3339 &cmd->cmd_lists->active_cmd_list);
3340 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3341 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3343 spin_lock(&scst_init_lock);
3347 /* It isn't really needed, but let's keep it */
3348 if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3355 static inline int test_init_cmd_list(void)
3357 int res = (!list_empty(&scst_init_cmd_list) &&
3358 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3359 unlikely(kthread_should_stop()) ||
3360 (scst_init_poll_cnt > 0);
3364 int scst_init_thread(void *arg)
3368 PRINT_INFO("Init thread started, PID %d", current->pid);
3370 current->flags |= PF_NOFREEZE;
3372 set_user_nice(current, -10);
3374 spin_lock_irq(&scst_init_lock);
3375 while (!kthread_should_stop()) {
3377 init_waitqueue_entry(&wait, current);
3379 if (!test_init_cmd_list()) {
3380 add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
3383 set_current_state(TASK_INTERRUPTIBLE);
3384 if (test_init_cmd_list())
3386 spin_unlock_irq(&scst_init_lock);
3388 spin_lock_irq(&scst_init_lock);
3390 set_current_state(TASK_RUNNING);
3391 remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
3395 spin_unlock_irq(&scst_init_lock);
3398 * If kthread_should_stop() is true, we are guaranteed to be on
3399 * the module unload path, so scst_init_cmd_list must be empty.
3401 sBUG_ON(!list_empty(&scst_init_cmd_list));
3403 PRINT_INFO("Init thread PID %d finished", current->pid);
3409 /* Called with no locks held */
3410 void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic)
3416 EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
3418 cmd->atomic = atomic;
3420 TRACE_DBG("cmd %p, atomic %d", cmd, atomic);
3423 switch (cmd->state) {
3424 case SCST_CMD_STATE_PRE_PARSE:
3425 res = scst_pre_parse(cmd);
3426 EXTRACHECKS_BUG_ON(res ==
3427 SCST_CMD_STATE_RES_NEED_THREAD);
3430 case SCST_CMD_STATE_DEV_PARSE:
3431 res = scst_parse_cmd(cmd);
3434 case SCST_CMD_STATE_PREPARE_SPACE:
3435 res = scst_prepare_space(cmd);
3438 case SCST_CMD_STATE_RDY_TO_XFER:
3439 res = scst_rdy_to_xfer(cmd);
3442 case SCST_CMD_STATE_TGT_PRE_EXEC:
3443 res = scst_tgt_pre_exec(cmd);
3446 case SCST_CMD_STATE_SEND_FOR_EXEC:
3447 if (tm_dbg_check_cmd(cmd) != 0) {
3448 res = SCST_CMD_STATE_RES_CONT_NEXT;
3449 TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
3450 "because of TM DBG delay", cmd,
3451 (long long unsigned int)cmd->tag);
3454 res = scst_send_for_exec(&cmd);
3456 * !! At this point cmd, sess & tgt_dev can already be freed !!
3461 case SCST_CMD_STATE_LOCAL_EXEC:
3462 res = scst_local_exec(cmd);
3464 * !! At this point cmd, sess & tgt_dev can already be freed !!
3469 case SCST_CMD_STATE_REAL_EXEC:
3470 res = scst_real_exec(cmd);
3472 * !! At this point cmd, sess & tgt_dev can already be freed !!