4 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2008 CMS Distribution Limited
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/smp_lock.h>
27 #include <linux/unistd.h>
28 #include <linux/string.h>
29 #include <linux/kthread.h>
30 #include <linux/delay.h>
33 #include "scst_priv.h"
35 static void scst_cmd_set_sn(struct scst_cmd *cmd);
36 static int __scst_init_cmd(struct scst_cmd *cmd);
37 static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
39 static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
41 struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
44 spin_lock_irqsave(&t->tasklet_lock, flags);
45 TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
47 list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
48 spin_unlock_irqrestore(&t->tasklet_lock, flags);
50 tasklet_schedule(&t->tasklet);
54 * Must not be called in parallel with scst_unregister_session_ex() for the
57 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
58 const uint8_t *lun, int lun_len,
59 const uint8_t *cdb, int cdb_len, int atomic)
65 #ifdef CONFIG_SCST_EXTRACHECKS
66 if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
67 PRINT_CRIT_ERROR("%s",
68 "New cmd while shutting down the session");
73 cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
79 cmd->tgtt = sess->tgt->tgtt;
82 * For both a wrong LUN and a wrong CDB, defer the error reporting to
83 * scst_cmd_init_done()
86 cmd->lun = scst_unpack_lun(lun, lun_len);
88 if (cdb_len <= SCST_MAX_CDB_SIZE) {
89 memcpy(cmd->cdb, cdb, cdb_len);
90 cmd->cdb_len = cdb_len;
93 TRACE_DBG("cmd %p, sess %p", cmd, sess);
100 EXPORT_SYMBOL(scst_rx_cmd);
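/*
 * Example (a minimal sketch, not code from this file): a target driver
 * typically pairs scst_rx_cmd() with scst_cmd_init_done() when a new
 * command arrives from the wire. my_queuecommand() is a hypothetical
 * entry point; scst_cmd_set_tag() is assumed to be the scst.h helper.
 *
 *	static void my_queuecommand(struct scst_session *sess,
 *		const uint8_t *lun, int lun_len,
 *		const uint8_t *cdb, int cdb_len, uint64_t tag)
 *	{
 *		struct scst_cmd *cmd;
 *
 *		cmd = scst_rx_cmd(sess, lun, lun_len, cdb, cdb_len,
 *				  in_interrupt());
 *		if (cmd == NULL)
 *			return;		(out of memory: fail the request)
 *		scst_cmd_set_tag(cmd, tag);
 *		scst_cmd_init_done(cmd, SCST_CONTEXT_DIRECT);
 *	}
 */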
103 * No locks, but might be on IRQ. Returns 0 on success, <0 if processing of
104 * this command should be stopped.
106 static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
112 /* See the comment in scst_do_job_init() */
113 if (unlikely(!list_empty(&scst_init_cmd_list))) {
114 TRACE_MGMT_DBG("%s", "init cmd list busy");
118 * Memory barrier isn't necessary here, because CPU appears to
122 rc = __scst_init_cmd(cmd);
123 if (unlikely(rc > 0))
125 else if (unlikely(rc != 0))
128 /* Small context optimization */
129 if (((*context == SCST_CONTEXT_TASKLET) ||
130 (*context == SCST_CONTEXT_DIRECT_ATOMIC) ||
131 ((*context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd))) &&
132 scst_cmd_is_expected_set(cmd)) {
133 if (cmd->expected_data_direction == SCST_DATA_WRITE) {
134 if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
135 &cmd->tgt_dev->tgt_dev_flags))
136 *context = SCST_CONTEXT_THREAD;
138 if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
139 &cmd->tgt_dev->tgt_dev_flags))
140 *context = SCST_CONTEXT_THREAD;
149 if (cmd->preprocessing_only) {
151 * Poor man's solution for single-threaded targets, where
152 * blocking the receiver at least sometimes means blocking everything.
154 sBUG_ON(*context != SCST_CONTEXT_DIRECT);
156 scst_set_cmd_abnormal_done_state(cmd);
157 /* Keep initiator away from too many BUSY commands */
161 spin_lock_irqsave(&scst_init_lock, flags);
162 TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
163 "%d)", cmd, atomic_read(&scst_cmd_count));
164 list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
165 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
166 scst_init_poll_cnt++;
167 spin_unlock_irqrestore(&scst_init_lock, flags);
168 wake_up(&scst_init_cmd_list_waitQ);
174 #ifdef CONFIG_SCST_MEASURE_LATENCY
175 static inline uint64_t scst_sec_to_nsec(time_t sec)
177 return (uint64_t)sec * 1000000000;
181 void scst_cmd_init_done(struct scst_cmd *cmd,
182 enum scst_exec_context pref_context)
185 struct scst_session *sess = cmd->sess;
190 #ifdef CONFIG_SCST_MEASURE_LATENCY
194 cmd->start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
195 TRACE_DBG("cmd %p (sess %p): start %lld (tv_sec %ld, "
196 "tv_nsec %ld)", cmd, sess, cmd->start, ts.tv_sec,
201 TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
202 TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
203 "(cmd %p)", (long long unsigned int)cmd->tag,
204 (long long unsigned int)cmd->lun, cmd->cdb_len,
205 cmd->queue_type, cmd);
206 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
207 cmd->cdb, cmd->cdb_len);
209 #ifdef CONFIG_SCST_EXTRACHECKS
210 if (unlikely((in_irq() || irqs_disabled())) &&
211 ((pref_context == SCST_CONTEXT_DIRECT) ||
212 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
213 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
214 "SCST_CONTEXT_THREAD instead\n", pref_context,
216 pref_context = SCST_CONTEXT_THREAD;
220 atomic_inc(&sess->sess_cmd_count);
222 spin_lock_irqsave(&sess->sess_list_lock, flags);
224 if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
226 * We must always keep the command in the search list from the
227 * very beginning, because otherwise it can be missed during
228 * TM processing. This check is needed because there might be
229 * both old (i.e. deferred) commands and new (i.e. just arriving) ones.
231 if (cmd->search_cmd_list_entry.next == NULL)
232 list_add_tail(&cmd->search_cmd_list_entry,
233 &sess->search_cmd_list);
234 switch (sess->init_phase) {
235 case SCST_SESS_IPH_SUCCESS:
237 case SCST_SESS_IPH_INITING:
238 TRACE_DBG("Adding cmd %p to init deferred cmd list",
240 list_add_tail(&cmd->cmd_list_entry,
241 &sess->init_deferred_cmd_list);
242 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
244 case SCST_SESS_IPH_FAILED:
245 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
247 scst_set_cmd_abnormal_done_state(cmd);
253 list_add_tail(&cmd->search_cmd_list_entry,
254 &sess->search_cmd_list);
256 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
258 if (unlikely(cmd->lun == NO_SUCH_LUN)) {
259 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
260 scst_set_cmd_error(cmd,
261 SCST_LOAD_SENSE(scst_sense_lun_not_supported));
262 scst_set_cmd_abnormal_done_state(cmd);
266 if (unlikely(cmd->cdb_len == 0)) {
267 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
268 scst_set_cmd_error(cmd,
269 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
270 scst_set_cmd_abnormal_done_state(cmd);
274 if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
275 PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
276 scst_set_cmd_error(cmd,
277 SCST_LOAD_SENSE(scst_sense_invalid_message));
278 scst_set_cmd_abnormal_done_state(cmd);
282 cmd->state = SCST_CMD_STATE_INIT;
283 /* cmd must be initialized here to preserve the ordering */
284 rc = scst_init_cmd(cmd, &pref_context);
285 if (unlikely(rc < 0))
289 /* Here cmd must not be in any cmd list, no locks */
290 switch (pref_context) {
291 case SCST_CONTEXT_TASKLET:
292 scst_schedule_tasklet(cmd);
295 case SCST_CONTEXT_DIRECT:
296 scst_process_active_cmd(cmd, false);
297 /* For *NEED_THREAD wake_up() is already done */
300 case SCST_CONTEXT_DIRECT_ATOMIC:
301 scst_process_active_cmd(cmd, true);
302 /* For *NEED_THREAD wake_up() is already done */
306 PRINT_ERROR("Context %x is undefined, using the thread one",
309 case SCST_CONTEXT_THREAD:
310 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
311 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
312 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
313 list_add(&cmd->cmd_list_entry,
314 &cmd->cmd_lists->active_cmd_list);
316 list_add_tail(&cmd->cmd_list_entry,
317 &cmd->cmd_lists->active_cmd_list);
318 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
319 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
327 EXPORT_SYMBOL(scst_cmd_init_done);
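/*
 * A sketch of how pref_context is commonly chosen by a caller, derived
 * from the checks above (an assumption, not a verbatim driver): process
 * directly when sleeping is allowed, otherwise fall back to an atomic
 * or thread context.
 *
 *	enum scst_exec_context ctx;
 *
 *	if (in_irq() || irqs_disabled())
 *		ctx = SCST_CONTEXT_THREAD;
 *	else if (in_softirq() || in_atomic())
 *		ctx = SCST_CONTEXT_DIRECT_ATOMIC;
 *	else
 *		ctx = SCST_CONTEXT_DIRECT;
 *	scst_cmd_init_done(cmd, ctx);
 */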
329 static int scst_pre_parse(struct scst_cmd *cmd)
331 int res = SCST_CMD_STATE_RES_CONT_SAME;
332 struct scst_device *dev = cmd->dev;
337 cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
338 (!dev->has_own_order_mgmt &&
339 (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
340 cmd->queue_type == SCST_CMD_QUEUE_ORDERED));
343 * Expected transfer data supplied by the SCSI transport via the
344 * target driver are untrusted, so we prefer to fetch them from the CDB.
345 * Additionally, not all transports support supplying the expected
349 rc = scst_get_cdb_info(cmd);
350 if (unlikely(rc != 0)) {
352 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
355 PRINT_ERROR("Unknown opcode 0x%02x for %s. "
356 "Should you update scst_scsi_op_table?",
357 cmd->cdb[0], dev->handler->name);
358 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
359 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
360 if (scst_cmd_is_expected_set(cmd)) {
361 TRACE(TRACE_SCSI, "Using initiator supplied values: "
362 "direction %d, transfer_len %d",
363 cmd->expected_data_direction,
364 cmd->expected_transfer_len);
365 cmd->data_direction = cmd->expected_data_direction;
367 cmd->bufflen = cmd->expected_transfer_len;
368 /* Restore (likely) lost CDB length */
369 cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
370 if (cmd->cdb_len == -1) {
371 PRINT_ERROR("Unable to get CDB length for "
372 "opcode 0x%02x. Returning INVALID "
373 "OPCODE", cmd->cdb[0]);
374 scst_set_cmd_error(cmd,
375 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
379 PRINT_ERROR("Unknown opcode 0x%02x for %s and "
380 "target %s not supplied expected values",
381 cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
382 scst_set_cmd_error(cmd,
383 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
387 scst_set_cmd_error(cmd,
388 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
392 TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
393 "(expected %d, set %s), transfer_len=%d (expected "
394 "len %d), flags=%d", cmd->op_name, cmd,
395 cmd->data_direction, cmd->expected_data_direction,
396 scst_cmd_is_expected_set(cmd) ? "yes" : "no",
397 cmd->bufflen, cmd->expected_transfer_len,
400 if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
401 if (scst_cmd_is_expected_set(cmd)) {
403 * The command's data length can't be easily
404 * determined from the CDB. ToDo: processing of
405 * all such commands should be fixed. Until
406 * that's done, take the length from the supplied
407 * expected value, but limit it to some
408 * reasonable value (15MB).
410 cmd->bufflen = min(cmd->expected_transfer_len,
412 cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
418 if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
419 PRINT_ERROR("NACA bit in control byte CDB is not supported "
420 "(opcode 0x%02x)", cmd->cdb[0]);
421 scst_set_cmd_error(cmd,
422 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
426 if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
427 PRINT_ERROR("Linked commands are not supported "
428 "(opcode 0x%02x)", cmd->cdb[0]);
429 scst_set_cmd_error(cmd,
430 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
434 cmd->state = SCST_CMD_STATE_DEV_PARSE;
441 scst_set_cmd_abnormal_done_state(cmd);
442 res = SCST_CMD_STATE_RES_CONT_SAME;
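/*
 * For reference, a sketch of the contract assumed from
 * scst_get_cdb_info() above: it looks the opcode up in
 * scst_scsi_op_table and, on success, fills in the decoded properties
 * consumed later in scst_pre_parse(), roughly:
 *
 *	cmd->op_name		operation name, used for logging
 *	cmd->data_direction	e.g. SCST_DATA_READ or SCST_DATA_WRITE
 *	cmd->bufflen		transfer length decoded from the CDB
 *	cmd->op_flags		e.g. SCST_UNKNOWN_LENGTH
 */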
446 #ifndef CONFIG_SCST_USE_EXPECTED_VALUES
447 static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
451 switch (cmd->cdb[0]) {
452 case TEST_UNIT_READY:
453 /* Crazy VMware people sometimes do TUR with READ direction */
460 /* VERIFY commands with BYTCHK unset shouldn't fail here */
461 if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
462 (cmd->cdb[1] & BYTCHK) == 0)
471 static int scst_parse_cmd(struct scst_cmd *cmd)
473 int res = SCST_CMD_STATE_RES_CONT_SAME;
475 struct scst_device *dev = cmd->dev;
476 int orig_bufflen = cmd->bufflen;
480 if (likely(!scst_is_cmd_local(cmd))) {
481 if (unlikely(!dev->handler->parse_atomic &&
482 scst_cmd_atomic(cmd))) {
484 * It shouldn't be because of SCST_TGT_DEV_AFTER_*
487 TRACE_DBG("Dev handler %s parse() needs thread "
488 "context, rescheduling", dev->handler->name);
489 res = SCST_CMD_STATE_RES_NEED_THREAD;
493 TRACE_DBG("Calling dev handler %s parse(%p)",
494 dev->handler->name, cmd);
495 TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
496 cmd->cdb, cmd->cdb_len);
497 state = dev->handler->parse(cmd);
498 /* Caution: cmd can already be dead here */
499 TRACE_DBG("Dev handler %s parse() returned %d",
500 dev->handler->name, state);
503 case SCST_CMD_STATE_NEED_THREAD_CTX:
504 TRACE_DBG("Dev handler %s parse() requested thread "
505 "context, rescheduling", dev->handler->name);
506 res = SCST_CMD_STATE_RES_NEED_THREAD;
509 case SCST_CMD_STATE_STOP:
510 TRACE_DBG("Dev handler %s parse() requested stop "
511 "processing", dev->handler->name);
512 res = SCST_CMD_STATE_RES_CONT_NEXT;
516 if (state == SCST_CMD_STATE_DEFAULT)
517 state = SCST_CMD_STATE_PREPARE_SPACE;
519 state = SCST_CMD_STATE_PREPARE_SPACE;
521 if (cmd->data_len == -1)
522 cmd->data_len = cmd->bufflen;
524 if (cmd->dh_data_buf_alloced &&
525 unlikely((orig_bufflen > cmd->bufflen))) {
526 PRINT_ERROR("Dev handler supplied data buffer (size %d), "
527 "is less, than required (size %d)", cmd->bufflen,
529 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
533 if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
536 if (unlikely((cmd->bufflen == 0) &&
537 (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
538 PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
539 "(handler %s, target %s)", cmd->cdb[0],
540 dev->handler->name, cmd->tgtt->name);
541 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
545 #ifdef CONFIG_SCST_EXTRACHECKS
546 if ((cmd->bufflen != 0) &&
547 ((cmd->data_direction == SCST_DATA_NONE) ||
548 ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
549 PRINT_ERROR("Dev handler %s parse() returned "
550 "invalid cmd data_direction %d, bufflen %d, state %d "
551 "or sg %p (opcode 0x%x)", dev->handler->name,
552 cmd->data_direction, cmd->bufflen, state, cmd->sg,
554 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
559 if (scst_cmd_is_expected_set(cmd)) {
560 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
561 # ifdef CONFIG_SCST_EXTRACHECKS
562 if ((cmd->data_direction != cmd->expected_data_direction) ||
563 (cmd->bufflen != cmd->expected_transfer_len)) {
564 PRINT_WARNING("Expected values don't match decoded "
565 "ones: data_direction %d, "
566 "expected_data_direction %d, "
567 "bufflen %d, expected_transfer_len %d",
569 cmd->expected_data_direction,
570 cmd->bufflen, cmd->expected_transfer_len);
571 PRINT_BUFFER("Suspicious CDB", cmd->cdb, cmd->cdb_len);
574 cmd->data_direction = cmd->expected_data_direction;
575 cmd->bufflen = cmd->expected_transfer_len;
577 if (unlikely(cmd->data_direction !=
578 cmd->expected_data_direction)) {
579 if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
580 (cmd->bufflen != 0)) &&
581 !scst_is_allowed_to_mismatch_cmd(cmd)) {
582 PRINT_ERROR("Expected data direction %d for "
583 "opcode 0x%02x (handler %s, target %s) "
586 cmd->expected_data_direction,
587 cmd->cdb[0], dev->handler->name,
588 cmd->tgtt->name, cmd->data_direction);
589 PRINT_BUFFER("Failed CDB",
590 cmd->cdb, cmd->cdb_len);
591 scst_set_cmd_error(cmd,
592 SCST_LOAD_SENSE(scst_sense_invalid_message));
596 if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
597 TRACE(TRACE_MGMT_MINOR, "Warning: expected "
598 "transfer length %d for opcode 0x%02x "
599 "(handler %s, target %s) doesn't match "
600 "decoded value %d. Faulty initiator "
601 "(e.g. VMware is known to be such) or "
602 "scst_scsi_op_table should be updated?",
603 cmd->expected_transfer_len, cmd->cdb[0],
604 dev->handler->name, cmd->tgtt->name,
606 PRINT_BUFF_FLAG(TRACE_MGMT_MINOR, "Suspicious CDB",
607 cmd->cdb, cmd->cdb_len);
612 if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
613 PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
614 "target %s", cmd->cdb[0], dev->handler->name,
616 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
622 case SCST_CMD_STATE_PREPARE_SPACE:
623 case SCST_CMD_STATE_PRE_PARSE:
624 case SCST_CMD_STATE_DEV_PARSE:
625 case SCST_CMD_STATE_RDY_TO_XFER:
626 case SCST_CMD_STATE_TGT_PRE_EXEC:
627 case SCST_CMD_STATE_SEND_FOR_EXEC:
628 case SCST_CMD_STATE_LOCAL_EXEC:
629 case SCST_CMD_STATE_REAL_EXEC:
630 case SCST_CMD_STATE_PRE_DEV_DONE:
631 case SCST_CMD_STATE_DEV_DONE:
632 case SCST_CMD_STATE_PRE_XMIT_RESP:
633 case SCST_CMD_STATE_XMIT_RESP:
634 case SCST_CMD_STATE_FINISHED:
636 res = SCST_CMD_STATE_RES_CONT_SAME;
641 PRINT_ERROR("Dev handler %s parse() returned "
642 "invalid cmd state %d (opcode %d)",
643 dev->handler->name, state, cmd->cdb[0]);
645 PRINT_ERROR("Dev handler %s parse() returned "
646 "error %d (opcode %d)", dev->handler->name,
652 if (cmd->resp_data_len == -1) {
653 if (cmd->data_direction == SCST_DATA_READ)
654 cmd->resp_data_len = cmd->bufflen;
656 cmd->resp_data_len = 0;
660 TRACE_EXIT_HRES(res);
664 /* dev_done() will be called as part of the regular cmd's finish */
665 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
667 #ifndef CONFIG_SCST_USE_EXPECTED_VALUES
670 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
671 res = SCST_CMD_STATE_RES_CONT_SAME;
675 static int scst_prepare_space(struct scst_cmd *cmd)
677 int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
681 if (cmd->data_direction == SCST_DATA_NONE)
684 if (cmd->tgt_need_alloc_data_buf) {
685 int orig_bufflen = cmd->bufflen;
687 TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
690 r = cmd->tgtt->alloc_data_buf(cmd);
694 if (unlikely(cmd->bufflen == 0)) {
695 /* See comment in scst_alloc_space() */
700 cmd->tgt_data_buf_alloced = 1;
702 if (unlikely(orig_bufflen < cmd->bufflen)) {
703 PRINT_ERROR("Target driver allocated data "
704 "buffer (size %d), is less, than "
705 "required (size %d)", orig_bufflen,
709 TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
715 if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
716 r = scst_alloc_space(cmd);
717 cmd->tgt_sg = cmd->sg;
718 cmd->tgt_sg_cnt = cmd->sg_cnt;
719 } else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
720 TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
721 cmd->tgt_sg = cmd->sg;
722 cmd->tgt_sg_cnt = cmd->sg_cnt;
724 } else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
725 TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
726 cmd->sg = cmd->tgt_sg;
727 cmd->sg_cnt = cmd->tgt_sg_cnt;
730 TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
731 "sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
732 cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
738 if (scst_cmd_atomic(cmd)) {
739 TRACE_MEM("%s", "Atomic memory allocation failed, "
740 "rescheduling to the thread");
741 res = SCST_CMD_STATE_RES_NEED_THREAD;
748 if (cmd->preprocessing_only) {
749 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
750 TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
752 scst_set_cmd_abnormal_done_state(cmd);
753 res = SCST_CMD_STATE_RES_CONT_SAME;
757 res = SCST_CMD_STATE_RES_CONT_NEXT;
758 cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;
760 TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
761 cmd->tgtt->preprocessing_done(cmd);
762 TRACE_DBG("%s", "preprocessing_done() returned");
767 switch (cmd->data_direction) {
768 case SCST_DATA_WRITE:
769 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
773 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
778 TRACE_EXIT_HRES(res);
782 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
783 "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
785 scst_set_cmd_abnormal_done_state(cmd);
786 res = SCST_CMD_STATE_RES_CONT_SAME;
790 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
791 scst_set_cmd_abnormal_done_state(cmd);
792 res = SCST_CMD_STATE_RES_CONT_SAME;
796 void scst_restart_cmd(struct scst_cmd *cmd, int status,
797 enum scst_exec_context pref_context)
801 TRACE_DBG("Preferred context: %d", pref_context);
802 TRACE_DBG("tag=%llu, status=%#x",
803 (long long unsigned int)scst_cmd_get_tag(cmd),
806 #ifdef CONFIG_SCST_EXTRACHECKS
807 if ((in_irq() || irqs_disabled()) &&
808 ((pref_context == SCST_CONTEXT_DIRECT) ||
809 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
810 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
811 "SCST_CONTEXT_THREAD instead\n", pref_context,
813 pref_context = SCST_CONTEXT_THREAD;
818 case SCST_PREPROCESS_STATUS_SUCCESS:
819 switch (cmd->data_direction) {
820 case SCST_DATA_WRITE:
821 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
824 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
827 if (cmd->set_sn_on_restart_cmd)
828 scst_cmd_set_sn(cmd);
829 /* Small context optimization */
830 if ((pref_context == SCST_CONTEXT_TASKLET) ||
831 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
832 ((pref_context == SCST_CONTEXT_SAME) &&
833 scst_cmd_atomic(cmd))) {
834 if (cmd->data_direction == SCST_DATA_WRITE) {
835 if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
836 &cmd->tgt_dev->tgt_dev_flags))
837 pref_context = SCST_CONTEXT_THREAD;
839 if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
840 &cmd->tgt_dev->tgt_dev_flags))
841 pref_context = SCST_CONTEXT_THREAD;
846 case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
847 scst_set_cmd_abnormal_done_state(cmd);
850 case SCST_PREPROCESS_STATUS_ERROR_FATAL:
851 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
853 case SCST_PREPROCESS_STATUS_ERROR:
854 scst_set_cmd_error(cmd,
855 SCST_LOAD_SENSE(scst_sense_hardw_error));
856 scst_set_cmd_abnormal_done_state(cmd);
860 PRINT_ERROR("%s() received unknown status %x", __func__,
862 scst_set_cmd_abnormal_done_state(cmd);
866 scst_proccess_redirect_cmd(cmd, pref_context, 1);
871 EXPORT_SYMBOL(scst_restart_cmd);
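/*
 * Example flow (sketch): a target driver that uses preprocessing_only
 * gets its preprocessing_done() callback invoked from
 * scst_prepare_space(), finishes its private setup, possibly
 * asynchronously, and then restarts the command. my_preprocessing_done()
 * is a hypothetical name.
 *
 *	static void my_preprocessing_done(struct scst_cmd *cmd)
 *	{
 *		... driver-private preparation ...
 *		scst_restart_cmd(cmd, SCST_PREPROCESS_STATUS_SUCCESS,
 *				 SCST_CONTEXT_THREAD);
 *	}
 */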
874 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
876 struct scst_tgt *tgt = cmd->sess->tgt;
882 spin_lock_irqsave(&tgt->tgt_lock, flags);
885 TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
887 if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
888 /* At least one cmd finished, so try again */
890 TRACE_RETRY("Some command(s) finished, direct retry "
891 "(finished_cmds=%d, tgt->finished_cmds=%d, "
892 "retry_cmds=%d)", finished_cmds,
893 atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
898 TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
899 list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
901 if (!tgt->retry_timer_active) {
902 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
903 add_timer(&tgt->retry_timer);
904 tgt->retry_timer_active = 1;
908 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
914 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
920 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
921 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
925 if ((cmd->tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
926 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
927 res = SCST_CMD_STATE_RES_CONT_SAME;
931 if (unlikely(!cmd->tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
933 * It shouldn't be because of SCST_TGT_DEV_AFTER_*
936 TRACE_DBG("Target driver %s rdy_to_xfer() needs thread "
937 "context, rescheduling", cmd->tgtt->name);
938 res = SCST_CMD_STATE_RES_NEED_THREAD;
943 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
945 res = SCST_CMD_STATE_RES_CONT_NEXT;
946 cmd->state = SCST_CMD_STATE_DATA_WAIT;
948 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
949 #ifdef CONFIG_SCST_DEBUG_RETRY
950 if ((scst_random() % 100) == 75)
951 rc = SCST_TGT_RES_QUEUE_FULL;
954 rc = cmd->tgtt->rdy_to_xfer(cmd);
955 TRACE_DBG("rdy_to_xfer() returned %d", rc);
957 if (likely(rc == SCST_TGT_RES_SUCCESS))
960 /* Restore the previous state */
961 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
964 case SCST_TGT_RES_QUEUE_FULL:
965 if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
970 case SCST_TGT_RES_NEED_THREAD_CTX:
971 TRACE_DBG("Target driver %s "
972 "rdy_to_xfer() requested thread "
973 "context, rescheduling", cmd->tgtt->name);
974 res = SCST_CMD_STATE_RES_NEED_THREAD;
984 TRACE_EXIT_HRES(res);
988 if (rc == SCST_TGT_RES_FATAL_ERROR) {
989 PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
990 "fatal error", cmd->tgtt->name);
992 PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
993 "value %d", cmd->tgtt->name, rc);
995 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
998 scst_set_cmd_abnormal_done_state(cmd);
999 res = SCST_CMD_STATE_RES_CONT_SAME;
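/*
 * Sketch of the rdy_to_xfer() contract exercised above (hypothetical
 * driver): start the data-out transfer and report the queue state. On
 * SCST_TGT_RES_QUEUE_FULL the core parks the command on the retry list
 * with a timer, see scst_queue_retry_cmd().
 *
 *	static int my_rdy_to_xfer(struct scst_cmd *cmd)
 *	{
 *		if (my_hw_queue_full())
 *			return SCST_TGT_RES_QUEUE_FULL;
 *		my_start_data_out(cmd);	   completion calls scst_rx_data()
 *		return SCST_TGT_RES_SUCCESS;
 *	}
 */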
1003 /* No locks, but might be in IRQ */
1004 void scst_proccess_redirect_cmd(struct scst_cmd *cmd,
1005 enum scst_exec_context context, int check_retries)
1007 unsigned long flags;
1011 TRACE_DBG("Context: %x", context);
1013 if (context == SCST_CONTEXT_SAME)
1014 context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
1015 SCST_CONTEXT_DIRECT;
1018 case SCST_CONTEXT_DIRECT_ATOMIC:
1019 scst_process_active_cmd(cmd, true);
1022 case SCST_CONTEXT_DIRECT:
1024 scst_check_retries(cmd->tgt);
1025 scst_process_active_cmd(cmd, false);
1029 PRINT_ERROR("Context %x is unknown, using the thread one",
1032 case SCST_CONTEXT_THREAD:
1034 scst_check_retries(cmd->tgt);
1035 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
1036 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
1037 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
1038 list_add(&cmd->cmd_list_entry,
1039 &cmd->cmd_lists->active_cmd_list);
1041 list_add_tail(&cmd->cmd_list_entry,
1042 &cmd->cmd_lists->active_cmd_list);
1043 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
1044 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
1047 case SCST_CONTEXT_TASKLET:
1049 scst_check_retries(cmd->tgt);
1050 scst_schedule_tasklet(cmd);
1058 void scst_rx_data(struct scst_cmd *cmd, int status,
1059 enum scst_exec_context pref_context)
1063 TRACE_DBG("Preferred context: %d", pref_context);
1064 TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
1066 #ifdef CONFIG_SCST_EXTRACHECKS
1067 if ((in_irq() || irqs_disabled()) &&
1068 ((pref_context == SCST_CONTEXT_DIRECT) ||
1069 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
1070 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
1071 "SCST_CONTEXT_THREAD instead\n", pref_context,
1073 pref_context = SCST_CONTEXT_THREAD;
1078 case SCST_RX_STATUS_SUCCESS:
1079 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1082 struct scatterlist *sg = cmd->tgt_sg;
1083 TRACE_RECV_BOT("RX data for cmd %p "
1084 "(sg_cnt %d, sg %p, sg[0].page %p)", cmd,
1085 cmd->tgt_sg_cnt, sg, (void *)sg_page(&sg[0]));
1086 for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
1087 PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
1088 sg_virt(&sg[i]), sg[i].length);
1092 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
1093 /* Small context optimization */
1094 if ((pref_context == SCST_CONTEXT_TASKLET) ||
1095 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
1096 ((pref_context == SCST_CONTEXT_SAME) &&
1097 scst_cmd_atomic(cmd))) {
1098 if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1099 &cmd->tgt_dev->tgt_dev_flags))
1100 pref_context = SCST_CONTEXT_THREAD;
1104 case SCST_RX_STATUS_ERROR_SENSE_SET:
1105 scst_set_cmd_abnormal_done_state(cmd);
1108 case SCST_RX_STATUS_ERROR_FATAL:
1109 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
1111 case SCST_RX_STATUS_ERROR:
1112 scst_set_cmd_error(cmd,
1113 SCST_LOAD_SENSE(scst_sense_hardw_error));
1114 scst_set_cmd_abnormal_done_state(cmd);
1118 PRINT_ERROR("scst_rx_data() received unknown status %x",
1120 scst_set_cmd_abnormal_done_state(cmd);
1124 scst_proccess_redirect_cmd(cmd, pref_context, 1);
1129 EXPORT_SYMBOL(scst_rx_data);
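/*
 * Example (sketch): when the data-out transfer started from
 * rdy_to_xfer() completes, the target driver reports the result with
 * scst_rx_data(). my_data_out_complete() is a hypothetical completion
 * callback.
 *
 *	static void my_data_out_complete(struct scst_cmd *cmd, int error)
 *	{
 *		scst_rx_data(cmd, error ? SCST_RX_STATUS_ERROR :
 *			SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_DIRECT);
 *	}
 */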
1131 static int scst_tgt_pre_exec(struct scst_cmd *cmd)
1133 int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
1137 cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;
1139 if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
1142 TRACE_DBG("Calling pre_exec(%p)", cmd);
1143 rc = cmd->tgtt->pre_exec(cmd);
1144 TRACE_DBG("pre_exec() returned %d", rc);
1146 if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
1148 case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
1149 scst_set_cmd_abnormal_done_state(cmd);
1151 case SCST_PREPROCESS_STATUS_ERROR_FATAL:
1152 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
1154 case SCST_PREPROCESS_STATUS_ERROR:
1155 scst_set_cmd_error(cmd,
1156 SCST_LOAD_SENSE(scst_sense_hardw_error));
1157 scst_set_cmd_abnormal_done_state(cmd);
1159 case SCST_PREPROCESS_STATUS_NEED_THREAD:
1160 TRACE_DBG("Target driver's %s pre_exec() requested "
1161 "thread context, rescheduling",
1163 res = SCST_CMD_STATE_RES_NEED_THREAD;
1164 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
1173 TRACE_EXIT_RES(res);
1177 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1178 const uint8_t *rq_sense, int rq_sense_len, int resid)
1182 #ifdef CONFIG_SCST_MEASURE_LATENCY
1185 getnstimeofday(&ts);
1186 cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
1187 TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
1188 "tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
1189 ts.tv_sec, ts.tv_nsec);
1193 cmd->status = result & 0xff;
1194 cmd->msg_status = msg_byte(result);
1195 cmd->host_status = host_byte(result);
1196 cmd->driver_status = driver_byte(result);
1197 if (unlikely(resid != 0)) {
1198 #ifdef CONFIG_SCST_EXTRACHECKS
1199 if ((resid < 0) || (resid > cmd->resp_data_len)) {
1200 PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
1201 "op %x)", resid, cmd->resp_data_len,
1205 scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
1208 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
1209 /* We might have double reset UA here */
1210 cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
1211 cmd->dbl_ua_orig_data_direction = cmd->data_direction;
1213 scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
1216 TRACE(TRACE_SCSI, "cmd %p, result=%x, cmd->status=%x, resid=%d, "
1217 "cmd->msg_status=%x, cmd->host_status=%x, "
1218 "cmd->driver_status=%x (cmd %p)", cmd, result, cmd->status, resid,
1219 cmd->msg_status, cmd->host_status, cmd->driver_status, cmd);
1227 /* For small context optimization */
1228 static inline enum scst_exec_context scst_optimize_post_exec_context(
1229 struct scst_cmd *cmd, enum scst_exec_context context)
1231 if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
1232 (context == SCST_CONTEXT_TASKLET) ||
1233 (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
1234 if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1235 &cmd->tgt_dev->tgt_dev_flags))
1236 context = SCST_CONTEXT_THREAD;
1241 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1242 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1243 struct scsi_request **req)
1245 struct scst_cmd *cmd = NULL;
1247 if (scsi_cmd && (*req = scsi_cmd->sc_request))
1248 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1251 PRINT_ERROR("%s", "Request with NULL cmd");
1253 scsi_release_request(*req);
1259 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1261 struct scsi_request *req = NULL;
1262 struct scst_cmd *cmd;
1266 cmd = scst_get_cmd(scsi_cmd, &req);
1270 scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1271 sizeof(req->sr_sense_buffer), scsi_cmd->resid);
1273 /* Clear out request structure */
1275 req->sr_sglist_len = 0;
1276 req->sr_bufflen = 0;
1277 req->sr_buffer = NULL;
1278 req->sr_underflow = 0;
1279 req->sr_request->rq_disk = NULL; /* disown request blk */
1281 scst_release_request(cmd);
1283 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1285 scst_proccess_redirect_cmd(cmd,
1286 scst_optimize_post_exec_context(cmd, scst_estimate_context()),
1293 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1294 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1296 struct scst_cmd *cmd;
1300 cmd = (struct scst_cmd *)data;
1304 scst_do_cmd_done(cmd, result, sense, SCST_SENSE_BUFFERSIZE, resid);
1306 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1308 scst_proccess_redirect_cmd(cmd,
1309 scst_optimize_post_exec_context(cmd, scst_estimate_context()),
1316 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1318 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
1319 enum scst_exec_context pref_context)
1323 #ifdef CONFIG_SCST_MEASURE_LATENCY
1326 getnstimeofday(&ts);
1327 cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
1328 TRACE_DBG("cmd %p (sess %p): post_exec_start %lld (tv_sec %ld, "
1329 "tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start,
1330 ts.tv_sec, ts.tv_nsec);
1334 if (next_state == SCST_CMD_STATE_DEFAULT)
1335 next_state = SCST_CMD_STATE_PRE_DEV_DONE;
1337 #if defined(CONFIG_SCST_DEBUG)
1338 if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
1341 struct scatterlist *sg = cmd->sg;
1342 TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
1343 "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
1344 for (i = 0; i < cmd->sg_cnt; ++i) {
1345 TRACE_BUFF_FLAG(TRACE_RCV_TOP,
1346 "Exec'd sg", sg_virt(&sg[i]),
1353 cmd->state = next_state;
1355 #ifdef CONFIG_SCST_EXTRACHECKS
1356 if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
1357 (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
1358 (next_state != SCST_CMD_STATE_FINISHED)) {
1359 PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
1360 __func__, next_state, cmd->cdb[0]);
1361 scst_set_cmd_error(cmd,
1362 SCST_LOAD_SENSE(scst_sense_hardw_error));
1363 scst_set_cmd_abnormal_done_state(cmd);
1366 pref_context = scst_optimize_post_exec_context(cmd, pref_context);
1367 scst_proccess_redirect_cmd(cmd, pref_context, 0);
1373 static int scst_report_luns_local(struct scst_cmd *cmd)
1379 struct scst_tgt_dev *tgt_dev = NULL;
1381 int offs, overflow = 0;
1385 rc = scst_check_local_events(cmd);
1386 if (unlikely(rc != 0))
1390 cmd->msg_status = 0;
1391 cmd->host_status = DID_OK;
1392 cmd->driver_status = 0;
1394 if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
1395 PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
1396 "LUNS command", cmd->cdb[2]);
1400 buffer_size = scst_get_buf_first(cmd, &buffer);
1401 if (unlikely(buffer_size == 0))
1403 else if (unlikely(buffer_size < 0))
1406 if (buffer_size < 16)
1409 memset(buffer, 0, buffer_size);
1412 /* sess->sess_tgt_dev_list_hash is protected by suspended activity */
1413 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1414 struct list_head *sess_tgt_dev_list_head =
1415 &cmd->sess->sess_tgt_dev_list_hash[i];
1416 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
1417 sess_tgt_dev_list_entry) {
1419 if (offs >= buffer_size) {
1420 scst_put_buf(cmd, buffer);
1421 buffer_size = scst_get_buf_next(cmd,
1423 if (buffer_size > 0) {
1424 memset(buffer, 0, buffer_size);
1431 if ((buffer_size - offs) < 8) {
1432 PRINT_ERROR("Buffer allocated for "
1433 "REPORT LUNS command doesn't "
1434 "allow to fit 8 byte entry "
1437 goto out_put_hw_err;
1439 buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
1440 buffer[offs+1] = tgt_dev->lun & 0xff;
1448 scst_put_buf(cmd, buffer);
1450 /* Set the response header */
1451 buffer_size = scst_get_buf_first(cmd, &buffer);
1452 if (unlikely(buffer_size == 0))
1454 else if (unlikely(buffer_size < 0))
1458 buffer[0] = (dev_cnt >> 24) & 0xff;
1459 buffer[1] = (dev_cnt >> 16) & 0xff;
1460 buffer[2] = (dev_cnt >> 8) & 0xff;
1461 buffer[3] = dev_cnt & 0xff;
1463 scst_put_buf(cmd, buffer);
1466 if (dev_cnt < cmd->resp_data_len)
1467 scst_set_resp_data_len(cmd, dev_cnt);
1473 /* Report the result */
1474 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1477 return SCST_EXEC_COMPLETED;
1480 scst_put_buf(cmd, buffer);
1483 scst_set_cmd_error(cmd,
1484 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1488 scst_put_buf(cmd, buffer);
1489 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
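/*
 * For reference, the REPORT LUNS parameter data built above follows
 * SPC: an 8-byte header whose bytes 0-3 carry the LUN list length in
 * bytes (big-endian, taken from dev_cnt), followed by one 8-byte entry
 * per LUN. Only the first two bytes of each entry are filled in here,
 * which is sufficient for the flat LUN addressing used.
 */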
1493 static int scst_pre_select(struct scst_cmd *cmd)
1495 int res = SCST_EXEC_NOT_COMPLETED;
1499 if (scst_cmd_atomic(cmd)) {
1500 res = SCST_EXEC_NEED_THREAD;
1504 scst_block_dev_cmd(cmd, 1);
1506 /* Checking for local events will be done when the cmd is executed */
1509 TRACE_EXIT_RES(res);
1513 static int scst_reserve_local(struct scst_cmd *cmd)
1515 int res = SCST_EXEC_NOT_COMPLETED, rc;
1516 struct scst_device *dev;
1517 struct scst_tgt_dev *tgt_dev_tmp;
1521 if (scst_cmd_atomic(cmd)) {
1522 res = SCST_EXEC_NEED_THREAD;
1526 if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1527 PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
1528 "(lun=%lld)", (long long unsigned int)cmd->lun);
1529 scst_set_cmd_error(cmd,
1530 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1536 if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
1537 scst_block_dev_cmd(cmd, 1);
1539 rc = scst_check_local_events(cmd);
1540 if (unlikely(rc != 0))
1543 spin_lock_bh(&dev->dev_lock);
1545 if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1546 spin_unlock_bh(&dev->dev_lock);
1547 scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1551 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1552 dev_tgt_dev_list_entry) {
1553 if (cmd->tgt_dev != tgt_dev_tmp)
1554 set_bit(SCST_TGT_DEV_RESERVED,
1555 &tgt_dev_tmp->tgt_dev_flags);
1557 dev->dev_reserved = 1;
1559 spin_unlock_bh(&dev->dev_lock);
1562 TRACE_EXIT_RES(res);
1566 /* Report the result */
1567 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1568 res = SCST_EXEC_COMPLETED;
1572 static int scst_release_local(struct scst_cmd *cmd)
1574 int res = SCST_EXEC_NOT_COMPLETED, rc;
1575 struct scst_tgt_dev *tgt_dev_tmp;
1576 struct scst_device *dev;
1580 if (scst_cmd_atomic(cmd)) {
1581 res = SCST_EXEC_NEED_THREAD;
1587 if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
1588 scst_block_dev_cmd(cmd, 1);
1590 rc = scst_check_local_events(cmd);
1591 if (unlikely(rc != 0))
1594 spin_lock_bh(&dev->dev_lock);
1597 * The device could have been RELEASED behind us, if the RESERVING
1598 * session was closed (see scst_free_tgt_dev()), but this actually
1599 * doesn't matter, so take the lock and don't retest the DEV_RESERVED bits
1601 if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1602 res = SCST_EXEC_COMPLETED;
1604 cmd->msg_status = 0;
1605 cmd->host_status = DID_OK;
1606 cmd->driver_status = 0;
1609 list_for_each_entry(tgt_dev_tmp,
1610 &dev->dev_tgt_dev_list,
1611 dev_tgt_dev_list_entry) {
1612 clear_bit(SCST_TGT_DEV_RESERVED,
1613 &tgt_dev_tmp->tgt_dev_flags);
1615 dev->dev_reserved = 0;
1618 spin_unlock_bh(&dev->dev_lock);
1620 if (res == SCST_EXEC_COMPLETED)
1624 TRACE_EXIT_RES(res);
1628 res = SCST_EXEC_COMPLETED;
1629 /* Report the result */
1630 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1634 /* No locks, no IRQ or IRQ-safe context allowed */
1635 int scst_check_local_events(struct scst_cmd *cmd)
1638 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1639 struct scst_device *dev = cmd->dev;
1644 * There's no race here, because we only need to trace commands sent
1645 * *after* the dev_double_ua_possible flag was set.
1647 if (unlikely(dev->dev_double_ua_possible))
1648 cmd->double_ua_possible = 1;
1650 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1651 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
1652 goto out_uncomplete;
1655 /* Reserve check before Unit Attention */
1656 if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
1657 &tgt_dev->tgt_dev_flags))) {
1658 if (cmd->cdb[0] != INQUIRY &&
1659 cmd->cdb[0] != REPORT_LUNS &&
1660 cmd->cdb[0] != RELEASE &&
1661 cmd->cdb[0] != RELEASE_10 &&
1662 cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER &&
1663 (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL ||
1664 (cmd->cdb[4] & 3)) &&
1665 cmd->cdb[0] != LOG_SENSE &&
1666 cmd->cdb[0] != REQUEST_SENSE) {
1667 scst_set_cmd_error_status(cmd,
1668 SAM_STAT_RESERVATION_CONFLICT);
1673 /* If we had an internal bus reset, set a unit attention error on the command */
1674 if ((dev->scsi_dev != NULL) &&
1675 unlikely(dev->scsi_dev->was_reset)) {
1676 if (scst_is_ua_command(cmd)) {
1679 * Prevent more than one cmd from being triggered by
1682 spin_lock_bh(&dev->dev_lock);
1683 barrier(); /* to reread was_reset */
1684 if (dev->scsi_dev->was_reset) {
1685 TRACE(TRACE_MGMT, "was_reset is %d", 1);
1686 scst_set_cmd_error(cmd,
1687 SCST_LOAD_SENSE(scst_sense_reset_UA));
1689 * It looks like it is safe to clear was_reset
1692 dev->scsi_dev->was_reset = 0;
1695 spin_unlock_bh(&dev->dev_lock);
1702 if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
1703 &cmd->tgt_dev->tgt_dev_flags))) {
1704 if (scst_is_ua_command(cmd)) {
1705 rc = scst_set_pending_UA(cmd);
1714 TRACE_EXIT_RES(res);
1719 sBUG_ON(!cmd->completed);
1726 EXPORT_SYMBOL(scst_check_local_events);
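/*
 * Example (sketch): a dev handler's exec() is expected to call
 * scst_check_local_events() before doing the real work, just like the
 * local handlers above (RESERVE/RELEASE/REPORT LUNS) do. my_exec() is
 * a hypothetical handler.
 *
 *	static int my_exec(struct scst_cmd *cmd)
 *	{
 *		int rc = scst_check_local_events(cmd);
 *		if (unlikely(rc != 0))
 *			goto out_done;
 *		... perform the command, set status or sense ...
 *	out_done:
 *		cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
 *			SCST_CONTEXT_SAME);
 *		return SCST_EXEC_COMPLETED;
 *	}
 */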
1729 void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
1734 /* Optimized for lockless fast path */
1736 TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
1739 if (!atomic_dec_and_test(slot))
1742 TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
1743 tgt_dev->num_free_sn_slots);
1744 if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
1745 spin_lock_irq(&tgt_dev->sn_lock);
1746 if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
1747 if (tgt_dev->num_free_sn_slots < 0)
1748 tgt_dev->cur_sn_slot = slot;
1750 * To be in-sync with SIMPLE case in scst_cmd_set_sn()
1753 tgt_dev->num_free_sn_slots++;
1754 TRACE_SN("Incremented num_free_sn_slots (%d)",
1755 tgt_dev->num_free_sn_slots);
1758 spin_unlock_irq(&tgt_dev->sn_lock);
1763 * No locks are needed, because only one thread at a time can
1764 * be here (serialized by sn). It is also assumed that there
1765 * can be no half-incremented halves.
1767 tgt_dev->expected_sn++;
1768 smp_mb(); /* write must be before def_cmd_count read */
1769 TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);
1776 static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
1779 /* For HQ commands SN is not set */
1780 bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
1781 cmd->sn_set && !cmd->retry;
1782 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1783 struct scst_cmd *res;
1787 if (inc_expected_sn)
1788 scst_inc_expected_sn(tgt_dev, cmd->sn_slot);
1791 scst_make_deferred_commands_active(tgt_dev);
1794 res = scst_check_deferred_commands(tgt_dev);
1796 TRACE_EXIT_HRES(res);
1800 /* cmd must hold an additional reference so it can't die inside */
1801 static int scst_do_real_exec(struct scst_cmd *cmd)
1803 int res = SCST_EXEC_NOT_COMPLETED;
1804 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1807 struct scst_device *dev = cmd->dev;
1808 struct scst_dev_type *handler = dev->handler;
1812 cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
1814 if (handler->exec) {
1815 if (unlikely(!dev->handler->exec_atomic &&
1816 scst_cmd_atomic(cmd))) {
1818 * It shouldn't be because of SCST_TGT_DEV_AFTER_*
1821 TRACE_DBG("Dev handler %s exec() needs thread "
1822 "context, rescheduling", dev->handler->name);
1823 res = SCST_EXEC_NEED_THREAD;
1827 TRACE_DBG("Calling dev handler %s exec(%p)",
1828 handler->name, cmd);
1829 TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
1831 res = handler->exec(cmd);
1832 TRACE_DBG("Dev handler %s exec() returned %d",
1833 handler->name, res);
1835 if (res == SCST_EXEC_COMPLETED)
1837 else if (res == SCST_EXEC_NEED_THREAD)
1840 sBUG_ON(res != SCST_EXEC_NOT_COMPLETED);
1843 TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1845 if (unlikely(dev->scsi_dev == NULL)) {
1846 PRINT_ERROR("Command for virtual device must be "
1847 "processed by device handler (lun %lld)!",
1848 (long long unsigned int)cmd->lun);
1852 res = scst_check_local_events(cmd);
1853 if (unlikely(res != 0))
1856 #ifndef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1857 if (unlikely(scst_cmd_atomic(cmd))) {
1858 TRACE_DBG("Pass-through exec() can not be called in atomic "
1859 "context, rescheduling to the thread (handler %s)",
1861 res = SCST_EXEC_NEED_THREAD;
1866 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1867 if (unlikely(scst_alloc_request(cmd) != 0)) {
1868 if (scst_cmd_atomic(cmd)) {
1869 res = SCST_EXEC_NEED_THREAD;
1872 PRINT_INFO("%s", "Unable to allocate request, "
1873 "sending BUSY status");
1878 scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1879 (void *)cmd->scsi_req->sr_buffer,
1880 cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1883 rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1884 cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1885 cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1886 scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
1887 if (unlikely(rc != 0)) {
1888 if (scst_cmd_atomic(cmd)) {
1889 res = SCST_EXEC_NEED_THREAD;
1892 PRINT_ERROR("scst_exec_req() failed: %d", res);
1899 res = SCST_EXEC_COMPLETED;
1906 /* Restore the state */
1907 cmd->state = SCST_CMD_STATE_REAL_EXEC;
1911 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1914 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1921 res = SCST_EXEC_COMPLETED;
1922 /* Report the result */
1923 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
1927 static inline int scst_real_exec(struct scst_cmd *cmd)
1933 BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
1934 BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
1935 BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);
1937 __scst_cmd_get(cmd);
1939 res = scst_do_real_exec(cmd);
1941 if (likely(res == SCST_EXEC_COMPLETED)) {
1942 scst_post_exec_sn(cmd, true);
1943 if (cmd->dev->scsi_dev != NULL)
1944 generic_unplug_device(
1945 cmd->dev->scsi_dev->request_queue);
1947 sBUG_ON(res != SCST_EXEC_NEED_THREAD);
1949 __scst_cmd_put(cmd);
1951 /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
1953 TRACE_EXIT_RES(res);
1957 static int scst_do_local_exec(struct scst_cmd *cmd)
1960 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1964 /* Check READ_ONLY device status */
1965 if (((tgt_dev->acg_dev->rd_only_flag) || cmd->dev->swp) &&
1966 (cmd->cdb[0] == WRITE_6 || /* ToDo: full list of the modify cmds */
1967 cmd->cdb[0] == WRITE_10 ||
1968 cmd->cdb[0] == WRITE_12 ||
1969 cmd->cdb[0] == WRITE_16 ||
1970 cmd->cdb[0] == WRITE_VERIFY ||
1971 cmd->cdb[0] == WRITE_VERIFY_12 ||
1972 cmd->cdb[0] == WRITE_VERIFY_16 ||
1973 (cmd->dev->handler->type == TYPE_TAPE &&
1974 (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS)))) {
1975 scst_set_cmd_error(cmd,
1976 SCST_LOAD_SENSE(scst_sense_data_protect));
1981 * When adding new commands here, don't forget to update
1982 * scst_is_cmd_local() in scst.h, if necessary
1985 switch (cmd->cdb[0]) {
1987 case MODE_SELECT_10:
1989 res = scst_pre_select(cmd);
1993 res = scst_reserve_local(cmd);
1997 res = scst_release_local(cmd);
2000 res = scst_report_luns_local(cmd);
2003 res = SCST_EXEC_NOT_COMPLETED;
2008 TRACE_EXIT_RES(res);
2012 /* Report the result */
2013 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
2014 res = SCST_EXEC_COMPLETED;
2018 static int scst_local_exec(struct scst_cmd *cmd)
2024 BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
2025 BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
2026 BUILD_BUG_ON(SCST_CMD_STATE_RES_NEED_THREAD != SCST_EXEC_NEED_THREAD);
2028 __scst_cmd_get(cmd);
2030 res = scst_do_local_exec(cmd);
2031 if (likely(res == SCST_EXEC_NOT_COMPLETED))
2032 cmd->state = SCST_CMD_STATE_REAL_EXEC;
2033 else if (res == SCST_EXEC_COMPLETED)
2034 scst_post_exec_sn(cmd, true);
2036 sBUG_ON(res != SCST_EXEC_NEED_THREAD);
2038 __scst_cmd_put(cmd);
2040 /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
2041 TRACE_EXIT_RES(res);
2045 static int scst_exec(struct scst_cmd **active_cmd)
2047 struct scst_cmd *cmd = *active_cmd;
2048 struct scst_cmd *ref_cmd;
2049 struct scst_device *dev = cmd->dev;
2050 int res = SCST_CMD_STATE_RES_CONT_NEXT, count;
2054 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
2057 /* To protect tgt_dev */
2059 __scst_cmd_get(ref_cmd);
2065 cmd->sent_for_exec = 1;
2066 cmd->scst_cmd_done = scst_cmd_done_local;
2067 cmd->state = SCST_CMD_STATE_LOCAL_EXEC;
2069 if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
2070 (cmd->data_direction == SCST_DATA_WRITE))
2071 scst_copy_sg(cmd, SCST_SG_COPY_FROM_TARGET);
2073 rc = scst_do_local_exec(cmd);
2074 if (likely(rc == SCST_EXEC_NOT_COMPLETED))
2075 /* Nothing to do */;
2076 else if (rc == SCST_EXEC_NEED_THREAD) {
2077 TRACE_DBG("%s", "scst_do_local_exec() requested "
2078 "thread context, rescheduling");
2079 scst_dec_on_dev_cmd(cmd);
2080 res = SCST_CMD_STATE_RES_NEED_THREAD;
2083 sBUG_ON(rc != SCST_EXEC_COMPLETED);
2087 cmd->state = SCST_CMD_STATE_REAL_EXEC;
2089 rc = scst_do_real_exec(cmd);
2090 if (likely(rc == SCST_EXEC_COMPLETED))
2091 /* Nothing to do */;
2092 else if (rc == SCST_EXEC_NEED_THREAD) {
2093 TRACE_DBG("scst_real_exec() requested thread "
2094 "context, rescheduling (cmd %p)", cmd);
2095 scst_dec_on_dev_cmd(cmd);
2096 res = SCST_CMD_STATE_RES_NEED_THREAD;
2104 cmd = scst_post_exec_sn(cmd, false);
2108 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
2111 __scst_cmd_put(ref_cmd);
2113 __scst_cmd_get(ref_cmd);
2121 if (dev->scsi_dev != NULL)
2122 generic_unplug_device(dev->scsi_dev->request_queue);
2125 __scst_cmd_put(ref_cmd);
2126 /* !! At this point sess, dev and tgt_dev can be already freed !! */
2129 TRACE_EXIT_RES(res);
2133 static int scst_send_for_exec(struct scst_cmd **active_cmd)
2136 struct scst_cmd *cmd = *active_cmd;
2137 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2138 typeof(tgt_dev->expected_sn) expected_sn;
2142 #ifdef CONFIG_SCST_MEASURE_LATENCY
2143 if (cmd->pre_exec_finish == 0) {
2145 getnstimeofday(&ts);
2146 cmd->pre_exec_finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
2147 TRACE_DBG("cmd %p (sess %p): pre_exec_finish %lld (tv_sec %ld, "
2148 "tv_nsec %ld)", cmd, cmd->sess, cmd->pre_exec_finish,
2149 ts.tv_sec, ts.tv_nsec);
2153 if (unlikely(cmd->internal))
2156 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
2159 sBUG_ON(!cmd->sn_set);
2161 expected_sn = tgt_dev->expected_sn;
2162 /* Optimized for lockless fast path */
2163 if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
2164 spin_lock_irq(&tgt_dev->sn_lock);
2166 tgt_dev->def_cmd_count++;
2169 expected_sn = tgt_dev->expected_sn;
2170 if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
2171 if (unlikely(test_bit(SCST_CMD_ABORTED,
2172 &cmd->cmd_flags))) {
2173 /* Necessary to allow aborting out-of-SN-order cmds */
2174 TRACE_MGMT_DBG("Aborting out of sn cmd %p "
2175 "(tag %llu, sn %lu)", cmd,
2176 (long long unsigned)cmd->tag, cmd->sn);
2177 tgt_dev->def_cmd_count--;
2178 scst_set_cmd_abnormal_done_state(cmd);
2179 res = SCST_CMD_STATE_RES_CONT_SAME;
2181 TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
2182 "expected_sn=%ld)", cmd, cmd->sn,
2183 cmd->sn_set, expected_sn);
2184 list_add_tail(&cmd->sn_cmd_list_entry,
2185 &tgt_dev->deferred_cmd_list);
2186 res = SCST_CMD_STATE_RES_CONT_NEXT;
2188 spin_unlock_irq(&tgt_dev->sn_lock);
2191 TRACE_SN("Somebody incremented expected_sn %ld, "
2192 "continuing", expected_sn);
2193 tgt_dev->def_cmd_count--;
2194 spin_unlock_irq(&tgt_dev->sn_lock);
2199 res = scst_exec(active_cmd);
2202 TRACE_EXIT_HRES(res);
2206 /* No locks supposed to be held */
2207 static int scst_check_sense(struct scst_cmd *cmd)
2210 struct scst_device *dev = cmd->dev;
2214 if (unlikely(cmd->ua_ignore))
2217 /* If we had an internal bus reset behind us, set an error UA on the command */
2218 if ((dev->scsi_dev != NULL) &&
2219 unlikely(cmd->host_status == DID_RESET) &&
2220 scst_is_ua_command(cmd)) {
2221 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
2222 dev->scsi_dev->was_reset, cmd->host_status);
2223 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
2224 /* It looks like it is safe to clear was_reset here */
2225 dev->scsi_dev->was_reset = 0;
2228 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2229 SCST_SENSE_VALID(cmd->sense)) {
2230 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2231 SCST_SENSE_BUFFERSIZE);
2233 /* Check Unit Attention Sense Key */
2234 if (scst_is_ua_sense(cmd->sense)) {
2235 if (cmd->sense[12] == SCST_SENSE_ASC_UA_RESET) {
2236 if (cmd->double_ua_possible) {
2237 TRACE(TRACE_MGMT_MINOR, "Double UA "
2238 "detected for device %p", dev);
2239 TRACE(TRACE_MGMT_MINOR, "Retrying cmd"
2240 " %p (tag %llu)", cmd,
2241 (long long unsigned)cmd->tag);
2244 cmd->msg_status = 0;
2245 cmd->host_status = DID_OK;
2246 cmd->driver_status = 0;
2248 mempool_free(cmd->sense,
2249 scst_sense_mempool);
2252 scst_check_restore_sg_buff(cmd);
2254 sBUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
2255 cmd->data_direction =
2256 cmd->dbl_ua_orig_data_direction;
2257 cmd->resp_data_len =
2258 cmd->dbl_ua_orig_resp_data_len;
2260 cmd->state = SCST_CMD_STATE_REAL_EXEC;
2266 scst_dev_check_set_UA(dev, cmd, cmd->sense,
2267 SCST_SENSE_BUFFERSIZE);
2271 if (unlikely(cmd->double_ua_possible)) {
2272 if (scst_is_ua_command(cmd)) {
2273 TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
2274 "cmd %p)", dev, cmd);
2276 * Lock used to protect other flags in the bitfield
2277 * (just in case, actually). Those 2 flags can't be
2278 * changed in parallel, because the device is
2281 spin_lock_bh(&dev->dev_lock);
2282 dev->dev_double_ua_possible = 0;
2283 dev->dev_serialized = 0;
2284 spin_unlock_bh(&dev->dev_lock);
2289 TRACE_EXIT_RES(res);
2293 static int scst_check_auto_sense(struct scst_cmd *cmd)
2299 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2300 (!SCST_SENSE_VALID(cmd->sense) ||
2301 SCST_NO_SENSE(cmd->sense))) {
2302 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2303 "cmd->status=%x, cmd->msg_status=%x, "
2304 "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
2305 cmd->status, cmd->msg_status, cmd->host_status,
2306 cmd->driver_status, cmd);
2308 } else if (unlikely(cmd->host_status)) {
2309 if ((cmd->host_status == DID_REQUEUE) ||
2310 (cmd->host_status == DID_IMM_RETRY) ||
2311 (cmd->host_status == DID_SOFT_ERROR) ||
2312 (cmd->host_status == DID_ABORT)) {
2315 TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2316 "received, returning HARDWARE ERROR instead "
2317 "(cmd %p)", cmd->host_status, cmd);
2318 scst_set_cmd_error(cmd,
2319 SCST_LOAD_SENSE(scst_sense_hardw_error));
2323 TRACE_EXIT_RES(res);
2327 static int scst_pre_dev_done(struct scst_cmd *cmd)
2329 int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
2333 if (unlikely(scst_check_auto_sense(cmd))) {
2334 PRINT_INFO("Command finished with CHECK CONDITION, but "
2335 "without sense data (opcode 0x%x), issuing "
2336 "REQUEST SENSE", cmd->cdb[0]);
2337 rc = scst_prepare_request_sense(cmd);
2339 res = SCST_CMD_STATE_RES_CONT_NEXT;
2341 PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
2342 "returning HARDWARE ERROR");
2343 scst_set_cmd_error(cmd,
2344 SCST_LOAD_SENSE(scst_sense_hardw_error));
2347 } else if (unlikely(scst_check_sense(cmd)))
2350 if (likely(scsi_status_is_good(cmd->status))) {
2351 unsigned char type = cmd->dev->handler->type;
2352 if (unlikely((cmd->cdb[0] == MODE_SENSE ||
2353 cmd->cdb[0] == MODE_SENSE_10)) &&
2354 cmd->tgt_dev->acg_dev->rd_only_flag &&
2355 (type == TYPE_DISK ||
2356 type == TYPE_WORM ||
2358 type == TYPE_TAPE)) {
2363 length = scst_get_buf_first(cmd, &address);
2365 PRINT_ERROR("%s", "Unable to get "
2366 "MODE_SENSE buffer");
2367 scst_set_cmd_error(cmd,
2369 scst_sense_hardw_error));
2371 } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
2372 address[2] |= 0x80; /* Write Protect */
2373 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
2374 address[3] |= 0x80; /* Write Protect */
2375 scst_put_buf(cmd, address);
2382 * Check and clear NormACA option for the device, if necessary,
2383 * since we don't support ACA
2385 if (unlikely((cmd->cdb[0] == INQUIRY)) &&
2386 /* Std INQUIRY data (no EVPD) */
2387 !(cmd->cdb[1] & SCST_INQ_EVPD) &&
2388 (cmd->resp_data_len > SCST_INQ_BYTE3)) {
2393 /* ToDo: all pages ?? */
2394 buflen = scst_get_buf_first(cmd, &buffer);
2395 if (buflen > SCST_INQ_BYTE3) {
2396 #ifdef CONFIG_SCST_EXTRACHECKS
2397 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2398 PRINT_INFO("NormACA set for device: "
2399 "lun=%lld, type 0x%02x. Clear it, "
2400 "since it's unsupported.",
2401 (long long unsigned int)cmd->lun,
2405 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2406 } else if (buflen != 0) {
2407 PRINT_ERROR("%s", "Unable to get INQUIRY "
2409 scst_set_cmd_error(cmd,
2410 SCST_LOAD_SENSE(scst_sense_hardw_error));
2414 scst_put_buf(cmd, buffer);
2420 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2421 (cmd->cdb[0] == MODE_SELECT_10) ||
2422 (cmd->cdb[0] == LOG_SELECT))) {
2424 "MODE/LOG SELECT succeeded (LUN %lld)",
2425 (long long unsigned int)cmd->lun);
2426 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2430 if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
2431 if (!test_bit(SCST_TGT_DEV_RESERVED,
2432 &cmd->tgt_dev->tgt_dev_flags)) {
2433 struct scst_tgt_dev *tgt_dev_tmp;
2434 struct scst_device *dev = cmd->dev;
2437 "Real RESERVE failed lun=%lld, "
2439 (long long unsigned int)cmd->lun,
2441 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2442 SCST_SENSE_BUFFERSIZE);
2444 /* Clearing the reservation */
2445 spin_lock_bh(&dev->dev_lock);
2446 list_for_each_entry(tgt_dev_tmp,
2447 &dev->dev_tgt_dev_list,
2448 dev_tgt_dev_list_entry) {
2449 clear_bit(SCST_TGT_DEV_RESERVED,
2450 &tgt_dev_tmp->tgt_dev_flags);
2452 dev->dev_reserved = 0;
2453 spin_unlock_bh(&dev->dev_lock);
2457 /* Check for MODE PARAMETERS CHANGED UA */
2458 if ((cmd->dev->scsi_dev != NULL) &&
2459 (cmd->status == SAM_STAT_CHECK_CONDITION) &&
2460 SCST_SENSE_VALID(cmd->sense) &&
2461 scst_is_ua_sense(cmd->sense) &&
2462 (cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) {
2464 "MODE PARAMETERS CHANGED UA (lun %lld)",
2465 (long long unsigned int)cmd->lun);
2466 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2471 cmd->state = SCST_CMD_STATE_DEV_DONE;
2474 TRACE_EXIT_RES(res);
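
/*
 * A minimal sketch of the scst_get_buf_first()/scst_put_buf() pairing used
 * above (illustration only, not part of SCST; my_dump_first_bytes and the
 * 16-byte limit are made up for the example):
 */
#if 0
static void my_dump_first_bytes(struct scst_cmd *cmd)
{
	uint8_t *buf;
	int len = scst_get_buf_first(cmd, &buf);	/* maps the 1st SG element */

	if (len > 0) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "First bytes", buf,
			min(len, 16));
		scst_put_buf(cmd, buf);			/* always pair with the get */
	}
}
#endif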

static int scst_mode_select_checks(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int atomic = scst_cmd_atomic(cmd);

	TRACE_ENTRY();

	if (likely(scsi_status_is_good(cmd->status))) {
		if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
			     (cmd->cdb[0] == MODE_SELECT_10) ||
			     (cmd->cdb[0] == LOG_SELECT))) {
			struct scst_device *dev = cmd->dev;
			if (atomic && (dev->scsi_dev != NULL)) {
				TRACE_DBG("%s", "MODE/LOG SELECT: thread "
					"context required");
				res = SCST_CMD_STATE_RES_NEED_THREAD;
				goto out;
			}

			TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
				"setting the SELECT UA (lun=%lld)",
				(long long unsigned int)cmd->lun);

			spin_lock_bh(&dev->dev_lock);
			spin_lock(&scst_temp_UA_lock);
			if (cmd->cdb[0] == LOG_SELECT) {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x02);
			} else {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x01);
			}
			scst_dev_check_set_local_UA(dev, cmd, scst_temp_UA,
				sizeof(scst_temp_UA));
			spin_unlock(&scst_temp_UA_lock);
			spin_unlock_bh(&dev->dev_lock);

			if (dev->scsi_dev != NULL)
				scst_obtain_device_parameters(dev);
		}
	} else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
		   SCST_SENSE_VALID(cmd->sense) &&
		   scst_is_ua_sense(cmd->sense) &&
		   (((cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) ||
		    (cmd->sense[12] == 0x29) /* reset */ ||
		    (cmd->sense[12] == 0x28) /* medium changed */ ||
		    /* cleared by another initiator (just in case) */
		    (cmd->sense[12] == 0x2F))) {
		if (atomic) {
			TRACE_DBG("Possible parameters changed UA %x: "
				"thread context required", cmd->sense[12]);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
			"(lun %lld): getting new parameters", cmd->sense[12],
			(long long unsigned int)cmd->lun);

		scst_obtain_device_parameters(cmd->dev);
	} else
		sBUG();

	cmd->state = SCST_CMD_STATE_DEV_DONE;

out:
	TRACE_EXIT_HRES(res);
	return res;
}
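
/*
 * Note: the ASC/ASCQ pairs set above are the standard SPC "parameters
 * changed" Unit Attentions: 0x2a/0x01 is MODE PARAMETERS CHANGED and
 * 0x2a/0x02 is LOG PARAMETERS CHANGED, which is why LOG_SELECT raises the
 * latter and MODE_SELECT the former for the other sessions on the same
 * device.
 */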

static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
{
	if (likely(cmd->sn_set))
		scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);

	scst_make_deferred_commands_active(cmd->tgt_dev);
}

static int scst_dev_done(struct scst_cmd **pcmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_cmd *cmd = *pcmd;
	int state;
	struct scst_device *dev = cmd->dev;

	TRACE_ENTRY();

	state = SCST_CMD_STATE_PRE_XMIT_RESP;
	if (likely(!scst_is_cmd_local(cmd)) &&
	    likely(dev->handler->dev_done != NULL)) {
		int rc;

		if (unlikely(!dev->handler->dev_done_atomic &&
			     scst_cmd_atomic(cmd))) {
			/*
			 * This shouldn't happen, because of the
			 * SCST_TGT_DEV_AFTER_* optimization.
			 */
			TRACE_DBG("Dev handler %s dev_done() needs thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}

		TRACE_DBG("Calling dev handler %s dev_done(%p)",
			dev->handler->name, cmd);
		rc = dev->handler->dev_done(cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
			dev->handler->name, rc);
		if (rc != SCST_CMD_STATE_DEFAULT)
			state = rc;
	}

	switch (state) {
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_MODE_SELECT_CHECKS:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		cmd->state = state;
		break;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s dev_done() requested "
			"thread context, rescheduling",
			dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"invalid cmd state %d",
				dev->handler->name, state);
		} else {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"error %d", dev->handler->name,
				state);
		}
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_set_cmd_abnormal_done_state(cmd);
		break;
	}

	if (cmd->needs_unblocking)
		scst_unblock_dev_cmd(cmd);

	if (likely(cmd->dec_on_dev_needed))
		scst_dec_on_dev_cmd(cmd);

	if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
		scst_inc_check_expected_sn(cmd);

	if (unlikely(cmd->cdb[0] == REQUEST_SENSE) && (cmd->internal))
		*pcmd = scst_complete_request_sense(cmd);

out:
	TRACE_EXIT_HRES(res);
	return res;
}
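
/*
 * A minimal sketch of the dev_done() contract enforced above (illustration
 * only, not part of SCST; my_dev_done is a made-up name): a handler may
 * inspect the finished command and either return one of the
 * SCST_CMD_STATE_* values to redirect the state machine, or
 * SCST_CMD_STATE_DEFAULT to continue normally.
 */
#if 0
static int my_dev_done(struct scst_cmd *cmd)
{
	/* e.g. examine cmd->status or patch the response buffer here */
	return SCST_CMD_STATE_DEFAULT;
}
#endif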

static int scst_pre_xmit_response(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_DEBUG_TM
	if (cmd->tm_dbg_delayed &&
	    !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MGMT_DBG("%s",
				"DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			return res;
		}
		TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
			cmd, (long long unsigned int)cmd->tag);
		schedule_timeout_uninterruptible(HZ);
	}
#endif

	if (likely(cmd->tgt_dev != NULL)) {
		atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
		atomic_dec(&cmd->dev->dev_cmd_count);
		/* If expected values are not set, the expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_dec(&cmd->dev->write_cmd_count);

		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			scst_on_hq_cmd_response(cmd);

		if (unlikely(!cmd->sent_for_exec)) {
			TRACE_SN("cmd %p was not sent to the mid-level"
				" (sn %ld, set %d)",
				cmd, cmd->sn, cmd->sn_set);
			scst_unblock_deferred(cmd->tgt_dev, cmd);
			cmd->sent_for_exec = 1;
		}
	}

	/*
	 * If we don't remove the cmd from the search list here, before
	 * submitting it for transmission, we will have a race: if the cmd's
	 * release is for some reason delayed after transmission and the
	 * initiator sends a cmd with the same tag, the find() functions
	 * can return the wrong cmd.
	 */
	spin_lock_irq(&cmd->sess->sess_list_lock);
	list_del(&cmd->search_cmd_list_entry);
	spin_unlock_irq(&cmd->sess->sess_list_lock);

	cmd->done = 1;
	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
		scst_xmit_process_aborted_cmd(cmd);

	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), "
			"skipping",
			cmd, (long long unsigned int)cmd->tag);
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	if (cmd->tgt_data_buf_alloced && cmd->dh_data_buf_alloced &&
	    (cmd->data_direction == SCST_DATA_READ))
		scst_copy_sg(cmd, SCST_SG_COPY_TO_TARGET);

	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;

out:
#ifdef CONFIG_SCST_MEASURE_LATENCY
	{
		struct timespec ts;
		uint64_t finish, scst_time, proc_time;
		struct scst_session *sess = cmd->sess;

		getnstimeofday(&ts);
		finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;

		spin_lock_bh(&sess->meas_lock);

		scst_time = cmd->pre_exec_finish - cmd->start;
		scst_time += finish - cmd->post_exec_start;
		proc_time = finish - cmd->start;

		sess->scst_time += scst_time;
		sess->processing_time += proc_time;
		sess->processed_cmds++;

		spin_unlock_bh(&sess->meas_lock);

		TRACE_DBG("cmd %p (sess %p): finish %lld (tv_sec %ld, "
			"tv_nsec %ld), scst_time %lld, proc_time %lld",
			cmd, sess, finish, ts.tv_sec, ts.tv_nsec, scst_time,
			proc_time);
	}
#endif

	TRACE_EXIT_HRES(res);
	return res;
}
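
/*
 * Note: in the latency accounting above, scst_time sums the two spans the
 * command spent inside SCST itself (arrival to the end of pre-exec
 * processing, plus post-exec to completion), so the execution time in
 * between is excluded, while proc_time covers the command's whole lifetime
 * from start to finish.
 */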

static int scst_xmit_response(struct scst_cmd *cmd)
{
	int res, rc;

	TRACE_ENTRY();

	if (unlikely(!cmd->tgtt->xmit_response_atomic &&
		     scst_cmd_atomic(cmd))) {
		/*
		 * This shouldn't happen, because of the
		 * SCST_TGT_DEV_AFTER_* optimization.
		 */
		TRACE_DBG("Target driver %s xmit_response() needs thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		goto out;
	}

	while (1) {
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_XMIT_WAIT;

		TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
		if (trace_flag & TRACE_SND_BOT) {
			int i;
			struct scatterlist *sg = cmd->tgt_sg;
			TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
				"(sg_cnt %d, sg %p, sg[0].page %p)", cmd,
				cmd->tgt_sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
				PRINT_BUFF_FLAG(TRACE_SND_BOT, "Xmitting sg",
					sg_virt(&sg[i]), sg[i].length);
			}
		}
#endif

#ifdef CONFIG_SCST_DEBUG_RETRY
		if (((scst_random() % 100) == 77))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->xmit_response(cmd);
		TRACE_DBG("xmit_response() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_XMIT_RESP;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				break;
			else
				continue;

		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s xmit_response() "
				"requested thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			break;

		default:
			goto out_error;
		}
		break;
	}

out:
	/* Caution: cmd can already be dead here */
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}

void scst_tgt_cmd_done(struct scst_cmd *cmd,
	enum scst_exec_context pref_context)
{
	TRACE_ENTRY();

	sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

	cmd->state = SCST_CMD_STATE_FINISHED;
	scst_proccess_redirect_cmd(cmd, pref_context, 1);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_tgt_cmd_done);
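
/*
 * A minimal sketch of the xmit_response()/scst_tgt_cmd_done() contract
 * (illustration only, not part of SCST; my_xmit_response(),
 * my_queue_response() and my_response_sent() are made-up driver names):
 */
#if 0
static int my_xmit_response(struct scst_cmd *cmd)
{
	if (my_queue_response(cmd) != 0)
		return SCST_TGT_RES_QUEUE_FULL;	/* SCST will retry later */
	return SCST_TGT_RES_SUCCESS;
}

/* Later, from the driver's send-completion path: */
static void my_response_sent(struct scst_cmd *cmd)
{
	scst_tgt_cmd_done(cmd, SCST_CONTEXT_DIRECT_ATOMIC);
}
#endif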

static int scst_finish_cmd(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	atomic_dec(&cmd->sess->sess_cmd_count);

	cmd->finished = 1;
	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
			"scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
			atomic_read(&scst_cmd_count));

		scst_finish_cmd_mgmt(cmd);
	}

	if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
		if ((cmd->tgt_dev != NULL) &&
		    scst_is_ua_sense(cmd->sense)) {
			/* This UA delivery failed, so requeue it */
			TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd "
				"%p", cmd);
			scst_check_set_UA(cmd->tgt_dev, cmd->sense,
				SCST_SENSE_BUFFERSIZE, 1);
		}
	}

	__scst_cmd_put(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
	return res;
}

/*
 * No locks, but it must be externally serialized (see the comment for
 * scst_cmd_init_done() in scst.h)
 */
static void scst_cmd_set_sn(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	unsigned long flags;

	TRACE_ENTRY();

	if (scst_is_implicit_hq(cmd)) {
		TRACE_SN("Implicit HQ cmd %p", cmd);
		cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	}

	EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);

	/* Optimized for the lockless fast path */

	scst_check_debug_sn(cmd);

	if (cmd->dev->queue_alg ==
			SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
		/*
		 * Not the best way, but good enough until it becomes
		 * possible to specify the queue type at pass-through
		 * command submission.
		 */
		cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
	}

	switch (cmd->queue_type) {
	case SCST_CMD_QUEUE_SIMPLE:
	case SCST_CMD_QUEUE_UNTAGGED:
#if 0 /* left for future performance investigations */
		if (scst_cmd_is_expected_set(cmd)) {
			if ((cmd->expected_data_direction == SCST_DATA_READ) &&
			    (atomic_read(&cmd->dev->write_cmd_count) == 0))
				goto ordered;
		} else
			goto ordered;
#endif
		if (likely(tgt_dev->num_free_sn_slots >= 0)) {
			/*
			 * atomic_inc_return() implies a memory barrier to
			 * sync with scst_inc_expected_sn()
			 */
			if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
				tgt_dev->curr_sn++;
				TRACE_SN("Incremented curr_sn %ld",
					tgt_dev->curr_sn);
			}
			cmd->sn_slot = tgt_dev->cur_sn_slot;
			cmd->sn = tgt_dev->curr_sn;

			tgt_dev->prev_cmd_ordered = 0;
		} else {
			TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
				"%zd", ARRAY_SIZE(tgt_dev->sn_slots));
			goto ordered;
		}
		break;

	case SCST_CMD_QUEUE_ORDERED:
		TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
ordered:
		if (!tgt_dev->prev_cmd_ordered) {
			spin_lock_irqsave(&tgt_dev->sn_lock, flags);
			if (tgt_dev->num_free_sn_slots >= 0) {
				tgt_dev->num_free_sn_slots--;
				if (tgt_dev->num_free_sn_slots >= 0) {
					int i = 0;
					/*
					 * Commands can finish in any order,
					 * so we don't know which slot is
					 * empty.
					 */
					while (1) {
						tgt_dev->cur_sn_slot++;
						if (tgt_dev->cur_sn_slot ==
						    tgt_dev->sn_slots +
						    ARRAY_SIZE(tgt_dev->sn_slots))
							tgt_dev->cur_sn_slot =
								tgt_dev->sn_slots;

						if (atomic_read(tgt_dev->cur_sn_slot) == 0)
							break;

						i++;
						sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
					}
					TRACE_SN("New cur SN slot %zd",
						tgt_dev->cur_sn_slot -
						tgt_dev->sn_slots);
				}
			}
			spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		}
		tgt_dev->prev_cmd_ordered = 1;
		tgt_dev->curr_sn++;
		cmd->sn = tgt_dev->curr_sn;
		break;

	case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
		TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
		spin_lock_irqsave(&tgt_dev->sn_lock, flags);
		tgt_dev->hq_cmd_count++;
		spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		cmd->hq_cmd_inced = 1;
		goto out;

	default:
		sBUG();
	}

	TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
		"num_free_sn_slots %d, prev_cmd_ordered %ld, "
		"cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
		atomic_read(tgt_dev->cur_sn_slot),
		tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
		tgt_dev->cur_sn_slot - tgt_dev->sn_slots);

	cmd->sn_set = 1;

out:
	TRACE_EXIT();
	return;
}
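
/*
 * Note: sn_slots is a small ring of reference counters, one per batch of
 * SIMPLE commands sharing the same SN. Each arriving SIMPLE command
 * increments the current slot, and the first increment of an empty slot
 * also advances curr_sn. An ORDERED command closes the current batch by
 * moving cur_sn_slot to the next empty slot, so scst_inc_expected_sn()
 * can only advance expected_sn once every command counted in a slot has
 * finished.
 */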

/*
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 *
 * No locks, but might be on IRQ; protection is done by the
 * suspended activity.
 */
static int scst_translate_lun(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	int res;

	TRACE_ENTRY();

	__scst_get(1);

	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
		TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
			(long long unsigned int)cmd->lun);
		res = -1;
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (tgt_dev->lun == cmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);

				if (unlikely(tgt_dev->dev->handler ==
						&scst_null_devtype)) {
					PRINT_INFO("Dev handler for device "
						"%lld is NULL, the device will "
						"not be visible remotely",
						(long long unsigned int)cmd->lun);
					break;
				}

				cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
				cmd->tgt_dev = tgt_dev;
				cmd->dev = tgt_dev->dev;

				res = 0;
				break;
			}
		}
		if (res != 0) {
			TRACE(TRACE_MINOR,
				"tgt_dev for lun %lld not found, command to "
				"nonexistent LU?",
				(long long unsigned int)cmd->lun);
			__scst_put();
		}
	} else {
		TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
		__scst_put();
		res = 1;
	}

	TRACE_EXIT_RES(res);
	return res;
}

/*
 * No locks, but might be on IRQ.
 *
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 */
static int __scst_init_cmd(struct scst_cmd *cmd)
{
	int res = 0;

	TRACE_ENTRY();

	res = scst_translate_lun(cmd);
	if (likely(res == 0)) {
		int cnt;
		bool failure = false;

		cmd->state = SCST_CMD_STATE_PRE_PARSE;

		cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
			TRACE(TRACE_MGMT_MINOR,
				"Too many pending commands (%d) in "
				"session, returning BUSY to initiator \"%s\"",
				cnt, (cmd->sess->initiator_name[0] == '\0') ?
					"Anonymous" : cmd->sess->initiator_name);
			failure = true;
		}

		cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
			if (!failure) {
				TRACE(TRACE_MGMT_MINOR,
					"Too many pending device "
					"commands (%d), returning BUSY to "
					"initiator \"%s\"", cnt,
					(cmd->sess->initiator_name[0] == '\0') ?
						"Anonymous" :
						cmd->sess->initiator_name);
				failure = true;
			}
		}

		/* If expected values are not set, the expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_inc(&cmd->dev->write_cmd_count);

		if (unlikely(failure))
			goto out_busy;

		if (!cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
	} else if (res < 0) {
		TRACE_DBG("Finishing cmd %p", cmd);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		scst_set_cmd_abnormal_done_state(cmd);
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_busy:
	scst_set_busy(cmd);
	scst_set_cmd_abnormal_done_state(cmd);
	goto out;
}
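
/*
 * Note: the two counters incremented above implement per-tgt_dev and
 * per-device command throttling. Even when a limit is exceeded the command
 * stays counted (the matching decrements in scst_pre_xmit_response() keep
 * the balance); it just completes immediately with BUSY status via the
 * out_busy path.
 */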

/* Called under scst_init_lock and with IRQs disabled */
static void scst_do_job_init(void)
	__releases(&scst_init_lock)
	__acquires(&scst_init_lock)
{
	struct scst_cmd *cmd;
	int susp;

	TRACE_ENTRY();

restart:
	/*
	 * There is no need for a read barrier here, because we don't care
	 * where this check will be done.
	 */
	susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	if (scst_init_poll_cnt > 0)
		scst_init_poll_cnt--;

	list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
		int rc;
		if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			continue;
		if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			spin_unlock_irq(&scst_init_lock);
			rc = __scst_init_cmd(cmd);
			spin_lock_irq(&scst_init_lock);
			if (rc > 0) {
				TRACE_MGMT_DBG("%s",
					"FLAG SUSPENDED set, restarting");
				goto restart;
			}
		} else {
			TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
				cmd, (long long unsigned int)cmd->tag);
			scst_set_cmd_abnormal_done_state(cmd);
		}

		/*
		 * Deleting the cmd from the init cmd list after
		 * __scst_init_cmd() is necessary to keep the check in
		 * scst_init_cmd() correct and to preserve the order of
		 * commands.
		 *
		 * We don't care about the race when the init cmd list is
		 * empty and one command detects that it just was not empty,
		 * so it's inserting itself into it, while another command at
		 * the same time sees the init cmd list empty and goes
		 * directly, because it could affect only commands from the
		 * same initiator to the same tgt_dev, and init_cmd_done()
		 * doesn't guarantee the order of simultaneous calls anyway.
		 */
		TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);

		list_del(&cmd->cmd_list_entry);
		spin_unlock(&scst_init_lock);

		spin_lock(&cmd->cmd_lists->cmd_list_lock);
		TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock(&cmd->cmd_lists->cmd_list_lock);

		spin_lock(&scst_init_lock);
		goto restart;
	}

	/* It isn't really needed, but let's keep it */
	if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
		goto restart;

	TRACE_EXIT();
	return;
}

static inline int test_init_cmd_list(void)
{
	int res = (!list_empty(&scst_init_cmd_list) &&
		   !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
		  unlikely(kthread_should_stop()) ||
		  (scst_init_poll_cnt > 0);
	return res;
}

int scst_init_cmd_thread(void *arg)
{
	TRACE_ENTRY();

	PRINT_INFO("Init thread started, PID %d", current->pid);

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -10);

	spin_lock_irq(&scst_init_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_init_cmd_list()) {
			add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
						 &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_init_cmd_list())
					break;
				spin_unlock_irq(&scst_init_lock);
				schedule();
				spin_lock_irq(&scst_init_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
		}
		scst_do_job_init();
	}
	spin_unlock_irq(&scst_init_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be
	 * on the module unload, so scst_init_cmd_list must be empty.
	 */
	sBUG_ON(!list_empty(&scst_init_cmd_list));

	PRINT_INFO("Init thread PID %d finished", current->pid);

	TRACE_EXIT();
	return 0;
}
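
/*
 * Note: the wait loop above is an open-coded wait_event(): the wakeup
 * condition is re-checked under scst_init_lock before every sleep, so a
 * wakeup arriving between the check and schedule() cannot be lost.
 */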

/* Called with no locks held */
void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic)
{
	int res;

	TRACE_ENTRY();

	EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());

	cmd->atomic = atomic;

	TRACE_DBG("cmd %p, atomic %d", cmd, atomic);

	do {
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
			res = scst_pre_parse(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_DEV_PARSE:
			res = scst_parse_cmd(cmd);
			break;

		case SCST_CMD_STATE_PREPARE_SPACE:
			res = scst_prepare_space(cmd);
			break;

		case SCST_CMD_STATE_RDY_TO_XFER:
			res = scst_rdy_to_xfer(cmd);
			break;

		case SCST_CMD_STATE_TGT_PRE_EXEC:
			res = scst_tgt_pre_exec(cmd);
			break;

		case SCST_CMD_STATE_SEND_FOR_EXEC:
			if (tm_dbg_check_cmd(cmd) != 0) {
				res = SCST_CMD_STATE_RES_CONT_NEXT;
				TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
					"because of TM DBG delay", cmd,
					(long long unsigned int)cmd->tag);
				break;
			}
			res = scst_send_for_exec(&cmd);
			/*
			 * !! At this point cmd, sess & tgt_dev can already be
			 * freed !!
			 */
			break;

		case SCST_CMD_STATE_LOCAL_EXEC:
			res = scst_local_exec(cmd);
			/*
			 * !! At this point cmd, sess & tgt_dev can already be
			 * freed !!
			 */
			break;

		case SCST_CMD_STATE_REAL_EXEC:
			res = scst_real_exec(cmd);
			/*
			 * !! At this point cmd, sess & tgt_dev can already be
			 * freed !!
			 */
			break;

		case SCST_CMD_STATE_PRE_DEV_DONE:
			res = scst_pre_dev_done(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
			res = scst_mode_select_checks(cmd);
			break;

		case SCST_CMD_STATE_DEV_DONE:
			res = scst_dev_done(&cmd);
			break;

		case SCST_CMD_STATE_PRE_XMIT_RESP:
			res = scst_pre_xmit_response(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_XMIT_RESP:
			res = scst_xmit_response(cmd);
			break;

		case SCST_CMD_STATE_FINISHED:
			res = scst_finish_cmd(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		default:
			PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
				"be", cmd, cmd->state);
			sBUG();
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			break;
		}
	} while (res == SCST_CMD_STATE_RES_CONT_SAME);

	if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		/* None */
	} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
		case SCST_CMD_STATE_DEV_PARSE:
		case SCST_CMD_STATE_PREPARE_SPACE:
		case SCST_CMD_STATE_RDY_TO_XFER:
		case SCST_CMD_STATE_TGT_PRE_EXEC:
		case SCST_CMD_STATE_SEND_FOR_EXEC:
		case SCST_CMD_STATE_LOCAL_EXEC:
		case SCST_CMD_STATE_REAL_EXEC:
		case SCST_CMD_STATE_PRE_DEV_DONE:
		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
		case SCST_CMD_STATE_DEV_DONE:
		case SCST_CMD_STATE_PRE_XMIT_RESP:
		case SCST_CMD_STATE_XMIT_RESP:
		case SCST_CMD_STATE_FINISHED:
			TRACE_DBG("Adding cmd %p to head of active cmd list",
				cmd);
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
			break;
#ifdef CONFIG_SCST_EXTRACHECKS
		/* not very valid commands */
		case SCST_CMD_STATE_DEFAULT:
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			PRINT_CRIT_ERROR("cmd %p is in invalid state %d",
				cmd, cmd->state);
			spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
			sBUG();
			spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
			break;
#endif
		default:
			sBUG();
		}
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
	} else
		sBUG();

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_process_active_cmd);
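
/*
 * Note: scst_process_active_cmd() is the core dispatcher of the command
 * state machine: it runs states back-to-back while they return
 * SCST_CMD_STATE_RES_CONT_SAME, and when a state asks for thread context it
 * requeues the command to the head of its active list and wakes a
 * processing thread, which is why it may be called both from those threads
 * and directly from target drivers.
 */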

/* Called under cmd_list_lock and with IRQs disabled */
static void scst_do_job_active(struct list_head *cmd_list,
	spinlock_t *cmd_list_lock, bool atomic)
	__releases(cmd_list_lock)
	__acquires(cmd_list_lock)
{
	TRACE_ENTRY();

	while (!list_empty(cmd_list)) {
		struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
					cmd_list_entry);
		TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
		list_del(&cmd->cmd_list_entry);
		spin_unlock_irq(cmd_list_lock);
		scst_process_active_cmd(cmd, atomic);
		spin_lock_irq(cmd_list_lock);
	}

	TRACE_EXIT();
	return;
}

static inline int test_cmd_lists(struct scst_cmd_lists *p_cmd_lists)
{
	int res = !list_empty(&p_cmd_lists->active_cmd_list) ||
		  unlikely(kthread_should_stop()) ||
		  tm_dbg_is_release();
	return res;
}

int scst_cmd_thread(void *arg)
{
	struct scst_cmd_lists *p_cmd_lists = (struct scst_cmd_lists *)arg;

	TRACE_ENTRY();

	PRINT_INFO("Processing thread started, PID %d", current->pid);

#if 0
	set_user_nice(current, 10);
#endif
	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&p_cmd_lists->cmd_list_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_cmd_lists(p_cmd_lists)) {
			add_wait_queue_exclusive(&p_cmd_lists->cmd_list_waitQ,
				&wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_cmd_lists(p_cmd_lists))
					break;
				spin_unlock_irq(&p_cmd_lists->cmd_list_lock);