4 * Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, version 2
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <linux/unistd.h>
27 #include <linux/string.h>
28 #include <linux/kthread.h>
29 #include <linux/delay.h>
32 #include "scst_priv.h"
34 static void scst_cmd_set_sn(struct scst_cmd *cmd);
35 static int __scst_init_cmd(struct scst_cmd *cmd);
36 static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
38 static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
40 struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
43 spin_lock_irqsave(&t->tasklet_lock, flags);
44 TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
46 list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
47 spin_unlock_irqrestore(&t->tasklet_lock, flags);
49 tasklet_schedule(&t->tasklet);
53 * Must not be called in parallel with scst_unregister_session_ex() for the
56 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
57 const uint8_t *lun, int lun_len,
58 const uint8_t *cdb, int cdb_len, int atomic)
65 if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
66 PRINT_CRIT_ERROR("%s", "New cmd while shutting down the session");
71 cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
77 cmd->tgtt = sess->tgt->tgtt;
79 cmd->start_time = jiffies;
82 * For both a wrong LUN and a wrong CDB defer the error reporting to
83 * scst_cmd_init_done()
86 cmd->lun = scst_unpack_lun(lun, lun_len);
88 if (cdb_len <= SCST_MAX_CDB_SIZE) {
89 memcpy(cmd->cdb, cdb, cdb_len);
90 cmd->cdb_len = cdb_len;
93 TRACE_DBG("cmd %p, sess %p", cmd, sess);
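/*
 * Illustrative sketch, not part of this file: how a target driver
 * typically feeds a new command into SCST. The my_conn_* names and
 * struct my_conn are hypothetical; scst_rx_cmd(), scst_cmd_set_tag()
 * and scst_cmd_init_done() are the real SCST calls used here.
 */
#if 0
static void my_conn_rx_scsi_cmd(struct my_conn *conn, uint64_t tag,
	const uint8_t *lun, int lun_len, const uint8_t *cdb, int cdb_len)
{
	struct scst_cmd *cmd;

	/* Atomic allocation, since this may run in SIRQ context */
	cmd = scst_rx_cmd(conn->scst_sess, lun, lun_len, cdb, cdb_len, 1);
	if (cmd == NULL)
		return; /* allocation failed or session is shutting down */

	scst_cmd_set_tag(cmd, tag);

	/* Hand the command over to SCST; errors deferred above are
	 * reported from scst_cmd_init_done() on */
	scst_cmd_init_done(cmd, SCST_CONTEXT_TASKLET);
}
#endif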
101 static int scst_init_cmd(struct scst_cmd *cmd, int context)
107 /* See the comment in scst_do_job_init() */
108 if (unlikely(!list_empty(&scst_init_cmd_list))) {
109 TRACE_MGMT_DBG("%s", "init cmd list busy");
113 * Memory barrier isn't necessary here, because CPU appears to
117 rc = __scst_init_cmd(cmd);
118 if (unlikely(rc > 0))
120 else if (unlikely(rc != 0))
123 /* Small context optimization */
124 if (((context == SCST_CONTEXT_TASKLET) ||
125 (context == SCST_CONTEXT_DIRECT_ATOMIC)) &&
126 scst_cmd_is_expected_set(cmd)) {
127 if (cmd->expected_data_direction == SCST_DATA_WRITE) {
128 if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
129 &cmd->tgt_dev->tgt_dev_flags))
130 context = SCST_CONTEXT_THREAD;
132 if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
133 &cmd->tgt_dev->tgt_dev_flags))
134 context = SCST_CONTEXT_THREAD;
139 TRACE_EXIT_RES(context);
143 if (cmd->preprocessing_only) {
145 * Poor man's solution for single-threaded targets, where
146 * blocking the receiver at least sometimes means blocking all.
148 sBUG_ON(context != SCST_CONTEXT_DIRECT);
150 cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
151 /* Keep the initiator away from too many BUSY commands */
152 if (!in_interrupt() && !in_atomic())
158 spin_lock_irqsave(&scst_init_lock, flags);
159 TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
160 "%d)", cmd, atomic_read(&scst_cmd_count));
161 list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
162 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
163 scst_init_poll_cnt++;
164 spin_unlock_irqrestore(&scst_init_lock, flags);
165 wake_up(&scst_init_cmd_list_waitQ);
171 #ifdef MEASURE_LATENCY
172 static inline uint64_t scst_sec_to_nsec(time_t sec)
174 return (uint64_t)sec * 1000000000;
178 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
181 struct scst_session *sess = cmd->sess;
185 #ifdef MEASURE_LATENCY
189 cmd->start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
190 TRACE_DBG("cmd %p (sess %p): start %Ld (tv_sec %ld, "
191 "tv_nsec %ld)", cmd, sess, cmd->start, ts.tv_sec,
196 TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
197 TRACE(TRACE_SCSI, "tag=%llu, lun=%Ld, CDB len=%d",
198 (long long unsigned int)cmd->tag,
199 (long long unsigned int)cmd->lun,
201 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
202 cmd->cdb, cmd->cdb_len);
205 if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
206 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
207 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
208 "SCST_CONTEXT_TASKLET instead\n", pref_context,
210 pref_context = SCST_CONTEXT_TASKLET;
214 atomic_inc(&sess->sess_cmd_count);
216 spin_lock_irqsave(&sess->sess_list_lock, flags);
218 if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
220 * We have to always keep the command in the search list from the
221 * very beginning, because otherwise it can be missed during
222 * TM processing. This check is needed because there can be both
223 * old, i.e. deferred, commands and new, i.e. just arriving, ones.
225 if (cmd->search_cmd_list_entry.next == NULL)
226 list_add_tail(&cmd->search_cmd_list_entry,
227 &sess->search_cmd_list);
228 switch (sess->init_phase) {
229 case SCST_SESS_IPH_SUCCESS:
231 case SCST_SESS_IPH_INITING:
232 TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
233 list_add_tail(&cmd->cmd_list_entry,
234 &sess->init_deferred_cmd_list);
235 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
237 case SCST_SESS_IPH_FAILED:
238 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
240 cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
246 list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
248 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
250 if (unlikely(cmd->lun == (lun_t)-1)) {
251 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
252 scst_set_cmd_error(cmd,
253 SCST_LOAD_SENSE(scst_sense_lun_not_supported));
254 cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
258 if (unlikely(cmd->cdb_len == 0)) {
259 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
260 scst_set_cmd_error(cmd,
261 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
262 cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
266 if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
267 PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
268 scst_set_cmd_error(cmd,
269 SCST_LOAD_SENSE(scst_sense_invalid_message));
270 cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
274 cmd->state = SCST_CMD_STATE_INIT;
275 /* cmd must be initialized here to keep the order */
276 pref_context = scst_init_cmd(cmd, pref_context);
277 if (unlikely(pref_context < 0))
281 /* Here cmd must not be in any cmd list, no locks */
282 switch (pref_context) {
283 case SCST_CONTEXT_TASKLET:
284 scst_schedule_tasklet(cmd);
287 case SCST_CONTEXT_DIRECT:
288 case SCST_CONTEXT_DIRECT_ATOMIC:
289 scst_process_active_cmd(cmd, pref_context);
290 /* For *NEED_THREAD wake_up() is already done */
294 PRINT_ERROR("Context %x is undefined, using the thread one",
297 case SCST_CONTEXT_THREAD:
298 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
299 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
300 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
301 list_add(&cmd->cmd_list_entry,
302 &cmd->cmd_lists->active_cmd_list);
304 list_add_tail(&cmd->cmd_list_entry,
305 &cmd->cmd_lists->active_cmd_list);
306 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
307 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
316 static int scst_pre_parse(struct scst_cmd *cmd)
318 int res = SCST_CMD_STATE_RES_CONT_SAME;
319 struct scst_device *dev = cmd->dev;
324 cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
325 (!dev->has_own_order_mgmt &&
326 ((dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) ||
327 (cmd->queue_type == SCST_CMD_QUEUE_ORDERED)));
329 sBUG_ON(cmd->internal);
332 * Expected transfer data supplied by the SCSI transport via the
333 * target driver are untrusted, so we prefer to fetch them from the CDB.
334 * Additionally, not all transports support supplying the expected
338 rc = scst_get_cdb_info(cmd);
339 if (unlikely(rc != 0)) {
341 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
344 PRINT_ERROR("Unknown opcode 0x%02x for %s. "
345 "Should you update scst_scsi_op_table?",
346 cmd->cdb[0], dev->handler->name);
347 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
348 #ifdef USE_EXPECTED_VALUES
349 if (scst_cmd_is_expected_set(cmd)) {
350 TRACE(TRACE_SCSI, "Using initiator supplied values: "
351 "direction %d, transfer_len %d",
352 cmd->expected_data_direction,
353 cmd->expected_transfer_len);
354 cmd->data_direction = cmd->expected_data_direction;
356 cmd->bufflen = cmd->expected_transfer_len;
357 /* Restore (likely) lost CDB length */
358 cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
359 if (cmd->cdb_len == -1) {
360 PRINT_ERROR("Unable to get CDB length for "
361 "opcode 0x%02x. Returning INVALID "
362 "OPCODE", cmd->cdb[0]);
363 scst_set_cmd_error(cmd,
364 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
368 PRINT_ERROR("Unknown opcode 0x%02x for %s, and "
369 "target %s did not supply expected values",
370 cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
371 scst_set_cmd_error(cmd,
372 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
376 scst_set_cmd_error(cmd,
377 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
381 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
382 "set %s), transfer_len=%d (expected len %d), flags=%d",
383 cmd->op_name, cmd->data_direction,
384 cmd->expected_data_direction,
385 scst_cmd_is_expected_set(cmd) ? "yes" : "no",
386 cmd->bufflen, cmd->expected_transfer_len, cmd->op_flags);
388 if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
389 if (scst_cmd_is_expected_set(cmd)) {
391 * Command data length can't be easily
392 * determined from the CDB. ToDo: all such
393 * commands should be fixed. Until they are
394 * fixed, get it from the supplied expected
395 * value, but limit it to some reasonable
398 cmd->bufflen = min(cmd->expected_transfer_len,
400 cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
406 if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
407 PRINT_ERROR("NACA bit in the CDB control byte is not supported "
408 "(opcode 0x%02x)", cmd->cdb[0]);
409 scst_set_cmd_error(cmd,
410 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
414 if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
415 PRINT_ERROR("Linked commands are not supported "
416 "(opcode 0x%02x)", cmd->cdb[0]);
417 scst_set_cmd_error(cmd,
418 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
422 cmd->state = SCST_CMD_STATE_DEV_PARSE;
429 cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
430 res = SCST_CMD_STATE_RES_CONT_SAME;
434 static int scst_parse_cmd(struct scst_cmd *cmd)
436 int res = SCST_CMD_STATE_RES_CONT_SAME;
438 struct scst_device *dev = cmd->dev;
439 int orig_bufflen = cmd->bufflen;
443 if (likely(!scst_is_cmd_local(cmd))) {
444 TRACE_DBG("Calling dev handler %s parse(%p)",
445 dev->handler->name, cmd);
446 TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
447 state = dev->handler->parse(cmd);
448 /* Caution: cmd can be already dead here */
449 TRACE_DBG("Dev handler %s parse() returned %d",
450 dev->handler->name, state);
453 case SCST_CMD_STATE_NEED_THREAD_CTX:
454 TRACE_DBG("Dev handler %s parse() requested thread "
455 "context, rescheduling", dev->handler->name);
456 res = SCST_CMD_STATE_RES_NEED_THREAD;
459 case SCST_CMD_STATE_STOP:
460 TRACE_DBG("Dev handler %s parse() requested stop "
461 "processing", dev->handler->name);
462 res = SCST_CMD_STATE_RES_CONT_NEXT;
466 if (state == SCST_CMD_STATE_DEFAULT)
467 state = SCST_CMD_STATE_PREPARE_SPACE;
469 state = SCST_CMD_STATE_PREPARE_SPACE;
471 if (cmd->data_len == -1)
472 cmd->data_len = cmd->bufflen;
474 if (cmd->data_buf_alloced && unlikely((orig_bufflen > cmd->bufflen))) {
475 PRINT_ERROR("Dev handler supplied a data buffer (size %d) "
476 "smaller than required (size %d)", cmd->bufflen,
478 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
482 if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
485 if (unlikely((cmd->bufflen == 0) &&
486 (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
487 PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
488 "(handler %s, target %s)", cmd->cdb[0],
489 dev->handler->name, cmd->tgtt->name);
490 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
495 if ((cmd->bufflen != 0) &&
496 ((cmd->data_direction == SCST_DATA_NONE) ||
497 ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
498 PRINT_ERROR("Dev handler %s parse() returned "
499 "invalid cmd data_direction %d, bufflen %d, state %d "
500 "or sg %p (opcode 0x%x)", dev->handler->name,
501 cmd->data_direction, cmd->bufflen, state, cmd->sg,
503 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
508 if (scst_cmd_is_expected_set(cmd)) {
509 #ifdef USE_EXPECTED_VALUES
511 if ((cmd->data_direction != cmd->expected_data_direction) ||
512 (cmd->bufflen != cmd->expected_transfer_len)) {
513 PRINT_ERROR("Expected values don't match decoded ones: "
514 "data_direction %d, expected_data_direction %d, "
515 "bufflen %d, expected_transfer_len %d",
516 cmd->data_direction, cmd->expected_data_direction,
517 cmd->bufflen, cmd->expected_transfer_len);
518 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
521 cmd->data_direction = cmd->expected_data_direction;
522 cmd->bufflen = cmd->expected_transfer_len;
524 if (unlikely(cmd->data_direction != cmd->expected_data_direction)) {
525 if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
526 (cmd->bufflen != 0)) &&
527 /* Crazy VMware people sometimes do TUR with READ direction */
528 !(cmd->cdb[0] == TEST_UNIT_READY)) {
529 PRINT_ERROR("Expected data direction %d for opcode "
530 "0x%02x (handler %s, target %s) doesn't match "
531 "decoded value %d", cmd->expected_data_direction,
532 cmd->cdb[0], dev->handler->name,
533 cmd->tgtt->name, cmd->data_direction);
534 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
535 scst_set_cmd_error(cmd,
536 SCST_LOAD_SENSE(scst_sense_invalid_message));
540 if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
541 TRACE(TRACE_MINOR, "Warning: expected transfer length "
542 "%d for opcode 0x%02x (handler %s, target %s) "
543 "doesn't match the decoded value %d. Faulty "
544 "initiator (e.g. VMware is known to be such), "
545 "or should scst_scsi_op_table be updated?",
546 cmd->expected_transfer_len, cmd->cdb[0],
547 dev->handler->name, cmd->tgtt->name,
549 PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB", cmd->cdb,
555 if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
556 PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
557 "target %s", cmd->cdb[0], dev->handler->name,
559 PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
565 case SCST_CMD_STATE_PREPARE_SPACE:
566 case SCST_CMD_STATE_PRE_PARSE:
567 case SCST_CMD_STATE_DEV_PARSE:
568 case SCST_CMD_STATE_RDY_TO_XFER:
569 case SCST_CMD_STATE_TGT_PRE_EXEC:
570 case SCST_CMD_STATE_SEND_TO_MIDLEV:
571 case SCST_CMD_STATE_PRE_DEV_DONE:
572 case SCST_CMD_STATE_DEV_DONE:
573 case SCST_CMD_STATE_PRE_XMIT_RESP:
574 case SCST_CMD_STATE_XMIT_RESP:
575 case SCST_CMD_STATE_FINISHED:
577 res = SCST_CMD_STATE_RES_CONT_SAME;
582 PRINT_ERROR("Dev handler %s parse() returned "
583 "invalid cmd state %d (opcode %d)",
584 dev->handler->name, state, cmd->cdb[0]);
586 PRINT_ERROR("Dev handler %s parse() returned "
587 "error %d (opcode %d)", dev->handler->name,
593 if (cmd->resp_data_len == -1) {
594 if (cmd->data_direction == SCST_DATA_READ)
595 cmd->resp_data_len = cmd->bufflen;
597 cmd->resp_data_len = 0;
601 TRACE_EXIT_HRES(res);
605 /* dev_done() will be called as part of the regular cmd's finish */
606 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
608 #ifndef USE_EXPECTED_VALUES
611 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
612 res = SCST_CMD_STATE_RES_CONT_SAME;
616 static int scst_prepare_space(struct scst_cmd *cmd)
618 int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
622 if (cmd->data_direction == SCST_DATA_NONE)
625 if (cmd->data_buf_tgt_alloc) {
626 int orig_bufflen = cmd->bufflen;
628 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
630 r = cmd->tgtt->alloc_data_buf(cmd);
634 if (unlikely(cmd->bufflen == 0)) {
635 /* See comment in scst_alloc_space() */
639 cmd->data_buf_alloced = 1;
640 if (unlikely(orig_bufflen < cmd->bufflen)) {
641 PRINT_ERROR("Target driver allocated a data "
642 "buffer (size %d) smaller than "
643 "required (size %d)", orig_bufflen,
647 TRACE_MEM("%s", "data_buf_alloced, returning");
653 if (!cmd->data_buf_alloced)
654 r = scst_alloc_space(cmd);
656 TRACE_MEM("%s", "data_buf_alloced set, returning");
660 if (scst_cmd_atomic(cmd)) {
661 TRACE_MEM("%s", "Atomic memory allocation failed, "
662 "rescheduling to the thread");
663 res = SCST_CMD_STATE_RES_NEED_THREAD;
670 if (cmd->preprocessing_only) {
671 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
672 TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
674 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
675 res = SCST_CMD_STATE_RES_CONT_SAME;
679 res = SCST_CMD_STATE_RES_CONT_NEXT;
680 cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;
682 TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
683 cmd->tgtt->preprocessing_done(cmd);
684 TRACE_DBG("%s", "preprocessing_done() returned");
689 switch (cmd->data_direction) {
690 case SCST_DATA_WRITE:
691 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
695 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
700 TRACE_EXIT_HRES(res);
704 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
705 "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
707 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
708 res = SCST_CMD_STATE_RES_CONT_SAME;
712 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
713 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
714 res = SCST_CMD_STATE_RES_CONT_SAME;
718 void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
722 TRACE_DBG("Preferred context: %d", pref_context);
723 TRACE_DBG("tag=%llu, status=%#x",
724 (long long unsigned int)scst_cmd_get_tag(cmd),
728 if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
729 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
730 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
731 "SCST_CONTEXT_TASKLET instead\n", pref_context,
733 pref_context = SCST_CONTEXT_TASKLET;
738 case SCST_PREPROCESS_STATUS_SUCCESS:
739 switch (cmd->data_direction) {
740 case SCST_DATA_WRITE:
741 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
744 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
747 if (cmd->set_sn_on_restart_cmd)
748 scst_cmd_set_sn(cmd);
749 /* Small context optimization */
750 if ((pref_context == SCST_CONTEXT_TASKLET) ||
751 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
752 if (cmd->data_direction == SCST_DATA_WRITE) {
753 if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
754 &cmd->tgt_dev->tgt_dev_flags))
755 pref_context = SCST_CONTEXT_THREAD;
757 if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
758 &cmd->tgt_dev->tgt_dev_flags))
759 pref_context = SCST_CONTEXT_THREAD;
764 case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
765 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
768 case SCST_PREPROCESS_STATUS_ERROR_FATAL:
769 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
771 case SCST_PREPROCESS_STATUS_ERROR:
772 scst_set_cmd_error(cmd,
773 SCST_LOAD_SENSE(scst_sense_hardw_error));
774 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
778 PRINT_ERROR("%s() received unknown status %x", __func__,
780 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
784 scst_proccess_redirect_cmd(cmd, pref_context, 1);
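/*
 * Illustrative sketch, hypothetical driver code: a target driver that
 * set cmd->preprocessing_only resumes the command from its
 * preprocessing_done() callback via the real scst_restart_cmd() above.
 */
#if 0
static void my_tgt_preprocessing_done(struct scst_cmd *cmd)
{
	/* ... driver-specific preprocessing of the command ... */

	/* Resume SCST processing; for *_ERROR_SENSE_SET the sense
	 * must already be set in cmd */
	scst_restart_cmd(cmd, SCST_PREPROCESS_STATUS_SUCCESS,
		SCST_CONTEXT_THREAD);
}
#endif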
791 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
793 struct scst_tgt *tgt = cmd->sess->tgt;
799 spin_lock_irqsave(&tgt->tgt_lock, flags);
802 TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
804 if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
805 /* At least one cmd finished, so try again */
807 TRACE_RETRY("Some command(s) finished, direct retry "
808 "(finished_cmds=%d, tgt->finished_cmds=%d, "
809 "retry_cmds=%d)", finished_cmds,
810 atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
815 TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
816 list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
818 if (!tgt->retry_timer_active) {
819 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
820 add_timer(&tgt->retry_timer);
821 tgt->retry_timer_active = 1;
825 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
831 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
837 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
838 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
842 if (cmd->tgtt->rdy_to_xfer == NULL) {
843 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
844 res = SCST_CMD_STATE_RES_CONT_SAME;
849 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
851 res = SCST_CMD_STATE_RES_CONT_NEXT;
852 cmd->state = SCST_CMD_STATE_DATA_WAIT;
854 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
856 if (((scst_random() % 100) == 75))
857 rc = SCST_TGT_RES_QUEUE_FULL;
860 rc = cmd->tgtt->rdy_to_xfer(cmd);
861 TRACE_DBG("rdy_to_xfer() returned %d", rc);
863 if (likely(rc == SCST_TGT_RES_SUCCESS))
866 /* Restore the previous state */
867 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
870 case SCST_TGT_RES_QUEUE_FULL:
871 if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
876 case SCST_TGT_RES_NEED_THREAD_CTX:
877 TRACE_DBG("Target driver %s "
878 "rdy_to_xfer() requested thread "
879 "context, rescheduling", cmd->tgtt->name);
880 res = SCST_CMD_STATE_RES_NEED_THREAD;
890 TRACE_EXIT_HRES(res);
894 if (rc == SCST_TGT_RES_FATAL_ERROR) {
895 PRINT_ERROR("Target driver %s rdy_to_xfer() returned a "
896 "fatal error", cmd->tgtt->name);
898 PRINT_ERROR("Target driver %s rdy_to_xfer() returned an "
899 "invalid value %d", cmd->tgtt->name, rc);
901 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
904 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
905 res = SCST_CMD_STATE_RES_CONT_SAME;
909 /* No locks, but might be in IRQ */
910 void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
917 TRACE_DBG("Context: %x", context);
919 switch (context & ~SCST_CONTEXT_PROCESSABLE) {
920 case SCST_CONTEXT_DIRECT_ATOMIC:
921 context &= ~SCST_CONTEXT_PROCESSABLE;
923 case SCST_CONTEXT_DIRECT:
925 scst_check_retries(cmd->tgt);
926 scst_process_active_cmd(cmd, context);
930 PRINT_ERROR("Context %x is unknown, using the thread one",
933 case SCST_CONTEXT_THREAD:
935 scst_check_retries(cmd->tgt);
936 spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
937 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
938 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
939 list_add(&cmd->cmd_list_entry,
940 &cmd->cmd_lists->active_cmd_list);
942 list_add_tail(&cmd->cmd_list_entry,
943 &cmd->cmd_lists->active_cmd_list);
944 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
945 spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
948 case SCST_CONTEXT_TASKLET:
950 scst_check_retries(cmd->tgt);
951 scst_schedule_tasklet(cmd);
959 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
963 TRACE_DBG("Preferred context: %d", pref_context);
964 TRACE(TRACE_SCSI, "tag=%llu status=%#x",
965 (long long unsigned int)scst_cmd_get_tag(cmd),
969 if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
970 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
971 PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
972 "SCST_CONTEXT_TASKLET instead\n", pref_context,
974 pref_context = SCST_CONTEXT_TASKLET;
979 case SCST_RX_STATUS_SUCCESS:
980 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
981 /* Small context optimization */
982 if ((pref_context == SCST_CONTEXT_TASKLET) ||
983 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
984 if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
985 &cmd->tgt_dev->tgt_dev_flags))
986 pref_context = SCST_CONTEXT_THREAD;
990 case SCST_RX_STATUS_ERROR_SENSE_SET:
991 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
994 case SCST_RX_STATUS_ERROR_FATAL:
995 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
997 case SCST_RX_STATUS_ERROR:
998 scst_set_cmd_error(cmd,
999 SCST_LOAD_SENSE(scst_sense_hardw_error));
1000 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1004 PRINT_ERROR("scst_rx_data() received unknown status %x",
1006 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1010 scst_proccess_redirect_cmd(cmd, pref_context, 1);
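/*
 * Illustrative sketch, hypothetical driver code: the counterpart of
 * rdy_to_xfer(). Once the target driver has received the write data
 * from the initiator, it resumes the command with the real
 * scst_rx_data() above.
 */
#if 0
static void my_tgt_write_data_received(struct scst_cmd *cmd, int error)
{
	scst_rx_data(cmd,
		error ? SCST_RX_STATUS_ERROR : SCST_RX_STATUS_SUCCESS,
		SCST_CONTEXT_DIRECT);
}
#endif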
1016 static int scst_tgt_pre_exec(struct scst_cmd *cmd)
1018 int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
1022 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1024 if (cmd->tgtt->pre_exec == NULL)
1027 TRACE_DBG("Calling pre_exec(%p)", cmd);
1028 rc = cmd->tgtt->pre_exec(cmd);
1029 TRACE_DBG("pre_exec() returned %d", rc);
1031 if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
1033 case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
1034 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1036 case SCST_PREPROCESS_STATUS_ERROR_FATAL:
1037 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
1039 case SCST_PREPROCESS_STATUS_ERROR:
1040 scst_set_cmd_error(cmd,
1041 SCST_LOAD_SENSE(scst_sense_hardw_error));
1042 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1044 case SCST_PREPROCESS_STATUS_NEED_THREAD:
1045 TRACE_DBG("Target driver's %s pre_exec() requested "
1046 "thread context, rescheduling", cmd->tgtt->name);
1047 res = SCST_CMD_STATE_RES_NEED_THREAD;
1048 cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
1057 TRACE_EXIT_RES(res);
1061 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
1062 const uint8_t *rq_sense, int rq_sense_len, int resid)
1066 #ifdef MEASURE_LATENCY
1069 getnstimeofday(&ts);
1070 cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
1071 TRACE_DBG("cmd %p (sess %p): post_exec_start %Ld (tv_sec %ld, "
1072 "tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start, ts.tv_sec,
1077 cmd->status = result & 0xff;
1078 cmd->msg_status = msg_byte(result);
1079 cmd->host_status = host_byte(result);
1080 cmd->driver_status = driver_byte(result);
1081 if (unlikely(resid != 0)) {
1083 if ((resid < 0) || (resid > cmd->resp_data_len)) {
1084 PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
1085 "op %x)", resid, cmd->resp_data_len,
1089 scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
1092 if (cmd->status == SAM_STAT_CHECK_CONDITION)
1093 scst_alloc_set_sense(cmd, scst_is_context_gfp_atomic(),
1094 rq_sense, rq_sense_len);
1096 TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
1097 "cmd->msg_status=%x, cmd->host_status=%x, "
1098 "cmd->driver_status=%x", result, cmd->status, resid,
1099 cmd->msg_status, cmd->host_status, cmd->driver_status);
1107 /* For small context optimization */
1108 static inline int scst_optimize_post_exec_context(struct scst_cmd *cmd,
1111 if ((context == SCST_CONTEXT_TASKLET) ||
1112 (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
1113 if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1114 &cmd->tgt_dev->tgt_dev_flags))
1115 context = SCST_CONTEXT_THREAD;
1120 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1121 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1122 struct scsi_request **req)
1124 struct scst_cmd *cmd = NULL;
1126 if (scsi_cmd && (*req = scsi_cmd->sc_request))
1127 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1130 PRINT_ERROR("%s", "Request with NULL cmd");
1132 scsi_release_request(*req);
1138 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1140 struct scsi_request *req = NULL;
1141 struct scst_cmd *cmd;
1145 cmd = scst_get_cmd(scsi_cmd, &req);
1149 scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1150 sizeof(req->sr_sense_buffer), scsi_cmd->resid);
1152 /* Clear out request structure */
1154 req->sr_sglist_len = 0;
1155 req->sr_bufflen = 0;
1156 req->sr_buffer = NULL;
1157 req->sr_underflow = 0;
1158 req->sr_request->rq_disk = NULL; /* disown request blk */
1160 scst_release_request(cmd);
1162 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1164 scst_proccess_redirect_cmd(cmd,
1165 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1171 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1172 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1174 struct scst_cmd *cmd;
1178 cmd = (struct scst_cmd *)data;
1182 scst_do_cmd_done(cmd, result, sense, SCST_SENSE_BUFFERSIZE, resid);
1184 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1186 scst_proccess_redirect_cmd(cmd,
1187 scst_optimize_post_exec_context(cmd, scst_get_context()), 0);
1193 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1195 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1201 #ifdef MEASURE_LATENCY
1204 getnstimeofday(&ts);
1205 cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
1206 TRACE_DBG("cmd %p (sess %p): post_exec_start %Ld (tv_sec %ld, "
1207 "tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start, ts.tv_sec,
1212 if (next_state == SCST_CMD_STATE_DEFAULT)
1213 next_state = SCST_CMD_STATE_PRE_DEV_DONE;
1216 if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
1219 struct scatterlist *sg = cmd->sg;
1220 TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
1221 "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
1222 for (i = 0; i < cmd->sg_cnt; ++i) {
1223 TRACE_BUFF_FLAG(TRACE_RCV_TOP,
1224 "Exec'd sg", sg_virt(&sg[i]),
1233 if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
1234 (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
1235 (next_state != SCST_CMD_STATE_FINISHED)) {
1236 PRINT_ERROR("scst_cmd_done_local() received invalid cmd "
1237 "state %d (opcode %d)", next_state, cmd->cdb[0]);
1238 scst_set_cmd_error(cmd,
1239 SCST_LOAD_SENSE(scst_sense_hardw_error));
1240 next_state = SCST_CMD_STATE_PRE_DEV_DONE;
1243 cmd->state = next_state;
1245 context = scst_optimize_post_exec_context(cmd, scst_get_context());
1246 if (cmd->context_processable)
1247 context |= SCST_CONTEXT_PROCESSABLE;
1248 scst_proccess_redirect_cmd(cmd, context, 0);
1254 static int scst_report_luns_local(struct scst_cmd *cmd)
1260 struct scst_tgt_dev *tgt_dev = NULL;
1262 int offs, overflow = 0;
1266 rc = scst_check_local_events(cmd);
1267 if (unlikely(rc != 0))
1271 cmd->msg_status = 0;
1272 cmd->host_status = DID_OK;
1273 cmd->driver_status = 0;
1275 if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
1276 PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
1277 "LUNS command", cmd->cdb[2]);
1281 buffer_size = scst_get_buf_first(cmd, &buffer);
1282 if (unlikely(buffer_size <= 0))
1285 if (buffer_size < 16)
1288 memset(buffer, 0, buffer_size);
1291 /* sess->sess_tgt_dev_list_hash is protected by suspended activity */
1292 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1293 struct list_head *sess_tgt_dev_list_head =
1294 &cmd->sess->sess_tgt_dev_list_hash[i];
1295 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
1296 sess_tgt_dev_list_entry) {
1298 if (offs >= buffer_size) {
1299 scst_put_buf(cmd, buffer);
1300 buffer_size = scst_get_buf_next(cmd, &buffer);
1301 if (buffer_size > 0) {
1302 memset(buffer, 0, buffer_size);
1309 if ((buffer_size - offs) < 8) {
1310 PRINT_ERROR("Buffer allocated for REPORT "
1311 "LUNS command is too small to fit an "
1312 "8-byte entry (buffer_size=%d)",
1314 goto out_put_hw_err;
1316 buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
1317 buffer[offs+1] = tgt_dev->lun & 0xff;
1325 scst_put_buf(cmd, buffer);
1327 /* Set the response header */
1328 buffer_size = scst_get_buf_first(cmd, &buffer);
1329 if (unlikely(buffer_size <= 0))
1332 buffer[0] = (dev_cnt >> 24) & 0xff;
1333 buffer[1] = (dev_cnt >> 16) & 0xff;
1334 buffer[2] = (dev_cnt >> 8) & 0xff;
1335 buffer[3] = dev_cnt & 0xff;
1336 scst_put_buf(cmd, buffer);
1339 if (dev_cnt < cmd->resp_data_len)
1340 scst_set_resp_data_len(cmd, dev_cnt);
1346 /* Report the result */
1347 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1350 return SCST_EXEC_COMPLETED;
1353 scst_put_buf(cmd, buffer);
1356 scst_set_cmd_error(cmd,
1357 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1361 scst_put_buf(cmd, buffer);
1362 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1366 static int scst_pre_select(struct scst_cmd *cmd)
1368 int res = SCST_EXEC_NOT_COMPLETED;
1372 if (scst_cmd_atomic(cmd)) {
1373 res = SCST_EXEC_NEED_THREAD;
1377 if (cmd->local_exec_done)
1380 cmd->local_exec_done = 1;
1382 scst_block_dev_cmd(cmd, 1);
1384 /* Checking for local events will be done when the cmd is executed */
1387 TRACE_EXIT_RES(res);
1391 static int scst_reserve_local(struct scst_cmd *cmd)
1393 int res = SCST_EXEC_NOT_COMPLETED, rc;
1394 struct scst_device *dev;
1395 struct scst_tgt_dev *tgt_dev_tmp;
1399 if (scst_cmd_atomic(cmd)) {
1400 res = SCST_EXEC_NEED_THREAD;
1404 if (cmd->local_exec_done)
1407 cmd->local_exec_done = 1;
1409 if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1410 PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
1411 "(lun=%Ld)", (long long unsigned int)cmd->lun);
1412 scst_set_cmd_error(cmd,
1413 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1419 if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
1420 scst_block_dev_cmd(cmd, 1);
1422 rc = scst_check_local_events(cmd);
1423 if (unlikely(rc != 0))
1426 spin_lock_bh(&dev->dev_lock);
1428 if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1429 spin_unlock_bh(&dev->dev_lock);
1430 scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1434 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1435 dev_tgt_dev_list_entry) {
1436 if (cmd->tgt_dev != tgt_dev_tmp)
1437 set_bit(SCST_TGT_DEV_RESERVED,
1438 &tgt_dev_tmp->tgt_dev_flags);
1440 dev->dev_reserved = 1;
1442 spin_unlock_bh(&dev->dev_lock);
1445 TRACE_EXIT_RES(res);
1449 /* Report the result */
1450 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1451 res = SCST_EXEC_COMPLETED;
1455 static int scst_release_local(struct scst_cmd *cmd)
1457 int res = SCST_EXEC_NOT_COMPLETED, rc;
1458 struct scst_tgt_dev *tgt_dev_tmp;
1459 struct scst_device *dev;
1463 if (scst_cmd_atomic(cmd)) {
1464 res = SCST_EXEC_NEED_THREAD;
1468 if (cmd->local_exec_done)
1471 cmd->local_exec_done = 1;
1475 if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
1476 scst_block_dev_cmd(cmd, 1);
1478 rc = scst_check_local_events(cmd);
1479 if (unlikely(rc != 0))
1482 spin_lock_bh(&dev->dev_lock);
1485 * The device could be RELEASED behind us, if the RESERVING session
1486 * is closed (see scst_free_tgt_dev()), but this actually doesn't
1487 * matter, so take the lock and don't retest the DEV_RESERVED bits
1489 if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1490 res = SCST_EXEC_COMPLETED;
1492 cmd->msg_status = 0;
1493 cmd->host_status = DID_OK;
1494 cmd->driver_status = 0;
1497 list_for_each_entry(tgt_dev_tmp,
1498 &dev->dev_tgt_dev_list,
1499 dev_tgt_dev_list_entry) {
1500 clear_bit(SCST_TGT_DEV_RESERVED,
1501 &tgt_dev_tmp->tgt_dev_flags);
1503 dev->dev_reserved = 0;
1506 spin_unlock_bh(&dev->dev_lock);
1508 if (res == SCST_EXEC_COMPLETED)
1512 TRACE_EXIT_RES(res);
1516 res = SCST_EXEC_COMPLETED;
1517 /* Report the result */
1518 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1522 /* No locks, no IRQ or IRQ-safe context allowed */
1523 int scst_check_local_events(struct scst_cmd *cmd)
1526 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1530 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1531 TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
1532 goto out_uncomplete;
1535 /* Reservation check before Unit Attention */
1536 if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags))) {
1537 if ((cmd->cdb[0] != INQUIRY) && (cmd->cdb[0] != REPORT_LUNS) &&
1538 (cmd->cdb[0] != RELEASE) && (cmd->cdb[0] != RELEASE_10) &&
1539 (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1540 (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1541 (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) {
1542 scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1547 /* If we had an internal bus reset, set the command error unit attention */
1548 if ((cmd->dev->scsi_dev != NULL) &&
1549 unlikely(cmd->dev->scsi_dev->was_reset)) {
1550 if (scst_is_ua_command(cmd)) {
1551 struct scst_device *dev = cmd->dev;
1553 /* Prevent more than one cmd from being triggered by was_reset */
1554 spin_lock_bh(&dev->dev_lock);
1555 barrier(); /* to reread was_reset */
1556 if (dev->scsi_dev->was_reset) {
1557 TRACE(TRACE_MGMT, "was_reset is %d", 1);
1558 scst_set_cmd_error(cmd,
1559 SCST_LOAD_SENSE(scst_sense_reset_UA));
1560 /* It looks like it is safe to clear was_reset here */
1561 dev->scsi_dev->was_reset = 0;
1564 spin_unlock_bh(&dev->dev_lock);
1571 if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
1572 &cmd->tgt_dev->tgt_dev_flags))) {
1573 if (scst_is_ua_command(cmd)) {
1574 rc = scst_set_pending_UA(cmd);
1583 TRACE_EXIT_RES(res);
1588 sBUG_ON(!cmd->completed);
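/*
 * Illustrative sketch, hypothetical handler code: a dev handler's
 * exec() calls scst_check_local_events() before doing any real work,
 * mirroring the scst_*_local() functions above. On a non-zero return
 * the command already carries its status and only needs completing.
 */
#if 0
static int my_devtype_exec(struct scst_cmd *cmd)
{
	if (unlikely(scst_check_local_events(cmd) != 0)) {
		cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
		return SCST_EXEC_COMPLETED;
	}
	/* ... execute the command and set cmd->status etc. ... */
	cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
	return SCST_EXEC_COMPLETED;
}
#endif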
1597 * The result of cmd execution, if any, should be reported
1598 * via scst_cmd_done_local()
1600 static int scst_pre_exec(struct scst_cmd *cmd)
1602 int res = SCST_EXEC_NOT_COMPLETED;
1603 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1608 * This function can be called several times for the same cmd, so it
1609 * can't change any state in a non-reentrant way or use something
1610 * like local_exec_done!!
1613 /* Check READ_ONLY device status */
1614 if (((tgt_dev->acg_dev->rd_only_flag) || cmd->dev->swp) &&
1615 (cmd->cdb[0] == WRITE_6 || /* ToDo: full list of the modify cmds */
1616 cmd->cdb[0] == WRITE_10 ||
1617 cmd->cdb[0] == WRITE_12 ||
1618 cmd->cdb[0] == WRITE_16 ||
1619 cmd->cdb[0] == WRITE_VERIFY ||
1620 cmd->cdb[0] == WRITE_VERIFY_12 ||
1621 cmd->cdb[0] == WRITE_VERIFY_16 ||
1622 (cmd->dev->handler->type == TYPE_TAPE &&
1623 (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS)))) {
1624 scst_set_cmd_error(cmd,
1625 SCST_LOAD_SENSE(scst_sense_data_protect));
1630 TRACE_EXIT_RES(res);
1634 res = SCST_EXEC_COMPLETED;
1635 /* Report the result */
1636 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1641 * The result of cmd execution, if any, should be reported
1642 * via scst_cmd_done_local()
1644 static inline int scst_local_exec(struct scst_cmd *cmd)
1646 int res = SCST_EXEC_NOT_COMPLETED;
1651 * When adding new commands here, don't forget to update
1652 * scst_is_cmd_local() in scst.h, if necessary
1655 switch (cmd->cdb[0]) {
1657 case MODE_SELECT_10:
1659 res = scst_pre_select(cmd);
1663 res = scst_reserve_local(cmd);
1667 res = scst_release_local(cmd);
1670 res = scst_report_luns_local(cmd);
1674 TRACE_EXIT_RES(res);
1678 /* cmd must be additionally referenced to not die inside */
1679 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1681 int rc = SCST_EXEC_NOT_COMPLETED;
1682 struct scst_device *dev = cmd->dev;
1683 struct scst_dev_type *handler = dev->handler;
1687 cmd->sent_to_midlev = 1;
1688 cmd->state = SCST_CMD_STATE_EXECUTING;
1689 cmd->scst_cmd_done = scst_cmd_done_local;
1691 rc = scst_pre_exec(cmd);
1692 if (rc != SCST_EXEC_NOT_COMPLETED) {
1693 if (rc == SCST_EXEC_COMPLETED)
1695 else if (rc == SCST_EXEC_NEED_THREAD)
1701 rc = scst_local_exec(cmd);
1702 if (rc != SCST_EXEC_NOT_COMPLETED) {
1703 if (rc == SCST_EXEC_COMPLETED)
1705 else if (rc == SCST_EXEC_NEED_THREAD)
1711 if (!handler->exec_sync)
1712 cmd->context_processable = 0;
1714 if (handler->exec) {
1715 TRACE_DBG("Calling dev handler %s exec(%p)",
1716 handler->name, cmd);
1717 TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1718 cmd->scst_cmd_done = scst_cmd_done_local;
1719 rc = handler->exec(cmd);
1720 TRACE_DBG("Dev handler %s exec() returned %d",
1722 if (rc == SCST_EXEC_COMPLETED)
1724 else if (rc == SCST_EXEC_NEED_THREAD)
1726 else if (rc != SCST_EXEC_NOT_COMPLETED)
1730 TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1732 if (unlikely(dev->scsi_dev == NULL)) {
1733 PRINT_ERROR("A command for a virtual device must be "
1734 "processed by the device handler (lun %Ld)!",
1735 (long long unsigned int)cmd->lun);
1739 rc = scst_check_local_events(cmd);
1740 if (unlikely(rc != 0))
1743 #ifndef ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1744 if (scst_cmd_atomic(cmd)) {
1745 TRACE_DBG("Pass-through exec() cannot be called in atomic "
1746 "context, rescheduling to the thread (handler %s)",
1748 rc = SCST_EXEC_NEED_THREAD;
1753 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1754 if (unlikely(scst_alloc_request(cmd) != 0)) {
1755 if (scst_cmd_atomic(cmd)) {
1756 rc = SCST_EXEC_NEED_THREAD;
1759 PRINT_INFO("%s", "Unable to allocate request, "
1760 "sending BUSY status");
1765 scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1766 (void *)cmd->scsi_req->sr_buffer,
1767 cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1770 rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1771 cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1772 cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1773 scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
1774 if (unlikely(rc != 0)) {
1775 if (scst_cmd_atomic(cmd)) {
1776 rc = SCST_EXEC_NEED_THREAD;
1779 PRINT_INFO("scst_exec_req() failed: %d", rc);
1785 rc = SCST_EXEC_COMPLETED;
1792 /* Restore the state */
1793 cmd->sent_to_midlev = 0;
1794 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1798 PRINT_ERROR("Dev handler %s exec() or scst_local_exec() returned "
1799 "invalid code %d", handler->name, rc);
1803 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1805 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1812 rc = SCST_EXEC_COMPLETED;
1813 /* Report the result */
1814 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1819 void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
1824 /* Optimized for lockless fast path */
1826 TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
1829 if (!atomic_dec_and_test(slot))
1832 TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
1833 tgt_dev->num_free_sn_slots);
1834 if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
1835 spin_lock_irq(&tgt_dev->sn_lock);
1836 if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
1837 if (tgt_dev->num_free_sn_slots < 0)
1838 tgt_dev->cur_sn_slot = slot;
1839 smp_mb(); /* to be in-sync with SIMPLE case in scst_cmd_set_sn() */
1840 tgt_dev->num_free_sn_slots++;
1841 TRACE_SN("Incremented num_free_sn_slots (%d)",
1842 tgt_dev->num_free_sn_slots);
1845 spin_unlock_irq(&tgt_dev->sn_lock);
1850 * No locks are needed, because only one thread at a time can
1851 * be here (serialized by sn). It is also assumed that there
1852 * can be no half-incremented halves.
1854 tgt_dev->expected_sn++;
1855 smp_mb(); /* write must be before def_cmd_count read */
1856 TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);
1862 static int scst_process_internal_cmd(struct scst_cmd *cmd)
1864 int res = SCST_CMD_STATE_RES_CONT_NEXT, rc;
1868 __scst_cmd_get(cmd);
1870 rc = scst_do_send_to_midlev(cmd);
1871 if (rc == SCST_EXEC_NEED_THREAD) {
1872 TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1873 "thread context, rescheduling");
1874 res = SCST_CMD_STATE_RES_NEED_THREAD;
1876 struct scst_device *dev = cmd->dev;
1877 sBUG_ON(rc != SCST_EXEC_COMPLETED);
1878 if (dev->scsi_dev != NULL)
1879 generic_unplug_device(dev->scsi_dev->request_queue);
1882 __scst_cmd_put(cmd);
1884 TRACE_EXIT_RES(res);
1888 static int scst_send_to_midlev(struct scst_cmd **active_cmd)
1891 struct scst_cmd *cmd = *active_cmd;
1892 struct scst_cmd *ref_cmd;
1893 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1894 struct scst_device *dev = cmd->dev;
1895 typeof(tgt_dev->expected_sn) expected_sn;
1900 res = SCST_CMD_STATE_RES_CONT_NEXT;
1902 if (unlikely(cmd->internal || cmd->retry)) {
1903 res = scst_process_internal_cmd(cmd);
1907 #ifdef MEASURE_LATENCY
1908 if (cmd->pre_exec_finish == 0) {
1910 getnstimeofday(&ts);
1911 cmd->pre_exec_finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
1912 TRACE_DBG("cmd %p (sess %p): pre_exec_finish %Ld (tv_sec %ld, "
1913 "tv_nsec %ld)", cmd, sess, cmd->pre_exec_finish, ts.tv_sec,
1918 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1922 __scst_cmd_get(ref_cmd);
1924 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
1927 sBUG_ON(!cmd->sn_set);
1929 expected_sn = tgt_dev->expected_sn;
1930 /* Optimized for lockless fast path */
1931 if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
1932 spin_lock_irq(&tgt_dev->sn_lock);
1934 tgt_dev->def_cmd_count++;
1937 expected_sn = tgt_dev->expected_sn;
1938 if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
1939 /* We are under IRQ lock, but dev->dev_lock is BH one */
1940 int cmd_blocking = scst_pre_dec_on_dev_cmd(cmd);
1941 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1942 /* Necessary to allow aborting out-of-sn cmds */
1943 TRACE_MGMT_DBG("Aborting out of sn cmd %p (tag %llu)",
1945 (long long unsigned)cmd->tag);
1946 tgt_dev->def_cmd_count--;
1947 cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
1948 res = SCST_CMD_STATE_RES_CONT_SAME;
1950 TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
1951 "expected_sn=%ld)", cmd, cmd->sn,
1952 cmd->sn_set, expected_sn);
1953 list_add_tail(&cmd->sn_cmd_list_entry,
1954 &tgt_dev->deferred_cmd_list);
1956 spin_unlock_irq(&tgt_dev->sn_lock);
1958 __scst_dec_on_dev_cmd(dev, cmd_blocking);
1962 TRACE_SN("Somebody incremented expected_sn %ld, "
1963 "continuing", expected_sn);
1964 tgt_dev->def_cmd_count--;
1965 spin_unlock_irq(&tgt_dev->sn_lock);
1972 atomic_t *slot = cmd->sn_slot;
1973 /* For HQ commands SN is not set */
1974 int inc_expected_sn = !cmd->inc_expected_sn_on_done &&
1977 rc = scst_do_send_to_midlev(cmd);
1978 if (rc == SCST_EXEC_NEED_THREAD) {
1979 TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1980 "thread context, rescheduling");
1981 res = SCST_CMD_STATE_RES_NEED_THREAD;
1982 scst_dec_on_dev_cmd(cmd);
1989 sBUG_ON(rc != SCST_EXEC_COMPLETED);
1993 if (inc_expected_sn)
1994 scst_inc_expected_sn(tgt_dev, slot);
1996 cmd = scst_check_deferred_commands(tgt_dev);
2000 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
2003 __scst_cmd_put(ref_cmd);
2005 __scst_cmd_get(ref_cmd);
2009 if (dev->scsi_dev != NULL)
2010 generic_unplug_device(dev->scsi_dev->request_queue);
2013 __scst_cmd_put(ref_cmd);
2014 /* !! At this point sess, dev and tgt_dev may already be freed !! */
2017 TRACE_EXIT_HRES(res);
2021 /* No locks supposed to be held */
2022 static int scst_check_sense(struct scst_cmd *cmd)
2025 struct scst_device *dev = cmd->dev;
2026 int dbl_ua_possible, ua_sent = 0;
2030 /* If we had an internal bus reset behind us, set the command error UA */
2031 if ((dev->scsi_dev != NULL) &&
2032 unlikely(cmd->host_status == DID_RESET) &&
2033 scst_is_ua_command(cmd)) {
2034 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
2035 dev->scsi_dev->was_reset, cmd->host_status);
2036 scst_set_cmd_error(cmd,
2037 SCST_LOAD_SENSE(scst_sense_reset_UA));
2040 /* It looks like it is safe to clear was_reset here */
2041 dev->scsi_dev->was_reset = 0;
2044 dbl_ua_possible = dev->dev_double_ua_possible;
2045 TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
2046 if (unlikely(dbl_ua_possible)) {
2047 spin_lock_bh(&dev->dev_lock);
2048 barrier(); /* to reread dev_double_ua_possible */
2049 dbl_ua_possible = dev->dev_double_ua_possible;
2050 if (dbl_ua_possible)
2051 ua_sent = dev->dev_reset_ua_sent;
2053 spin_unlock_bh(&dev->dev_lock);
2056 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2057 SCST_SENSE_VALID(cmd->sense)) {
2058 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2059 SCST_SENSE_BUFFERSIZE);
2060 /* Check Unit Attention Sense Key */
2061 if (scst_is_ua_sense(cmd->sense)) {
2062 if (cmd->sense[12] == SCST_SENSE_ASC_UA_RESET) {
2063 if (dbl_ua_possible) {
2065 TRACE(TRACE_MGMT_MINOR, "%s",
2066 "Double UA detected");
2068 TRACE(TRACE_MGMT_MINOR, "Retrying cmd %p "
2070 (long long unsigned)cmd->tag);
2072 cmd->msg_status = 0;
2073 cmd->host_status = DID_OK;
2074 cmd->driver_status = 0;
2075 mempool_free(cmd->sense, scst_sense_mempool);
2078 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
2081 * Dev is still blocked by this cmd, so
2082 * it's OK to clear SCST_DEV_SERIALIZED
2085 dev->dev_double_ua_possible = 0;
2086 dev->dev_serialized = 0;
2087 dev->dev_reset_ua_sent = 0;
2090 dev->dev_reset_ua_sent = 1;
2093 if (cmd->ua_ignore == 0) {
2094 if (unlikely(dbl_ua_possible)) {
2095 __scst_dev_check_set_UA(dev, cmd,
2097 SCST_SENSE_BUFFERSIZE);
2099 scst_dev_check_set_UA(dev, cmd,
2101 SCST_SENSE_BUFFERSIZE);
2107 if (unlikely(dbl_ua_possible)) {
2108 if (ua_sent && scst_is_ua_command(cmd)) {
2109 TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
2110 dev->dev_double_ua_possible = 0;
2111 dev->dev_serialized = 0;
2112 dev->dev_reset_ua_sent = 0;
2114 spin_unlock_bh(&dev->dev_lock);
2118 TRACE_EXIT_RES(res);
2122 spin_unlock_bh(&dev->dev_lock);
2126 static int scst_check_auto_sense(struct scst_cmd *cmd)
2132 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
2133 (!SCST_SENSE_VALID(cmd->sense) ||
2134 SCST_NO_SENSE(cmd->sense))) {
2135 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
2136 "cmd->status=%x, cmd->msg_status=%x, "
2137 "cmd->host_status=%x, cmd->driver_status=%x", cmd->status,
2138 cmd->msg_status, cmd->host_status, cmd->driver_status);
2140 } else if (unlikely(cmd->host_status)) {
2141 if ((cmd->host_status == DID_REQUEUE) ||
2142 (cmd->host_status == DID_IMM_RETRY) ||
2143 (cmd->host_status == DID_SOFT_ERROR) ||
2144 (cmd->host_status == DID_ABORT)) {
2147 TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
2148 "received, returning HARDWARE ERROR instead",
2150 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2154 TRACE_EXIT_RES(res);
2158 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
2164 if (unlikely(cmd->cdb[0] == REQUEST_SENSE)) {
2166 cmd = scst_complete_request_sense(cmd);
2167 } else if (unlikely(scst_check_auto_sense(cmd))) {
2168 PRINT_INFO("Command finished with CHECK CONDITION, but "
2169 "without sense data (opcode 0x%x), issuing "
2170 "REQUEST SENSE", cmd->cdb[0]);
2171 rc = scst_prepare_request_sense(cmd);
2177 PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
2178 "returning HARDWARE ERROR");
2179 scst_set_cmd_error(cmd,
2180 SCST_LOAD_SENSE(scst_sense_hardw_error));
2182 } else if (unlikely(scst_check_sense(cmd))) {
2183 *pres = SCST_CMD_STATE_RES_CONT_SAME;
2188 if (likely(scsi_status_is_good(cmd->status))) {
2189 unsigned char type = cmd->dev->handler->type;
2190 if (unlikely((cmd->cdb[0] == MODE_SENSE ||
2191 cmd->cdb[0] == MODE_SENSE_10)) &&
2192 cmd->tgt_dev->acg_dev->rd_only_flag &&
2193 (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
2194 type == TYPE_TAPE)) {
2198 length = scst_get_buf_first(cmd, &address);
2201 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
2202 address[2] |= 0x80; /* Write Protect*/
2203 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
2204 address[3] |= 0x80; /* Write Protect*/
2205 scst_put_buf(cmd, address);
2209 * Check and clear NormACA option for the device, if necessary,
2210 * since we don't support ACA
2212 if (unlikely((cmd->cdb[0] == INQUIRY)) &&
2213 !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
2214 (cmd->resp_data_len > SCST_INQ_BYTE3)) {
2218 /* ToDo: all pages ?? */
2219 buflen = scst_get_buf_first(cmd, &buffer);
2220 if (buflen > SCST_INQ_BYTE3) {
2222 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
2223 PRINT_INFO("NormACA set for device: "
2224 "lun=%Ld, type 0x%02x",
2225 (long long unsigned int)cmd->lun,
2229 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
2231 PRINT_ERROR("%s", "Unable to get INQUIRY "
2233 scst_set_cmd_error(cmd,
2234 SCST_LOAD_SENSE(scst_sense_hardw_error));
2237 scst_put_buf(cmd, buffer);
2240 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2241 (cmd->cdb[0] == MODE_SELECT_10) ||
2242 (cmd->cdb[0] == LOG_SELECT))) {
2243 TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded (LUN %Ld)",
2244 (long long unsigned int)cmd->lun);
2245 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2246 *pres = SCST_CMD_STATE_RES_CONT_SAME;
2251 if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
2252 if (!test_bit(SCST_TGT_DEV_RESERVED,
2253 &cmd->tgt_dev->tgt_dev_flags)) {
2254 struct scst_tgt_dev *tgt_dev_tmp;
2255 struct scst_device *dev = cmd->dev;
2257 TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, "
2259 (long long unsigned int)cmd->lun,
2261 PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
2262 SCST_SENSE_BUFFERSIZE);
2264 /* Clearing the reservation */
2265 spin_lock_bh(&dev->dev_lock);
2266 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
2267 dev_tgt_dev_list_entry) {
2268 clear_bit(SCST_TGT_DEV_RESERVED,
2269 &tgt_dev_tmp->tgt_dev_flags);
2271 dev->dev_reserved = 0;
2272 spin_unlock_bh(&dev->dev_lock);
2276 /* Check for MODE PARAMETERS CHANGED UA */
2277 if ((cmd->dev->scsi_dev != NULL) &&
2278 (cmd->status == SAM_STAT_CHECK_CONDITION) &&
2279 SCST_SENSE_VALID(cmd->sense) &&
2280 scst_is_ua_sense(cmd->sense) &&
2281 (cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) {
2283 "MODE PARAMETERS CHANGED UA (lun %Ld)",
2284 (long long unsigned int)cmd->lun);
2285 cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
2286 *pres = SCST_CMD_STATE_RES_CONT_SAME;
2293 TRACE_EXIT_RES(res);
2297 static int scst_pre_dev_done(struct scst_cmd *cmd)
2299 int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
2303 rc = scst_done_cmd_check(cmd, &res);
2307 cmd->state = SCST_CMD_STATE_DEV_DONE;
2310 TRACE_EXIT_HRES(res);
2314 static int scst_mode_select_checks(struct scst_cmd *cmd)
2316 int res = SCST_CMD_STATE_RES_CONT_SAME;
2317 int atomic = scst_cmd_atomic(cmd);
2321 if (likely(scsi_status_is_good(cmd->status))) {
2322 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
2323 (cmd->cdb[0] == MODE_SELECT_10) ||
2324 (cmd->cdb[0] == LOG_SELECT))) {
2325 struct scst_device *dev = cmd->dev;
2326 if (atomic && (dev->scsi_dev != NULL)) {
2327 TRACE_DBG("%s", "MODE/LOG SELECT: thread "
2328 "context required");
2329 res = SCST_CMD_STATE_RES_NEED_THREAD;
2333 TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
2334 "setting the SELECT UA (lun=%Ld)",
2335 (long long unsigned int)cmd->lun);
2337 spin_lock_bh(&dev->dev_lock);
2338 spin_lock(&scst_temp_UA_lock);
2339 if (cmd->cdb[0] == LOG_SELECT) {
2340 scst_set_sense(scst_temp_UA,
2341 sizeof(scst_temp_UA),
2342 UNIT_ATTENTION, 0x2a, 0x02);
2344 scst_set_sense(scst_temp_UA,
2345 sizeof(scst_temp_UA),
2346 UNIT_ATTENTION, 0x2a, 0x01);
2348 scst_dev_check_set_local_UA(dev, cmd, scst_temp_UA,
2349 sizeof(scst_temp_UA));
2350 spin_unlock(&scst_temp_UA_lock);
2351 spin_unlock_bh(&dev->dev_lock);
2353 if (dev->scsi_dev != NULL)
2354 scst_obtain_device_parameters(dev);
2356 } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
2357 SCST_SENSE_VALID(cmd->sense) &&
2358 scst_is_ua_sense(cmd->sense) &&
2359 (((cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) ||
2360 (cmd->sense[12] == 0x29) /* reset */ ||
2361 (cmd->sense[12] == 0x28) /* medium changed */ ||
2362 (cmd->sense[12] == 0x2F) /* cleared by another ini (just in case) */)) {
2364 TRACE_DBG("Possible parameters changed UA %x: "
2365 "thread context required", cmd->sense[12]);
2366 res = SCST_CMD_STATE_RES_NEED_THREAD;
2370 TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
2371 "(lun %Ld): getting new parameters", cmd->sense[12],
2372 (long long unsigned int)cmd->lun);
2374 scst_obtain_device_parameters(cmd->dev);
2378 cmd->state = SCST_CMD_STATE_DEV_DONE;
2381 TRACE_EXIT_HRES(res);
2385 static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
2387 if (likely(cmd->sn_set))
2388 scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
2390 scst_make_deferred_commands_active(cmd->tgt_dev, cmd);
static int scst_dev_done(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int state;

	TRACE_ENTRY();

	state = SCST_CMD_STATE_PRE_XMIT_RESP;
	if (likely(!scst_is_cmd_local(cmd)) &&
	    likely(cmd->dev->handler->dev_done != NULL)) {
		int rc;
		TRACE_DBG("Calling dev handler %s dev_done(%p)",
			cmd->dev->handler->name, cmd);
		rc = cmd->dev->handler->dev_done(cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
			cmd->dev->handler->name, rc);
		if (rc != SCST_CMD_STATE_DEFAULT)
			state = rc;
	}

	switch (state) {
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_MODE_SELECT_CHECKS:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		cmd->state = state;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s dev_done() requested "
			"thread context, rescheduling",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"invalid cmd state %d",
				cmd->dev->handler->name, state);
		} else {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"error %d", cmd->dev->handler->name,
				state);
		}
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;
	}

	if (cmd->needs_unblocking)
		scst_unblock_dev_cmd(cmd);

	if (likely(cmd->dec_on_dev_needed))
		scst_dec_on_dev_cmd(cmd);

	if (cmd->inc_expected_sn_on_done && cmd->sent_to_midlev)
		scst_inc_check_expected_sn(cmd);

	TRACE_EXIT_HRES(res);
	return res;
}
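
/*
 * Last stop before the response is handed to the target driver: drops
 * the per-tgt_dev and per-device command counts taken in
 * __scst_init_cmd(), removes the cmd from the session search list
 * (see the race comment in the body) and filters out aborted and
 * NO_RESP commands. With MEASURE_LATENCY it also closes the latency
 * accounting for this cmd.
 */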
static int scst_pre_xmit_response(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		}
		TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
			cmd, (long long unsigned int)cmd->tag);
		schedule_timeout_uninterruptible(HZ);
	}

	if (likely(cmd->tgt_dev != NULL)) {
		atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
		atomic_dec(&cmd->dev->dev_cmd_count);
		/* If expected values not set, expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_dec(&cmd->dev->write_cmd_count);

		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			scst_on_hq_cmd_response(cmd);

		if (unlikely(!cmd->sent_to_midlev)) {
			TRACE_SN("cmd %p was not sent to mid-lev (sn %ld, set %d)",
				cmd, cmd->sn, cmd->sn_set);
			scst_unblock_deferred(cmd->tgt_dev, cmd);
			cmd->sent_to_midlev = 1;
		}
	}

	/*
	 * If we don't remove cmd from the search list here, before
	 * submitting it for transmission, we will have a race, when for
	 * some reason cmd's release is delayed after transmission and
	 * the initiator sends a cmd with the same tag => it is possible
	 * that a wrong cmd will be found by the find() functions.
	 */
	spin_lock_irq(&cmd->sess->sess_list_lock);
	list_del(&cmd->search_cmd_list_entry);
	spin_unlock_irq(&cmd->sess->sess_list_lock);

	cmd->done = 1;
	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
		scst_xmit_process_aborted_cmd(cmd);

	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), skipping",
			cmd, (long long unsigned int)cmd->tag);
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;

out:
#ifdef MEASURE_LATENCY
	{
		struct timespec ts;
		uint64_t finish, scst_time, proc_time;
		struct scst_session *sess = cmd->sess;

		getnstimeofday(&ts);
		finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;

		spin_lock_bh(&sess->meas_lock);
		scst_time = cmd->pre_exec_finish - cmd->start;
		scst_time += finish - cmd->post_exec_start;
		proc_time = finish - cmd->start;
		sess->scst_time += scst_time;
		sess->processing_time += proc_time;
		sess->processed_cmds++;
		spin_unlock_bh(&sess->meas_lock);

		TRACE_DBG("cmd %p (sess %p): finish %Ld (tv_sec %ld, "
			"tv_nsec %ld), scst_time %Ld, proc_time %Ld", cmd, sess,
			finish, ts.tv_sec, ts.tv_nsec, scst_time, proc_time);
	}
#endif
	TRACE_EXIT_HRES(res);
	return res;
}
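
/*
 * Calls the target driver's xmit_response() and interprets the result.
 * finished_cmds is snapshotted *before* the call, so on
 * SCST_TGT_RES_QUEUE_FULL scst_queue_retry_cmd() can tell whether any
 * commands finished in the meantime: if so (it returns 0) the
 * transmission is retried immediately in the loop, otherwise the cmd
 * is left queued for a later retry and we go on to the next cmd.
 */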
static int scst_xmit_response(struct scst_cmd *cmd)
{
	int res, rc;

	TRACE_ENTRY();

	while (1) {
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_XMIT_WAIT;

		TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(DEBUG) || defined(TRACING)
		if (cmd->sg != NULL) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE_SEND_BOT("Xmitting %d S/G(s) at %p sg[0].page at %p",
				cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_SND_BOT,
					"Xmitting sg", sg_virt(&sg[i]),
					sg[i].length);
			}
		}
#endif

#ifdef DEBUG_RETRY
		if (((scst_random() % 100) == 77))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->xmit_response(cmd);
		TRACE_DBG("xmit_response() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_XMIT_RESP;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				break;
			else
				goto out;
		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s xmit_response() "
				"requested thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			goto out;
		default:
			goto out_error;
		}
	}

out:
	/* Caution: cmd can be already dead here */
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
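
/*
 * scst_tgt_cmd_done() is the target driver's notification that the
 * response for a cmd was actually sent; it is only legal after a
 * successful xmit_response(), i.e. with the cmd in the XMIT_WAIT
 * state. A minimal sketch of a driver's send-completion handler (the
 * my_tgt_* names are hypothetical):
 *
 *	static void my_tgt_response_sent(struct my_tgt_req *req)
 *	{
 *		scst_tgt_cmd_done(req->scst_cmd);
 *	}
 */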
void scst_tgt_cmd_done(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

	cmd->state = SCST_CMD_STATE_FINISHED;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);

	TRACE_EXIT();
}

static int scst_finish_cmd(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	atomic_dec(&cmd->sess->sess_cmd_count);

	cmd->finished = 1;
	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
			"scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
			atomic_read(&scst_cmd_count));
		scst_finish_cmd_mgmt(cmd);
	}

	if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
		if ((cmd->tgt_dev != NULL) &&
		    scst_is_ua_sense(cmd->sense)) {
			/* This UA delivery failed, so requeue it */
			TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd "
				"%p", cmd);
			scst_check_set_UA(cmd->tgt_dev, cmd->sense,
				SCST_SENSE_BUFFERSIZE, 1);
		}
	}

	__scst_cmd_put(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
	return res;
}
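
/*
 * Command serialization here is built on "SN slots": an array of
 * atomic counters in each tgt_dev. Every SIMPLE command increments the
 * current slot and gets the current SN, so a run of SIMPLE commands
 * shares one SN and may execute in any order. An ORDERED command bumps
 * curr_sn and (unless the previous cmd was already ORDERED) switches
 * cur_sn_slot to a free slot, so its SN can become expected only after
 * all commands counted in the previous slot have completed, see
 * scst_inc_expected_sn(). HEAD OF QUEUE commands get no SN at all and
 * are only counted in hq_cmd_count.
 */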
/*
 * No locks, but it must be externally serialized (see comment for
 * scst_cmd_init_done() in scst.h)
 */
static void scst_cmd_set_sn(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	unsigned long flags;

	TRACE_ENTRY();

	if (scst_is_implicit_hq(cmd)) {
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "Implicit HQ cmd %p", cmd);
		cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	}

	EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);

	/* Optimized for lockless fast path */

	scst_check_debug_sn(cmd);

	if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
		/*
		 * Not the best way, but good enough until there is a
		 * possibility to specify the queue type during pass-through
		 * commands submission.
		 */
		cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
	}

	switch (cmd->queue_type) {
	case SCST_CMD_QUEUE_SIMPLE:
	case SCST_CMD_QUEUE_UNTAGGED:
#if 1 /* temporary, ToDo */
		if (scst_cmd_is_expected_set(cmd)) {
			if ((cmd->expected_data_direction == SCST_DATA_READ) &&
			    (atomic_read(&cmd->dev->write_cmd_count) == 0))
				goto ordered;
		} else
			goto ordered;
#endif
		if (likely(tgt_dev->num_free_sn_slots >= 0)) {
			/*
			 * atomic_inc_return() implies a memory barrier to sync
			 * with scst_inc_expected_sn()
			 */
			if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
				tgt_dev->curr_sn++;
				TRACE_SN("Incremented curr_sn %ld",
					tgt_dev->curr_sn);
			}
			cmd->sn_slot = tgt_dev->cur_sn_slot;
			cmd->sn = tgt_dev->curr_sn;

			tgt_dev->prev_cmd_ordered = 0;
		} else {
			TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
				"%zd", ARRAY_SIZE(tgt_dev->sn_slots));
			goto ordered;
		}
		break;

	case SCST_CMD_QUEUE_ORDERED:
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "ORDERED cmd %p "
			"(op %x)", cmd, cmd->cdb[0]);
ordered:
		if (!tgt_dev->prev_cmd_ordered) {
			spin_lock_irqsave(&tgt_dev->sn_lock, flags);
			if (tgt_dev->num_free_sn_slots >= 0) {
				tgt_dev->num_free_sn_slots--;
				if (tgt_dev->num_free_sn_slots >= 0) {
					int i = 0;
					/*
					 * Commands can finish in any order, so
					 * we don't know which slot is empty.
					 */
					while (1) {
						tgt_dev->cur_sn_slot++;
						if (tgt_dev->cur_sn_slot == tgt_dev->sn_slots +
						    ARRAY_SIZE(tgt_dev->sn_slots))
							tgt_dev->cur_sn_slot = tgt_dev->sn_slots;

						if (atomic_read(tgt_dev->cur_sn_slot) == 0)
							break;

						i++;
						sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
					}
					TRACE_SN("New cur SN slot %zd",
						tgt_dev->cur_sn_slot - tgt_dev->sn_slots);
				}
			}
			spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		}
		tgt_dev->prev_cmd_ordered = 1;
		tgt_dev->curr_sn++;
		cmd->sn = tgt_dev->curr_sn;
		break;

	case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "HQ cmd %p "
			"(op %x)", cmd, cmd->cdb[0]);
		spin_lock_irqsave(&tgt_dev->sn_lock, flags);
		tgt_dev->hq_cmd_count++;
		spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		cmd->hq_cmd_inced = 1;
		goto out;

	default:
		sBUG();
	}

	TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
		"num_free_sn_slots %d, prev_cmd_ordered %ld, "
		"cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
		atomic_read(tgt_dev->cur_sn_slot),
		tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
		tgt_dev->cur_sn_slot - tgt_dev->sn_slots);

	cmd->sn_set = 1;

out:
	TRACE_EXIT();
}
/*
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 *
 * No locks, but might be on IRQ, protection is done by the
 * suspended activity.
 */
static int scst_translate_lun(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	int res = 0;

	TRACE_ENTRY();

	scst_inc_cmd_count();

	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
		TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
			(long long unsigned int)cmd->lun);
		res = -1;
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (tgt_dev->lun == cmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);

				if (unlikely(tgt_dev->dev->handler == &scst_null_devtype)) {
					PRINT_INFO("Dev handler for device "
						"%Ld is NULL, the device will not be "
						"visible, skipping",
						(long long unsigned int)cmd->lun);
					break;
				}

				cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
				cmd->tgt_dev = tgt_dev;
				cmd->dev = tgt_dev->dev;

				res = 0;
				break;
			}
		}
		if (res != 0) {
			TRACE(TRACE_MINOR, "tgt_dev for lun %Ld not found, command to "
				"unexisting LU?", (long long unsigned int)cmd->lun);
			scst_dec_cmd_count();
		}
	} else {
		TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
		scst_dec_cmd_count();
		res = 1;
	}

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * No locks, but might be on IRQ.
 *
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 */
static int __scst_init_cmd(struct scst_cmd *cmd)
{
	int res = 0;

	TRACE_ENTRY();

	res = scst_translate_lun(cmd);
	if (likely(res == 0)) {
		int cnt;
		bool failure = false;

		cmd->state = SCST_CMD_STATE_PRE_PARSE;

		cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
			TRACE(TRACE_MGMT_MINOR, "Too many pending commands (%d) in "
				"session, returning BUSY to initiator \"%s\"",
				cnt, (cmd->sess->initiator_name[0] == '\0') ?
					"Anonymous" : cmd->sess->initiator_name);
			failure = true;
		}

		cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
			if (!failure) {
				TRACE(TRACE_MGMT_MINOR, "Too many pending device "
					"commands (%d), returning BUSY to "
					"initiator \"%s\"", cnt,
					(cmd->sess->initiator_name[0] == '\0') ?
						"Anonymous" : cmd->sess->initiator_name);
				failure = true;
			}
		}

		/* If expected values not set, expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_inc(&cmd->dev->write_cmd_count);

		if (unlikely(failure))
			goto out_busy;

		if (!cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
	} else if (res < 0) {
		TRACE_DBG("Finishing cmd %p", cmd);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_busy:
	scst_set_busy(cmd);
	cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
	goto out;
}
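
/*
 * Drains scst_init_cmd_list. Commands are initialized with
 * scst_init_lock dropped, then moved to their cmd_lists' active list;
 * since the lock is released inside the loop, the list walk is
 * restarted after each moved command. While activity is suspended,
 * only aborted commands are processed.
 */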
/* Called under scst_init_lock and IRQs disabled */
static void scst_do_job_init(void)
{
	struct scst_cmd *cmd;
	int susp;

	TRACE_ENTRY();

restart:
	/*
	 * There is no need for a read barrier here, because we don't care
	 * where this check will be done.
	 */
	susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	if (scst_init_poll_cnt > 0)
		scst_init_poll_cnt--;

	list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
		int rc;
		if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			continue;
		if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			spin_unlock_irq(&scst_init_lock);
			rc = __scst_init_cmd(cmd);
			spin_lock_irq(&scst_init_lock);
			if (rc > 0) {
				TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, restarting");
				goto restart;
			}
		} else {
			TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
				cmd, (long long unsigned int)cmd->tag);
			cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
		}

		/*
		 * Deleting cmd from the init cmd list after __scst_init_cmd()
		 * is necessary to keep the check in scst_init_cmd() correct
		 * to preserve the commands order.
		 *
		 * We don't care about the race when the init cmd list is empty
		 * and one command detected that it just was not empty, so
		 * it's inserting to it, but another command at the same time
		 * seeing the init cmd list empty goes directly, because it
		 * could affect only commands from the same initiator to the
		 * same tgt_dev, but init_cmd_done() doesn't guarantee the order
		 * in case of simultaneous such calls anyway.
		 */
		TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
		list_del(&cmd->cmd_list_entry);
		spin_unlock(&scst_init_lock);

		spin_lock(&cmd->cmd_lists->cmd_list_lock);
		TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock(&cmd->cmd_lists->cmd_list_lock);

		spin_lock(&scst_init_lock);
		goto restart;
	}

	/* It isn't really needed, but let's keep it */
	if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
		goto restart;

	TRACE_EXIT();
}
static inline int test_init_cmd_list(void)
{
	int res = (!list_empty(&scst_init_cmd_list) &&
		   !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
		  unlikely(kthread_should_stop()) ||
		  (scst_init_poll_cnt > 0);
	return res;
}
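
/*
 * Both kernel threads below use the same open-coded wait pattern
 * instead of wait_event(): the condition is rechecked under the
 * corresponding lock after setting TASK_INTERRUPTIBLE, so a wakeup
 * arriving between the check and schedule() cannot be lost.
 */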
int scst_init_cmd_thread(void *arg)
{
	TRACE_ENTRY();

	PRINT_INFO("Init thread started, PID %d", current->pid);

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -10);

	spin_lock_irq(&scst_init_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_init_cmd_list()) {
			add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
						 &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_init_cmd_list())
					break;
				spin_unlock_irq(&scst_init_lock);
				schedule();
				spin_lock_irq(&scst_init_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
		}
		scst_do_job_init();
	}
	spin_unlock_irq(&scst_init_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be
	 * on the module unload, so scst_init_cmd_list must be empty.
	 */
	sBUG_ON(!list_empty(&scst_init_cmd_list));

	PRINT_INFO("Init thread PID %d finished", current->pid);

	TRACE_EXIT();
	return 0;
}
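
/*
 * The core state machine. Runs the cmd through its processing states
 * until a handler asks to take the next cmd (RES_CONT_NEXT) or needs a
 * thread context (RES_NEED_THREAD), in which case the cmd is requeued
 * at the head of its active cmd list and a processing thread is woken
 * up to continue in a non-atomic context.
 */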
/* Called with no locks held */
void scst_process_active_cmd(struct scst_cmd *cmd, int context)
{
	int res;

	TRACE_ENTRY();

	EXTRACHECKS_BUG_ON(in_irq());

	cmd->context_processable = context | SCST_CONTEXT_PROCESSABLE;
	context &= ~SCST_CONTEXT_PROCESSABLE;
	cmd->atomic = (context == SCST_CONTEXT_DIRECT_ATOMIC);

	TRACE_DBG("cmd %p, context_processable %d, atomic %d", cmd,
		cmd->context_processable, cmd->atomic);

	do {
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
			res = scst_pre_parse(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_DEV_PARSE:
			res = scst_parse_cmd(cmd);
			break;

		case SCST_CMD_STATE_PREPARE_SPACE:
			res = scst_prepare_space(cmd);
			break;

		case SCST_CMD_STATE_RDY_TO_XFER:
			res = scst_rdy_to_xfer(cmd);
			break;

		case SCST_CMD_STATE_TGT_PRE_EXEC:
			res = scst_tgt_pre_exec(cmd);
			break;

		case SCST_CMD_STATE_SEND_TO_MIDLEV:
			if (tm_dbg_check_cmd(cmd) != 0) {
				res = SCST_CMD_STATE_RES_CONT_NEXT;
				TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
					"because of TM DBG delay", cmd,
					(long long unsigned int)cmd->tag);
				break;
			}
			res = scst_send_to_midlev(&cmd);
			/* !! At this point cmd, sess & tgt_dev can be already freed !! */
			break;

		case SCST_CMD_STATE_PRE_DEV_DONE:
			res = scst_pre_dev_done(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
			res = scst_mode_select_checks(cmd);
			break;

		case SCST_CMD_STATE_DEV_DONE:
			res = scst_dev_done(cmd);
			break;

		case SCST_CMD_STATE_PRE_XMIT_RESP:
			res = scst_pre_xmit_response(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_XMIT_RESP:
			res = scst_xmit_response(cmd);
			break;

		case SCST_CMD_STATE_FINISHED:
			res = scst_finish_cmd(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		default:
			PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
				"be", cmd, cmd->state);
			sBUG();
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			break;
		}
	} while (res == SCST_CMD_STATE_RES_CONT_SAME);

	if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		/* None */
	} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
		case SCST_CMD_STATE_DEV_PARSE:
		case SCST_CMD_STATE_PREPARE_SPACE:
		case SCST_CMD_STATE_RDY_TO_XFER:
		case SCST_CMD_STATE_TGT_PRE_EXEC:
		case SCST_CMD_STATE_SEND_TO_MIDLEV:
		case SCST_CMD_STATE_PRE_DEV_DONE:
		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
		case SCST_CMD_STATE_DEV_DONE:
		case SCST_CMD_STATE_PRE_XMIT_RESP:
		case SCST_CMD_STATE_XMIT_RESP:
		case SCST_CMD_STATE_FINISHED:
			TRACE_DBG("Adding cmd %p to head of active cmd list", cmd);
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
			break;

		/* not very valid commands */
		case SCST_CMD_STATE_DEFAULT:
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			PRINT_CRIT_ERROR("cmd %p is in state %d, not putting on "
				"useful list (left on scst cmd list)", cmd,
				cmd->state);
			spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
			sBUG();
			spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
			break;
		}
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
	} else
		sBUG();

	TRACE_EXIT();
}
/* Called under cmd_list_lock and IRQs disabled */
static void scst_do_job_active(struct list_head *cmd_list,
	spinlock_t *cmd_list_lock, int context)
{
	TRACE_ENTRY();

#ifdef EXTRACHECKS
	{
		int c = context & ~SCST_CONTEXT_PROCESSABLE;
		sBUG_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) &&
			(c != SCST_CONTEXT_DIRECT));
	}
#endif

	while (!list_empty(cmd_list)) {
		struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
					cmd_list_entry);
		TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
		list_del(&cmd->cmd_list_entry);
		spin_unlock_irq(cmd_list_lock);
		scst_process_active_cmd(cmd, context);
		spin_lock_irq(cmd_list_lock);
	}

	TRACE_EXIT();
}
static inline int test_cmd_lists(struct scst_cmd_lists *p_cmd_lists)
{
	int res = !list_empty(&p_cmd_lists->active_cmd_list) ||
		  unlikely(kthread_should_stop()) ||
		  tm_dbg_is_release();
	return res;
}
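
/*
 * Main per-cmd_lists processing thread. Several such threads may serve
 * the same cmd_lists (see the nr_cmd_threads check on exit), which is
 * why add_wait_queue_exclusive() is used: a single wakeup releases
 * only one of them.
 */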
int scst_cmd_thread(void *arg)
{
	struct scst_cmd_lists *p_cmd_lists = (struct scst_cmd_lists *)arg;

	TRACE_ENTRY();

	PRINT_INFO("Processing thread started, PID %d", current->pid);

	set_user_nice(current, 10);

	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&p_cmd_lists->cmd_list_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_cmd_lists(p_cmd_lists)) {
			add_wait_queue_exclusive(&p_cmd_lists->cmd_list_waitQ,
						 &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_cmd_lists(p_cmd_lists))
					break;
				spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
				schedule();
				spin_lock_irq(&p_cmd_lists->cmd_list_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&p_cmd_lists->cmd_list_waitQ, &wait);
		}

		if (tm_dbg_is_release()) {
			spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
			tm_dbg_check_released_cmds();
			spin_lock_irq(&p_cmd_lists->cmd_list_lock);
		}

		scst_do_job_active(&p_cmd_lists->active_cmd_list,
			&p_cmd_lists->cmd_list_lock, SCST_CONTEXT_DIRECT |
				SCST_CONTEXT_PROCESSABLE);
	}
	spin_unlock_irq(&p_cmd_lists->cmd_list_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be either
	 * on the module unload, or there must be at least one other thread to
	 * process the commands lists.
	 */
	if (p_cmd_lists == &scst_main_cmd_lists) {
		sBUG_ON((scst_threads_info.nr_cmd_threads == 1) &&
			!list_empty(&scst_main_cmd_lists.active_cmd_list));
	}

	PRINT_INFO("Processing thread PID %d finished", current->pid);

	TRACE_EXIT();
	return 0;
}
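
/*
 * Tasklet entry point: processes the per-CPU tasklet cmd list filled
 * by scst_schedule_tasklet() in SCST_CONTEXT_DIRECT_ATOMIC, i.e.
 * anything requiring a non-atomic context gets bounced to a
 * processing thread via RES_NEED_THREAD.
 */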
void scst_cmd_tasklet(long p)
{
	struct scst_tasklet *t = (struct scst_tasklet *)p;

	TRACE_ENTRY();

	spin_lock_irq(&t->tasklet_lock);
	scst_do_job_active(&t->tasklet_cmd_list, &t->tasklet_lock,
		SCST_CONTEXT_DIRECT_ATOMIC);
	spin_unlock_irq(&t->tasklet_lock);

	TRACE_EXIT();
}
/*
 * Returns 0 on success, < 0 if there is no device handler or
 * > 0 if SCST_FLAG_SUSPENDED set and SCST_FLAG_SUSPENDING - not.
 * No locks, protection is done by the suspended activity.
 */
static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	struct list_head *sess_tgt_dev_list_head;
	int res = -1;

	TRACE_ENTRY();

	TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
		(long long unsigned int)mcmd->lun);

	scst_inc_cmd_count();

	if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
		     !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
		TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
		scst_dec_cmd_count();
		res = 1;
		goto out;
	}

	sess_tgt_dev_list_head =
		&mcmd->sess->sess_tgt_dev_list_hash[HASH_VAL(mcmd->lun)];
	list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
			sess_tgt_dev_list_entry) {
		if (tgt_dev->lun == mcmd->lun) {
			TRACE_DBG("tgt_dev %p found", tgt_dev);
			mcmd->mcmd_tgt_dev = tgt_dev;
			res = 0;
			break;
		}
	}
	if (mcmd->mcmd_tgt_dev == NULL)
		scst_dec_cmd_count();

out:
	TRACE_EXIT_HRES(res);
	return res;
}
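
/*
 * The two callbacks below connect regular commands with management
 * commands waiting for them: scst_done_cmd_mgmt() is called when an
 * affected cmd is done (tracked by cmd_done_wait_count), while
 * scst_finish_cmd_mgmt() is called when it is fully finished (tracked
 * by cmd_finish_wait_count). When the respective counter drops to
 * zero, the waiting mgmt cmd is rescheduled on the active mgmt cmd
 * list.
 */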
void scst_done_cmd_mgmt(struct scst_cmd *cmd)
{
	struct scst_mgmt_cmd_stub *mstb;
	int wake = 0;
	unsigned long flags;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("cmd %p done (tag %llu)",
		cmd, (long long unsigned int)cmd->tag);

	spin_lock_irqsave(&scst_mcmd_lock, flags);

	list_for_each_entry(mstb, &cmd->mgmt_cmd_list,
			cmd_mgmt_cmd_list_entry) {
		struct scst_mgmt_cmd *mcmd = mstb->mcmd;

		TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_done_wait_count %d",
			mcmd, mcmd->cmd_done_wait_count);

		mcmd->cmd_done_wait_count--;
		if (mcmd->cmd_done_wait_count > 0) {
			TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
				"skipping", mcmd->cmd_done_wait_count);
			continue;
		}

		if (mcmd->completed) {
			sBUG_ON(mcmd->nexus_loss_check_done);
			mcmd->completed = 0;
			mcmd->state = SCST_MGMT_CMD_STATE_CHECK_NEXUS_LOSS;
			TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
				"list", mcmd);
			list_add_tail(&mcmd->mgmt_cmd_list_entry,
				&scst_active_mgmt_cmd_list);
			wake = 1;
		}
	}

	spin_unlock_irqrestore(&scst_mcmd_lock, flags);

	if (wake)
		wake_up(&scst_mgmt_cmd_list_waitQ);

	TRACE_EXIT();
}
static void scst_finish_cmd_mgmt(struct scst_cmd *cmd)
{
	struct scst_mgmt_cmd_stub *mstb, *t;
	int wake = 0;
	unsigned long flags;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("cmd %p finished (tag %llu)",
		cmd, (long long unsigned int)cmd->tag);

	spin_lock_irqsave(&scst_mcmd_lock, flags);

	list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
			cmd_mgmt_cmd_list_entry) {
		struct scst_mgmt_cmd *mcmd = mstb->mcmd;

		TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_finish_wait_count %d",
			mcmd, mcmd->cmd_finish_wait_count);

		list_del(&mstb->cmd_mgmt_cmd_list_entry);
		mempool_free(mstb, scst_mgmt_stub_mempool);

		mcmd->completed_cmd_count++;

		mcmd->cmd_finish_wait_count--;
		if (mcmd->cmd_finish_wait_count > 0) {
			TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
				"skipping", mcmd->cmd_finish_wait_count);
			continue;
		}

		if (mcmd->completed) {
			mcmd->state = SCST_MGMT_CMD_STATE_DONE;
			TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
				"list", mcmd);
			list_add_tail(&mcmd->mgmt_cmd_list_entry,
				&scst_active_mgmt_cmd_list);
			wake = 1;
		}
	}

	spin_unlock_irqrestore(&scst_mcmd_lock, flags);

	if (wake)
		wake_up(&scst_mgmt_cmd_list_waitQ);

	TRACE_EXIT();
}
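
/*
 * Gives the device handler the first chance to process a task
 * management function. A sketch of such a callback (the my_* names
 * are hypothetical); returning SCST_DEV_TM_NOT_COMPLETED tells SCST
 * to proceed with its own generic processing:
 *
 *	static int my_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
 *		struct scst_tgt_dev *tgt_dev)
 *	{
 *		my_cancel_pending_io(tgt_dev);
 *		return SCST_DEV_TM_NOT_COMPLETED;
 *	}
 */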
static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
	struct scst_tgt_dev *tgt_dev, int set_status)
{
	int res = SCST_DEV_TM_NOT_COMPLETED;
	struct scst_dev_type *h = tgt_dev->dev->handler;

	if (h->task_mgmt_fn) {
		TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
			h->name, mcmd->fn);
		EXTRACHECKS_BUG_ON(in_irq());
		res = h->task_mgmt_fn(mcmd, tgt_dev);
		TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
			h->name, res);
		if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
			mcmd->status = res;
	}
	return res;
}
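
/*
 * "Strict" task management functions are, apparently, those for which
 * already finished commands must be treated as not existing; currently
 * that is only SCST_ABORT_TASK, and only when
 * ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING is defined.
 */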
static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
{
	switch (mgmt_fn) {
#ifdef ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
	case SCST_ABORT_TASK:
#endif
#if 0
	case SCST_ABORT_TASK_SET:
	case SCST_CLEAR_TASK_SET:
#endif
		return 1;
	default:
		return 0;
	}
}

/*
 * Might be called under sess_list_lock and IRQ off + BHs also off
 */
void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
	int other_ini, int call_dev_task_mgmt_fn)
{
	unsigned long flags;
	static spinlock_t other_ini_lock = SPIN_LOCK_UNLOCKED;

	TRACE_ENTRY();

	TRACE(((mcmd != NULL) && (mcmd->fn == SCST_ABORT_TASK)) ? TRACE_MGMT_MINOR : TRACE_MGMT,
		"Aborting cmd %p (tag %llu, op %x)",
		cmd, (long long unsigned int)cmd->tag, cmd->cdb[0]);

	/* To protect from concurrent aborts */
	spin_lock_irqsave(&other_ini_lock, flags);

	if (other_ini) {
		/* Might be necessary if command aborted several times */
		if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
			smp_mb__after_set_bit();