/*
 * Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/unistd.h>
#include <asm/string.h>

#include "scst_debug.h"
#include "scst_priv.h"

static int scst_do_job_init(struct list_head *init_cmd_list);
static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
	struct scst_mgmt_cmd *mcmd);

/* scst_list_lock assumed to be held */
static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
	unsigned long *pflags, int left_locked)
	TRACE_DBG("Moving cmd %p to cmd list", cmd);
	list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
	/* This is an inline function, so the unneeded code will be optimized out */
	spin_unlock_irqrestore(&scst_list_lock, *pflags);
	spin_unlock_irq(&scst_list_lock);
	res = __scst_process_active_cmd(cmd, context, left_locked);

static inline void scst_schedule_tasklet(void)
	struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
#if 0 /* Looks like #else is better for performance */
	if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
	/*
	 * We suppose that other CPU(s) are rather idle, so we
	 * ask one of them to help
	 */
	TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
		"instead", smp_processor_id());
	wake_up(&scst_list_waitQ);
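/*
 * Execution context summary (a recap of how the code below uses the
 * SCST_CONTEXT_* values, not a normative definition):
 *  - SCST_CONTEXT_DIRECT: process the cmd inline, in process context;
 *  - SCST_CONTEXT_DIRECT_ATOMIC: process inline, but atomically, so
 *    handlers that may sleep get rescheduled to a thread;
 *  - SCST_CONTEXT_TASKLET: defer to a per-CPU tasklet, see
 *    scst_schedule_tasklet() above;
 *  - SCST_CONTEXT_THREAD: defer to a scst_cmd_thread() woken via
 *    scst_list_waitQ.
 * Callers in IRQ context must not use the DIRECT* contexts (see the
 * in_irq() checks in scst_cmd_init_done() and scst_rx_data()).
 */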
 * Must not be called in parallel with scst_unregister_session() for the same session
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
	const uint8_t *lun, int lun_len,
	const uint8_t *cdb, int cdb_len, int atomic)
	if (unlikely(sess->shutting_down)) {
		PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
	cmd->tgt = sess->tgt;
	cmd->tgtt = sess->tgt->tgtt;
	cmd->state = SCST_CMD_STATE_INIT_WAIT;
	/*
	 * For both a wrong LUN and a wrong CDB, defer the error reporting to
	 * scst_cmd_init_done()
	 */
	cmd->lun = scst_unpack_lun(lun, lun_len);
	if (cdb_len <= MAX_COMMAND_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;
	TRACE_DBG("cmd %p, sess %p", cmd, sess);

void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
	unsigned long flags = 0;
	struct scst_session *sess = cmd->sess;
	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag,
		(uint64_t)cmd->lun, cmd->cdb_len);
	TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);
	if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
	    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
		pref_context = SCST_CONTEXT_TASKLET;
	spin_lock_irqsave(&scst_list_lock, flags);
	/* Do it here; this saves us a lock or an atomic operation */
	sess->sess_cmd_count++;
	list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		switch (sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			goto out_unlock_flags;
		case SCST_SESS_IPH_FAILED:
			cmd->state = SCST_CMD_STATE_XMIT_RESP;
			TRACE_DBG("Adding cmd %p to active cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&scst_active_cmd_list);
	if (unlikely(cmd->lun == (lun_t)-1)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	cmd->state = SCST_CMD_STATE_INIT;
	TRACE_DBG("Moving cmd %p to init cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
	switch (pref_context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		res = scst_do_job_init(&scst_init_cmd_list);
		goto out_unlock_flags;
	case SCST_CONTEXT_THREAD:
		goto out_thread_unlock_flags;
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet();
		goto out_unlock_flags;
225 PRINT_ERROR_PR("Context %x is undefined, using thread one",
227 goto out_thread_unlock_flags;
	switch (pref_context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, pref_context, &flags, 0);
	case SCST_CONTEXT_THREAD:
		goto out_thread_unlock_flags;
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet();
		goto out_unlock_flags;
245 PRINT_ERROR_PR("Context %x is undefined, using thread one",
247 goto out_thread_unlock_flags;
	spin_unlock_irqrestore(&scst_list_lock, flags);

out_thread_unlock_flags:
	cmd->non_atomic_only = 1;
	spin_unlock_irqrestore(&scst_list_lock, flags);
	wake_up(&scst_list_waitQ);
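/*
 * Usage sketch (not part of SCST, not compiled): how a target driver
 * is expected to feed a new command through scst_rx_cmd() and
 * scst_cmd_init_done() above.  The "my_*" names and the PDU layout
 * are hypothetical.
 */
#if 0
static void my_tgt_rx_pdu(struct my_conn *conn, struct my_pdu *pdu)
{
	struct scst_cmd *cmd;

	/* atomic == 1: assume we are called from softirq context */
	cmd = scst_rx_cmd(conn->scst_sess, pdu->lun, sizeof(pdu->lun),
			  pdu->cdb, pdu->cdb_len, 1);
	if (cmd == NULL)
		return;	/* out of memory: drop or fail the PDU */

	cmd->tag = pdu->tag;

	/* In softirq/IRQ context only TASKLET or THREAD are legal,
	 * see the in_irq() check in scst_cmd_init_done() */
	scst_cmd_init_done(cmd, SCST_CONTEXT_TASKLET);
}
#endif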
static int scst_parse_cmd(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;
	struct scst_info_cdb cdb_info;
	int atomic = scst_cmd_atomic(cmd);
	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_DBG("ABORTED set, returning ABORTED "
	if (atomic && !dev->handler->parse_atomic) {
		TRACE_DBG("Dev handler %s parse() cannot be "
			"called in atomic context, rescheduling to the thread",
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	/*
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from the
	 * CDB.  Additionally, not all transports support supplying the
	 * expected transfer data at all.
	 */
	if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type,
		PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);
		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_MINOR, "Using initiator supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;
			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore the (most probably lost) CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR_PR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_opcode));
327 PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
328 "target %s not supplied expected values. "
329 "Returning INVALID OPCODE.", cmd->cdb[0],
330 dev->handler->name, cmd->tgtt->name);
331 scst_set_cmd_error(cmd,
332 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
	TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
		"set %s), transfer_len=%d (expected len %d), flags=%d",
		cdb_info.op_name, cdb_info.direction,
		cmd->expected_data_direction,
		scst_cmd_is_expected_set(cmd) ? "yes" : "no",
		cdb_info.transfer_len, cmd->expected_transfer_len,
	/* Restore the (most probably lost) CDB length */
	cmd->cdb_len = cdb_info.cdb_len;
	cmd->data_direction = cdb_info.direction;
	if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
		cmd->bufflen = cdb_info.transfer_len;
	/* else cmd->bufflen stays 0, as it was initialized */
	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR_PR("NACA bit in the CDB control byte is not "
			"supported (opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR_PR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	if (likely(!scst_is_cmd_local(cmd))) {
		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd, &cdb_info);
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);
		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;
		state = SCST_CMD_STATE_PREPARE_SPACE;
	if (scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_transfer_len < cmd->bufflen) {
			TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
				"cmd->bufflen(%d), using expected_transfer_len "
				"instead", cmd->expected_transfer_len,
			cmd->bufflen = cmd->expected_transfer_len;
	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;
	if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
		if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
		     (state != SCST_CMD_STATE_DEV_PARSE)) ||
		    ((cmd->bufflen != 0) &&
		     (cmd->data_direction == SCST_DATA_NONE)) ||
		    ((cmd->bufflen == 0) &&
		     (cmd->data_direction != SCST_DATA_NONE)) ||
		    ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
		     (state > SCST_CMD_STATE_PREPARE_SPACE)))
			PRINT_ERROR_PR("Dev handler %s parse() returned "
				"invalid cmd data_direction %d, "
				"bufflen %zd or state %d (opcode 0x%x)",
				cmd->data_direction, cmd->bufflen,
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		res = SCST_CMD_STATE_RES_CONT_SAME;
	case SCST_CMD_STATE_REINIT:
		cmd->tgt_dev_saved = tgt_dev_saved;
		res = SCST_CMD_STATE_RES_RESTART;
	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s parse() requested thread "
			"context, rescheduling", dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		PRINT_ERROR_PR("Dev handler %s parse() returned "
			"invalid cmd state %d (opcode %d)",
			dev->handler->name, state, cmd->cdb[0]);
		PRINT_ERROR_PR("Dev handler %s parse() returned "
			"error %d (opcode %d)", dev->handler->name,
	if ((cmd->resp_data_len == -1) && set_dir) {
		if (cmd->data_direction == SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
			cmd->resp_data_len = 0;
	TRACE_EXIT_HRES(res);
	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;
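/*
 * Dev handler sketch (hypothetical, not compiled): a minimal parse()
 * of the kind scst_parse_cmd() above calls.  Returning
 * SCST_CMD_STATE_DEFAULT accepts the CDB-derived direction and
 * bufflen; my_parse_would_sleep() is an assumed helper.
 */
#if 0
static int my_parse(struct scst_cmd *cmd, struct scst_info_cdb *info)
{
	/* Bounce to a thread if we were called atomically but must sleep */
	if (scst_cmd_atomic(cmd) && my_parse_would_sleep(cmd))
		return SCST_CMD_STATE_NEED_THREAD_CTX;

	return SCST_CMD_STATE_DEFAULT;	/* keep SCST's defaults */
}
#endif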
void scst_cmd_mem_work_fn(void *p)
	spin_lock_bh(&scst_cmd_mem_lock);
	scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
	if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
		TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
		schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
	scst_cur_max_cmd_mem = scst_max_cmd_mem;
	clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
	TRACE_MGMT_DBG("New max cmd mem %ld MB", scst_cur_max_cmd_mem >> 20);
	spin_unlock_bh(&scst_cmd_mem_lock);
int scst_check_mem(struct scst_cmd *cmd)
	if (cmd->mem_checked)
	spin_lock_bh(&scst_cmd_mem_lock);
	scst_cur_cmd_mem += cmd->bufflen;
	cmd->mem_checked = 1;
	if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
	TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld KB) "
		"is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
		"allowed %ld KB)", scst_cur_cmd_mem >> 10,
		(cmd->sess->initiator_name[0] == '\0') ?
		"Anonymous" : cmd->sess->initiator_name,
		scst_cur_max_cmd_mem >> 10);
	scst_cur_cmd_mem -= cmd->bufflen;
	cmd->mem_checked = 0;
	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	spin_unlock_bh(&scst_cmd_mem_lock);

static void scst_low_cur_max_cmd_mem(void)
	if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
		cancel_delayed_work(&scst_cmd_mem_work);
		flush_scheduled_work();
		clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
	spin_lock_bh(&scst_cmd_mem_lock);
	scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) +
				(scst_cur_cmd_mem >> 2);
	if (scst_cur_max_cmd_mem < 16*1024*1024)
		scst_cur_max_cmd_mem = 16*1024*1024;
	if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
		TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
		schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
		set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
	spin_unlock_bh(&scst_cmd_mem_lock);
	TRACE_MGMT_DBG("New max cmd mem %ld MB", scst_cur_max_cmd_mem >> 20);
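/*
 * Throttle arithmetic, worked example (illustrative numbers): with
 * 64 MB currently held by commands, scst_low_cur_max_cmd_mem() drops
 * the cap to 64/2 + 64/4 = 48 MB (never below 16 MB).  Afterwards
 * scst_cmd_mem_work_fn() grows the cap by 1/8 per SCST_CMD_MEM_TIMEOUT:
 * 48 -> 54 -> 60.75 -> ... MB, until scst_max_cmd_mem is restored.
 */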
static int scst_prepare_space(struct scst_cmd *cmd)
	int r, res = SCST_CMD_STATE_RES_CONT_SAME;
	if (cmd->data_direction == SCST_DATA_NONE) {
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	r = scst_check_mem(cmd);
	if (unlikely(r != 0))
	if (cmd->data_buf_tgt_alloc) {
		TRACE_MEM("%s", "Custom tgt data buf allocation requested");
		r = cmd->tgtt->alloc_data_buf(cmd);
		cmd->data_buf_alloced = (r == 0);
		r = scst_alloc_space(cmd);
	if (scst_cmd_atomic(cmd)) {
		TRACE_MEM("%s", "Atomic memory allocation failed, "
			"rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	switch (cmd->data_direction) {
	case SCST_DATA_WRITE:
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	TRACE_EXIT_HRES(res);
	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
	scst_low_cur_max_cmd_mem();
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;

static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
	struct scst_tgt *tgt = cmd->sess->tgt;
	spin_lock_irqsave(&tgt->tgt_lock, flags);
	TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
			"(finished_cmds=%d, tgt->finished_cmds=%d, "
			"retry_cmds=%d)", finished_cmds,
			atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
	TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
	/* IRQ already off */
	spin_lock(&scst_list_lock);
	list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
	spin_unlock(&scst_list_lock);
	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
	int atomic = scst_cmd_atomic(cmd);
	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
		TRACE_DBG("ABORTED set, returning ABORTED for "
	if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
		TRACE_DBG("%s", "rdy_to_xfer() cannot be "
			"called in atomic context, rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
	res = SCST_CMD_STATE_RES_CONT_NEXT;
	cmd->state = SCST_CMD_STATE_DATA_WAIT;
	TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
	if (((scst_random() % 100) == 75))
		rc = SCST_TGT_RES_QUEUE_FULL;
	rc = cmd->tgtt->rdy_to_xfer(cmd);
	TRACE_DBG("rdy_to_xfer() returned %d", rc);
	if (likely(rc == SCST_TGT_RES_SUCCESS))
	/* Restore the previous state */
	cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
	case SCST_TGT_RES_QUEUE_FULL:
		if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
	case SCST_TGT_RES_NEED_THREAD_CTX:
		TRACE_DBG("Target driver %s "
			"rdy_to_xfer() requested thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	TRACE_EXIT_HRES(res);
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
			"fatal error", cmd->tgtt->name);
		PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", cmd->tgtt->name, rc);
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;

void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
	TRACE_DBG("Context: %d", context);
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_check_retries(cmd->tgt, 0);
		cmd->non_atomic_only = 0;
		rc = __scst_process_active_cmd(cmd, context, 0);
		if (rc == SCST_CMD_STATE_RES_NEED_THREAD)
779 PRINT_ERROR_PR("Context %x is unknown, using the thread one",
	case SCST_CONTEXT_THREAD:
		scst_check_retries(cmd->tgt, 1);
	case SCST_CONTEXT_TASKLET:
		scst_check_retries(cmd->tgt, 1);
		cmd->non_atomic_only = 0;
		spin_lock_irqsave(&scst_list_lock, flags);
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
		spin_unlock_irqrestore(&scst_list_lock, flags);
		scst_schedule_tasklet();
	cmd->non_atomic_only = 1;
	spin_lock_irqsave(&scst_list_lock, flags);
	TRACE_DBG("Moving cmd %p to active cmd list", cmd);
	list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	spin_unlock_irqrestore(&scst_list_lock, flags);
	wake_up(&scst_list_waitQ);

void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
	cmd->non_atomic_only = 0;
	if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
	    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
		pref_context = SCST_CONTEXT_TASKLET;
	case SCST_RX_STATUS_SUCCESS:
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	case SCST_RX_STATUS_ERROR_SENSE_SET:
		cmd->state = SCST_CMD_STATE_DEV_DONE;
	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_DEV_DONE;
		PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
		cmd->state = SCST_CMD_STATE_DEV_DONE;
	scst_proccess_redirect_cmd(cmd, pref_context, 1);
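/*
 * Usage sketch (hypothetical target driver, not compiled): completing
 * a data-out transfer started by rdy_to_xfer() and handing the cmd
 * back via scst_rx_data() above.  "struct my_io" is assumed.
 */
#if 0
static void my_data_out_complete(struct my_io *io)
{
	int status = io->ok ? SCST_RX_STATUS_SUCCESS
			    : SCST_RX_STATUS_ERROR;

	/* Called from our softirq, hence the tasklet context */
	scst_rx_data(io->scst_cmd, status, SCST_CONTEXT_TASKLET);
}
#endif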
/* No locks supposed to be held */
static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
	int rq_sense_len, int *next_state)
	struct scst_device *dev = cmd->dev;
	int dbl_ua_possible, ua_sent = 0;
	/* If we had an internal bus reset behind us, set the reset UA on the command */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(cmd->host_status == DID_RESET) &&
	    scst_is_ua_command(cmd))
		TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
			dev->scsi_dev->was_reset, cmd->host_status);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_reset_UA));
		/* It looks like it is safe to clear was_reset here */
		dev->scsi_dev->was_reset = 0;
	if (rq_sense != NULL) {
		sense_valid = SCST_SENSE_VALID(rq_sense);
		memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
		/* We checked that rq_sense_len < sizeof(cmd->sense_buffer) */
		memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
		sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
	dbl_ua_possible = dev->dev_double_ua_possible;
	TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
	if (unlikely(dbl_ua_possible)) {
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread dev_double_ua_possible */
		dbl_ua_possible = dev->dev_double_ua_possible;
		ua_sent = dev->dev_reset_ua_sent;
		spin_unlock_bh(&dev->dev_lock);
	TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
		sizeof(cmd->sense_buffer));
	/* Check Unit Attention Sense Key */
	if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
		if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
			TRACE(TRACE_MGMT, "%s",
				"Double UA detected");
			TRACE(TRACE_MGMT, "Retrying cmd %p "
				"(tag %d)", cmd, cmd->tag);
			cmd->masked_status = 0;
			cmd->host_status = DID_OK;
			cmd->driver_status = 0;
			memset(cmd->sense_buffer, 0,
				sizeof(cmd->sense_buffer));
			*next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
			/*
			 * Dev is still blocked by this cmd, so
			 * it's OK to clear SCST_DEV_SERIALIZED
			 */
			dev->dev_double_ua_possible = 0;
			dev->dev_serialized = 0;
			dev->dev_reset_ua_sent = 0;
			dev->dev_reset_ua_sent = 1;
		if (cmd->ua_ignore == 0) {
			if (unlikely(dbl_ua_possible)) {
				__scst_process_UA(dev, cmd,
					sizeof(cmd->sense_buffer), 0);
				scst_process_UA(dev, cmd,
					sizeof(cmd->sense_buffer), 0);
	if (unlikely(dbl_ua_possible)) {
		if (ua_sent && scst_is_ua_command(cmd)) {
			TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
			dev->dev_double_ua_possible = 0;
			dev->dev_serialized = 0;
			dev->dev_reset_ua_sent = 0;
		spin_unlock_bh(&dev->dev_lock);
	spin_unlock_bh(&dev->dev_lock);
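/*
 * Double-UA recap: after a reset the device may report the reset Unit
 * Attention more than once.  The first one is delivered to the
 * initiator (dev_reset_ua_sent = 1); if another reset UA arrives while
 * dev_double_ua_possible is set, it is treated as a duplicate above:
 * the command is transparently retried and the flags are cleared.
 */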
static int scst_check_auto_sense(struct scst_cmd *cmd)
	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    (!SCST_SENSE_VALID(cmd->sense_buffer) ||
	     SCST_NO_SENSE(cmd->sense_buffer)))
		TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
			"cmd->status=%x, cmd->masked_status=%x, "
			"cmd->msg_status=%x, cmd->host_status=%x, "
			"cmd->driver_status=%x", cmd->status, cmd->masked_status,
			cmd->msg_status, cmd->host_status, cmd->driver_status);
	} else if (unlikely(cmd->host_status)) {
		if ((cmd->host_status == DID_REQUEUE) ||
		    (cmd->host_status == DID_IMM_RETRY) ||
		    (cmd->host_status == DID_SOFT_ERROR)) {
			TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
				"received, returning HARDWARE ERROR instead",
			scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	TRACE_EXIT_RES(res);

static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int *next_state)
	cmd->status = result & 0xff;
	cmd->masked_status = status_byte(result);
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, "
		"cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x", result, cmd->status,
		cmd->masked_status, cmd->msg_status, cmd->host_status,
		cmd->driver_status);
	scst_dec_on_dev_cmd(cmd);
	type = cmd->dev->handler->type;
	if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
	    cmd->tgt_dev->acg_dev->rd_only_flag &&
	    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
	     type == TYPE_TAPE)) {
		length = scst_get_buf_first(cmd, &address);
		TRACE_DBG("length %d", length);
		if (unlikely(length <= 0)) {
			PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
		if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
			address[2] |= 0x80;	/* Write Protect */
		else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
			address[3] |= 0x80;	/* Write Protect */
		scst_put_buf(cmd, address);
	scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
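/*
 * Result decomposition, worked example (using the classic
 * <scsi/scsi.h> helpers): for result = (DID_OK << 16) |
 * SAM_STAT_CHECK_CONDITION (0x00000002), the code above yields
 * status = 0x02 (CHECK CONDITION), masked_status = 0x01 (status_byte()
 * shifts right by one), msg_status = 0, host_status = DID_OK and
 * driver_status = 0.
 */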
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
	struct scsi_request **req)
	struct scst_cmd *cmd = NULL;
	if (scsi_cmd && (*req = scsi_cmd->sc_request))
		cmd = (struct scst_cmd *)(*req)->upper_private_data;
	PRINT_ERROR_PR("%s", "Request with NULL cmd");
	scsi_release_request(*req);

static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;
	/*
	 * We don't use scsi_cmd->resid, because:
	 * 1. Many low level initiator drivers don't use (set) this field
	 * 2. We determine the command's buffer size directly from CDB,
	 *    so scsi_cmd->resid is not relevant for us, and target drivers
	 *    should know the residual, if necessary, by comparing expected
	 *    and actual transfer sizes.
	 */
	cmd = scst_get_cmd(scsi_cmd, &req);
	next_state = SCST_CMD_STATE_DEV_DONE;
	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), &next_state);
	/* Clear out request structure */
	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown request blk */
	cmd->bufflen = req->sr_bufflen; /* ?? */
	scst_release_request(cmd);
	cmd->state = next_state;
	cmd->non_atomic_only = 0;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
static void scst_cmd_done(void *data, char *sense, int result, int resid)
	struct scst_cmd *cmd;
	/*
	 * We don't use resid, because:
	 * 1. Many low level initiator drivers don't use (set) this field
	 * 2. We determine the command's buffer size directly from CDB,
	 *    so resid is not relevant for us, and target drivers
	 *    should know the residual, if necessary, by comparing expected
	 *    and actual transfer sizes.
	 */
	cmd = (struct scst_cmd *)data;
	next_state = SCST_CMD_STATE_DEV_DONE;
	scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE,
	cmd->state = next_state;
	cmd->non_atomic_only = 0;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */

static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
	scst_dec_on_dev_cmd(cmd);
	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_DEV_DONE;
	if (next_state == SCST_CMD_STATE_DEV_DONE) {
#if defined(DEBUG) || defined(TRACING)
		struct scatterlist *sg = cmd->sg;
		TRACE(TRACE_RECV_TOP,
			"Exec'd %d S/G(s) at %p sg[0].page at %p",
			cmd->sg_cnt, sg, (void *)sg[0].page);
		for (i = 0; i < cmd->sg_cnt; ++i) {
			TRACE_BUFF_FLAG(TRACE_RECV_TOP,
				"Exec'd sg", page_address(sg[i].page),
	if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED))
		PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
			"state %d (opcode %d)", next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		next_state = SCST_CMD_STATE_DEV_DONE;
	if (scst_check_auto_sense(cmd)) {
		PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
			"opcode %d", cmd->cdb[0]);
	scst_check_sense(cmd, NULL, 0, &next_state);
	cmd->state = next_state;
	cmd->non_atomic_only = 0;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
static int scst_report_luns_local(struct scst_cmd *cmd)
	int res = SCST_EXEC_COMPLETED;
	struct scst_tgt_dev *tgt_dev = NULL;
	cmd->masked_status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;
	/* ToDo: use full SG buffer, not only the first entry */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size <= 0))
	if (buffer_size < 16) {
	memset(buffer, 0, buffer_size);
	/* sess->sess_tgt_dev_list is protected by suspended activity */
	list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
			sess_tgt_dev_list_entry)
		if (8 + 8 * dev_cnt + 2 <= buffer_size) {
			buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
			buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
		/* Tmp, until ToDo above done */
		if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
	/* Set the response header */
	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;
	scst_put_buf(cmd, buffer);
	if (buffer_size > dev_cnt)
		scst_set_resp_data_len(cmd, dev_cnt);
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	TRACE_EXIT_RES(res);
	scst_put_buf(cmd, buffer);
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
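/*
 * REPORT LUNS parameter data built above (per SPC):
 *   bytes 0-3: LUN LIST LENGTH in bytes (entries * 8), big-endian;
 *   bytes 4-7: reserved;
 *   bytes 8+ : one 8-byte entry per LUN; with the single-level
 *              addressing used here only the first two bytes of an
 *              entry carry the LUN, the remaining six stay zero.
 */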
static int scst_pre_select(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED;
	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
	scst_block_dev(cmd->dev, 1);
	/* Device will be unblocked in scst_done_cmd_check() */
	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
		int rc = scst_set_pending_UA(cmd);
		res = SCST_EXEC_COMPLETED;
		/* Report the result */
		scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	TRACE_EXIT_RES(res);

static inline void scst_report_reserved(struct scst_cmd *cmd)
	scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

static int scst_reserve_local(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;
	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;
	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%Ld)", (uint64_t)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		res = SCST_EXEC_COMPLETED;
	scst_block_dev(dev, 1);
	/* Device will be unblocked in scst_done_cmd_check() */
	spin_lock_bh(&dev->dev_lock);
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		scst_report_reserved(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		res = SCST_EXEC_COMPLETED;
	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry)
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
	dev->dev_reserved = 1;
	spin_unlock_bh(&dev->dev_lock);
	TRACE_EXIT_RES(res);
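/*
 * Reservation bookkeeping recap: a successful RESERVE above sets
 * SCST_TGT_DEV_RESERVED on every *other* tgt_dev of the device and
 * dev->dev_reserved = 1.  scst_pre_exec() then answers commands from
 * those initiators with RESERVATION CONFLICT, except for the few
 * commands allowed through (INQUIRY, REPORT LUNS, RELEASE, REQUEST
 * SENSE, LOG SENSE, ...).  scst_release_local() clears the bits again.
 */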
static int scst_release_local(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;
	scst_block_dev(dev, 1);
	TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
	spin_lock_bh(&dev->dev_lock);
	/*
	 * The device could have been RELEASED behind us if the RESERVING
	 * session was closed (see scst_free_tgt_dev()), but that actually
	 * doesn't matter, so take the lock and don't retest the
	 * DEV_RESERVED bits again
	 */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;
		cmd->masked_status = 0;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;
		list_for_each_entry(tgt_dev_tmp,
				&dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry)
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		dev->dev_reserved = 0;
	spin_unlock_bh(&dev->dev_lock);
	if (res == SCST_EXEC_COMPLETED) {
		/* Report the result */
		scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	TRACE_EXIT_RES(res);

/*
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
 */
static int scst_pre_exec(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	/* Reserve check before Unit Attention */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
	    (cmd->cdb[0] != INQUIRY) &&
	    (cmd->cdb[0] != REPORT_LUNS) &&
	    (cmd->cdb[0] != RELEASE) &&
	    (cmd->cdb[0] != RELEASE_10) &&
	    (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
	    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
	    (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))
		scst_report_reserved(cmd);
		res = SCST_EXEC_COMPLETED;
	/* If we had an internal bus reset, set the reset Unit Attention on the command */
	if ((cmd->dev->scsi_dev != NULL) &&
	    unlikely(cmd->dev->scsi_dev->was_reset) &&
	    scst_is_ua_command(cmd))
		struct scst_device *dev = cmd->dev;
		/* Prevent more than one cmd from being triggered by was_reset */
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread was_reset */
		if (dev->scsi_dev->was_reset) {
			TRACE(TRACE_MGMT, "was_reset is %d", 1);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_reset_UA));
			/* It looks like it is safe to clear was_reset here */
			dev->scsi_dev->was_reset = 0;
		spin_unlock_bh(&dev->dev_lock);
	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
	    scst_is_ua_command(cmd))
		rc = scst_set_pending_UA(cmd);
	/* Check READ_ONLY device status */
	if (tgt_dev->acg_dev->rd_only_flag &&
	    (cmd->cdb[0] == WRITE_6 ||	/* ToDo: full list of the modify cmds */
	     cmd->cdb[0] == WRITE_10 ||
	     cmd->cdb[0] == WRITE_12 ||
	     cmd->cdb[0] == WRITE_16 ||
	     cmd->cdb[0] == WRITE_VERIFY ||
	     cmd->cdb[0] == WRITE_VERIFY_12 ||
	     cmd->cdb[0] == WRITE_VERIFY_16 ||
	     (cmd->dev->handler->type == TYPE_TAPE &&
	      (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));
	TRACE_EXIT_RES(res);
	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

/*
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
 */
static inline int scst_local_exec(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED;
	/*
	 * When adding new commands here, don't forget to update
	 * scst_is_cmd_local() in scsi_tgt.h, if necessary
	 */
	switch (cmd->cdb[0]) {
	case MODE_SELECT_10:
		res = scst_pre_select(cmd);
		res = scst_reserve_local(cmd);
		res = scst_release_local(cmd);
		res = scst_report_luns_local(cmd);
	TRACE_EXIT_RES(res);

static int scst_do_send_to_midlev(struct scst_cmd *cmd)
	int rc = SCST_EXEC_NOT_COMPLETED;
	cmd->sent_to_midlev = 1;
	cmd->state = SCST_CMD_STATE_EXECUTING;
	cmd->scst_cmd_done = scst_cmd_done_local;
	set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();
	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
	rc = scst_pre_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
		else if (rc == SCST_EXEC_NEED_THREAD)
	rc = scst_local_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
		else if (rc == SCST_EXEC_NEED_THREAD)
	if (cmd->dev->handler->exec) {
		struct scst_device *dev = cmd->dev;
		TRACE_DBG("Calling dev handler %s exec(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
		cmd->scst_cmd_done = scst_cmd_done_local;
		rc = dev->handler->exec(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		TRACE_DBG("Dev handler %s exec() returned %d",
			dev->handler->name, rc);
		if (rc != SCST_EXEC_NOT_COMPLETED) {
			if (rc == SCST_EXEC_COMPLETED)
			else if (rc == SCST_EXEC_NEED_THREAD)
	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
	if (unlikely(cmd->dev->scsi_dev == NULL)) {
1636 PRINT_ERROR_PR("Command for virtual device must be "
1637 "processed by device handler (lun %Ld)!",
1638 (uint64_t)cmd->lun);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	if (scst_alloc_request(cmd) != 0) {
		PRINT_INFO_PR("%s", "Unable to allocate request, "
			"sending BUSY status");
	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
	rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
		cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
		cmd->timeout, cmd->retries, cmd, scst_cmd_done,
		PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
	rc = SCST_EXEC_COMPLETED;
	/* Restore the state */
	cmd->sent_to_midlev = 0;
	cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
		"invalid code %d", cmd->dev->handler->name, rc);
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	rc = SCST_EXEC_COMPLETED;
	/* Report the result. The cmd is not completed */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

static int scst_send_to_midlev(struct scst_cmd *cmd)
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;
	int atomic = scst_cmd_atomic(cmd);
	res = SCST_CMD_STATE_RES_CONT_NEXT;
	if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
		TRACE_DBG("Dev handler %s exec() cannot be "
			"called in atomic context, rescheduling to the thread",
			dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
	scst_inc_cmd_count(); /* protect dev & tgt_dev */
	if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
		rc = scst_do_send_to_midlev(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
				"thread context, rescheduling");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			scst_dec_on_dev_cmd(cmd);
			goto out_dec_cmd_count;
		BUG_ON(rc != SCST_EXEC_COMPLETED);
	expected_sn = tgt_dev->expected_sn;
	if (cmd->sn != expected_sn) {
		spin_lock_bh(&tgt_dev->sn_lock);
		tgt_dev->def_cmd_count++;
		barrier(); /* to reread expected_sn */
		expected_sn = tgt_dev->expected_sn;
		if (cmd->sn != expected_sn) {
			scst_dec_on_dev_cmd(cmd);
			TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
				"expected_sn=%d)", cmd, cmd->sn, expected_sn);
			list_add_tail(&cmd->sn_cmd_list_entry,
				&tgt_dev->deferred_cmd_list);
			spin_unlock_bh(&tgt_dev->sn_lock);
			/* !! At this point cmd can be already freed !! */
			goto out_dec_cmd_count;
		TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
			"expected_sn %d, continuing", expected_sn);
		tgt_dev->def_cmd_count--;
		spin_unlock_bh(&tgt_dev->sn_lock);
	rc = scst_do_send_to_midlev(cmd);
	if (rc == SCST_EXEC_NEED_THREAD) {
		TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
			"thread context, rescheduling");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		scst_dec_on_dev_cmd(cmd);
		goto out_dec_cmd_count;
	BUG_ON(rc != SCST_EXEC_COMPLETED);
	/* !! At this point cmd can be already freed !! */
	expected_sn = __scst_inc_expected_sn(tgt_dev);
	cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);
	scst_dec_cmd_count();
	/* !! At this point sess, dev and tgt_dev can be already freed !! */
	TRACE_EXIT_HRES(res);
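/*
 * SN serialization, worked scenario: cmds A and B get sn 5 and 6 in
 * scst_translate_lun().  If B reaches scst_send_to_midlev() first,
 * its sn (6) != expected_sn (5), so B is parked on
 * tgt_dev->deferred_cmd_list.  When A completes,
 * __scst_inc_expected_sn() advances expected_sn to 6 and
 * scst_check_deferred_commands() picks B up, preserving arrival order.
 */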
static struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)
	struct scst_cmd *res;
	int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
	res = scst_alloc_cmd(gfp_mask);
	if (unlikely(res == NULL)) {
	res->sess = orig_cmd->sess;
	res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	res->atomic = scst_cmd_atomic(orig_cmd);
	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;
	res->bufflen = bufsize;
	if (scst_alloc_space(res) != 0)
		PRINT_ERROR("Unable to create buffer (size %d) for "
			"internal cmd", bufsize);
	TRACE_EXIT_HRES((unsigned long)res);
	scst_destroy_cmd(res);

static void scst_free_internal_cmd(struct scst_cmd *cmd)
	if (cmd->bufflen > 0)
		scst_release_space(cmd);
	scst_destroy_cmd(cmd);

static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
	int res = SCST_CMD_STATE_RES_RESTART;
#define sbuf_size 252
	static const unsigned char request_sense[6] =
		{ REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
	struct scst_cmd *rs_cmd;
	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;
	spin_lock_irq(&scst_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
	spin_unlock_irq(&scst_list_lock);
	TRACE_EXIT_RES(res);
static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
	struct scst_cmd *orig_cmd = cmd->orig_cmd;
	len = scst_get_buf_first(cmd, &buf);
	if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
	    (!SCST_NO_SENSE(buf)))
		TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
		memcpy(orig_cmd->sense_buffer, buf,
			(sizeof(orig_cmd->sense_buffer) > len) ?
				len : sizeof(orig_cmd->sense_buffer));
		PRINT_ERROR_PR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	scst_put_buf(cmd, buf);
	scst_free_internal_cmd(cmd);
	TRACE_EXIT_HRES((unsigned long)orig_cmd);
static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
	if (cmd->cdb[0] == REQUEST_SENSE) {
		cmd = scst_complete_request_sense(cmd);
	} else if (scst_check_auto_sense(cmd)) {
		PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
			"without sense data (opcode 0x%x), issuing "
			"REQUEST SENSE", cmd->cdb[0]);
		rc = scst_prepare_request_sense(cmd);
		PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
			"returning HARDWARE ERROR");
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
	type = cmd->dev->handler->type;
	if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
	    cmd->tgt_dev->acg_dev->rd_only_flag &&
	    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
		length = scst_get_buf_first(cmd, &address);
		if (length > 2 && cmd->cdb[0] == MODE_SENSE)
			address[2] |= 0x80;	/* Write Protect */
		else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
			address[3] |= 0x80;	/* Write Protect */
		scst_put_buf(cmd, address);
	/*
	 * Check and clear NormACA option for the device, if necessary,
	 * since we don't support ACA
	 */
	if ((cmd->cdb[0] == INQUIRY) &&
	    !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
	    (cmd->resp_data_len > SCST_INQ_BYTE3))
		/* ToDo: all pages ?? */
		buflen = scst_get_buf_first(cmd, &buffer);
		if (buflen > SCST_INQ_BYTE3) {
			if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
				PRINT_INFO_PR("NormACA set for device: "
					"lun=%Ld, type 0x%02x",
					(uint64_t)cmd->lun, buffer[0]);
			buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_put_buf(cmd, buffer);
	if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
		if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
				&cmd->tgt_dev->tgt_dev_flags)) {
			struct scst_tgt_dev *tgt_dev_tmp;
			TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
				(uint64_t)cmd->lun, cmd->masked_status);
			TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
				sizeof(cmd->sense_buffer));
			/* Clearing the reservation */
			list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
					dev_tgt_dev_list_entry) {
				clear_bit(SCST_TGT_DEV_RESERVED,
					&tgt_dev_tmp->tgt_dev_flags);
			cmd->dev->dev_reserved = 0;
		scst_unblock_dev(cmd->dev);
	if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
	    (cmd->cdb[0] == MODE_SELECT_10) ||
	    (cmd->cdb[0] == LOG_SELECT)))
		if (cmd->status == 0) {
			TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
				"setting the SELECT UA (lun=%Ld)",
				(uint64_t)cmd->lun);
			spin_lock_bh(&scst_temp_UA_lock);
			if (cmd->cdb[0] == LOG_SELECT) {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x02);
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x01);
			scst_process_UA(cmd->dev, cmd, scst_temp_UA,
				sizeof(scst_temp_UA), 1);
			spin_unlock_bh(&scst_temp_UA_lock);
		scst_unblock_dev(cmd->dev);
	TRACE_EXIT_RES(res);
static int scst_dev_done(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int atomic = scst_cmd_atomic(cmd);
	if (atomic && !cmd->dev->handler->dev_done_atomic &&
	    cmd->dev->handler->dev_done)
		TRACE_DBG("Dev handler %s dev_done() cannot be "
			"called in atomic context, rescheduling to the thread",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	if (scst_done_cmd_check(cmd, &res))
	state = SCST_CMD_STATE_XMIT_RESP;
	if (likely(!scst_is_cmd_local(cmd)) &&
	    likely(cmd->dev->handler->dev_done != NULL))
		TRACE_DBG("Calling dev handler %s dev_done(%p)",
			cmd->dev->handler->name, cmd);
		rc = cmd->dev->handler->dev_done(cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
			cmd->dev->handler->name, rc);
		if (rc != SCST_CMD_STATE_DEFAULT)
	case SCST_CMD_STATE_REINIT:
		res = SCST_CMD_STATE_RES_RESTART;
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		res = SCST_CMD_STATE_RES_CONT_SAME;
	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s dev_done() requested "
			"thread context, rescheduling",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		PRINT_ERROR_PR("Dev handler %s dev_done() returned "
			"invalid cmd state %d",
			cmd->dev->handler->name, state);
		PRINT_ERROR_PR("Dev handler %s dev_done() returned "
			"error %d", cmd->dev->handler->name,
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		res = SCST_CMD_STATE_RES_CONT_SAME;
	TRACE_EXIT_HRES(res);

static int scst_xmit_response(struct scst_cmd *cmd)
	int atomic = scst_cmd_atomic(cmd);
	/*
	 * Check here also in order to avoid unnecessary delays of other
	 * commands
	 */
	if (unlikely(cmd->sent_to_midlev == 0) &&
	    (cmd->tgt_dev != NULL))
		TRACE(TRACE_SCSI_SERIALIZING,
			"cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
		scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
		cmd->sent_to_midlev = 1;
	if (atomic && !cmd->tgtt->xmit_response_atomic) {
		TRACE_DBG("%s", "xmit_response() cannot be "
			"called in atomic context, rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();
	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
			TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
				"(tag %d), returning TASK ABORTED", cmd, cmd->tag);
			scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;
	if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		if (atomic && !cmd->tgtt->xmit_response_atomic) {
			TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
		TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
		schedule_timeout_uninterruptible(HZ);
	int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
	res = SCST_CMD_STATE_RES_CONT_NEXT;
	cmd->state = SCST_CMD_STATE_XMIT_WAIT;
	TRACE_DBG("Calling xmit_response(%p)", cmd);
#if defined(DEBUG) || defined(TRACING)
	struct scatterlist *sg = cmd->sg;
	TRACE(TRACE_SEND_BOT,
		"Xmitting %d S/G(s) at %p sg[0].page at %p",
		cmd->sg_cnt, sg, (void *)sg[0].page);
	for (i = 0; i < cmd->sg_cnt; ++i) {
		TRACE_BUFF_FLAG(TRACE_SEND_BOT,
			"Xmitting sg", page_address(sg[i].page),
	if (((scst_random() % 100) == 77))
		rc = SCST_TGT_RES_QUEUE_FULL;
	rc = cmd->tgtt->xmit_response(cmd);
	TRACE_DBG("xmit_response() returned %d", rc);
	if (likely(rc == SCST_TGT_RES_SUCCESS))
	/* Restore the previous state */
	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	case SCST_TGT_RES_QUEUE_FULL:
		if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
	case SCST_TGT_RES_NEED_THREAD_CTX:
		TRACE_DBG("Target driver %s xmit_response() "
			"requested thread context, rescheduling",
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	/* Caution: cmd can be already dead here */
	TRACE_EXIT_HRES(res);
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);
		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;

static int scst_finish_cmd(struct scst_cmd *cmd)
	if (cmd->mem_checked) {
		spin_lock_bh(&scst_cmd_mem_lock);
		scst_cur_cmd_mem -= cmd->bufflen;
		spin_unlock_bh(&scst_cmd_mem_lock);
	spin_lock_irq(&scst_list_lock);
	TRACE_DBG("Deleting cmd %p from cmd list", cmd);
	list_del(&cmd->cmd_list_entry);
	scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
	if (likely(cmd->tgt_dev != NULL))
		cmd->tgt_dev->cmd_count--;
	cmd->sess->sess_cmd_count--;
	list_del(&cmd->search_cmd_list_entry);
	spin_unlock_irq(&scst_list_lock);
	res = SCST_CMD_STATE_RES_CONT_NEXT;
	TRACE_EXIT_HRES(res);

void scst_tgt_cmd_done(struct scst_cmd *cmd)
	BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
	cmd->state = SCST_CMD_STATE_FINISHED;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
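/*
 * Usage sketch (hypothetical target driver, not compiled): the
 * xmit_response()/scst_tgt_cmd_done() pair driving the two functions
 * above.  The "my_*" names are assumed.
 */
#if 0
static int my_xmit_response(struct scst_cmd *cmd)
{
	if (my_queue_response_pdu(cmd) != 0)
		return SCST_TGT_RES_QUEUE_FULL;	/* SCST will retry later */
	return SCST_TGT_RES_SUCCESS;
}

/* Later, e.g. from the ISR, once the response really left the wire: */
static void my_response_sent(struct scst_cmd *cmd)
{
	scst_tgt_cmd_done(cmd);	/* cmd proceeds to FINISHED */
}
#endif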
/*
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 * Called under scst_list_lock and IRQs disabled
 */
static int scst_translate_lun(struct scst_cmd *cmd)
	struct scst_tgt_dev *tgt_dev = NULL;
	scst_inc_cmd_count();
	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
		TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
			(uint64_t)cmd->lun);
		list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
				sess_tgt_dev_list_entry)
			if (tgt_dev->acg_dev->lun == cmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);
				if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
					PRINT_INFO_PR("Dev handler for device "
						"%Ld is NULL, the device will not be "
						"visible remotely", (uint64_t)cmd->lun);
				if (cmd->state == SCST_CMD_STATE_REINIT) {
					cmd->tgt_dev_saved->cmd_count--;
					TRACE(TRACE_SCSI_SERIALIZING,
						"SCST_CMD_STATE_REINIT: "
						"incrementing expected_sn on tgt_dev_saved %p",
						cmd->tgt_dev_saved);
					scst_inc_expected_sn_unblock(
						cmd->tgt_dev_saved, cmd, 1);
				cmd->tgt_dev = tgt_dev;
				tgt_dev->cmd_count++;
				cmd->dev = tgt_dev->acg_dev->dev;
				/* ToDo: cmd->queue_type */
				/* scst_list_lock is enough to protect that */
				cmd->sn = tgt_dev->next_sn;
				TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
					"cmd->sn: %d", cmd->sn);
		TRACE_DBG("tgt_dev for lun %Ld not found, command to "
			"nonexistent LU?", (uint64_t)cmd->lun);
		scst_dec_cmd_count();
	if (!cmd->sess->waiting) {
		TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
		list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
			&scst_dev_wait_sess_list);
		cmd->sess->waiting = 1;
	scst_dec_cmd_count();
	TRACE_EXIT_RES(res);

/* Called under scst_list_lock and IRQs disabled */
static int scst_process_init_cmd(struct scst_cmd *cmd)
	res = scst_translate_lun(cmd);
	if (likely(res == 0)) {
		cmd->state = SCST_CMD_STATE_DEV_PARSE;
		if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
			TRACE(TRACE_RETRY, "Too many pending commands in "
				"session, returning BUSY to initiator \"%s\"",
				(cmd->sess->initiator_name[0] == '\0') ?
				"Anonymous" : cmd->sess->initiator_name);
			cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	} else if (res < 0) {
		TRACE_DBG("Finishing cmd %p", cmd);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	TRACE_EXIT_RES(res);
/*
 * Called under scst_list_lock and IRQs disabled.
 * We don't drop the lock anywhere inside, because command execution
 * has to be serialized, i.e. commands must be executed in the order
 * of their arrival, and we set this order inside scst_translate_lun().
 */
static int scst_do_job_init(struct list_head *init_cmd_list)
	if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
		while (!list_empty(init_cmd_list)) {
			struct scst_cmd *cmd = list_entry(init_cmd_list->next,
			res = scst_process_init_cmd(cmd);
	TRACE_EXIT_RES(res);

/* Called with no locks held */
static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
	cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
			SCST_CONTEXT_DIRECT_ATOMIC);
	cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
	switch (cmd->state) {
	case SCST_CMD_STATE_DEV_PARSE:
		res = scst_parse_cmd(cmd);
	case SCST_CMD_STATE_PREPARE_SPACE:
		res = scst_prepare_space(cmd);
	case SCST_CMD_STATE_RDY_TO_XFER:
		res = scst_rdy_to_xfer(cmd);
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
		res = scst_send_to_midlev(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	case SCST_CMD_STATE_DEV_DONE:
		res = scst_dev_done(cmd);
	case SCST_CMD_STATE_XMIT_RESP:
		res = scst_xmit_response(cmd);
	case SCST_CMD_STATE_FINISHED:
		res = scst_finish_cmd(cmd);
		PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
		res = SCST_CMD_STATE_RES_CONT_NEXT;
	} while (res == SCST_CMD_STATE_RES_CONT_SAME);
	if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		spin_lock_irq(&scst_list_lock);
	} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		spin_lock_irq(&scst_list_lock);
		switch (cmd->state) {
		case SCST_CMD_STATE_DEV_PARSE:
		case SCST_CMD_STATE_PREPARE_SPACE:
		case SCST_CMD_STATE_RDY_TO_XFER:
		case SCST_CMD_STATE_SEND_TO_MIDLEV:
		case SCST_CMD_STATE_DEV_DONE:
		case SCST_CMD_STATE_XMIT_RESP:
		case SCST_CMD_STATE_FINISHED:
			TRACE_DBG("Moving cmd %p to active cmd list", cmd);
			list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2538 /* not very valid commands */
2539 case SCST_CMD_STATE_DEFAULT:
2540 case SCST_CMD_STATE_NEED_THREAD_CTX:
2541 PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2542 "useful list (left on scst cmd list)", cmd,
2544 spin_unlock_irq(&scst_list_lock);
2546 spin_lock_irq(&scst_list_lock);
2552 cmd->non_atomic_only = 1;
2554 spin_unlock_irq(&scst_list_lock);
2555 wake_up(&scst_list_waitQ);
2556 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2557 if (cmd->state == SCST_CMD_STATE_REINIT) {
2558 spin_lock_irq(&scst_list_lock);
2559 TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2560 list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2562 spin_unlock_irq(&scst_list_lock);
2568 TRACE_EXIT_RES(res);
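
/*
 * Normal forward progression of cmd->state through the loop above:
 *
 *   DEV_PARSE -> PREPARE_SPACE -> RDY_TO_XFER -> SEND_TO_MIDLEV
 *     -> DEV_DONE -> XMIT_RESP -> FINISHED
 *
 * Each state handler returns RES_CONT_SAME to stay in the loop,
 * RES_CONT_NEXT to yield to the next command, RES_NEED_THREAD to be
 * retried in thread (non-atomic) context, or RES_RESTART to go back
 * through the init list (REINIT).
 */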
/* Called under scst_list_lock and IRQs disabled */
static void scst_do_job_active(struct list_head *active_cmd_list, int context)
{
	int res;
	struct scst_cmd *cmd;
	int atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
			SCST_CONTEXT_DIRECT_ATOMIC);

	TRACE_ENTRY();

	{
		int c = (context & ~SCST_PROCESSIBLE_ENV);
		WARN_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) &&
			(c != SCST_CONTEXT_DIRECT));
	}

	tm_dbg_check_released_cmds();

restart:
	list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
		if (atomic && cmd->non_atomic_only) {
			TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
			continue;
		}
		if (tm_dbg_check_cmd(cmd) != 0)
			goto restart;
		res = scst_process_active_cmd(cmd, context, NULL, 1);
		if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
			goto restart;
		} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
			goto restart;
		} else if (res == SCST_CMD_STATE_RES_RESTART) {
			break;
		}
	}

	TRACE_EXIT();
}
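
/*
 * Wakeup condition for scst_cmd_thread(): there is work on the active
 * list, or on the init list (unless SCST is suspended), or a shutdown,
 * thread-reduction, or TM-debug event is pending.
 */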
static inline int test_cmd_lists(void)
{
	int res = !list_empty(&scst_active_cmd_list) ||
		  (!list_empty(&scst_init_cmd_list) &&
		   !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
		  test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
		  unlikely(scst_shut_threads_count > 0) ||
		  tm_dbg_is_release();
	return res;
}
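
/*
 * Main processing kernel thread. One or more instances run concurrently
 * (tracked via scst_threads_count); all of them sleep exclusively on
 * scst_list_waitQ and pull work from the init and active cmd lists.
 */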
int scst_cmd_thread(void *arg)
{
	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
	int n;

	TRACE_ENTRY();

	spin_lock(&lock);
	n = scst_thread_num++;
	spin_unlock(&lock);

	daemonize("scsi_tgt%d", n);
	recalc_sigpending();
	set_user_nice(current, 10);
	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&scst_list_lock);
	while (1) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_cmd_lists()) {
			add_wait_queue_exclusive(&scst_list_waitQ, &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_cmd_lists())
					break;
				spin_unlock_irq(&scst_list_lock);
				schedule();
				spin_lock_irq(&scst_list_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_list_waitQ, &wait);
		}

		scst_do_job_init(&scst_init_cmd_list);
		scst_do_job_active(&scst_active_cmd_list,
			SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);

		if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
		    list_empty(&scst_cmd_list) &&
		    list_empty(&scst_active_cmd_list) &&
		    list_empty(&scst_init_cmd_list)) {
			break;
		}

		if (unlikely(scst_shut_threads_count > 0)) {
			scst_shut_threads_count--;
			break;
		}
	}
	spin_unlock_irq(&scst_list_lock);

	if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
		smp_mb__after_atomic_dec();
		TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
		up(scst_shutdown_mutex);
	}

	TRACE_EXIT();
	return 0;
}
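
/*
 * Per-CPU tasklet body: does the same work as one pass of the
 * scst_cmd_thread() loop, but in atomic context, so it runs the state
 * machine with SCST_CONTEXT_DIRECT_ATOMIC. Anything that needs a thread
 * comes back as SCST_CMD_STATE_RES_NEED_THREAD and is picked up by the
 * threads via scst_list_waitQ.
 */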
void scst_cmd_tasklet(long p)
{
	TRACE_ENTRY();

	spin_lock_irq(&scst_list_lock);

	scst_do_job_init(&scst_init_cmd_list);
	scst_do_job_active(&scst_active_cmd_list,
		SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);

	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT();
}
/*
 * Returns 0 on success, < 0 if there is no device handler or
 * > 0 if SCST_FLAG_SUSPENDED is set.
 */
static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	int res = -1;

	TRACE_ENTRY();

	TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
		(uint64_t)mcmd->lun);

	spin_lock_irq(&scst_list_lock);
	scst_inc_cmd_count();
	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
		list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
			sess_tgt_dev_list_entry)
		{
			if (tgt_dev->acg_dev->lun == mcmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);
				mcmd->mcmd_tgt_dev = tgt_dev;
				res = 0;
				break;
			}
		}
		if (mcmd->mcmd_tgt_dev == NULL)
			scst_dec_cmd_count();
	} else {
		if (!mcmd->sess->waiting) {
			TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
				mcmd->sess);
			list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
				&scst_dev_wait_sess_list);
			mcmd->sess->waiting = 1;
		}
		scst_dec_cmd_count();
		res = 1;
	}
	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_HRES(res);
	return res;
}
/* Called under scst_list_lock and IRQ off */
static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
	struct scst_mgmt_cmd *mcmd)
{
	TRACE_ENTRY();

	TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
		"mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
		mcmd->cmd_wait_count);

	cmd->mgmt_cmnd = NULL;

	if (cmd->completed)
		mcmd->completed_cmd_count++;

	mcmd->cmd_wait_count--;
	if (mcmd->cmd_wait_count > 0) {
		TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
			mcmd->cmd_wait_count);
		goto out;
	}

	mcmd->state = SCST_MGMT_CMD_STATE_DONE;

	if (mcmd->completed) {
		TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
			mcmd);
		list_move_tail(&mcmd->mgmt_cmd_list_entry,
			&scst_active_mgmt_cmd_list);
	}

	wake_up(&scst_mgmt_cmd_list_waitQ);

out:
	TRACE_EXIT();
}
static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
	struct scst_tgt_dev *tgt_dev, int set_status)
{
	int res = SCST_DEV_TM_NOT_COMPLETED;
	if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
		int irq = irqs_disabled();
		TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
			tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
		/* Keep BHs off around the call, as the handlers expect */
		if (!irq)
			local_bh_disable();
		res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd,
			tgt_dev);
		if (!irq)
			local_bh_enable();
		TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
			tgt_dev->acg_dev->dev->handler->name, res);
		if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
			mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ?
				SCST_MGMT_STATUS_SUCCESS :
				SCST_MGMT_STATUS_FAILED;
		}
	}
	return res;
}
static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
{
	switch (mgmt_fn) {
	case SCST_ABORT_TASK:
	case SCST_ABORT_TASK_SET:
	case SCST_CLEAR_TASK_SET:
		return 1;
	default:
		return 0;
	}
}
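
/*
 * For the "strict" functions above, any aborted task that had already
 * completed makes the whole TM function fail: see
 * scst_mgmt_cmd_send_done(), which sets SCST_MGMT_STATUS_FAILED when
 * completed_cmd_count > 0 for a strict function.
 */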
/*
 * Called under scst_list_lock and IRQ off (to protect cmd
 * from being destroyed) + BHs also off
 */
void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
	int other_ini, int call_dev_task_mgmt_fn)
{
	TRACE_ENTRY();

	TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);

	if (other_ini) {
		set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
		smp_mb__after_set_bit();
	}
	set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
	smp_mb__after_set_bit();

	if (call_dev_task_mgmt_fn && cmd->tgt_dev)
		scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);

	if (mcmd) {
		int defer;

		if (cmd->tgtt->tm_sync_reply)
			defer = 1;
		else if (scst_is_strict_mgmt_fn(mcmd->fn))
			defer = test_bit(SCST_CMD_EXECUTING,
				&cmd->cmd_flags);
		else
			defer = test_bit(SCST_CMD_XMITTING,
				&cmd->cmd_flags);

		if (defer) {
			/*
			 * Delay the response until the command's finish in
			 * order to guarantee that "no further responses from
			 * the task are sent to the SCSI initiator port" after
			 * the response from the TM function is sent (SAM)
			 */
			TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
				"xmitted (state %d), deferring ABORT...", cmd,
				cmd->tag, cmd->state);

			if (cmd->mgmt_cmnd) {
				printk(KERN_ALERT "cmd %p (tag %d, state %d) "
					"has non-NULL mgmt_cmnd %p!!! Current "
					"mcmd %p\n", cmd, cmd->tag, cmd->state,
					cmd->mgmt_cmnd, mcmd);
			}
			BUG_ON(cmd->mgmt_cmnd);
			mcmd->cmd_wait_count++;
			cmd->mgmt_cmnd = mcmd;
		}
	}

	tm_dbg_release_cmd(cmd);

	TRACE_EXIT();
}
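
/*
 * Counterpart of the deferral above: when a deferred command finally
 * finishes, scst_complete_cmd_mgmt() drops mcmd->cmd_wait_count and,
 * once it reaches zero, moves the mgmt command to
 * SCST_MGMT_CMD_STATE_DONE and wakes up the management thread.
 */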
/* Called under scst_list_lock and IRQ off */
static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
{
	int res;

	if (mcmd->cmd_wait_count != 0) {
		TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
			"wait", mcmd->cmd_wait_count);
		mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
		res = -1;
	} else {
		mcmd->state = SCST_MGMT_CMD_STATE_DONE;
		res = 0;
	}
	mcmd->completed = 1;
	return res;
}
static void scst_unblock_aborted_cmds(int scst_mutex_held)
{
	struct scst_device *dev;

	TRACE_ENTRY();

	if (!scst_mutex_held)
		down(&scst_mutex);

	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		struct scst_cmd *cmd, *tcmd;
		spin_lock_bh(&dev->dev_lock);
		list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
					blocked_cmd_list_entry) {
			if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
				list_del(&cmd->blocked_cmd_list_entry);
				TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
					"to active cmd list", cmd);
				spin_lock_irq(&scst_list_lock);
				list_move_tail(&cmd->cmd_list_entry,
					&scst_active_cmd_list);
				spin_unlock_irq(&scst_list_lock);
			}
		}
		spin_unlock_bh(&dev->dev_lock);
	}

	if (!scst_mutex_held)
		up(&scst_mutex);

	wake_up(&scst_list_waitQ);

	TRACE_EXIT();
}
/* Aborts all commands of tgt_dev (or, before LUN translation, of its LUN) */
static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
	struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
{
	struct scst_cmd *cmd;
	struct scst_session *sess = tgt_dev->sess;

	TRACE_ENTRY();

	spin_lock_irq(&scst_list_lock);

	TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
	list_for_each_entry(cmd, &sess->search_cmd_list,
			search_cmd_list_entry) {
		if ((cmd->tgt_dev == NULL) &&
		    (cmd->lun == tgt_dev->acg_dev->lun))
			goto abort;
		if (cmd->tgt_dev != tgt_dev)
			continue;
abort:
		scst_abort_cmd(cmd, mcmd, other_ini, 0);
	}
	spin_unlock_irq(&scst_list_lock);

	scst_unblock_aborted_cmds(scst_mutex_held);

	TRACE_EXIT();
}
/* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
{
	int res;
	struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
	struct scst_device *dev = tgt_dev->acg_dev->dev;

	TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
		tgt_dev->acg_dev->lun, mcmd);

	spin_lock_bh(&dev->dev_lock);
	__scst_block_dev(dev);
	spin_unlock_bh(&dev->dev_lock);

	__scst_abort_task_set(mcmd, tgt_dev, 0, 0);
	scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);

	res = scst_set_mcmd_next_state(mcmd);

	TRACE_EXIT_RES(res);
	return res;
}
static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
{
	/*
	 * No need for special protection of SCST_FLAG_TM_ACTIVE, since
	 * we can be called only from a single thread.
	 */
	if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
		TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
			mcmd);
		if (!locked)
			spin_lock_irq(&scst_list_lock);
		list_move_tail(&mcmd->mgmt_cmd_list_entry,
			&scst_delayed_mgmt_cmd_list);
		if (!locked)
			spin_unlock_irq(&scst_list_lock);
		return -1;
	} else {
		set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
		return 0;
	}
}
/* Returns 0 if the command processing should be continued,
 * >0 if it should be requeued, <0 otherwise */
static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
{
	int res = 0, rc;

	TRACE_ENTRY();

	res = scst_check_delay_mgmt_cmd(mcmd, 1);
	if (res != 0)
		goto out;

	if (mcmd->fn == SCST_ABORT_TASK) {
		struct scst_session *sess = mcmd->sess;
		struct scst_cmd *cmd;

		spin_lock_irq(&scst_list_lock);
		cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
		if (cmd == NULL) {
			TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
				"tag %d not found", mcmd->tag);
			mcmd->status = SCST_MGMT_STATUS_FAILED;
			mcmd->state = SCST_MGMT_CMD_STATE_DONE;
		} else {
			TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
				"aborting it", cmd, mcmd->tag, cmd->sn);
			mcmd->cmd_to_abort = cmd;
			scst_abort_cmd(cmd, mcmd, 0, 1);
			res = scst_set_mcmd_next_state(mcmd);
			mcmd->cmd_to_abort = NULL; /* just in case */
		}
		spin_unlock_irq(&scst_list_lock);
	} else {
		rc = scst_mgmt_translate_lun(mcmd);
		if (rc < 0) {
			PRINT_ERROR_PR("Corresponding device for lun %Ld not "
				"found", (uint64_t)mcmd->lun);
			mcmd->status = SCST_MGMT_STATUS_FAILED;
			mcmd->state = SCST_MGMT_CMD_STATE_DONE;
		} else if (rc == 0)
			mcmd->state = SCST_MGMT_CMD_STATE_READY;
		else
			res = rc;
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
/* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
{
	int res, rc;
	struct scst_device *dev, *d;
	struct scst_tgt_dev *tgt_dev;
	LIST_HEAD(host_devs);

	TRACE_ENTRY();

	TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
		mcmd, mcmd->sess->sess_cmd_count);

	down(&scst_mutex);

	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		int found = 0;

		spin_lock_bh(&dev->dev_lock);
		__scst_block_dev(dev);
		scst_process_reset(dev, mcmd->sess, NULL, mcmd);
		spin_unlock_bh(&dev->dev_lock);

		list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry)
		{
			rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
			if (rc == SCST_DEV_TM_NOT_COMPLETED)
				continue;
			else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
				mcmd->status = SCST_MGMT_STATUS_FAILED;
		}

		if (dev->scsi_dev == NULL)
			continue;

		list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
			if (dev->scsi_dev->host->host_no ==
			    d->scsi_dev->host->host_no) {
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&dev->reset_dev_list_entry, &host_devs);
	}

	/*
	 * We assume here that the completion callbacks will be called for
	 * all commands already sitting on the devices on/after
	 * scsi_reset_provider() completion.
	 */

	list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
		/* dev->scsi_dev must be non-NULL here */
		TRACE(TRACE_MGMT, "Resetting host %d bus ",
			dev->scsi_dev->host->host_no);
		rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
		TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
			dev->scsi_dev->host->host_no,
			(rc == SUCCESS) ? "SUCCESS" : "FAILED");
		if (rc != SUCCESS) {
			/* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
			mcmd->status = SCST_MGMT_STATUS_FAILED;
		}
	}

	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		if (dev->scsi_dev != NULL)
			dev->scsi_dev->was_reset = 0;
	}

	up(&scst_mutex);

	spin_lock_irq(&scst_list_lock);
	tm_dbg_task_mgmt("TARGET RESET");
	res = scst_set_mcmd_next_state(mcmd);
	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_RES(res);
	return res;
}
/* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
{
	int res, rc;
	struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
	struct scst_device *dev = tgt_dev->acg_dev->dev;

	TRACE_ENTRY();

	TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
		mcmd);

	spin_lock_bh(&dev->dev_lock);
	__scst_block_dev(dev);
	scst_process_reset(dev, mcmd->sess, NULL, mcmd);
	spin_unlock_bh(&dev->dev_lock);

	rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
	if (rc != SCST_DEV_TM_NOT_COMPLETED)
		goto out_tm_dbg;

	if (dev->scsi_dev != NULL) {
		TRACE(TRACE_MGMT, "Resetting host %d bus ",
			dev->scsi_dev->host->host_no);
		rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
		if (rc != SUCCESS)
			mcmd->status = SCST_MGMT_STATUS_FAILED;
		dev->scsi_dev->was_reset = 0;
	}

out_tm_dbg:
	spin_lock_irq(&scst_list_lock);
	tm_dbg_task_mgmt("LUN RESET");
	res = scst_set_mcmd_next_state(mcmd);
	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_RES(res);
	return res;
}
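
/*
 * Note that the device blocked by __scst_block_dev() above stays
 * blocked until the TM function completes: scst_mgmt_cmd_send_done()
 * calls scst_unblock_dev() for the affected device(s).
 */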
/* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
	int nexus_loss)
{
	int res;
	struct scst_session *sess = mcmd->sess;
	struct scst_tgt_dev *tgt_dev;

	TRACE_ENTRY();

	if (nexus_loss) {
		TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
			mcmd);
	} else {
		TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
			mcmd);
	}

	down(&scst_mutex);
	list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
		sess_tgt_dev_list_entry)
	{
		struct scst_device *dev = tgt_dev->acg_dev->dev;
		int rc;

		spin_lock_bh(&dev->dev_lock);
		__scst_block_dev(dev);
		spin_unlock_bh(&dev->dev_lock);

		rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
		if (rc == SCST_DEV_TM_COMPLETED_FAILED)
			mcmd->status = SCST_MGMT_STATUS_FAILED;

		__scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
		if (nexus_loss)
			scst_reset_tgt_dev(tgt_dev, 1);
	}
	up(&scst_mutex);

	spin_lock_irq(&scst_list_lock);
	res = scst_set_mcmd_next_state(mcmd);
	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_RES(res);
	return res;
}
/* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
	int nexus_loss)
{
	int res;
	struct scst_tgt *tgt = mcmd->sess->tgt;
	struct scst_session *sess;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev;

	TRACE_ENTRY();

	if (nexus_loss) {
		TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
			mcmd);
	} else {
		TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
			mcmd);
	}

	down(&scst_mutex);

	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		spin_lock_bh(&dev->dev_lock);
		__scst_block_dev(dev);
		spin_unlock_bh(&dev->dev_lock);
	}

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
			sess_tgt_dev_list_entry)
		{
			int rc;

			rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
			if (rc == SCST_DEV_TM_COMPLETED_FAILED)
				mcmd->status = SCST_MGMT_STATUS_FAILED;

			__scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
			if (nexus_loss)
				scst_reset_tgt_dev(tgt_dev, 1);
		}
	}

	up(&scst_mutex);

	spin_lock_irq(&scst_list_lock);
	res = scst_set_mcmd_next_state(mcmd);
	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_RES(res);
	return res;
}
/* Returns 0 if the command processing should be continued, <0 otherwise */
static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
{
	int res = 0;

	TRACE_ENTRY();

	mcmd->status = SCST_MGMT_STATUS_SUCCESS;

	switch (mcmd->fn) {
	case SCST_ABORT_TASK_SET:
	case SCST_CLEAR_TASK_SET:
		res = scst_abort_task_set(mcmd);
		break;

	case SCST_LUN_RESET:
		res = scst_lun_reset(mcmd);
		break;

	case SCST_TARGET_RESET:
		res = scst_target_reset(mcmd);
		break;

	case SCST_ABORT_ALL_TASKS_SESS:
		res = scst_abort_all_nexus_loss_sess(mcmd, 0);
		break;

	case SCST_NEXUS_LOSS_SESS:
		res = scst_abort_all_nexus_loss_sess(mcmd, 1);
		break;

	case SCST_ABORT_ALL_TASKS:
		res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
		break;

	case SCST_NEXUS_LOSS:
		res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
		break;

	case SCST_CLEAR_ACA:
		scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
		/* Nothing else to do (yet) */
		mcmd->state = SCST_MGMT_CMD_STATE_DONE;
		break;

	default:
		PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
		mcmd->status = SCST_MGMT_STATUS_FAILED;
		mcmd->state = SCST_MGMT_CMD_STATE_DONE;
		break;
	}

	TRACE_EXIT_RES(res);
	return res;
}
static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
{
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev;

	TRACE_ENTRY();

	clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
	if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
		struct scst_mgmt_cmd *m;
		spin_lock_irq(&scst_list_lock);
		m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
			mgmt_cmd_list_entry);
		TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
			"cmd list", m);
		list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
		spin_unlock_irq(&scst_list_lock);
	}

	mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
	if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
		mcmd->status = SCST_MGMT_STATUS_FAILED;

	if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
		TRACE_DBG("Calling target %s task_mgmt_fn_done()",
			mcmd->sess->tgt->tgtt->name);
		mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
		TRACE_MGMT_DBG("Target %s task_mgmt_fn_done() returned",
			mcmd->sess->tgt->tgtt->name);
	}

	switch (mcmd->fn) {
	case SCST_ABORT_TASK_SET:
	case SCST_CLEAR_TASK_SET:
	case SCST_LUN_RESET:
		scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
		break;

	case SCST_TARGET_RESET:
	case SCST_ABORT_ALL_TASKS:
	case SCST_NEXUS_LOSS:
		down(&scst_mutex);
		list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
			scst_unblock_dev(dev);
		}
		up(&scst_mutex);
		break;

	case SCST_NEXUS_LOSS_SESS:
	case SCST_ABORT_ALL_TASKS_SESS:
		down(&scst_mutex);
		list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
				sess_tgt_dev_list_entry) {
			scst_unblock_dev(tgt_dev->acg_dev->dev);
		}
		up(&scst_mutex);
		break;

	case SCST_CLEAR_ACA:
	default:
		break;
	}

	mcmd->tgt_priv = NULL;

	TRACE_EXIT();
}
/* Returns >0 if cmd should be requeued */
static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
	int res = 0;

	TRACE_ENTRY();

	TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);

	while (1) {
		switch (mcmd->state) {
		case SCST_MGMT_CMD_STATE_INIT:
			res = scst_mgmt_cmd_init(mcmd);
			if (res != 0)
				goto out;
			break;

		case SCST_MGMT_CMD_STATE_READY:
			if (scst_mgmt_cmd_exec(mcmd))
				goto out;
			break;

		case SCST_MGMT_CMD_STATE_DONE:
			scst_mgmt_cmd_send_done(mcmd);
			break;

		case SCST_MGMT_CMD_STATE_FINISHED:
			goto out_free;

		case SCST_MGMT_CMD_STATE_EXECUTING:
			goto out;

		default:
			PRINT_ERROR_PR("Unknown state %d of management command",
				mcmd->state);
			res = -1;
			goto out_free;
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	scst_free_mgmt_cmd(mcmd, 1);
	goto out;
}
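
/*
 * State flow of a management command, as implemented above:
 *
 *   INIT -> READY -> DONE -> FINISHED (freed)
 *             \-> EXECUTING (waiting for aborted cmds to finish;
 *                 scst_complete_cmd_mgmt() then moves it to DONE)
 *
 * scst_mgmt_cmd_init() can also go straight to DONE (lookup failures)
 * or ask for a requeue (res > 0) while SCST is suspended.
 */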
static inline int test_mgmt_cmd_list(void)
{
	int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
		   !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
		  test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
	return res;
}
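
/*
 * Kernel thread that processes the active mgmt cmd list. It mirrors
 * scst_cmd_thread(): it sleeps on scst_mgmt_cmd_list_waitQ until
 * test_mgmt_cmd_list() reports work, then processes and, if necessary,
 * requeues management commands.
 */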
int scst_mgmt_cmd_thread(void *arg)
{
	struct scst_mgmt_cmd *mcmd;

	TRACE_ENTRY();

	daemonize("scsi_tgt_mc");
	recalc_sigpending();
	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&scst_list_lock);
	while (1) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_mgmt_cmd_list()) {
			add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
						 &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_mgmt_cmd_list())
					break;
				spin_unlock_irq(&scst_list_lock);
				schedule();
				spin_lock_irq(&scst_list_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
		}

		while (!list_empty(&scst_active_mgmt_cmd_list) &&
		       !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
		{
			int rc;
			mcmd = list_entry(scst_active_mgmt_cmd_list.next,
					  typeof(*mcmd), mgmt_cmd_list_entry);
			TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
				mcmd);
			list_move_tail(&mcmd->mgmt_cmd_list_entry,
				&scst_mgmt_cmd_list);
			spin_unlock_irq(&scst_list_lock);
			rc = scst_process_mgmt_cmd(mcmd);
			spin_lock_irq(&scst_list_lock);
			if (rc > 0) {
				TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
					"of active mgmt cmd list", mcmd);
				list_move(&mcmd->mgmt_cmd_list_entry,
					&scst_active_mgmt_cmd_list);
			}
		}

		if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
		    list_empty(&scst_active_mgmt_cmd_list))
		{
			break;
		}
	}
	spin_unlock_irq(&scst_list_lock);

	if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
		smp_mb__after_atomic_dec();
		TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
		up(scst_shutdown_mutex);
	}

	TRACE_EXIT();
	return 0;
}
static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
	*sess, int fn, int atomic, void *tgt_priv)
{
	struct scst_mgmt_cmd *mcmd = NULL;

	TRACE_ENTRY();

	if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
		PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
			"(target %s)", sess->tgt->tgtt->name);
		goto out;
	}

	mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (mcmd == NULL)
		goto out;

	mcmd->sess = sess;
	mcmd->fn = fn;
	mcmd->state = SCST_MGMT_CMD_STATE_INIT;
	mcmd->tgt_priv = tgt_priv;

out:
	TRACE_EXIT();
	return mcmd;
}
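
/*
 * A minimal usage sketch (hypothetical, for illustration only) of how a
 * target driver entry point would combine the pre/post helpers around
 * here, e.g. for ABORT TASK:
 *
 *	struct scst_mgmt_cmd *mcmd;
 *
 *	mcmd = scst_pre_rx_mgmt_cmd(sess, SCST_ABORT_TASK, atomic,
 *			tgt_priv);
 *	if (mcmd == NULL)
 *		return -ENOMEM;
 *	mcmd->tag = tag;	// tag of the command to abort
 *	return scst_post_rx_mgmt_cmd(sess, mcmd);
 */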
static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
	struct scst_mgmt_cmd *mcmd)
{
	unsigned long flags;
	int res = 0;

	TRACE_ENTRY();

	scst_sess_get(sess);

	spin_lock_irqsave(&scst_list_lock, flags);

	sess->sess_cmd_count++;

	if (unlikely(sess->shutting_down)) {
		PRINT_ERROR_PR("%s",
			"New mgmt cmd while shutting down the session");
		BUG();
	}

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		switch (sess->init_phase) {
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
				mcmd);
			list_add_tail(&mcmd->mgmt_cmd_list_entry,
				&sess->init_deferred_mcmd_list);
			goto out_unlock;
		case SCST_SESS_IPH_SUCCESS:
			break;
		case SCST_SESS_IPH_FAILED:
			res = -1;
			goto out_unlock;
		default:
			BUG();
		}
	}

	TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
	list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);

	spin_unlock_irqrestore(&scst_list_lock, flags);

	wake_up(&scst_mgmt_cmd_list_waitQ);