/*
 * Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/unistd.h>
#include <asm/string.h>

#include "scst_debug.h"
#include "scst_priv.h"
static int scst_do_job_init(struct list_head *init_cmd_list);

static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
	int left_locked);

static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
	struct scst_mgmt_cmd *mcmd);
/* scst_list_lock assumed to be held */
static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
	unsigned long *pflags, int left_locked)

	TRACE_DBG("Moving cmd %p to cmd list", cmd);
	list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);

	/* This is an inline func., so the unneeded branch will be optimized out */
	if (pflags)
		spin_unlock_irqrestore(&scst_list_lock, *pflags);
	else
		spin_unlock_irq(&scst_list_lock);

	res = __scst_process_active_cmd(cmd, context, left_locked);
static inline void scst_schedule_tasklet(void)

	struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];

#if 0 /* Looks like #else is better for performance */
	if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))

		/*
		 * We suppose that the other CPU(s) are rather idle, so we
		 * ask one of them to help
		 */
		TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
			"instead", smp_processor_id());
		wake_up(&scst_list_waitQ);
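
/*
 * How a command enters SCST, in outline: the target driver accepts a
 * SCSI command from the wire, hands it over with scst_rx_cmd() and,
 * once its own preprocessing is done, starts SCST's processing with
 * scst_cmd_init_done(). A minimal driver-side sketch (the variables
 * sess, lun_buf, lun_buf_len, cdb_buf, cdb_buf_len and tag belong to
 * the hypothetical driver and are purely illustrative):
 *
 *	cmd = scst_rx_cmd(sess, lun_buf, lun_buf_len, cdb_buf,
 *			  cdb_buf_len, in_irq() ? 1 : 0);
 *	if (cmd == NULL)
 *		return;			// fail the cmd at transport level
 *	cmd->tag = tag;			// transport-assigned tag
 *	scst_cmd_init_done(cmd, SCST_CONTEXT_DIRECT);
 */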
/*
 * Must not be called in parallel with scst_unregister_session() for the
 * same session.
 */
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
	const uint8_t *lun, int lun_len,
	const uint8_t *cdb, int cdb_len, int atomic)

	if (unlikely(sess->shutting_down)) {
		PRINT_ERROR_PR("%s", "New cmd while shutting down the session");

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);

	cmd->tgt = sess->tgt;
	cmd->tgtt = sess->tgt->tgtt;
	cmd->state = SCST_CMD_STATE_INIT_WAIT;

	/*
	 * For both a wrong LUN and a wrong CDB, the error reporting is
	 * deferred to scst_cmd_init_done()
	 */
	cmd->lun = scst_unpack_lun(lun, lun_len);

	if (cdb_len <= MAX_COMMAND_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;

	TRACE_DBG("cmd %p, sess %p", cmd, sess);
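
/*
 * A note on the preferred-context values used below:
 * SCST_CONTEXT_DIRECT and SCST_CONTEXT_DIRECT_ATOMIC process the
 * command on the caller's stack (the latter without sleeping),
 * SCST_CONTEXT_TASKLET defers it to a per-CPU tasklet and
 * SCST_CONTEXT_THREAD to one of the scsi_tgt kernel threads. As the
 * check below shows, DIRECT* is not allowed in IRQ context and is
 * downgraded to TASKLET with an error message.
 */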
void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)

	unsigned long flags = 0;
	struct scst_session *sess = cmd->sess;

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag,
		(uint64_t)cmd->lun, cmd->cdb_len);
	TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);

	if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
		(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))

		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_TASKLET;

	spin_lock_irqsave(&scst_list_lock, flags);

	/* Let's do it here, this will save us a lock or an atomic */
	sess->sess_cmd_count++;

	list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		switch(sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:

		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			goto out_unlock_flags;
		case SCST_SESS_IPH_FAILED:

			cmd->state = SCST_CMD_STATE_XMIT_RESP;
			TRACE_DBG("Adding cmd %p to active cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&scst_active_cmd_list);

	if (unlikely(cmd->lun == (lun_t)-1)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);

	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);

	cmd->state = SCST_CMD_STATE_INIT;

	TRACE_DBG("Moving cmd %p to init cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);

	switch (pref_context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		res = scst_do_job_init(&scst_init_cmd_list);

		goto out_unlock_flags;

	case SCST_CONTEXT_THREAD:
		goto out_thread_unlock_flags;

	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet();
		goto out_unlock_flags;

		PRINT_ERROR_PR("Context %x is undefined, using thread one",
			pref_context);
		goto out_thread_unlock_flags;

	switch (pref_context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, pref_context, &flags, 0);

	case SCST_CONTEXT_THREAD:
		goto out_thread_unlock_flags;

	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet();
		goto out_unlock_flags;

		PRINT_ERROR_PR("Context %x is undefined, using thread one",
			pref_context);
		goto out_thread_unlock_flags;

out_unlock_flags:
	spin_unlock_irqrestore(&scst_list_lock, flags);

out_thread_unlock_flags:
	cmd->non_atomic_only = 1;
	spin_unlock_irqrestore(&scst_list_lock, flags);
	wake_up(&scst_list_waitQ);
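
/*
 * scst_parse_cmd() in outline: decode the CDB via scst_get_cdb_info()
 * (falling back to the initiator-supplied expected values for unknown
 * opcodes), reject NACA and linked commands, let the device handler's
 * parse() callback refine direction/length/state, and finally
 * sanity-check the resulting combination of data_direction, bufflen
 * and state before moving on to buffer allocation.
 */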
static int scst_parse_cmd(struct scst_cmd *cmd)

	int res = SCST_CMD_STATE_RES_CONT_SAME;

	struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;
	struct scst_info_cdb cdb_info;
	int atomic = scst_cmd_atomic(cmd);

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_DBG("ABORTED set, returning ABORTED "

	if (atomic && !dev->handler->parse_atomic) {
		TRACE_DBG("Dev handler %s parse() can not be "
			"called in atomic context, rescheduling to the thread",
			dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	/*
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from the
	 * CDB. Additionally, not all transports support supplying the expected
	 */

	if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type,

		PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);

		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_MINOR, "Using initiator-supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;
			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore the (most probably) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR_PR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_opcode));

			PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
				"the target %s did not supply expected values. "
				"Returning INVALID OPCODE.", cmd->cdb[0],
				dev->handler->name, cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_invalid_opcode));

	TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
		"set %s), transfer_len=%d (expected len %d), flags=%d",
		cdb_info.op_name, cdb_info.direction,
		cmd->expected_data_direction,
		scst_cmd_is_expected_set(cmd) ? "yes" : "no",
		cdb_info.transfer_len, cmd->expected_transfer_len,
		cdb_info.flags);

	/* Restore the (most probably) lost CDB length */
	cmd->cdb_len = cdb_info.cdb_len;

	cmd->data_direction = cdb_info.direction;
	if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
		cmd->bufflen = cdb_info.transfer_len;
	/* else cmd->bufflen remains as it was initialized, i.e. 0 */

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR_PR("NACA bit in the CDB control byte is not "
			"supported (opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR_PR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	if (likely(!scst_is_cmd_local(cmd))) {
		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd, &cdb_info);
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;

		state = SCST_CMD_STATE_PREPARE_SPACE;

	if (scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_transfer_len < cmd->bufflen) {
			TRACE(TRACE_SCSI, "cmd->expected_transfer_len(%d) < "
				"cmd->bufflen(%d), using expected_transfer_len "
				"instead", cmd->expected_transfer_len,
				cmd->bufflen);
			cmd->bufflen = cmd->expected_transfer_len;

	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;

	if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
		if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
		     (state != SCST_CMD_STATE_DEV_PARSE)) ||
		    ((cmd->bufflen != 0) &&
		     (cmd->data_direction == SCST_DATA_NONE)) ||
		    ((cmd->bufflen == 0) &&
		     (cmd->data_direction != SCST_DATA_NONE)) ||
		    ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
		     (state > SCST_CMD_STATE_PREPARE_SPACE)))

			PRINT_ERROR_PR("Dev handler %s parse() returned "
				"invalid cmd data_direction %d, "
				"bufflen %zd or state %d (opcode 0x%x)",
				dev->handler->name,
				cmd->data_direction, cmd->bufflen,
				state, cmd->cdb[0]);

	switch (state) {
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:

		res = SCST_CMD_STATE_RES_CONT_SAME;

	case SCST_CMD_STATE_REINIT:
		cmd->tgt_dev_saved = tgt_dev_saved;

		res = SCST_CMD_STATE_RES_RESTART;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s parse() requested thread "
			"context, rescheduling", dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

		PRINT_ERROR_PR("Dev handler %s parse() returned "
			"invalid cmd state %d (opcode %d)",
			dev->handler->name, state, cmd->cdb[0]);

		PRINT_ERROR_PR("Dev handler %s parse() returned "
			"error %d (opcode %d)", dev->handler->name,
			state, cmd->cdb[0]);

	if ((cmd->resp_data_len == -1) && set_dir) {
		if (cmd->data_direction == SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;

			cmd->resp_data_len = 0;

	TRACE_EXIT_HRES(res);

	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;

	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;
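
/*
 * Command-memory throttling: scst_check_mem() below charges each
 * command's bufflen against scst_cur_cmd_mem and returns QUEUE FULL
 * once the total exceeds scst_cur_max_cmd_mem. The work function
 * raises the limit by 1/8 of its current value per tick (new_max =
 * old_max * 9/8, capped at scst_max_cmd_mem), so a limit lowered by
 * scst_low_cur_max_cmd_mem() recovers geometrically, e.g. 16 MB ->
 * 18 MB -> ~20.25 MB -> ... up to the configured maximum.
 */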
void scst_cmd_mem_work_fn(void *p)

	spin_lock_bh(&scst_cmd_mem_lock);

	scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
	if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
		TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
		schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);

		scst_cur_max_cmd_mem = scst_max_cmd_mem;
		clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);

	TRACE_MGMT_DBG("New max cmd mem %ld MB", scst_cur_max_cmd_mem >> 20);

	spin_unlock_bh(&scst_cmd_mem_lock);

int scst_check_mem(struct scst_cmd *cmd)

	if (cmd->mem_checked)

	spin_lock_bh(&scst_cmd_mem_lock);

	scst_cur_cmd_mem += cmd->bufflen;
	cmd->mem_checked = 1;
	if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))

	TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld KB) "
		"is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
		"allowed %ld KB)", scst_cur_cmd_mem >> 10,
		(cmd->sess->initiator_name[0] == '\0') ?
			"Anonymous" : cmd->sess->initiator_name,
		scst_cur_max_cmd_mem >> 10);

	scst_cur_cmd_mem -= cmd->bufflen;
	cmd->mem_checked = 0;

	cmd->state = SCST_CMD_STATE_XMIT_RESP;

	spin_unlock_bh(&scst_cmd_mem_lock);

static void scst_low_cur_max_cmd_mem(void)

	if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
		cancel_delayed_work(&scst_cmd_mem_work);
		flush_scheduled_work();
		clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);

	spin_lock_bh(&scst_cmd_mem_lock);

	scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) +
			       (scst_cur_cmd_mem >> 2);
	if (scst_cur_max_cmd_mem < 16*1024*1024)
		scst_cur_max_cmd_mem = 16*1024*1024;

	if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
		TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
		schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
		set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);

	spin_unlock_bh(&scst_cmd_mem_lock);

	TRACE_MGMT_DBG("New max cmd mem %ld MB", scst_cur_max_cmd_mem >> 20);
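
/*
 * Buffer setup: data-less commands skip straight to execution.
 * Otherwise the memory is accounted via scst_check_mem() and the space
 * comes either from the target driver's alloc_data_buf() callback (if
 * custom allocation was requested) or from scst_alloc_space(). An
 * allocation failure in atomic context is not fatal - the command is
 * simply rescheduled to a thread, where the allocation can be retried
 * with GFP_KERNEL.
 */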
static int scst_prepare_space(struct scst_cmd *cmd)

	int r, res = SCST_CMD_STATE_RES_CONT_SAME;

	if (cmd->data_direction == SCST_DATA_NONE) {
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	r = scst_check_mem(cmd);
	if (unlikely(r != 0))

	if (cmd->data_buf_tgt_alloc) {
		TRACE_MEM("%s", "Custom tgt data buf allocation requested");
		r = cmd->tgtt->alloc_data_buf(cmd);
		cmd->data_buf_alloced = (r == 0);

		r = scst_alloc_space(cmd);

		if (scst_cmd_atomic(cmd)) {
			TRACE_MEM("%s", "Atomic memory allocation failed, "
				"rescheduling to the thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;

	switch (cmd->data_direction) {
	case SCST_DATA_WRITE:
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	TRACE_EXIT_HRES(res);

	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
	scst_low_cur_max_cmd_mem();

	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)

	struct scst_tgt *tgt = cmd->sess->tgt;

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
		tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */

		TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
			"(finished_cmds=%d, tgt->finished_cmds=%d, "
			"retry_cmds=%d)", finished_cmds,
			atomic_read(&tgt->finished_cmds), tgt->retry_cmds);

	TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
	/* IRQs are already off */
	spin_lock(&scst_list_lock);
	list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
	spin_unlock(&scst_list_lock);

	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;

	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
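
/*
 * Retry handling: when the target driver reports
 * SCST_TGT_RES_QUEUE_FULL, scst_queue_retry_cmd() above parks the
 * command on the target's retry list and arms the retry timer, unless
 * some other command has finished in the meantime (finished_cmds
 * changed), in which case the command is retried immediately.
 */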
static int scst_rdy_to_xfer(struct scst_cmd *cmd)

	int atomic = scst_cmd_atomic(cmd);

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))

		TRACE_DBG("ABORTED set, returning ABORTED for "

	if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
		TRACE_DBG("%s", "rdy_to_xfer() can not be "
			"called in atomic context, rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;

		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_DATA_WAIT;

		TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);

		if (((scst_random() % 100) == 75))
			rc = SCST_TGT_RES_QUEUE_FULL;

			rc = cmd->tgtt->rdy_to_xfer(cmd);
		TRACE_DBG("rdy_to_xfer() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

		case SCST_TGT_RES_QUEUE_FULL:

			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)

		case SCST_TGT_RES_NEED_THREAD_CTX:

			TRACE_DBG("Target driver %s "
				"rdy_to_xfer() requested thread "
				"context, rescheduling", cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;

	TRACE_EXIT_HRES(res);

	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
			"fatal error", cmd->tgtt->name);

		PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", cmd->tgtt->name, rc);

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
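
/*
 * scst_proccess_redirect_cmd() re-enters the state machine after an
 * asynchronous event (WRITE data received, mid-level completion,
 * response transmitted) in the requested context: directly on the
 * current stack, via the per-CPU tasklet, or by moving the command to
 * the active list and waking up a scsi_tgt thread.
 */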
void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
	int check_retries)

	TRACE_DBG("Context: %d", context);

	switch (context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:

		scst_check_retries(cmd->tgt, 0);
		cmd->non_atomic_only = 0;
		rc = __scst_process_active_cmd(cmd, context, 0);
		if (rc == SCST_CMD_STATE_RES_NEED_THREAD)

		PRINT_ERROR_PR("Context %x is unknown, using the thread one",
			context);

	case SCST_CONTEXT_THREAD:

		scst_check_retries(cmd->tgt, 1);

	case SCST_CONTEXT_TASKLET:

		scst_check_retries(cmd->tgt, 1);
		cmd->non_atomic_only = 0;
		spin_lock_irqsave(&scst_list_lock, flags);
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
		spin_unlock_irqrestore(&scst_list_lock, flags);
		scst_schedule_tasklet();

	cmd->non_atomic_only = 1;
	spin_lock_irqsave(&scst_list_lock, flags);
	TRACE_DBG("Moving cmd %p to active cmd list", cmd);
	list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	spin_unlock_irqrestore(&scst_list_lock, flags);
	wake_up(&scst_list_waitQ);
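
/*
 * scst_rx_data() is the target driver's completion call for the
 * rdy_to_xfer() stage: once all WRITE data has arrived (or failed to),
 * the driver reports the outcome. An illustrative call, where the
 * driver-side variable xfer_ok is hypothetical:
 *
 *	scst_rx_data(cmd, xfer_ok ? SCST_RX_STATUS_SUCCESS
 *				  : SCST_RX_STATUS_ERROR,
 *		     SCST_CONTEXT_DIRECT);
 *
 * Success proceeds to SEND_TO_MIDLEV; all error statuses route the
 * command to DEV_DONE, with ERROR_FATAL additionally suppressing the
 * response via the SCST_CMD_NO_RESP flag.
 */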
void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
	cmd->non_atomic_only = 0;

	if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
		(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))

		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_TASKLET;

	switch (status) {
	case SCST_RX_STATUS_SUCCESS:
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	case SCST_RX_STATUS_ERROR_SENSE_SET:
		cmd->state = SCST_CMD_STATE_DEV_DONE;

	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		/* go through */
	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_DEV_DONE;

		PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
			status);
		cmd->state = SCST_CMD_STATE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd, pref_context, 1);
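
/*
 * Sense post-processing. The subtle part is the "double UA" case:
 * after a bus reset the mid-level may itself return a reset Unit
 * Attention, which would collide with the reset UA that SCST queues
 * for the other initiators. While dev_double_ua_possible is set, a
 * reset UA arriving after one was already sent (dev_reset_ua_sent)
 * causes the command to be retried instead of the UA being reported
 * twice.
 */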
/* No locks are supposed to be held */
static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
	int rq_sense_len, int *next_state)

	struct scst_device *dev = cmd->dev;
	int dbl_ua_possible, ua_sent = 0;

	/* If we had an internal bus reset behind us, set the command error UA */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(cmd->host_status == DID_RESET) &&
	    scst_is_ua_command(cmd))

		TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
			dev->scsi_dev->was_reset, cmd->host_status);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_reset_UA));

		/* It looks like it is safe to clear was_reset here */
		dev->scsi_dev->was_reset = 0;

	if (rq_sense != NULL) {
		sense_valid = SCST_SENSE_VALID(rq_sense);

			memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
			/*
			 * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
			 */
			memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);

		sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);

	dbl_ua_possible = dev->dev_double_ua_possible;
	TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
	if (unlikely(dbl_ua_possible)) {
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread dev_double_ua_possible */
		dbl_ua_possible = dev->dev_double_ua_possible;

			ua_sent = dev->dev_reset_ua_sent;

			spin_unlock_bh(&dev->dev_lock);

		TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
			sizeof(cmd->sense_buffer));
		/* Check the Unit Attention sense key */
		if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
			if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {

					TRACE(TRACE_MGMT, "%s",
						"Double UA detected");

					TRACE(TRACE_MGMT, "Retrying cmd %p "
						"(tag %d)", cmd, cmd->tag);

					cmd->masked_status = 0;

					cmd->host_status = DID_OK;
					cmd->driver_status = 0;
					memset(cmd->sense_buffer, 0,
						sizeof(cmd->sense_buffer));

					*next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
					/*
					 * The dev is still blocked by this cmd, so
					 * it's OK to clear SCST_DEV_SERIALIZED
					 */
					dev->dev_double_ua_possible = 0;
					dev->dev_serialized = 0;
					dev->dev_reset_ua_sent = 0;

					dev->dev_reset_ua_sent = 1;

		if (cmd->ua_ignore == 0) {
			if (unlikely(dbl_ua_possible)) {
				__scst_process_UA(dev, cmd,
					cmd->sense_buffer,
					sizeof(cmd->sense_buffer), 0);

				scst_process_UA(dev, cmd,
					cmd->sense_buffer,
					sizeof(cmd->sense_buffer), 0);

	if (unlikely(dbl_ua_possible)) {
		if (ua_sent && scst_is_ua_command(cmd)) {
			TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
			dev->dev_double_ua_possible = 0;
			dev->dev_serialized = 0;
			dev->dev_reset_ua_sent = 0;

		spin_unlock_bh(&dev->dev_lock);

	spin_unlock_bh(&dev->dev_lock);
static int scst_check_auto_sense(struct scst_cmd *cmd)

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    (!SCST_SENSE_VALID(cmd->sense_buffer) ||
	     SCST_NO_SENSE(cmd->sense_buffer)))

		TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
			"cmd->status=%x, cmd->masked_status=%x, "
			"cmd->msg_status=%x, cmd->host_status=%x, "
			"cmd->driver_status=%x", cmd->status, cmd->masked_status,
			cmd->msg_status, cmd->host_status, cmd->driver_status);

	} else if (unlikely(cmd->host_status)) {
		if ((cmd->host_status == DID_REQUEUE) ||
		    (cmd->host_status == DID_IMM_RETRY) ||
		    (cmd->host_status == DID_SOFT_ERROR)) {

			TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
				"received, returning HARDWARE ERROR instead",
				cmd->host_status);
			scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	TRACE_EXIT_RES(res);
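
/*
 * scst_do_cmd_done() unpacks the mid-level result word into the
 * separate status/msg/host/driver bytes, folds a valid residual into
 * resp_data_len and, for MODE SENSE on read-only disk- or tape-like
 * devices, patches the Write Protect bit into the returned mode
 * parameter header so the initiator sees the LUN as write-protected.
 */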
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int resid,
	int *next_state)

	cmd->status = result & 0xff;
	cmd->masked_status = status_byte(result);
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	if (unlikely(resid != 0)) {

		if ((resid < 0) || (resid >= cmd->resp_data_len)) {
			PRINT_ERROR_PR("Wrong resid %d (cmd->resp_data_len=%d)",
				resid, cmd->resp_data_len);

			scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);

	TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
		"cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x", result, cmd->status, resid,
		cmd->masked_status, cmd->msg_status, cmd->host_status,
		cmd->driver_status);

	scst_dec_on_dev_cmd(cmd);

	type = cmd->dev->handler->type;
	if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
	    cmd->tgt_dev->acg_dev->rd_only_flag &&
	    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
	     type == TYPE_TAPE)) {

		length = scst_get_buf_first(cmd, &address);
		TRACE_DBG("length %d", length);
		if (unlikely(length <= 0)) {
			PRINT_ERROR_PR("%s: scst_get_buf_first() failed",

		if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
			address[2] |= 0x80;   /* Write Protect */

		else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
			address[3] |= 0x80;   /* Write Protect */

		scst_put_buf(cmd, address);

	scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
	struct scsi_request **req)

	struct scst_cmd *cmd = NULL;

	if (scsi_cmd && (*req = scsi_cmd->sc_request))
		cmd = (struct scst_cmd *)(*req)->upper_private_data;

		PRINT_ERROR_PR("%s", "Request with NULL cmd");

		scsi_release_request(*req);

static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)

	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;

	cmd = scst_get_cmd(scsi_cmd, &req);

	next_state = SCST_CMD_STATE_DEV_DONE;
	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), scsi_cmd->resid, &next_state);

	/* Clear out the request structure */

	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown the request blk */

	cmd->bufflen = req->sr_bufflen; //??

	scst_release_request(cmd);

	cmd->state = next_state;
	cmd->non_atomic_only = 0;

	scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);

#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
static void scst_cmd_done(void *data, char *sense, int result, int resid)

	struct scst_cmd *cmd;

	/*
	 * We don't use resid, because:
	 * 1. Many low-level initiator drivers don't use (set) this field
	 * 2. We determine the command's buffer size directly from the CDB,
	 *    so resid is not relevant for us, and target drivers
	 *    should learn the residual, if necessary, by comparing the
	 *    expected and actual transfer sizes.
	 */

	cmd = (struct scst_cmd *)data;

	next_state = SCST_CMD_STATE_DEV_DONE;
	scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid,
		&next_state);

	cmd->state = next_state;
	cmd->non_atomic_only = 0;

	scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
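
/*
 * The two scst_cmd_done() flavors above reflect the 2.6.18 mid-level
 * API change: older kernels complete through a struct scsi_request,
 * whose result and sense buffer are read back here, while newer ones
 * deliver the result, sense and residual directly to the callback
 * passed to scst_exec_req().
 */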
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)

	scst_dec_on_dev_cmd(cmd);

	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_DEV_DONE;

	if (next_state == SCST_CMD_STATE_DEV_DONE) {
#if defined(DEBUG) || defined(TRACING)

		struct scatterlist *sg = cmd->sg;
		TRACE(TRACE_RECV_TOP,
			"Exec'd %d S/G(s) at %p sg[0].page at %p",
			cmd->sg_cnt, sg, (void*)sg[0].page);
		for(i = 0; i < cmd->sg_cnt; ++i) {
			TRACE_BUFF_FLAG(TRACE_RECV_TOP,
				"Exec'd sg", page_address(sg[i].page),

	if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED))

		PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
			"state %d (opcode %d)", next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		next_state = SCST_CMD_STATE_DEV_DONE;

	if (scst_check_auto_sense(cmd)) {
		PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
			"opcode %d", cmd->cdb[0]);

	scst_check_sense(cmd, NULL, 0, &next_state);

	cmd->state = next_state;
	cmd->non_atomic_only = 0;

	scst_proccess_redirect_cmd(cmd, scst_get_context(), 0);
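
/*
 * REPORT LUNS is emulated locally: the response is an 8-byte header
 * followed by one 8-byte LUN descriptor per tgt_dev, of which only the
 * two top-level LUN bytes are filled in. As the ToDo notes say, only
 * the first scatter-gather entry is used for now, which bounds the
 * number of reportable LUNs by the size of that first buffer.
 */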
static int scst_report_luns_local(struct scst_cmd *cmd)

	int res = SCST_EXEC_COMPLETED;

	struct scst_tgt_dev *tgt_dev = NULL;

	cmd->masked_status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	/* ToDo: use the full SG buffer, not only the first entry */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size <= 0))

	if (buffer_size < 16) {

	memset(buffer, 0, buffer_size);

	/* sess->sess_tgt_dev_list is protected by the suspended activity */
	list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
			sess_tgt_dev_list_entry)

		if (8 + 8 * dev_cnt + 2 <= buffer_size) {
			buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
			buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;

		/* Temporary, until the ToDo above is done */
		if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))

	/* Set the response header */

	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;

	scst_put_buf(cmd, buffer);

	if (buffer_size > dev_cnt)
		scst_set_resp_data_len(cmd, dev_cnt);

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	TRACE_EXIT_RES(res);

	scst_put_buf(cmd, buffer);

	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
static int scst_pre_select(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;

	scst_block_dev(cmd->dev, 1);
	/* The device will be unblocked in scst_done_cmd_check() */

	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
		int rc = scst_set_pending_UA(cmd);

			res = SCST_EXEC_COMPLETED;

			/* Report the result */
			scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	TRACE_EXIT_RES(res);

static inline void scst_report_reserved(struct scst_cmd *cmd)

	scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
static int scst_reserve_local(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;

	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%Ld)", (uint64_t)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

		res = SCST_EXEC_COMPLETED;

	scst_block_dev(dev, 1);
	/* The device will be unblocked in scst_done_cmd_check() */

	spin_lock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		scst_report_reserved(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		res = SCST_EXEC_COMPLETED;

	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry)

		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);

	dev->dev_reserved = 1;

	spin_unlock_bh(&dev->dev_lock);

	TRACE_EXIT_RES(res);
static int scst_release_local(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;

	scst_block_dev(dev, 1);

	TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);

	spin_lock_bh(&dev->dev_lock);

	/*
	 * The device could have been RELEASED behind us, if the RESERVING
	 * session was closed (see scst_free_tgt_dev()), but this actually
	 * doesn't matter, so take the lock and don't retest the DEV_RESERVED
	 * bits again
	 */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;

		cmd->masked_status = 0;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;

		list_for_each_entry(tgt_dev_tmp,
				&dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry)

			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);

		dev->dev_reserved = 0;

	spin_unlock_bh(&dev->dev_lock);

	if (res == SCST_EXEC_COMPLETED) {

		/* Report the result */
		scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	TRACE_EXIT_RES(res);
/*
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
 */
static int scst_pre_exec(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	/* The reservation check comes before the Unit Attention check */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
	    (cmd->cdb[0] != INQUIRY) &&
	    (cmd->cdb[0] != REPORT_LUNS) &&
	    (cmd->cdb[0] != RELEASE) &&
	    (cmd->cdb[0] != RELEASE_10) &&
	    (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
	    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
	    (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))

		scst_report_reserved(cmd);
		res = SCST_EXEC_COMPLETED;

	/* If we had an internal bus reset, set the command error Unit Attention */
	if ((cmd->dev->scsi_dev != NULL) &&
	    unlikely(cmd->dev->scsi_dev->was_reset) &&
	    scst_is_ua_command(cmd))

		struct scst_device *dev = cmd->dev;

		/* Prevent more than one cmd from being triggered by was_reset */
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread was_reset */
		if (dev->scsi_dev->was_reset) {
			TRACE(TRACE_MGMT, "was_reset is %d", 1);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_reset_UA));
			/* It looks like it is safe to clear was_reset here */
			dev->scsi_dev->was_reset = 0;

		spin_unlock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
	    scst_is_ua_command(cmd))

		rc = scst_set_pending_UA(cmd);

	/* Check the READ_ONLY device status */
	if (tgt_dev->acg_dev->rd_only_flag &&
	    (cmd->cdb[0] == WRITE_6 ||  /* ToDo: full list of the modifying cmds */
	     cmd->cdb[0] == WRITE_10 ||
	     cmd->cdb[0] == WRITE_12 ||
	     cmd->cdb[0] == WRITE_16 ||
	     cmd->cdb[0] == WRITE_VERIFY ||
	     cmd->cdb[0] == WRITE_VERIFY_12 ||
	     cmd->cdb[0] == WRITE_VERIFY_16 ||
	     (cmd->dev->handler->type == TYPE_TAPE &&
	      (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))

		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));

	TRACE_EXIT_RES(res);

	res = SCST_EXEC_COMPLETED;

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
/*
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
 */
static inline int scst_local_exec(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;

	/*
	 * When adding new commands here, don't forget to update
	 * scst_is_cmd_local() in scsi_tgt.h, if necessary
	 */

	switch (cmd->cdb[0]) {

	case MODE_SELECT_10:

		res = scst_pre_select(cmd);

		res = scst_reserve_local(cmd);

		res = scst_release_local(cmd);

		res = scst_report_luns_local(cmd);

	TRACE_EXIT_RES(res);
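
/*
 * Execution dispatch order in scst_do_send_to_midlev():
 * scst_pre_exec() first (reservations, pending UAs, read-only checks),
 * then scst_local_exec() for the locally emulated opcodes, then the
 * device handler's exec() callback, and only if none of these
 * completed the command is it passed to the SCSI mid-level - which is
 * also why a virtual device (scsi_dev == NULL) must have been handled
 * by its device handler before that point.
 */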
static int scst_do_send_to_midlev(struct scst_cmd *cmd)

	int rc = SCST_EXEC_NOT_COMPLETED;

	cmd->sent_to_midlev = 1;
	cmd->state = SCST_CMD_STATE_EXECUTING;
	cmd->scst_cmd_done = scst_cmd_done_local;

	set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_DBG("ABORTED set, aborting cmd %p", cmd);

	rc = scst_pre_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)

		else if (rc == SCST_EXEC_NEED_THREAD)

	rc = scst_local_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)

		else if (rc == SCST_EXEC_NEED_THREAD)

	if (cmd->dev->handler->exec) {
		struct scst_device *dev = cmd->dev;
		TRACE_DBG("Calling dev handler %s exec(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
		cmd->scst_cmd_done = scst_cmd_done_local;
		rc = dev->handler->exec(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		TRACE_DBG("Dev handler %s exec() returned %d",
			dev->handler->name, rc);
		if (rc != SCST_EXEC_NOT_COMPLETED) {
			if (rc == SCST_EXEC_COMPLETED)

			else if (rc == SCST_EXEC_NEED_THREAD)

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);

	if (unlikely(cmd->dev->scsi_dev == NULL)) {
		PRINT_ERROR_PR("Command for a virtual device must be "
			"processed by the device handler (lun %Ld)!",
			(uint64_t)cmd->lun);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	if (scst_alloc_request(cmd) != 0) {
		PRINT_INFO_PR("%s", "Unable to allocate request, "
			"sending BUSY status");

	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
		cmd->retries);
#else
	rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
		cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
		cmd->timeout, cmd->retries, cmd, scst_cmd_done,

		PRINT_INFO_PR("scst_exec_req() failed: %d", rc);

	rc = SCST_EXEC_COMPLETED;

	/* Restore the state */
	cmd->sent_to_midlev = 0;
	cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
		"invalid code %d", cmd->dev->handler->name, rc);

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)

	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	rc = SCST_EXEC_COMPLETED;
	/* Report the result. The cmd is not completed */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
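
/*
 * Command ordering: each command gets a serial number (cmd->sn) at LUN
 * translation time, and scst_send_to_midlev() only executes a command
 * whose sn matches tgt_dev->expected_sn. Out-of-order arrivals are
 * parked on the per-tgt_dev deferred list; after each execution
 * expected_sn is advanced and any newly eligible deferred command is
 * picked up via scst_check_deferred_commands().
 */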
static int scst_send_to_midlev(struct scst_cmd *cmd)

	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;

	int atomic = scst_cmd_atomic(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
		TRACE_DBG("Dev handler %s exec() can not be "
			"called in atomic context, rescheduling to the thread",
			dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))

	scst_inc_cmd_count(); /* protect dev & tgt_dev */

	if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
		rc = scst_do_send_to_midlev(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
				"thread context, rescheduling");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			scst_dec_on_dev_cmd(cmd);
			goto out_dec_cmd_count;

		BUG_ON(rc != SCST_EXEC_COMPLETED);

	expected_sn = tgt_dev->expected_sn;
	if (cmd->sn != expected_sn) {
		spin_lock_bh(&tgt_dev->sn_lock);
		tgt_dev->def_cmd_count++;

		barrier(); /* to reread expected_sn */
		expected_sn = tgt_dev->expected_sn;
		if (cmd->sn != expected_sn) {
			scst_dec_on_dev_cmd(cmd);
			TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
				"expected_sn=%d)", cmd, cmd->sn, expected_sn);
			list_add_tail(&cmd->sn_cmd_list_entry,
				&tgt_dev->deferred_cmd_list);
			spin_unlock_bh(&tgt_dev->sn_lock);
			/* !! At this point cmd can be already freed !! */
			goto out_dec_cmd_count;

			TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
				"expected_sn %d, continuing", expected_sn);
			tgt_dev->def_cmd_count--;
			spin_unlock_bh(&tgt_dev->sn_lock);

	rc = scst_do_send_to_midlev(cmd);
	if (rc == SCST_EXEC_NEED_THREAD) {
		TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
			"thread context, rescheduling");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		scst_dec_on_dev_cmd(cmd);

		goto out_dec_cmd_count;

	BUG_ON(rc != SCST_EXEC_COMPLETED);
	/* !! At this point cmd can be already freed !! */

	expected_sn = __scst_inc_expected_sn(tgt_dev);
	cmd = scst_check_deferred_commands(tgt_dev, expected_sn);

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))

	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);

out_dec_cmd_count:
	scst_dec_cmd_count();
	/* !! At this point sess, dev and tgt_dev can be already freed !! */

	TRACE_EXIT_HRES(res);
static struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)

	struct scst_cmd *res;
	int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

	res = scst_alloc_cmd(gfp_mask);
	if (unlikely(res == NULL)) {

	res->sess = orig_cmd->sess;
	res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	res->atomic = scst_cmd_atomic(orig_cmd);

	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;

	res->bufflen = bufsize;

		if (scst_alloc_space(res) != 0)
			PRINT_ERROR("Unable to create buffer (size %d) for "
				"internal cmd", bufsize);

	TRACE_EXIT_HRES((unsigned long)res);

	scst_destroy_cmd(res);

static void scst_free_internal_cmd(struct scst_cmd *cmd)

	if (cmd->bufflen > 0)
		scst_release_space(cmd);
	scst_destroy_cmd(cmd);
static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)

	int res = SCST_CMD_STATE_RES_RESTART;
#define sbuf_size 252
	static const unsigned char request_sense[6] =
		{ REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
	struct scst_cmd *rs_cmd;

	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);

	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;

	spin_lock_irq(&scst_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_RES(res);

static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)

	struct scst_cmd *orig_cmd = cmd->orig_cmd;

	len = scst_get_buf_first(cmd, &buf);

	if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
	    (!SCST_NO_SENSE(buf)))

		TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
			buf, len);
		memcpy(orig_cmd->sense_buffer, buf,
			(sizeof(orig_cmd->sense_buffer) > len) ?
				len : sizeof(orig_cmd->sense_buffer));

		PRINT_ERROR_PR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	scst_put_buf(cmd, buf);

	scst_free_internal_cmd(cmd);

	TRACE_EXIT_HRES((unsigned long)orig_cmd);
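
/*
 * scst_done_cmd_check() applies the post-execution fixups: issuing an
 * internal REQUEST SENSE when a CHECK CONDITION arrived without sense
 * data, patching the Write Protect bit for MODE SENSE on read-only
 * LUNs, clearing the NormACA bit in standard INQUIRY data (ACA is not
 * supported), rolling the reservation state back if a real RESERVE
 * failed, and generating the MODE/LOG PARAMETERS CHANGED Unit
 * Attentions (ASC/ASCQ 0x2a/0x01 and 0x2a/0x02) after a successful
 * MODE/LOG SELECT.
 */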
static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)

	if (cmd->cdb[0] == REQUEST_SENSE) {

		cmd = scst_complete_request_sense(cmd);
	} else if (scst_check_auto_sense(cmd)) {
		PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
			"without sense data (opcode 0x%x), issuing "
			"REQUEST SENSE", cmd->cdb[0]);
		rc = scst_prepare_request_sense(cmd);

			PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
				"returning HARDWARE ERROR");
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));

	type = cmd->dev->handler->type;
	if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
	    cmd->tgt_dev->acg_dev->rd_only_flag &&
	    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
	     type == TYPE_TAPE))

		length = scst_get_buf_first(cmd, &address);

		if (length > 2 && cmd->cdb[0] == MODE_SENSE)
			address[2] |= 0x80;   /* Write Protect */
		else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
			address[3] |= 0x80;   /* Write Protect */
		scst_put_buf(cmd, address);

	/*
	 * Check and clear the NormACA option for the device, if necessary,
	 * since we don't support ACA
	 */
	if ((cmd->cdb[0] == INQUIRY) &&
	    !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
	    (cmd->resp_data_len > SCST_INQ_BYTE3))

		/* ToDo: all pages ?? */
		buflen = scst_get_buf_first(cmd, &buffer);

		if (buflen > SCST_INQ_BYTE3) {

			if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
				PRINT_INFO_PR("NormACA set for device: "
					"lun=%Ld, type 0x%02x",
					(uint64_t)cmd->lun, buffer[0]);

			buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;

			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));

		scst_put_buf(cmd, buffer);

	if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
		if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
				&cmd->tgt_dev->tgt_dev_flags)) {
			struct scst_tgt_dev *tgt_dev_tmp;
			TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
				(uint64_t)cmd->lun, cmd->masked_status);
			TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
				sizeof(cmd->sense_buffer));
			/* Clearing the reservation */
			list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
					dev_tgt_dev_list_entry) {
				clear_bit(SCST_TGT_DEV_RESERVED,
					&tgt_dev_tmp->tgt_dev_flags);

			cmd->dev->dev_reserved = 0;

		scst_unblock_dev(cmd->dev);

	if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
		     (cmd->cdb[0] == MODE_SELECT_10) ||
		     (cmd->cdb[0] == LOG_SELECT)))

		if (cmd->status == 0) {
			TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
				"setting the SELECT UA (lun=%Ld)",
				(uint64_t)cmd->lun);
			spin_lock_bh(&scst_temp_UA_lock);
			if (cmd->cdb[0] == LOG_SELECT) {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x02);

				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x01);

			scst_process_UA(cmd->dev, cmd, scst_temp_UA,
				sizeof(scst_temp_UA), 1);
			spin_unlock_bh(&scst_temp_UA_lock);

		scst_unblock_dev(cmd->dev);

	TRACE_EXIT_RES(res);
static int scst_dev_done(struct scst_cmd *cmd)

	int res = SCST_CMD_STATE_RES_CONT_SAME;

	int atomic = scst_cmd_atomic(cmd);

	if (atomic && !cmd->dev->handler->dev_done_atomic &&
	    cmd->dev->handler->dev_done)

		TRACE_DBG("Dev handler %s dev_done() can not be "
			"called in atomic context, rescheduling to the thread",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	if (scst_done_cmd_check(cmd, &res))

	state = SCST_CMD_STATE_XMIT_RESP;
	if (likely(!scst_is_cmd_local(cmd)) &&
	    likely(cmd->dev->handler->dev_done != NULL))

		TRACE_DBG("Calling dev handler %s dev_done(%p)",
			cmd->dev->handler->name, cmd);
		rc = cmd->dev->handler->dev_done(cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
			cmd->dev->handler->name, rc);
		if (rc != SCST_CMD_STATE_DEFAULT)

	switch (state) {
	case SCST_CMD_STATE_REINIT:

		res = SCST_CMD_STATE_RES_RESTART;

	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:

		res = SCST_CMD_STATE_RES_CONT_SAME;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s dev_done() requested "
			"thread context, rescheduling",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

		PRINT_ERROR_PR("Dev handler %s dev_done() returned "
			"invalid cmd state %d",
			cmd->dev->handler->name, state);

		PRINT_ERROR_PR("Dev handler %s dev_done() returned "
			"error %d", cmd->dev->handler->name,

		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		res = SCST_CMD_STATE_RES_CONT_SAME;

	TRACE_EXIT_HRES(res);
static int scst_xmit_response(struct scst_cmd *cmd)

	int atomic = scst_cmd_atomic(cmd);

	/*
	 * Check here also in order to avoid unnecessary delays of other
	 * commands
	 */
	if (unlikely(cmd->sent_to_midlev == 0) &&
	    (cmd->tgt_dev != NULL))

		TRACE(TRACE_SCSI_SERIALIZING,
			"cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
		scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
		cmd->sent_to_midlev = 1;

	if (atomic && !cmd->tgtt->xmit_response_atomic) {
		TRACE_DBG("%s", "xmit_response() can not be "
			"called in atomic context, rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
			TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
				"(tag %d), returning TASK ABORTED", cmd, cmd->tag);
			scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);

	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
			cmd, cmd->tag);
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;

	if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		if (atomic && !cmd->tgtt->xmit_response_atomic) {
			TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;

		TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
			cmd, cmd->tag);
		schedule_timeout_uninterruptible(HZ);

		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_XMIT_WAIT;

		TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(DEBUG) || defined(TRACING)

		struct scatterlist *sg = cmd->sg;
		TRACE(TRACE_SEND_BOT,
			"Xmitting %d S/G(s) at %p sg[0].page at %p",
			cmd->sg_cnt, sg, (void*)sg[0].page);
		for(i = 0; i < cmd->sg_cnt; ++i) {
			TRACE_BUFF_FLAG(TRACE_SEND_BOT,
				"Xmitting sg", page_address(sg[i].page),

		if (((scst_random() % 100) == 77))
			rc = SCST_TGT_RES_QUEUE_FULL;

			rc = cmd->tgtt->xmit_response(cmd);
		TRACE_DBG("xmit_response() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_XMIT_RESP;

		case SCST_TGT_RES_QUEUE_FULL:

			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)

		case SCST_TGT_RES_NEED_THREAD_CTX:

			TRACE_DBG("Target driver %s xmit_response() "
				"requested thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;

	/* Caution: cmd can be already dead here */
	TRACE_EXIT_HRES(res);

	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);

		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;
static int scst_finish_cmd(struct scst_cmd *cmd)

	if (cmd->mem_checked) {
		spin_lock_bh(&scst_cmd_mem_lock);
		scst_cur_cmd_mem -= cmd->bufflen;
		spin_unlock_bh(&scst_cmd_mem_lock);

	spin_lock_irq(&scst_list_lock);

	TRACE_DBG("Deleting cmd %p from cmd list", cmd);
	list_del(&cmd->cmd_list_entry);

		scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);

	if (likely(cmd->tgt_dev != NULL))
		cmd->tgt_dev->cmd_count--;

	cmd->sess->sess_cmd_count--;

	list_del(&cmd->search_cmd_list_entry);

	spin_unlock_irq(&scst_list_lock);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
void scst_tgt_cmd_done(struct scst_cmd *cmd)

	BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

	cmd->state = SCST_CMD_STATE_FINISHED;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);
/*
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 * Called under scst_list_lock and with IRQs disabled
 */
static int scst_translate_lun(struct scst_cmd *cmd)

	struct scst_tgt_dev *tgt_dev = NULL;

	scst_inc_cmd_count();

	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {

		TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
			(uint64_t)cmd->lun);
		list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
				sess_tgt_dev_list_entry)

			if (tgt_dev->acg_dev->lun == cmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);

				if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
					PRINT_INFO_PR("Dev handler for device "
						"%Ld is NULL, the device will not be "
						"visible remotely", (uint64_t)cmd->lun);

				if (cmd->state == SCST_CMD_STATE_REINIT) {
					cmd->tgt_dev_saved->cmd_count--;
					TRACE(TRACE_SCSI_SERIALIZING,
						"SCST_CMD_STATE_REINIT: "
						"incrementing expected_sn on tgt_dev_saved %p",
						cmd->tgt_dev_saved);
					scst_inc_expected_sn_unblock(
						cmd->tgt_dev_saved, cmd, 1);

				cmd->tgt_dev = tgt_dev;
				tgt_dev->cmd_count++;
				cmd->dev = tgt_dev->acg_dev->dev;

				/* ToDo: cmd->queue_type */

				/* scst_list_lock is enough to protect that */
				cmd->sn = tgt_dev->next_sn;

				TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
					"cmd->sn: %d", cmd->sn);

		TRACE_DBG("tgt_dev for lun %Ld not found, command to a "
			"nonexistent LU?", (uint64_t)cmd->lun);
		scst_dec_cmd_count();

		if (!cmd->sess->waiting) {
			TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
				cmd->sess);
			list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
				&scst_dev_wait_sess_list);
			cmd->sess->waiting = 1;

		scst_dec_cmd_count();

	TRACE_EXIT_RES(res);
/* Called under scst_list_lock and with IRQs disabled */
static int scst_process_init_cmd(struct scst_cmd *cmd)

	res = scst_translate_lun(cmd);
	if (likely(res == 0)) {
		cmd->state = SCST_CMD_STATE_DEV_PARSE;
		if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
			TRACE(TRACE_RETRY, "Too many pending commands in "
				"session, returning BUSY to initiator \"%s\"",
				(cmd->sess->initiator_name[0] == '\0') ?
					"Anonymous" : cmd->sess->initiator_name);

			cmd->state = SCST_CMD_STATE_XMIT_RESP;

		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	} else if (res < 0) {
		TRACE_DBG("Finishing cmd %p", cmd);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);

	TRACE_EXIT_RES(res);
/*
 * Called under scst_list_lock and with IRQs disabled.
 * We don't drop the lock anywhere inside, because command execution
 * has to be serialized, i.e. commands must be executed in the order
 * of their arrival, and we set this order inside scst_translate_lun().
 */
static int scst_do_job_init(struct list_head *init_cmd_list)

	if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
		while (!list_empty(init_cmd_list)) {
			struct scst_cmd *cmd = list_entry(init_cmd_list->next,
				typeof(*cmd), cmd_list_entry);

			res = scst_process_init_cmd(cmd);

	TRACE_EXIT_RES(res);
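
/*
 * __scst_process_active_cmd() is the driver of the state machine
 * proper: it loops over the per-state handlers for as long as they
 * return SCST_CMD_STATE_RES_CONT_SAME and stops on CONT_NEXT (the
 * command went asynchronous or finished), NEED_THREAD (requeue to a
 * scsi_tgt thread for non-atomic processing) or RESTART (back to the
 * init list for re-translation).
 */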
2466 /* Called with no locks held */
2467 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2478 cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
2479 SCST_CONTEXT_DIRECT_ATOMIC);
2480 cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2483 switch (cmd->state) {
2484 case SCST_CMD_STATE_DEV_PARSE:
2485 res = scst_parse_cmd(cmd);
2488 case SCST_CMD_STATE_PREPARE_SPACE:
2489 res = scst_prepare_space(cmd);
2492 case SCST_CMD_STATE_RDY_TO_XFER:
2493 res = scst_rdy_to_xfer(cmd);
2496 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2497 res = scst_send_to_midlev(cmd);
2498 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2501 case SCST_CMD_STATE_DEV_DONE:
2502 res = scst_dev_done(cmd);
2505 case SCST_CMD_STATE_XMIT_RESP:
2506 res = scst_xmit_response(cmd);
2509 case SCST_CMD_STATE_FINISHED:
2510 res = scst_finish_cmd(cmd);
2514 PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2517 res = SCST_CMD_STATE_RES_CONT_NEXT;
2520 } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2522 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2524 spin_lock_irq(&scst_list_lock);
2525 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2526 spin_lock_irq(&scst_list_lock);
2528 switch (cmd->state) {
2529 case SCST_CMD_STATE_DEV_PARSE:
2530 case SCST_CMD_STATE_PREPARE_SPACE:
2531 case SCST_CMD_STATE_RDY_TO_XFER:
2532 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2533 case SCST_CMD_STATE_DEV_DONE:
2534 case SCST_CMD_STATE_XMIT_RESP:
2535 case SCST_CMD_STATE_FINISHED:
2536 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2537 list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2540 /* states that are not valid at this point */
2541 case SCST_CMD_STATE_DEFAULT:
2542 case SCST_CMD_STATE_NEED_THREAD_CTX:
2543 PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2544 "useful list (left on scst cmd list)", cmd,
2546 spin_unlock_irq(&scst_list_lock);
2548 spin_lock_irq(&scst_list_lock);
2554 cmd->non_atomic_only = 1;
2556 spin_unlock_irq(&scst_list_lock);
2557 wake_up(&scst_list_waitQ);
2558 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2559 if (cmd->state == SCST_CMD_STATE_REINIT) {
2560 spin_lock_irq(&scst_list_lock);
2561 TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2562 list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2564 spin_unlock_irq(&scst_list_lock);
2570 TRACE_EXIT_RES(res);
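/*
 * Summary of the SCST_CMD_STATE_RES_* codes handled above: CONT_SAME loops
 * on the same command; CONT_NEXT just re-takes scst_list_lock and returns;
 * NEED_THREAD marks the command non_atomic_only, puts it back on the active
 * list and wakes a thread context via scst_list_waitQ; RESTART re-queues a
 * command in SCST_CMD_STATE_REINIT at the head of the init cmd list.
 */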
2574 /* Called under scst_list_lock and IRQs disabled */
2575 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2578 struct scst_cmd *cmd;
2579 int atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
2580 SCST_CONTEXT_DIRECT_ATOMIC);
2586 int c = (context & ~SCST_PROCESSIBLE_ENV);
2587 WARN_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) &&
2588 (c != SCST_CONTEXT_DIRECT));
2592 tm_dbg_check_released_cmds();
2595 list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2596 if (atomic && cmd->non_atomic_only) {
2597 TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2600 if (tm_dbg_check_cmd(cmd) != 0)
2602 res = scst_process_active_cmd(cmd, context, NULL, 1);
2603 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2605 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2607 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2617 static inline int test_cmd_lists(void)
2619 int res = !list_empty(&scst_active_cmd_list) ||
2620 (!list_empty(&scst_init_cmd_list) &&
2621 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2622 test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2623 unlikely(scst_shut_threads_count > 0) ||
2624 tm_dbg_is_release();
2628 int scst_cmd_thread(void *arg)
2630 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2636 n = scst_thread_num++;
2638 daemonize("scsi_tgt%d", n);
2639 recalc_sigpending();
2640 set_user_nice(current, 10);
2641 current->flags |= PF_NOFREEZE;
2643 spin_lock_irq(&scst_list_lock);
2646 init_waitqueue_entry(&wait, current);
2648 if (!test_cmd_lists()) {
2649 add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2651 set_current_state(TASK_INTERRUPTIBLE);
2652 if (test_cmd_lists())
2654 spin_unlock_irq(&scst_list_lock);
2656 spin_lock_irq(&scst_list_lock);
2658 set_current_state(TASK_RUNNING);
2659 remove_wait_queue(&scst_list_waitQ, &wait);
2662 scst_do_job_init(&scst_init_cmd_list);
2663 scst_do_job_active(&scst_active_cmd_list,
2664 SCST_CONTEXT_DIRECT|SCST_PROCESSIBLE_ENV);
2666 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2667 list_empty(&scst_cmd_list) &&
2668 list_empty(&scst_active_cmd_list) &&
2669 list_empty(&scst_init_cmd_list)) {
2673 if (unlikely(scst_shut_threads_count > 0)) {
2674 scst_shut_threads_count--;
2678 spin_unlock_irq(&scst_list_lock);
2680 if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2681 smp_mb__after_atomic_dec();
2682 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2683 up(scst_shutdown_mutex);
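#if 0
/*
 * A minimal sketch (not part of the original code) of how such threads were
 * presumably spawned in pre-kthread-API kernels, given the daemonize() call
 * in scst_cmd_thread() above; scst_start_cmd_threads() is a hypothetical
 * name, and the real spawn code lives elsewhere in scst.
 */
static int scst_start_cmd_threads(int num)
{
	int i;

	for (i = 0; i < num; i++) {
		/* kernel_thread() runs scst_cmd_thread() in a new kernel context */
		int pid = kernel_thread(scst_cmd_thread, NULL, 0);
		if (pid < 0)
			return pid;
		atomic_inc(&scst_threads_count);
	}
	return 0;
}
#endif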
2690 void scst_cmd_tasklet(long p)
2694 spin_lock_irq(&scst_list_lock);
2696 scst_do_job_init(&scst_init_cmd_list);
2697 scst_do_job_active(&scst_active_cmd_list,
2698 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2700 spin_unlock_irq(&scst_list_lock);
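#if 0
/*
 * Illustrative sketch only: the per-CPU tasklets are presumably set up
 * along these lines at init time (the real code is elsewhere in scst).
 * tasklet_init() expects a void (*)(unsigned long) callback, hence the
 * cast for scst_cmd_tasklet(), which is declared with a long argument.
 */
static void scst_init_tasklets(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		tasklet_init(&scst_tasklets[i],
			(void (*)(unsigned long))scst_cmd_tasklet, 0);
}
#endif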
2707 * Returns 0 on success, < 0 if there is no device handler or
2708 * > 0 if SCST_FLAG_SUSPENDED set.
2710 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2712 struct scst_tgt_dev *tgt_dev = NULL;
2717 TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2718 (uint64_t)mcmd->lun);
2720 spin_lock_irq(&scst_list_lock);
2721 scst_inc_cmd_count();
2722 if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2723 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2724 sess_tgt_dev_list_entry)
2726 if (tgt_dev->acg_dev->lun == mcmd->lun) {
2727 TRACE_DBG("tgt_dev %p found", tgt_dev);
2728 mcmd->mcmd_tgt_dev = tgt_dev;
2733 if (mcmd->mcmd_tgt_dev == NULL)
2734 scst_dec_cmd_count();
2736 if (!mcmd->sess->waiting) {
2737 TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2739 list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2740 &scst_dev_wait_sess_list);
2741 mcmd->sess->waiting = 1;
2743 scst_dec_cmd_count();
2746 spin_unlock_irq(&scst_list_lock);
2748 TRACE_EXIT_HRES(res);
2752 /* Called under scst_list_lock and IRQ off */
2753 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2754 struct scst_mgmt_cmd *mcmd)
2758 TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2759 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2760 mcmd->cmd_wait_count);
2762 cmd->mgmt_cmnd = NULL;
2765 mcmd->completed_cmd_count++;
2767 mcmd->cmd_wait_count--;
2768 if (mcmd->cmd_wait_count > 0) {
2769 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2770 mcmd->cmd_wait_count);
2774 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2776 if (mcmd->completed) {
2777 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2779 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2780 &scst_active_mgmt_cmd_list);
2783 wake_up(&scst_mgmt_cmd_list_waitQ);
2790 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2791 struct scst_tgt_dev *tgt_dev, int set_status)
2793 int res = SCST_DEV_TM_NOT_COMPLETED;
2794 if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2795 int irq = irqs_disabled();
2796 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2797 tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2803 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd,
2807 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2808 tgt_dev->acg_dev->dev->handler->name, res);
2809 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2810 mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ?
2811 SCST_MGMT_STATUS_SUCCESS :
2812 SCST_MGMT_STATUS_FAILED;
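#if 0
/*
 * A hypothetical dev handler task_mgmt_fn() illustrating the SCST_DEV_TM_*
 * contract used above. The exact parameter list of task_mgmt_fn() is partly
 * elided in this listing, so the signature below is an assumption, and
 * example_task_mgmt_fn is not a real handler.
 */
static int example_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
	struct scst_tgt_dev *tgt_dev)
{
	if (mcmd->fn != SCST_LUN_RESET)
		return SCST_DEV_TM_NOT_COMPLETED; /* let SCST handle it generically */

	/* device-specific reset work would go here */
	return SCST_DEV_TM_COMPLETED_SUCCESS;
}
#endif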
2818 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2821 case SCST_ABORT_TASK:
2822 case SCST_ABORT_TASK_SET:
2823 case SCST_CLEAR_TASK_SET:
2831 * Called under scst_list_lock with IRQs off (to protect cmd
2832 * from being destroyed) and with BHs off as well
2835 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2836 int other_ini, int call_dev_task_mgmt_fn)
2840 TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2843 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2844 smp_mb__after_set_bit();
2846 set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2847 smp_mb__after_set_bit();
2849 if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2850 scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2854 if (cmd->tgtt->tm_sync_reply)
2857 if (scst_is_strict_mgmt_fn(mcmd->fn))
2858 defer = test_bit(SCST_CMD_EXECUTING,
2861 defer = test_bit(SCST_CMD_XMITTING,
2867 * Delay the response until the command finishes, in
2868 * order to guarantee that "no further responses from
2869 * the task are sent to the SCSI initiator port" after
2870 * the response from the TM function is sent (SAM)
2872 TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2873 "xmitted (state %d), deferring ABORT...", cmd,
2874 cmd->tag, cmd->state);
2876 if (cmd->mgmt_cmnd) {
2877 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2878 "has non-NULL mgmt_cmnd %p!!! Current "
2879 "mcmd %p\n", cmd, cmd->tag, cmd->state,
2880 cmd->mgmt_cmnd, mcmd);
2883 BUG_ON(cmd->mgmt_cmnd);
2884 mcmd->cmd_wait_count++;
2885 cmd->mgmt_cmnd = mcmd;
2889 tm_dbg_release_cmd(cmd);
2895 /* Called under scst_list_lock and IRQ off */
2896 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2899 if (mcmd->cmd_wait_count != 0) {
2900 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2901 "wait", mcmd->cmd_wait_count);
2902 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2905 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2908 mcmd->completed = 1;
2912 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2914 struct scst_device *dev;
2919 if (!scst_mutex_held)
2922 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2923 struct scst_cmd *cmd, *tcmd;
2924 spin_lock_bh(&dev->dev_lock);
2925 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2926 blocked_cmd_list_entry) {
2927 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2928 list_del(&cmd->blocked_cmd_list_entry);
2929 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2930 "to active cmd list", cmd);
2931 spin_lock_irq(&scst_list_lock);
2932 list_move_tail(&cmd->cmd_list_entry,
2933 &scst_active_cmd_list);
2934 spin_unlock_irq(&scst_list_lock);
2938 spin_unlock_bh(&dev->dev_lock);
2941 if (!scst_mutex_held)
2945 wake_up(&scst_list_waitQ);
2951 /* Aborts all matching commands of the given tgt_dev; called without scst_list_lock held */
2952 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2953 struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2955 struct scst_cmd *cmd;
2956 struct scst_session *sess = tgt_dev->sess;
2961 spin_lock_irq(&scst_list_lock);
2963 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2964 list_for_each_entry(cmd, &sess->search_cmd_list,
2965 search_cmd_list_entry) {
2966 if ((cmd->tgt_dev == NULL) &&
2967 (cmd->lun == tgt_dev->acg_dev->lun))
2969 if (cmd->tgt_dev != tgt_dev)
2971 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2973 spin_unlock_irq(&scst_list_lock);
2976 scst_unblock_aborted_cmds(scst_mutex_held);
2982 /* Returns 0 if the command processing should be continued, <0 otherwise */
2983 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2986 struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2987 struct scst_device *dev = tgt_dev->acg_dev->dev;
2989 TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2990 tgt_dev->acg_dev->lun, mcmd);
2992 spin_lock_bh(&dev->dev_lock);
2993 __scst_block_dev(dev);
2994 spin_unlock_bh(&dev->dev_lock);
2996 __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2997 scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2999 res = scst_set_mcmd_next_state(mcmd);
3001 TRACE_EXIT_RES(res);
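/*
 * __scst_block_dev() here is paired with the scst_unblock_dev() call in
 * scst_mgmt_cmd_send_done() below, so the device stays blocked for the
 * whole lifetime of the ABORT TASK SET / CLEAR TASK SET processing.
 */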
3005 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
3008 * No need for special protection of SCST_FLAG_TM_ACTIVE, since
3009 * we can only be called from a single thread.
3011 if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
3012 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
3015 spin_lock_irq(&scst_list_lock);
3016 list_move_tail(&mcmd->mgmt_cmd_list_entry,
3017 &scst_delayed_mgmt_cmd_list);
3019 spin_unlock_irq(&scst_list_lock);
3022 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3027 /* Returns 0 if the command processing should be continued,
3028 * >0, if it should be requeued, <0 otherwise */
3029 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3035 res = scst_check_delay_mgmt_cmd(mcmd, 1);
3039 if (mcmd->fn == SCST_ABORT_TASK) {
3040 struct scst_session *sess = mcmd->sess;
3041 struct scst_cmd *cmd;
3044 spin_lock_irq(&scst_list_lock);
3045 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3047 TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3048 "tag %d not found", mcmd->tag);
3049 mcmd->status = SCST_MGMT_STATUS_FAILED;
3050 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3052 TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
3053 "aborting it", cmd, mcmd->tag, cmd->sn);
3054 mcmd->cmd_to_abort = cmd;
3055 scst_abort_cmd(cmd, mcmd, 0, 1);
3056 res = scst_set_mcmd_next_state(mcmd);
3057 mcmd->cmd_to_abort = NULL; /* just in case */
3059 spin_unlock_irq(&scst_list_lock);
3063 rc = scst_mgmt_translate_lun(mcmd);
3065 PRINT_ERROR_PR("Corresponding device for lun %Ld not "
3066 "found", (uint64_t)mcmd->lun);
3067 mcmd->status = SCST_MGMT_STATUS_FAILED;
3068 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3070 mcmd->state = SCST_MGMT_CMD_STATE_READY;
3076 TRACE_EXIT_RES(res);
3080 /* Returns 0 if the command processing should be continued, <0 otherwise */
3081 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3084 struct scst_device *dev, *d;
3085 struct scst_tgt_dev *tgt_dev;
3087 LIST_HEAD(host_devs);
3091 TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3092 mcmd, mcmd->sess->sess_cmd_count);
3096 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3099 spin_lock_bh(&dev->dev_lock);
3100 __scst_block_dev(dev);
3101 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3102 spin_unlock_bh(&dev->dev_lock);
3106 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3107 dev_tgt_dev_list_entry)
3110 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3111 if (rc == SCST_DEV_TM_NOT_COMPLETED)
3113 else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3114 mcmd->status = SCST_MGMT_STATUS_FAILED;
3119 if (dev->scsi_dev == NULL)
3122 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3123 if (dev->scsi_dev->host->host_no ==
3124 d->scsi_dev->host->host_no)
3131 list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3135 * We assume here that the completion callbacks will be called for all
3136 * commands that are already on the devices on/after scsi_reset_provider().
3139 list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3140 /* dev->scsi_dev must be non-NULL here */
3141 TRACE(TRACE_MGMT, "Resetting host %d bus",
3142 dev->scsi_dev->host->host_no);
3143 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3144 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3145 dev->scsi_dev->host->host_no,
3146 (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3147 if (rc != SUCCESS) {
3148 /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
3149 mcmd->status = SCST_MGMT_STATUS_FAILED;
3153 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3154 if (dev->scsi_dev != NULL)
3155 dev->scsi_dev->was_reset = 0;
3160 spin_lock_irq(&scst_list_lock);
3161 tm_dbg_task_mgmt("TARGET RESET");
3162 res = scst_set_mcmd_next_state(mcmd);
3163 spin_unlock_irq(&scst_list_lock);
3165 TRACE_EXIT_RES(res);
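/*
 * The host_devs list above exists to issue at most one bus reset per SCSI
 * host: devices sharing a host_no are added only once, so several exported
 * devices behind one HBA do not trigger repeated scsi_reset_provider()
 * calls.
 */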
3169 /* Returns 0 if the command processing should be continued, <0 otherwise */
3170 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3173 struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3174 struct scst_device *dev = tgt_dev->acg_dev->dev;
3178 TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3181 spin_lock_bh(&dev->dev_lock);
3182 __scst_block_dev(dev);
3183 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3184 spin_unlock_bh(&dev->dev_lock);
3186 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3187 if (rc != SCST_DEV_TM_NOT_COMPLETED)
3190 if (dev->scsi_dev != NULL) {
3191 TRACE(TRACE_MGMT, "Resetting SCSI device (host %d)",
3192 dev->scsi_dev->host->host_no);
3193 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3195 mcmd->status = SCST_MGMT_STATUS_FAILED;
3196 dev->scsi_dev->was_reset = 0;
3200 spin_lock_irq(&scst_list_lock);
3201 tm_dbg_task_mgmt("LUN RESET");
3202 res = scst_set_mcmd_next_state(mcmd);
3203 spin_unlock_irq(&scst_list_lock);
3205 TRACE_EXIT_RES(res);
3209 /* Returns 0 if the command processing should be continued, <0 otherwise */
3210 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3214 struct scst_session *sess = mcmd->sess;
3215 struct scst_tgt_dev *tgt_dev;
3220 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3223 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3228 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3229 sess_tgt_dev_list_entry)
3231 struct scst_device *dev = tgt_dev->acg_dev->dev;
3234 spin_lock_bh(&dev->dev_lock);
3235 __scst_block_dev(dev);
3236 spin_unlock_bh(&dev->dev_lock);
3238 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3239 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3240 mcmd->status = SCST_MGMT_STATUS_FAILED;
3242 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3244 scst_reset_tgt_dev(tgt_dev, 1);
3248 spin_lock_irq(&scst_list_lock);
3249 res = scst_set_mcmd_next_state(mcmd);
3250 spin_unlock_irq(&scst_list_lock);
3252 TRACE_EXIT_RES(res);
3256 /* Returns 0 if the command processing should be continued, <0 otherwise */
3257 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3261 struct scst_tgt *tgt = mcmd->sess->tgt;
3262 struct scst_session *sess;
3263 struct scst_device *dev;
3264 struct scst_tgt_dev *tgt_dev;
3269 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3272 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3278 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3279 spin_lock_bh(&dev->dev_lock);
3280 __scst_block_dev(dev);
3281 spin_unlock_bh(&dev->dev_lock);
3284 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3285 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3286 sess_tgt_dev_list_entry)
3290 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3291 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3292 mcmd->status = SCST_MGMT_STATUS_FAILED;
3294 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3296 scst_reset_tgt_dev(tgt_dev, 1);
3302 spin_lock_irq(&scst_list_lock);
3303 res = scst_set_mcmd_next_state(mcmd);
3304 spin_unlock_irq(&scst_list_lock);
3306 TRACE_EXIT_RES(res);
3310 /* Returns 0 if the command processing should be continued, <0 otherwise */
3311 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3317 mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3320 case SCST_ABORT_TASK_SET:
3321 case SCST_CLEAR_TASK_SET:
3322 res = scst_abort_task_set(mcmd);
3325 case SCST_LUN_RESET:
3326 res = scst_lun_reset(mcmd);
3329 case SCST_TARGET_RESET:
3330 res = scst_target_reset(mcmd);
3333 case SCST_ABORT_ALL_TASKS_SESS:
3334 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3337 case SCST_NEXUS_LOSS_SESS:
3338 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3341 case SCST_ABORT_ALL_TASKS:
3342 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3345 case SCST_NEXUS_LOSS:
3346 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3349 case SCST_CLEAR_ACA:
3350 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3351 /* Nothing to do (yet) */
3355 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3356 mcmd->status = SCST_MGMT_STATUS_FAILED;
3360 TRACE_EXIT_RES(res);
3364 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3366 struct scst_device *dev;
3367 struct scst_tgt_dev *tgt_dev;
3371 clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3372 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3373 struct scst_mgmt_cmd *m;
3374 spin_lock_irq(&scst_list_lock);
3375 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3376 mgmt_cmd_list_entry);
3377 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3379 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3380 spin_unlock_irq(&scst_list_lock);
3383 mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3384 if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3385 mcmd->status = SCST_MGMT_STATUS_FAILED;
3387 if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3388 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3389 mcmd->sess->tgt->tgtt->name);
3390 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3391 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn_done() returned",
3392 mcmd->sess->tgt->tgtt->name);
3396 case SCST_ABORT_TASK_SET:
3397 case SCST_CLEAR_TASK_SET:
3398 case SCST_LUN_RESET:
3399 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3402 case SCST_TARGET_RESET:
3403 case SCST_ABORT_ALL_TASKS:
3404 case SCST_NEXUS_LOSS:
3406 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3407 scst_unblock_dev(dev);
3412 case SCST_NEXUS_LOSS_SESS:
3413 case SCST_ABORT_ALL_TASKS_SESS:
3415 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3416 sess_tgt_dev_list_entry) {
3417 scst_unblock_dev(tgt_dev->acg_dev->dev);
3422 case SCST_CLEAR_ACA:
3427 mcmd->tgt_priv = NULL;
3433 /* Returns >0, if cmd should be requeued */
3434 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3440 TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3443 switch (mcmd->state) {
3444 case SCST_MGMT_CMD_STATE_INIT:
3445 res = scst_mgmt_cmd_init(mcmd);
3450 case SCST_MGMT_CMD_STATE_READY:
3451 if (scst_mgmt_cmd_exec(mcmd))
3455 case SCST_MGMT_CMD_STATE_DONE:
3456 scst_mgmt_cmd_send_done(mcmd);
3459 case SCST_MGMT_CMD_STATE_FINISHED:
3463 case SCST_MGMT_CMD_STATE_EXECUTING:
3468 PRINT_ERROR_PR("Unknown state %d of management command",
3476 TRACE_EXIT_RES(res);
3480 scst_free_mgmt_cmd(mcmd, 1);
3484 static inline int test_mgmt_cmd_list(void)
3486 int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3487 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3488 test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3492 int scst_mgmt_cmd_thread(void *arg)
3494 struct scst_mgmt_cmd *mcmd;
3498 daemonize("scsi_tgt_mc");
3499 recalc_sigpending();
3500 current->flags |= PF_NOFREEZE;
3502 spin_lock_irq(&scst_list_lock);
3505 init_waitqueue_entry(&wait, current);
3507 if (!test_mgmt_cmd_list()) {
3508 add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3511 set_current_state(TASK_INTERRUPTIBLE);
3512 if (test_mgmt_cmd_list())
3514 spin_unlock_irq(&scst_list_lock);
3516 spin_lock_irq(&scst_list_lock);
3518 set_current_state(TASK_RUNNING);
3519 remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3522 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3523 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3526 mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3527 typeof(*mcmd), mgmt_cmd_list_entry);
3528 TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3530 list_move_tail(&mcmd->mgmt_cmd_list_entry,
3531 &scst_mgmt_cmd_list);
3532 spin_unlock_irq(&scst_list_lock);
3533 rc = scst_process_mgmt_cmd(mcmd);
3534 spin_lock_irq(&scst_list_lock);
3536 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3537 "of active mgmt cmd list", mcmd);
3538 list_move(&mcmd->mgmt_cmd_list_entry,
3539 &scst_active_mgmt_cmd_list);
3543 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3544 list_empty(&scst_active_mgmt_cmd_list))
3549 spin_unlock_irq(&scst_list_lock);
3551 if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3552 smp_mb__after_atomic_dec();
3553 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3554 up(scst_shutdown_mutex);
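/*
 * Like scst_cmd_thread() above, this thread signals its exit through
 * scst_threads_count and scst_shutdown_mutex (a semaphore, despite the
 * name), presumably so that shutdown can wait until every worker is gone.
 */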
3561 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3562 *sess, int fn, int atomic, void *tgt_priv)
3564 struct scst_mgmt_cmd *mcmd = NULL;
3568 if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3569 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3570 "(target %s)", sess->tgt->tgtt->name);
3574 mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3580 mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3581 mcmd->tgt_priv = tgt_priv;
3588 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3589 struct scst_mgmt_cmd *mcmd)
3591 unsigned long flags;
3596 scst_sess_get(sess);
3598 spin_lock_irqsave(&scst_list_lock, flags);
3600 sess->sess_cmd_count++;
3603 if (unlikely(sess->shutting_down)) {
3604 PRINT_ERROR_PR("%s",
3605 "New mgmt cmd while shutting down the session");
3610 if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3611 switch(sess->init_phase) {
3612 case SCST_SESS_IPH_INITING:
3613 TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
3615 list_add_tail(&mcmd->mgmt_cmd_list_entry,
3616 &sess->init_deferred_mcmd_list);
3618 case SCST_SESS_IPH_SUCCESS:
3620 case SCST_SESS_IPH_FAILED:
3628 TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3629 list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3631 spin_unlock_irqrestore(&scst_list_lock, flags);
3633 wake_up(&scst_mgmt_cmd_list_waitQ);