4 * Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, version 2
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/smp_lock.h>
26 #include <asm/unistd.h>
27 #include <asm/string.h>
29 #include "scst_debug.h"
31 #include "scst_priv.h"
33 static int scst_do_job_init(struct list_head *init_cmd_list);
35 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
38 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
39 struct scst_mgmt_cmd *mcmd);
41 /* scst_list_lock assumed to be held */
42 static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
43 unsigned long *pflags, int left_locked)
49 TRACE_DBG("Moving cmd %p to cmd list", cmd);
50 list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);
52 /* This is an inline func., so unneeded code will be optimized out */
54 spin_unlock_irqrestore(&scst_list_lock, *pflags);
56 spin_unlock_irq(&scst_list_lock);
58 res = __scst_process_active_cmd(cmd, context, left_locked);
64 static inline void scst_schedule_tasklet(void)
66 struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];
68 #if 0 /* Looks like #else is better for performance */
69 if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
73 * We suppose that other CPU(s) are rather idle, so we
74 * ask one of them to help
76 TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
77 "instead", smp_processor_id());
78 wake_up(&scst_list_waitQ);
86 * Must not be called in parallel with scst_unregister_session() for the
89 struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
90 const uint8_t *lun, int lun_len,
91 const uint8_t *cdb, int cdb_len, int atomic)
98 if (unlikely(sess->shutting_down)) {
99 PRINT_ERROR_PR("%s", "New cmd while shutting down the session");
104 cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
109 cmd->tgt = sess->tgt;
110 cmd->tgtt = sess->tgt->tgtt;
111 cmd->state = SCST_CMD_STATE_INIT_WAIT;
114 * For both a wrong LUN and a wrong CDB, defer the error reporting to
115 * scst_cmd_init_done()
118 cmd->lun = scst_unpack_lun(lun, lun_len);
120 if (cdb_len <= MAX_COMMAND_SIZE) {
121 memcpy(cmd->cdb, cdb, cdb_len);
122 cmd->cdb_len = cdb_len;
125 TRACE_DBG("cmd %p, sess %p", cmd, sess);
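/*
 * A minimal, hypothetical usage sketch (not taken from any real target
 * driver): a target driver's receive path first builds the SCST command
 * with scst_rx_cmd() and then hands it over to the core with
 * scst_cmd_init_done().  The names my_sess and pdu are placeholders, the
 * scst_cmd_set_tag() accessor is assumed to exist in scsi_tgt.h, and the
 * call is assumed to be made from softirq/thread context, not hard IRQ:
 *
 *	struct scst_cmd *cmd;
 *
 *	cmd = scst_rx_cmd(my_sess, pdu->lun, pdu->lun_len,
 *			  pdu->cdb, pdu->cdb_len, 1);
 *	if (cmd == NULL)
 *		return;			(out of memory, drop the PDU)
 *	scst_cmd_set_tag(cmd, pdu->tag);
 *	scst_cmd_init_done(cmd, SCST_CONTEXT_DIRECT_ATOMIC);
 */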
133 void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
136 unsigned long flags = 0;
137 struct scst_session *sess = cmd->sess;
141 TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
142 TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag,
143 (uint64_t)cmd->lun, cmd->cdb_len);
144 TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
145 cmd->cdb, cmd->cdb_len);
147 if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
148 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
150 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
151 "SCST_CONTEXT_TASKLET instead\n", pref_context,
153 pref_context = SCST_CONTEXT_TASKLET;
156 spin_lock_irqsave(&scst_list_lock, flags);
158 /* Do it here; this saves us a separate lock or atomic operation */
159 sess->sess_cmd_count++;
161 list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
163 if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
164 switch(sess->init_phase) {
165 case SCST_SESS_IPH_SUCCESS:
167 case SCST_SESS_IPH_INITING:
168 TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
169 list_add_tail(&cmd->cmd_list_entry,
170 &sess->init_deferred_cmd_list);
171 goto out_unlock_flags;
172 case SCST_SESS_IPH_FAILED:
174 cmd->state = SCST_CMD_STATE_XMIT_RESP;
175 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
176 list_add_tail(&cmd->cmd_list_entry,
177 &scst_active_cmd_list);
184 if (unlikely(cmd->lun == (lun_t)-1)) {
185 PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
186 scst_set_cmd_error(cmd,
187 SCST_LOAD_SENSE(scst_sense_lun_not_supported));
188 cmd->state = SCST_CMD_STATE_XMIT_RESP;
189 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
190 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
194 if (unlikely(cmd->cdb_len == 0)) {
195 PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
196 scst_set_cmd_error(cmd,
197 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
198 cmd->state = SCST_CMD_STATE_XMIT_RESP;
199 TRACE_DBG("Adding cmd %p to active cmd list", cmd);
200 list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
204 cmd->state = SCST_CMD_STATE_INIT;
206 TRACE_DBG("Moving cmd %p to init cmd list", cmd);
207 list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
209 switch (pref_context) {
210 case SCST_CONTEXT_DIRECT:
211 case SCST_CONTEXT_DIRECT_ATOMIC:
212 res = scst_do_job_init(&scst_init_cmd_list);
214 goto out_unlock_flags;
217 case SCST_CONTEXT_THREAD:
218 goto out_thread_unlock_flags;
220 case SCST_CONTEXT_TASKLET:
221 scst_schedule_tasklet();
222 goto out_unlock_flags;
225 PRINT_ERROR_PR("Context %x is undefined, using thread one",
227 goto out_thread_unlock_flags;
231 switch (pref_context) {
232 case SCST_CONTEXT_DIRECT:
233 case SCST_CONTEXT_DIRECT_ATOMIC:
234 scst_process_active_cmd(cmd, pref_context, &flags, 0);
237 case SCST_CONTEXT_THREAD:
238 goto out_thread_unlock_flags;
240 case SCST_CONTEXT_TASKLET:
241 scst_schedule_tasklet();
242 goto out_unlock_flags;
245 PRINT_ERROR_PR("Context %x is undefined, using thread one",
247 goto out_thread_unlock_flags;
255 spin_unlock_irqrestore(&scst_list_lock, flags);
258 out_thread_unlock_flags:
259 cmd->non_atomic_only = 1;
260 spin_unlock_irqrestore(&scst_list_lock, flags);
261 wake_up(&scst_list_waitQ);
265 static int scst_parse_cmd(struct scst_cmd *cmd)
267 int res = SCST_CMD_STATE_RES_CONT_SAME;
269 struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
270 struct scst_device *dev = cmd->dev;
271 struct scst_info_cdb cdb_info;
272 int atomic = scst_cmd_atomic(cmd);
277 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
278 TRACE_DBG("ABORTED set, returning ABORTED "
283 if (atomic && !dev->handler->parse_atomic) {
284 TRACE_DBG("Dev handler %s parse() can not be "
285 "called in atomic context, rescheduling to the thread",
287 res = SCST_CMD_STATE_RES_NEED_THREAD;
292 * The expected transfer values supplied by the SCSI transport via the
293 * target driver are untrusted, so we prefer to derive them from the CDB.
294 * Additionally, not all transports support supplying the expected
298 if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type,
304 PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
305 "Should you update scst_scsi_op_table?",
306 cmd->cdb[0], dev->handler->name);
308 if (scst_cmd_is_expected_set(cmd)) {
309 TRACE(TRACE_MINOR, "Using initiator supplied values: "
310 "direction %d, transfer_len %d",
311 cmd->expected_data_direction,
312 cmd->expected_transfer_len);
313 cmd->data_direction = cmd->expected_data_direction;
314 cmd->bufflen = cmd->expected_transfer_len;
315 /* Restore (most probably) lost CDB length */
316 cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
317 if (cmd->cdb_len == -1) {
318 PRINT_ERROR_PR("Unable to get CDB length for "
319 "opcode 0x%02x. Returning INVALID "
320 "OPCODE", cmd->cdb[0]);
321 scst_set_cmd_error(cmd,
322 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
327 PRINT_ERROR_PR("Unknown opcode 0x%02x for %s and "
328 "target %s not supplied expected values. "
329 "Returning INVALID OPCODE.", cmd->cdb[0],
330 dev->handler->name, cmd->tgtt->name);
331 scst_set_cmd_error(cmd,
332 SCST_LOAD_SENSE(scst_sense_invalid_opcode));
336 TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
337 "set %s), transfer_len=%d (expected len %d), flags=%d",
338 cdb_info.op_name, cdb_info.direction,
339 cmd->expected_data_direction,
340 scst_cmd_is_expected_set(cmd) ? "yes" : "no",
341 cdb_info.transfer_len, cmd->expected_transfer_len,
344 /* Restore (most probably) lost CDB length */
345 cmd->cdb_len = cdb_info.cdb_len;
347 cmd->data_direction = cdb_info.direction;
348 if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
349 cmd->bufflen = cdb_info.transfer_len;
350 /* else cmd->bufflen remains 0, as it was initialized */
353 if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
354 PRINT_ERROR_PR("NACA bit in control byte CDB is not supported "
355 "(opcode 0x%02x)", cmd->cdb[0]);
356 scst_set_cmd_error(cmd,
357 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
361 if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
362 PRINT_ERROR_PR("Linked commands are not supported "
363 "(opcode 0x%02x)", cmd->cdb[0]);
364 scst_set_cmd_error(cmd,
365 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
369 if (likely(!scst_is_cmd_local(cmd))) {
370 TRACE_DBG("Calling dev handler %s parse(%p)",
371 dev->handler->name, cmd);
372 TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
373 state = dev->handler->parse(cmd, &cdb_info);
374 TRACE_DBG("Dev handler %s parse() returned %d",
375 dev->handler->name, state);
377 if (cmd->data_len == -1)
378 cmd->data_len = cmd->bufflen;
380 if (state == SCST_CMD_STATE_DEFAULT)
381 state = SCST_CMD_STATE_PREPARE_SPACE;
384 state = SCST_CMD_STATE_PREPARE_SPACE;
387 if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
388 if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
389 (state != SCST_CMD_STATE_DEV_PARSE)) ||
390 ((cmd->bufflen != 0) &&
391 (cmd->data_direction == SCST_DATA_NONE)) ||
392 ((cmd->bufflen == 0) &&
393 (cmd->data_direction != SCST_DATA_NONE)) ||
394 ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
395 (state > SCST_CMD_STATE_PREPARE_SPACE)))
397 PRINT_ERROR_PR("Dev handler %s parse() returned "
398 "invalid cmd data_direction %d, "
399 "bufflen %zd or state %d (opcode 0x%x)",
401 cmd->data_direction, cmd->bufflen,
409 case SCST_CMD_STATE_PREPARE_SPACE:
410 case SCST_CMD_STATE_DEV_PARSE:
411 case SCST_CMD_STATE_RDY_TO_XFER:
412 case SCST_CMD_STATE_SEND_TO_MIDLEV:
413 case SCST_CMD_STATE_DEV_DONE:
414 case SCST_CMD_STATE_XMIT_RESP:
415 case SCST_CMD_STATE_FINISHED:
417 res = SCST_CMD_STATE_RES_CONT_SAME;
420 case SCST_CMD_STATE_REINIT:
421 cmd->tgt_dev_saved = tgt_dev_saved;
423 res = SCST_CMD_STATE_RES_RESTART;
427 case SCST_CMD_STATE_NEED_THREAD_CTX:
428 TRACE_DBG("Dev handler %s parse() requested thread "
429 "context, rescheduling", dev->handler->name);
430 res = SCST_CMD_STATE_RES_NEED_THREAD;
436 PRINT_ERROR_PR("Dev handler %s parse() returned "
437 "invalid cmd state %d (opcode %d)",
438 dev->handler->name, state, cmd->cdb[0]);
440 PRINT_ERROR_PR("Dev handler %s parse() returned "
441 "error %d (opcode %d)", dev->handler->name,
447 if ((cmd->resp_data_len == -1) && set_dir) {
448 if (cmd->data_direction == SCST_DATA_READ)
449 cmd->resp_data_len = cmd->bufflen;
451 cmd->resp_data_len = 0;
455 TRACE_EXIT_HRES(res);
459 /* dev_done() will be called as part of the regular cmd's finish */
460 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
461 cmd->state = SCST_CMD_STATE_DEV_DONE;
462 res = SCST_CMD_STATE_RES_CONT_SAME;
466 cmd->state = SCST_CMD_STATE_XMIT_RESP;
467 res = SCST_CMD_STATE_RES_CONT_SAME;
471 void scst_cmd_mem_work_fn(void *p)
475 spin_lock_bh(&scst_cmd_mem_lock);
477 scst_cur_max_cmd_mem += (scst_cur_max_cmd_mem >> 3);
478 if (scst_cur_max_cmd_mem < scst_max_cmd_mem) {
479 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
480 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
482 scst_cur_max_cmd_mem = scst_max_cmd_mem;
483 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
485 TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
487 spin_unlock_bh(&scst_cmd_mem_lock);
493 int scst_check_mem(struct scst_cmd *cmd)
499 if (cmd->mem_checked)
502 spin_lock_bh(&scst_cmd_mem_lock);
504 scst_cur_cmd_mem += cmd->bufflen;
505 cmd->mem_checked = 1;
506 if (likely(scst_cur_cmd_mem <= scst_cur_max_cmd_mem))
509 TRACE(TRACE_OUT_OF_MEM, "Total memory allocated by commands (%ld Kb) "
510 "is too big, returning QUEUE FULL to initiator \"%s\" (maximum "
511 "allowed %ld Kb)", scst_cur_cmd_mem >> 10,
512 (cmd->sess->initiator_name[0] == '\0') ?
513 "Anonymous" : cmd->sess->initiator_name,
514 scst_cur_max_cmd_mem >> 10);
516 scst_cur_cmd_mem -= cmd->bufflen;
517 cmd->mem_checked = 0;
519 cmd->state = SCST_CMD_STATE_XMIT_RESP;
523 spin_unlock_bh(&scst_cmd_mem_lock);
530 static void scst_low_cur_max_cmd_mem(void)
534 if (test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
535 cancel_delayed_work(&scst_cmd_mem_work);
536 flush_scheduled_work();
537 clear_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
540 spin_lock_bh(&scst_cmd_mem_lock);
542 scst_cur_max_cmd_mem = (scst_cur_cmd_mem >> 1) +
543 (scst_cur_cmd_mem >> 2);
544 if (scst_cur_max_cmd_mem < 16*1024*1024)
545 scst_cur_max_cmd_mem = 16*1024*1024;
547 if (!test_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags)) {
548 TRACE_MGMT_DBG("%s", "Schedule cmd_mem_work");
549 schedule_delayed_work(&scst_cmd_mem_work, SCST_CMD_MEM_TIMEOUT);
550 set_bit(SCST_FLAG_CMD_MEM_WORK_SCHEDULED, &scst_flags);
553 spin_unlock_bh(&scst_cmd_mem_lock);
555 TRACE_MGMT_DBG("New max cmd mem %ld Mb", scst_cur_max_cmd_mem >> 20);
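/*
 * A worked example of the limit policy above, based on the visible
 * arithmetic: scst_cmd_mem_work_fn() grows the limit by 1/8
 * (scst_cur_max_cmd_mem += scst_cur_max_cmd_mem >> 3) every
 * SCST_CMD_MEM_TIMEOUT until scst_max_cmd_mem is reached, while
 * scst_low_cur_max_cmd_mem() drops it to 3/4 of the currently allocated
 * command memory ((x >> 1) + (x >> 2)), but never below 16 MB.  E.g. with
 * 400 MB currently allocated, an allocation failure lowers the limit to
 * 200 + 100 = 300 MB; it then creeps back by ~12.5% per timeout period
 * (337 MB, 379 MB, ...) until the configured maximum is restored.
 */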
561 static int scst_prepare_space(struct scst_cmd *cmd)
563 int r, res = SCST_CMD_STATE_RES_CONT_SAME;
567 if (cmd->data_direction == SCST_DATA_NONE) {
568 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
572 r = scst_check_mem(cmd);
573 if (unlikely(r != 0))
576 if (cmd->data_buf_tgt_alloc) {
577 TRACE_MEM("%s", "Custom tgt data buf allocation requested");
578 r = cmd->tgtt->alloc_data_buf(cmd);
579 cmd->data_buf_alloced = (r == 0);
581 r = scst_alloc_space(cmd);
584 if (scst_cmd_atomic(cmd)) {
585 TRACE_MEM("%s", "Atomic memory allocation failed, "
586 "rescheduling to the thread");
587 res = SCST_CMD_STATE_RES_NEED_THREAD;
593 switch (cmd->data_direction) {
594 case SCST_DATA_WRITE:
595 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
599 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
604 TRACE_EXIT_HRES(res);
608 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
609 "(size %zd), sending BUSY or QUEUE FULL status", cmd->bufflen);
610 scst_low_cur_max_cmd_mem();
612 cmd->state = SCST_CMD_STATE_DEV_DONE;
613 res = SCST_CMD_STATE_RES_CONT_SAME;
618 static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
620 struct scst_tgt *tgt = cmd->sess->tgt;
626 spin_lock_irqsave(&tgt->tgt_lock, flags);
629 TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
631 if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
632 /* At least one cmd finished, so try again */
634 TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
635 "(finished_cmds=%d, tgt->finished_cmds=%d, "
636 "retry_cmds=%d)", finished_cmds,
637 atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
642 TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
643 /* IRQ already off */
644 spin_lock(&scst_list_lock);
645 list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
646 spin_unlock(&scst_list_lock);
648 if (!tgt->retry_timer_active) {
649 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
650 add_timer(&tgt->retry_timer);
651 tgt->retry_timer_active = 1;
655 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
661 static int scst_rdy_to_xfer(struct scst_cmd *cmd)
664 int atomic = scst_cmd_atomic(cmd);
668 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
670 TRACE_DBG("ABORTED set, returning ABORTED for "
675 if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
676 TRACE_DBG("%s", "rdy_to_xfer() can not be "
677 "called in atomic context, rescheduling to the thread");
678 res = SCST_CMD_STATE_RES_NEED_THREAD;
683 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
685 res = SCST_CMD_STATE_RES_CONT_NEXT;
686 cmd->state = SCST_CMD_STATE_DATA_WAIT;
688 TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
690 if (((scst_random() % 100) == 75))
691 rc = SCST_TGT_RES_QUEUE_FULL;
694 rc = cmd->tgtt->rdy_to_xfer(cmd);
695 TRACE_DBG("rdy_to_xfer() returned %d", rc);
697 if (likely(rc == SCST_TGT_RES_SUCCESS))
700 /* Restore the previous state */
701 cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
704 case SCST_TGT_RES_QUEUE_FULL:
706 if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
712 case SCST_TGT_RES_NEED_THREAD_CTX:
714 TRACE_DBG("Target driver %s "
715 "rdy_to_xfer() requested thread "
716 "context, rescheduling", cmd->tgtt->name);
717 res = SCST_CMD_STATE_RES_NEED_THREAD;
728 TRACE_EXIT_HRES(res);
732 if (rc == SCST_TGT_RES_FATAL_ERROR) {
733 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
734 "fatal error", cmd->tgtt->name);
736 PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
737 "value %d", cmd->tgtt->name, rc);
739 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
742 cmd->state = SCST_CMD_STATE_DEV_DONE;
743 res = SCST_CMD_STATE_RES_CONT_SAME;
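/*
 * A hypothetical sketch of the matching target-driver side: once its
 * rdy_to_xfer() callback has transferred the WRITE data into cmd->sg, the
 * driver reports the outcome back with scst_rx_data().  my_xfer_done() and
 * the ok flag are placeholders, not part of any real driver:
 *
 *	static void my_xfer_done(struct scst_cmd *cmd, int ok)
 *	{
 *		scst_rx_data(cmd, ok ? SCST_RX_STATUS_SUCCESS
 *				     : SCST_RX_STATUS_ERROR,
 *			     SCST_CONTEXT_DIRECT);
 *	}
 */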
747 void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
753 TRACE_DBG("Preferred context: %d", pref_context);
754 TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
755 cmd->non_atomic_only = 0;
757 if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
758 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
760 PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
761 "SCST_CONTEXT_TASKLET instead\n", pref_context,
763 pref_context = SCST_CONTEXT_TASKLET;
767 case SCST_RX_STATUS_SUCCESS:
768 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
771 case SCST_RX_STATUS_ERROR_SENSE_SET:
772 cmd->state = SCST_CMD_STATE_DEV_DONE;
775 case SCST_RX_STATUS_ERROR_FATAL:
776 set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
778 case SCST_RX_STATUS_ERROR:
779 scst_set_cmd_error(cmd,
780 SCST_LOAD_SENSE(scst_sense_hardw_error));
781 cmd->state = SCST_CMD_STATE_DEV_DONE;
785 PRINT_ERROR_PR("scst_rx_data() received unknown status %x",
790 switch (pref_context) {
791 case SCST_CONTEXT_DIRECT:
792 case SCST_CONTEXT_DIRECT_ATOMIC:
793 scst_check_retries(cmd->tgt, 0);
794 __scst_process_active_cmd(cmd, pref_context, 0);
798 PRINT_ERROR_PR("Context %x is undefined, using thread one",
801 case SCST_CONTEXT_THREAD:
802 spin_lock_irqsave(&scst_list_lock, flags);
803 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
804 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
805 cmd->non_atomic_only = 1;
806 spin_unlock_irqrestore(&scst_list_lock, flags);
807 scst_check_retries(cmd->tgt, 1);
808 wake_up(&scst_list_waitQ);
811 case SCST_CONTEXT_TASKLET:
812 spin_lock_irqsave(&scst_list_lock, flags);
813 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
814 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
815 spin_unlock_irqrestore(&scst_list_lock, flags);
816 scst_schedule_tasklet();
817 scst_check_retries(cmd->tgt, 0);
825 /* No locks supposed to be held */
826 static void scst_check_sense(struct scst_cmd *cmd, const uint8_t *rq_sense,
827 int rq_sense_len, int *next_state)
830 struct scst_device *dev = cmd->dev;
831 int dbl_ua_possible, ua_sent = 0;
835 /* If we had an internal bus reset behind us, set the command error UA */
836 if ((dev->scsi_dev != NULL) &&
837 unlikely(cmd->host_status == DID_RESET) &&
838 scst_is_ua_command(cmd))
840 TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
841 dev->scsi_dev->was_reset, cmd->host_status);
842 scst_set_cmd_error(cmd,
843 SCST_LOAD_SENSE(scst_sense_reset_UA));
846 /* It looks like it is safe to clear was_reset here */
847 dev->scsi_dev->was_reset = 0;
851 if (rq_sense != NULL) {
852 sense_valid = SCST_SENSE_VALID(rq_sense);
854 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
856 * We checked that rq_sense_len < sizeof(cmd->sense_buffer)
859 memcpy(cmd->sense_buffer, rq_sense, rq_sense_len);
862 sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);
864 dbl_ua_possible = dev->dev_double_ua_possible;
865 TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
866 if (unlikely(dbl_ua_possible)) {
867 spin_lock_bh(&dev->dev_lock);
868 barrier(); /* to reread dev_double_ua_possible */
869 dbl_ua_possible = dev->dev_double_ua_possible;
871 ua_sent = dev->dev_reset_ua_sent;
873 spin_unlock_bh(&dev->dev_lock);
877 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
878 sizeof(cmd->sense_buffer));
879 /* Check Unit Attention Sense Key */
880 if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
881 if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
885 TRACE(TRACE_MGMT, "%s",
886 "Double UA detected");
888 TRACE(TRACE_MGMT, "Retrying cmd %p "
889 "(tag %d)", cmd, cmd->tag);
891 cmd->masked_status = 0;
893 cmd->host_status = DID_OK;
894 cmd->driver_status = 0;
895 memset(cmd->sense_buffer, 0,
896 sizeof(cmd->sense_buffer));
898 *next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
900 * Dev is still blocked by this cmd, so
901 * it's OK to clear SCST_DEV_SERIALIZED
904 dev->dev_double_ua_possible = 0;
905 dev->dev_serialized = 0;
906 dev->dev_reset_ua_sent = 0;
909 dev->dev_reset_ua_sent = 1;
912 if (cmd->ua_ignore == 0) {
913 if (unlikely(dbl_ua_possible)) {
914 __scst_process_UA(dev, cmd,
916 sizeof(cmd->sense_buffer), 0);
918 scst_process_UA(dev, cmd,
920 sizeof(cmd->sense_buffer), 0);
926 if (unlikely(dbl_ua_possible)) {
927 if (ua_sent && scst_is_ua_command(cmd)) {
928 TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
929 dev->dev_double_ua_possible = 0;
930 dev->dev_serialized = 0;
931 dev->dev_reset_ua_sent = 0;
933 spin_unlock_bh(&dev->dev_lock);
941 spin_unlock_bh(&dev->dev_lock);
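/*
 * For reference, the fixed-format sense offsets relied upon above (see
 * SPC): the low nibble of byte 2 is the sense key, byte 12 is the ASC and
 * byte 13 the ASCQ.  So sense_buffer[2] == UNIT_ATTENTION together with
 * sense_buffer[12] == SCST_SENSE_ASC_UA_RESET identifies a reset UA; when
 * dev_double_ua_possible is set and such a UA was already delivered once,
 * the command is transparently retried instead of reporting the second
 * (double) UA to the initiator.
 */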
945 static int scst_check_auto_sense(struct scst_cmd *cmd)
951 if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
952 (!SCST_SENSE_VALID(cmd->sense_buffer) ||
953 SCST_NO_SENSE(cmd->sense_buffer)))
955 TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
956 "cmd->status=%x, cmd->masked_status=%x, "
957 "cmd->msg_status=%x, cmd->host_status=%x, "
958 "cmd->driver_status=%x", cmd->status, cmd->masked_status,
959 cmd->msg_status, cmd->host_status, cmd->driver_status);
961 } else if (unlikely(cmd->host_status)) {
962 if ((cmd->host_status == DID_REQUEUE) ||
963 (cmd->host_status == DID_IMM_RETRY) ||
964 (cmd->host_status == DID_SOFT_ERROR)) {
967 TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
968 "received, returning HARDWARE ERROR instead",
970 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
978 static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
979 const uint8_t *rq_sense, int rq_sense_len, int *next_state)
985 cmd->status = result & 0xff;
986 cmd->masked_status = status_byte(result);
987 cmd->msg_status = msg_byte(result);
988 cmd->host_status = host_byte(result);
989 cmd->driver_status = driver_byte(result);
990 TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, "
991 "cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
992 "cmd->driver_status=%x", result, cmd->status,
993 cmd->masked_status, cmd->msg_status, cmd->host_status,
998 scst_dec_on_dev_cmd(cmd);
1000 type = cmd->dev->handler->type;
1001 if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1002 cmd->tgt_dev->acg_dev->rd_only_flag &&
1003 (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1004 type == TYPE_TAPE)) {
1008 length = scst_get_buf_first(cmd, &address);
1009 TRACE_DBG("length %d", length);
1010 if (unlikely(length <= 0)) {
1011 PRINT_ERROR_PR("%s: scst_get_buf_first() failed",
1015 if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
1016 address[2] |= 0x80; /* Write Protect*/
1018 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
1019 address[3] |= 0x80; /* Write Protect*/
1021 scst_put_buf(cmd, address);
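/*
 * The patching above sets the WP bit (bit 7 of the device-specific
 * parameter) in the mode parameter header returned by MODE SENSE: byte 2
 * of the 6-byte header and byte 3 of the 10-byte header.  This is how
 * read-only exported devices are advertised as write protected.
 */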
1025 scst_check_sense(cmd, rq_sense, rq_sense_len, next_state);
1031 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1032 static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
1033 struct scsi_request **req)
1035 struct scst_cmd *cmd = NULL;
1037 if (scsi_cmd && (*req = scsi_cmd->sc_request))
1038 cmd = (struct scst_cmd *)(*req)->upper_private_data;
1041 PRINT_ERROR_PR("%s", "Request with NULL cmd");
1043 scsi_release_request(*req);
1049 static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
1051 struct scsi_request *req = NULL;
1052 struct scst_cmd *cmd;
1060 * We don't use scsi_cmd->resid, because:
1061 * 1. Many low level initiator drivers don't use (set) this field
1062 * 2. We determine the command's buffer size directly from CDB,
1063 * so scsi_cmd->resid is not relevant for us, and target drivers
1064 * should know the residual, if necessary, by comparing expected
1065 * and actual transfer sizes.
1068 cmd = scst_get_cmd(scsi_cmd, &req);
1072 next_state = SCST_CMD_STATE_DEV_DONE;
1073 scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
1074 sizeof(req->sr_sense_buffer), &next_state);
1076 /* Clear out request structure */
1078 req->sr_sglist_len = 0;
1079 req->sr_bufflen = 0;
1080 req->sr_buffer = NULL;
1081 req->sr_underflow = 0;
1082 req->sr_request->rq_disk = NULL; /* disown request blk */
1084 cmd->bufflen = req->sr_bufflen; //??
1086 scst_release_request(cmd);
1088 cmd->state = next_state;
1089 cmd->non_atomic_only = 0;
1091 __scst_process_active_cmd(cmd, scst_get_context(), 0);
1097 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1098 static void scst_cmd_done(void *data, char *sense, int result, int resid)
1100 struct scst_cmd *cmd;
1108 * We don't use resid, because:
1109 * 1. Many low level initiator drivers don't use (set) this field
1110 * 2. We determine the command's buffer size directly from CDB,
1111 * so resid is not relevant for us, and target drivers
1112 * should know the residual, if necessary, by comparing expected
1113 * and actual transfer sizes.
1116 cmd = (struct scst_cmd *)data;
1120 next_state = SCST_CMD_STATE_DEV_DONE;
1121 scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE,
1124 cmd->state = next_state;
1125 cmd->non_atomic_only = 0;
1127 __scst_process_active_cmd(cmd, scst_get_context(), 0);
1133 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1135 static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
1141 scst_dec_on_dev_cmd(cmd);
1143 if (next_state == SCST_CMD_STATE_DEFAULT)
1144 next_state = SCST_CMD_STATE_DEV_DONE;
1146 if (next_state == SCST_CMD_STATE_DEV_DONE) {
1147 #if defined(DEBUG) || defined(TRACING)
1150 struct scatterlist *sg = cmd->sg;
1151 TRACE(TRACE_RECV_TOP,
1152 "Exec'd %d S/G(s) at %p sg[0].page at %p",
1153 cmd->sg_cnt, sg, (void*)sg[0].page);
1154 for(i = 0; i < cmd->sg_cnt; ++i) {
1155 TRACE_BUFF_FLAG(TRACE_RECV_TOP,
1156 "Exec'd sg:", page_address(sg[i].page),
1165 if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
1166 (next_state != SCST_CMD_STATE_XMIT_RESP) &&
1167 (next_state != SCST_CMD_STATE_FINISHED))
1169 PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
1170 "state %d (opcode %d)", next_state, cmd->cdb[0]);
1171 scst_set_cmd_error(cmd,
1172 SCST_LOAD_SENSE(scst_sense_hardw_error));
1173 next_state = SCST_CMD_STATE_DEV_DONE;
1176 if (scst_check_auto_sense(cmd)) {
1177 PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
1178 "opcode %d", cmd->cdb[0]);
1182 scst_check_sense(cmd, NULL, 0, &next_state);
1184 cmd->state = next_state;
1185 cmd->non_atomic_only = 0;
1187 __scst_process_active_cmd(cmd, scst_get_context(), 0);
1193 static int scst_report_luns_local(struct scst_cmd *cmd)
1195 int res = SCST_EXEC_COMPLETED;
1198 struct scst_tgt_dev *tgt_dev = NULL;
1204 cmd->masked_status = 0;
1205 cmd->msg_status = 0;
1206 cmd->host_status = DID_OK;
1207 cmd->driver_status = 0;
1209 /* ToDo: use full SG buffer, not only the first entry */
1210 buffer_size = scst_get_buf_first(cmd, &buffer);
1211 if (unlikely(buffer_size <= 0))
1214 if (buffer_size < 16) {
1218 memset(buffer, 0, buffer_size);
1220 /* sess->sess_tgt_dev_list is protected by suspended activity */
1221 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
1222 sess_tgt_dev_list_entry)
1224 if (8 + 8 * dev_cnt + 2 <= buffer_size) {
1225 buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
1226 buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
1229 /* Temporary, until the ToDo above is done */
1230 if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))
1234 /* Set the response header */
1236 buffer[0] = (dev_cnt >> 24) & 0xff;
1237 buffer[1] = (dev_cnt >> 16) & 0xff;
1238 buffer[2] = (dev_cnt >> 8) & 0xff;
1239 buffer[3] = dev_cnt & 0xff;
1243 scst_put_buf(cmd, buffer);
1245 if (buffer_size > dev_cnt)
1246 scst_set_resp_data_len(cmd, dev_cnt);
1251 /* Report the result */
1252 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1254 TRACE_EXIT_RES(res);
1258 scst_put_buf(cmd, buffer);
1261 scst_set_cmd_error(cmd,
1262 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
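/*
 * For reference, the REPORT LUNS parameter data built above follows SPC:
 * bytes 0-3 carry the LUN list length in bytes, bytes 4-7 are reserved,
 * and each following 8-byte entry holds one LUN, of which only the first
 * two bytes are filled here (flat addressing).
 */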
1266 static int scst_pre_select(struct scst_cmd *cmd)
1268 int res = SCST_EXEC_NOT_COMPLETED;
1272 if (scst_cmd_atomic(cmd)) {
1273 res = SCST_EXEC_NEED_THREAD;
1277 scst_block_dev(cmd->dev, 1);
1278 /* Device will be unblocked in scst_done_cmd_check() */
1280 if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
1281 int rc = scst_set_pending_UA(cmd);
1283 res = SCST_EXEC_COMPLETED;
1285 /* Report the result */
1286 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1292 TRACE_EXIT_RES(res);
1296 static inline void scst_report_reserved(struct scst_cmd *cmd)
1300 scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
1302 /* Report the result */
1303 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1309 static int scst_reserve_local(struct scst_cmd *cmd)
1311 int res = SCST_EXEC_NOT_COMPLETED;
1312 struct scst_device *dev;
1313 struct scst_tgt_dev *tgt_dev_tmp;
1317 if (scst_cmd_atomic(cmd)) {
1318 res = SCST_EXEC_NEED_THREAD;
1322 if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
1323 PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
1324 "(lun=%Ld)", (uint64_t)cmd->lun);
1325 scst_set_cmd_error(cmd,
1326 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1328 res = SCST_EXEC_COMPLETED;
1333 scst_block_dev(dev, 1);
1334 /* Device will be unblocked in scst_done_cmd_check() */
1336 spin_lock_bh(&dev->dev_lock);
1338 if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1339 scst_report_reserved(cmd);
1340 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1341 res = SCST_EXEC_COMPLETED;
1345 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1346 dev_tgt_dev_list_entry)
1348 if (cmd->tgt_dev != tgt_dev_tmp)
1349 set_bit(SCST_TGT_DEV_RESERVED,
1350 &tgt_dev_tmp->tgt_dev_flags);
1352 dev->dev_reserved = 1;
1355 spin_unlock_bh(&dev->dev_lock);
1358 TRACE_EXIT_RES(res);
1362 static int scst_release_local(struct scst_cmd *cmd)
1364 int res = SCST_EXEC_NOT_COMPLETED;
1365 struct scst_tgt_dev *tgt_dev_tmp;
1366 struct scst_device *dev;
1372 scst_block_dev(dev, 1);
1374 TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);
1376 spin_lock_bh(&dev->dev_lock);
1379 * The device could have been RELEASED behind us if the RESERVING session
1380 * was closed (see scst_free_tgt_dev()), but this actually doesn't
1381 * matter, so take the lock and don't retest the DEV_RESERVED bits
1383 if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
1384 res = SCST_EXEC_COMPLETED;
1386 cmd->masked_status = 0;
1387 cmd->msg_status = 0;
1388 cmd->host_status = DID_OK;
1389 cmd->driver_status = 0;
1391 list_for_each_entry(tgt_dev_tmp,
1392 &dev->dev_tgt_dev_list,
1393 dev_tgt_dev_list_entry)
1395 clear_bit(SCST_TGT_DEV_RESERVED,
1396 &tgt_dev_tmp->tgt_dev_flags);
1398 dev->dev_reserved = 0;
1401 spin_unlock_bh(&dev->dev_lock);
1403 if (res == SCST_EXEC_COMPLETED) {
1405 /* Report the result */
1406 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1409 TRACE_EXIT_RES(res);
1414 * The result of cmd execution, if any, should be reported
1415 * via scst_cmd_done_local()
1417 static int scst_pre_exec(struct scst_cmd *cmd)
1419 int res = SCST_EXEC_NOT_COMPLETED, rc;
1420 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1424 /* Reserve check before Unit Attention */
1425 if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
1426 (cmd->cdb[0] != INQUIRY) &&
1427 (cmd->cdb[0] != REPORT_LUNS) &&
1428 (cmd->cdb[0] != RELEASE) &&
1429 (cmd->cdb[0] != RELEASE_10) &&
1430 (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
1431 (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
1432 (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))
1434 scst_report_reserved(cmd);
1435 res = SCST_EXEC_COMPLETED;
1439 /* If we had an internal bus reset, set the command error unit attention */
1440 if ((cmd->dev->scsi_dev != NULL) &&
1441 unlikely(cmd->dev->scsi_dev->was_reset) &&
1442 scst_is_ua_command(cmd))
1444 struct scst_device *dev = cmd->dev;
1446 /* Prevent more than one cmd from being triggered by was_reset */
1447 spin_lock_bh(&dev->dev_lock);
1448 barrier(); /* to reread was_reset */
1449 if (dev->scsi_dev->was_reset) {
1450 TRACE(TRACE_MGMT, "was_reset is %d", 1);
1451 scst_set_cmd_error(cmd,
1452 SCST_LOAD_SENSE(scst_sense_reset_UA));
1453 /* It looks like it is safe to clear was_reset here */
1454 dev->scsi_dev->was_reset = 0;
1458 spin_unlock_bh(&dev->dev_lock);
1464 if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
1465 scst_is_ua_command(cmd))
1467 rc = scst_set_pending_UA(cmd);
1472 /* Check READ_ONLY device status */
1473 if (tgt_dev->acg_dev->rd_only_flag &&
1474 (cmd->cdb[0] == WRITE_6 || /* ToDo: full list of the modify cmds */
1475 cmd->cdb[0] == WRITE_10 ||
1476 cmd->cdb[0] == WRITE_12 ||
1477 cmd->cdb[0] == WRITE_16 ||
1478 cmd->cdb[0] == WRITE_VERIFY ||
1479 cmd->cdb[0] == WRITE_VERIFY_12 ||
1480 cmd->cdb[0] == WRITE_VERIFY_16 ||
1481 (cmd->dev->handler->type == TYPE_TAPE &&
1482 (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
1484 scst_set_cmd_error(cmd,
1485 SCST_LOAD_SENSE(scst_sense_data_protect));
1489 TRACE_EXIT_RES(res);
1493 res = SCST_EXEC_COMPLETED;
1495 /* Report the result */
1496 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1501 * The result of cmd execution, if any, should be reported
1502 * via scst_cmd_done_local()
1504 static inline int scst_local_exec(struct scst_cmd *cmd)
1506 int res = SCST_EXEC_NOT_COMPLETED;
1511 * When adding new commands here, don't forget to update
1512 * scst_is_cmd_local() in scsi_tgt.h, if necessary
1515 switch (cmd->cdb[0]) {
1517 case MODE_SELECT_10:
1519 res = scst_pre_select(cmd);
1523 res = scst_reserve_local(cmd);
1527 res = scst_release_local(cmd);
1530 res = scst_report_luns_local(cmd);
1534 TRACE_EXIT_RES(res);
1538 static int scst_do_send_to_midlev(struct scst_cmd *cmd)
1540 int rc = SCST_EXEC_NOT_COMPLETED;
1544 cmd->sent_to_midlev = 1;
1545 cmd->state = SCST_CMD_STATE_EXECUTING;
1546 cmd->scst_cmd_done = scst_cmd_done_local;
1548 set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
1549 smp_mb__after_set_bit();
1551 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1552 TRACE_DBG("ABORTED set, aborting cmd %p", cmd);
1556 rc = scst_pre_exec(cmd);
1557 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1558 if (rc != SCST_EXEC_NOT_COMPLETED) {
1559 if (rc == SCST_EXEC_COMPLETED)
1561 else if (rc == SCST_EXEC_NEED_THREAD)
1567 rc = scst_local_exec(cmd);
1568 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1569 if (rc != SCST_EXEC_NOT_COMPLETED) {
1570 if (rc == SCST_EXEC_COMPLETED)
1572 else if (rc == SCST_EXEC_NEED_THREAD)
1578 if (cmd->dev->handler->exec) {
1579 struct scst_device *dev = cmd->dev;
1580 TRACE_DBG("Calling dev handler %s exec(%p)",
1581 dev->handler->name, cmd);
1582 TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
1583 cmd->scst_cmd_done = scst_cmd_done_local;
1584 rc = dev->handler->exec(cmd);
1585 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1586 TRACE_DBG("Dev handler %s exec() returned %d",
1587 dev->handler->name, rc);
1588 if (rc != SCST_EXEC_NOT_COMPLETED) {
1589 if (rc == SCST_EXEC_COMPLETED)
1591 else if (rc == SCST_EXEC_NEED_THREAD)
1598 TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
1600 if (unlikely(cmd->dev->scsi_dev == NULL)) {
1601 PRINT_ERROR_PR("Command for virtual device must be "
1602 "processed by device handler (lun %Ld)!",
1603 (uint64_t)cmd->lun);
1607 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1608 if (scst_alloc_request(cmd) != 0) {
1609 PRINT_INFO_PR("%s", "Unable to allocate request, "
1610 "sending BUSY status");
1614 scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
1615 (void *)cmd->scsi_req->sr_buffer,
1616 cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
1619 rc = scst_exec_req(cmd->dev->scsi_dev, cmd->cdb, cmd->cdb_len,
1620 cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
1621 cmd->timeout, cmd->retries, cmd, scst_cmd_done,
1624 PRINT_INFO_PR("scst_exec_req() failed: %d", rc);
1629 rc = SCST_EXEC_COMPLETED;
1636 /* Restore the state */
1637 cmd->sent_to_midlev = 0;
1638 cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1642 PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
1643 "invalid code %d", cmd->dev->handler->name, rc);
1647 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1649 cmd->state = SCST_CMD_STATE_DEV_DONE;
1650 rc = SCST_EXEC_COMPLETED;
1651 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1654 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1658 cmd->state = SCST_CMD_STATE_DEV_DONE;
1659 rc = SCST_EXEC_COMPLETED;
1660 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
1665 rc = SCST_EXEC_COMPLETED;
1666 /* Report the result. The cmd is not completed */
1667 scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
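/*
 * scst_send_to_midlev() below enforces per-tgt_dev ordering using the
 * serial numbers assigned in scst_translate_lun().  A small worked
 * example, assuming expected_sn is currently 3: a command with sn == 5 is
 * parked on tgt_dev->deferred_cmd_list; once the commands with sn 3 and 4
 * pass through __scst_inc_expected_sn(), expected_sn reaches 5 and
 * scst_check_deferred_commands() picks the parked command up again.
 */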
1671 static int scst_send_to_midlev(struct scst_cmd *cmd)
1674 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1675 struct scst_device *dev = cmd->dev;
1678 int atomic = scst_cmd_atomic(cmd);
1682 res = SCST_CMD_STATE_RES_CONT_NEXT;
1684 if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
1685 TRACE_DBG("Dev handler %s exec() can not be "
1686 "called in atomic context, rescheduling to the thread",
1687 dev->handler->name);
1688 res = SCST_CMD_STATE_RES_NEED_THREAD;
1692 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1695 scst_inc_cmd_count(); /* protect dev & tgt_dev */
1697 if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
1698 rc = scst_do_send_to_midlev(cmd);
1699 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
1700 if (rc == SCST_EXEC_NEED_THREAD) {
1701 TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1702 "thread context, rescheduling");
1703 res = SCST_CMD_STATE_RES_NEED_THREAD;
1704 scst_dec_on_dev_cmd(cmd);
1705 goto out_dec_cmd_count;
1707 BUG_ON(rc != SCST_EXEC_COMPLETED);
1712 expected_sn = tgt_dev->expected_sn;
1713 if (cmd->sn != expected_sn) {
1714 spin_lock_bh(&tgt_dev->sn_lock);
1715 tgt_dev->def_cmd_count++;
1717 barrier(); /* to reread expected_sn */
1718 expected_sn = tgt_dev->expected_sn;
1719 if (cmd->sn != expected_sn) {
1720 scst_dec_on_dev_cmd(cmd);
1721 TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
1722 "expected_sn=%d)", cmd, cmd->sn, expected_sn);
1723 list_add_tail(&cmd->sn_cmd_list_entry,
1724 &tgt_dev->deferred_cmd_list);
1725 spin_unlock_bh(&tgt_dev->sn_lock);
1726 /* !! At this point cmd can be already freed !! */
1727 goto out_dec_cmd_count;
1729 TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
1730 "expected_sn %d, continuing", expected_sn);
1731 tgt_dev->def_cmd_count--;
1732 spin_unlock_bh(&tgt_dev->sn_lock);
1738 rc = scst_do_send_to_midlev(cmd);
1739 if (rc == SCST_EXEC_NEED_THREAD) {
1740 TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
1741 "thread context, rescheduling");
1742 res = SCST_CMD_STATE_RES_NEED_THREAD;
1743 scst_dec_on_dev_cmd(cmd);
1747 goto out_dec_cmd_count;
1749 BUG_ON(rc != SCST_EXEC_COMPLETED);
1750 /* !! At this point cmd can be already freed !! */
1752 expected_sn = __scst_inc_expected_sn(tgt_dev);
1753 cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
1756 if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))
1761 if (dev->scsi_dev != NULL)
1762 generic_unplug_device(dev->scsi_dev->request_queue);
1765 scst_dec_cmd_count();
1766 /* !! At this point sess, dev and tgt_dev can be already freed !! */
1769 TRACE_EXIT_HRES(res);
1773 static struct scst_cmd *scst_create_prepare_internal_cmd(
1774 struct scst_cmd *orig_cmd, int bufsize)
1776 struct scst_cmd *res;
1777 int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1781 res = scst_alloc_cmd(gfp_mask);
1782 if (unlikely(res == NULL)) {
1786 res->sess = orig_cmd->sess;
1787 res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
1788 res->atomic = scst_cmd_atomic(orig_cmd);
1790 res->tgtt = orig_cmd->tgtt;
1791 res->tgt = orig_cmd->tgt;
1792 res->dev = orig_cmd->dev;
1793 res->tgt_dev = orig_cmd->tgt_dev;
1794 res->lun = orig_cmd->lun;
1795 res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1796 res->data_direction = SCST_DATA_UNKNOWN;
1797 res->orig_cmd = orig_cmd;
1799 res->bufflen = bufsize;
1801 if (scst_alloc_space(res) != 0)
1802 PRINT_ERROR("Unable to create buffer (size %d) for "
1803 "internal cmd", bufsize);
1808 TRACE_EXIT_HRES((unsigned long)res);
1812 scst_destroy_cmd(res);
1817 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1821 if (cmd->bufflen > 0)
1822 scst_release_space(cmd);
1823 scst_destroy_cmd(cmd);
1829 static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1831 int res = SCST_CMD_STATE_RES_RESTART;
1832 #define sbuf_size 252
1833 static const unsigned char request_sense[6] =
1834 { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1835 struct scst_cmd *rs_cmd;
1839 rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1843 memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1844 rs_cmd->cdb_len = sizeof(request_sense);
1845 rs_cmd->data_direction = SCST_DATA_READ;
1847 spin_lock_irq(&scst_list_lock);
1848 list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
1849 spin_unlock_irq(&scst_list_lock);
1852 TRACE_EXIT_RES(res);
1861 static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)
1863 struct scst_cmd *orig_cmd = cmd->orig_cmd;
1871 len = scst_get_buf_first(cmd, &buf);
1873 if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
1874 (!SCST_NO_SENSE(buf)))
1876 TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
1878 memcpy(orig_cmd->sense_buffer, buf,
1879 (sizeof(orig_cmd->sense_buffer) > len) ?
1880 len : sizeof(orig_cmd->sense_buffer));
1882 PRINT_ERROR_PR("%s", "Unable to get the sense via "
1883 "REQUEST SENSE, returning HARDWARE ERROR");
1884 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1887 scst_put_buf(cmd, buf);
1889 scst_free_internal_cmd(cmd);
1891 TRACE_EXIT_HRES((unsigned long)orig_cmd);
1895 static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
1902 if (cmd->cdb[0] == REQUEST_SENSE) {
1904 cmd = scst_complete_request_sense(cmd);
1905 } else if (scst_check_auto_sense(cmd)) {
1906 PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
1907 "without sense data (opcode 0x%x), issuing "
1908 "REQUEST SENSE", cmd->cdb[0]);
1909 rc = scst_prepare_request_sense(cmd);
1915 PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
1916 "returning HARDWARE ERROR");
1917 scst_set_cmd_error(cmd,
1918 SCST_LOAD_SENSE(scst_sense_hardw_error));
1922 type = cmd->dev->handler->type;
1923 if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
1924 cmd->tgt_dev->acg_dev->rd_only_flag &&
1925 (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
1931 length = scst_get_buf_first(cmd, &address);
1934 if (length > 2 && cmd->cdb[0] == MODE_SENSE)
1935 address[2] |= 0x80; /* Write Protect*/
1936 else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
1937 address[3] |= 0x80; /* Write Protect*/
1938 scst_put_buf(cmd, address);
1942 * Check and clear NormACA option for the device, if necessary,
1943 * since we don't support ACA
1945 if ((cmd->cdb[0] == INQUIRY) &&
1946 !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
1947 (cmd->resp_data_len > SCST_INQ_BYTE3))
1952 /* ToDo: all pages ?? */
1953 buflen = scst_get_buf_first(cmd, &buffer);
1955 if (buflen > SCST_INQ_BYTE3) {
1957 if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
1958 PRINT_INFO_PR("NormACA set for device: "
1959 "lun=%Ld, type 0x%02x",
1960 (uint64_t)cmd->lun, buffer[0]);
1963 buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
1965 scst_set_cmd_error(cmd,
1966 SCST_LOAD_SENSE(scst_sense_hardw_error));
1968 scst_put_buf(cmd, buffer);
1972 if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
1973 if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
1974 &cmd->tgt_dev->tgt_dev_flags)) {
1975 struct scst_tgt_dev *tgt_dev_tmp;
1976 TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
1977 (uint64_t)cmd->lun, cmd->masked_status);
1978 TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
1979 sizeof(cmd->sense_buffer));
1980 /* Clearing the reservation */
1981 list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
1982 dev_tgt_dev_list_entry) {
1983 clear_bit(SCST_TGT_DEV_RESERVED,
1984 &tgt_dev_tmp->tgt_dev_flags);
1986 cmd->dev->dev_reserved = 0;
1988 scst_unblock_dev(cmd->dev);
1991 if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
1992 (cmd->cdb[0] == MODE_SELECT_10) ||
1993 (cmd->cdb[0] == LOG_SELECT)))
1995 if (cmd->status == 0) {
1996 TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
1997 "setting the SELECT UA (lun=%Ld)",
1998 (uint64_t)cmd->lun);
1999 spin_lock_bh(&scst_temp_UA_lock);
2000 if (cmd->cdb[0] == LOG_SELECT) {
2001 scst_set_sense(scst_temp_UA,
2002 sizeof(scst_temp_UA),
2003 UNIT_ATTENTION, 0x2a, 0x02);
2005 scst_set_sense(scst_temp_UA,
2006 sizeof(scst_temp_UA),
2007 UNIT_ATTENTION, 0x2a, 0x01);
2009 scst_process_UA(cmd->dev, cmd, scst_temp_UA,
2010 sizeof(scst_temp_UA), 1);
2011 spin_unlock_bh(&scst_temp_UA_lock);
2013 scst_unblock_dev(cmd->dev);
2017 TRACE_EXIT_RES(res);
2021 static int scst_dev_done(struct scst_cmd *cmd)
2023 int res = SCST_CMD_STATE_RES_CONT_SAME;
2025 int atomic = scst_cmd_atomic(cmd);
2029 if (atomic && !cmd->dev->handler->dev_done_atomic &&
2030 cmd->dev->handler->dev_done)
2032 TRACE_DBG("Dev handler %s dev_done() can not be "
2033 "called in atomic context, rescheduling to the thread",
2034 cmd->dev->handler->name);
2035 res = SCST_CMD_STATE_RES_NEED_THREAD;
2039 if (scst_done_cmd_check(cmd, &res))
2042 state = SCST_CMD_STATE_XMIT_RESP;
2043 if (likely(!scst_is_cmd_local(cmd)) &&
2044 likely(cmd->dev->handler->dev_done != NULL))
2047 TRACE_DBG("Calling dev handler %s dev_done(%p)",
2048 cmd->dev->handler->name, cmd);
2049 rc = cmd->dev->handler->dev_done(cmd);
2050 TRACE_DBG("Dev handler %s dev_done() returned %d",
2051 cmd->dev->handler->name, rc);
2052 if (rc != SCST_CMD_STATE_DEFAULT)
2057 case SCST_CMD_STATE_REINIT:
2059 res = SCST_CMD_STATE_RES_RESTART;
2062 case SCST_CMD_STATE_DEV_PARSE:
2063 case SCST_CMD_STATE_PREPARE_SPACE:
2064 case SCST_CMD_STATE_RDY_TO_XFER:
2065 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2066 case SCST_CMD_STATE_DEV_DONE:
2067 case SCST_CMD_STATE_XMIT_RESP:
2068 case SCST_CMD_STATE_FINISHED:
2070 res = SCST_CMD_STATE_RES_CONT_SAME;
2073 case SCST_CMD_STATE_NEED_THREAD_CTX:
2074 TRACE_DBG("Dev handler %s dev_done() requested "
2075 "thread context, rescheduling",
2076 cmd->dev->handler->name);
2077 res = SCST_CMD_STATE_RES_NEED_THREAD;
2082 PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2083 "invalid cmd state %d",
2084 cmd->dev->handler->name, state);
2086 PRINT_ERROR_PR("Dev handler %s dev_done() returned "
2087 "error %d", cmd->dev->handler->name,
2090 scst_set_cmd_error(cmd,
2091 SCST_LOAD_SENSE(scst_sense_hardw_error));
2092 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2093 res = SCST_CMD_STATE_RES_CONT_SAME;
2098 TRACE_EXIT_HRES(res);
2102 static int scst_xmit_response(struct scst_cmd *cmd)
2105 int atomic = scst_cmd_atomic(cmd);
2110 * Check here also in order to avoid unnecessary delays of other
2113 if (unlikely(cmd->sent_to_midlev == 0) &&
2114 (cmd->tgt_dev != NULL))
2116 TRACE(TRACE_SCSI_SERIALIZING,
2117 "cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
2118 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
2119 cmd->sent_to_midlev = 1;
2122 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2123 TRACE_DBG("%s", "xmit_response() can not be "
2124 "called in atomic context, rescheduling to the thread");
2125 res = SCST_CMD_STATE_RES_NEED_THREAD;
2129 set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
2130 smp_mb__after_set_bit();
2132 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2133 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
2134 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
2135 "(tag %d), returning TASK ABORTED", cmd, cmd->tag);
2136 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
2140 if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
2141 TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
2143 cmd->state = SCST_CMD_STATE_FINISHED;
2144 res = SCST_CMD_STATE_RES_CONT_SAME;
2149 if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2150 if (atomic && !cmd->tgtt->xmit_response_atomic) {
2151 TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
2152 res = SCST_CMD_STATE_RES_NEED_THREAD;
2155 TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
2157 schedule_timeout_uninterruptible(HZ);
2162 int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);
2164 res = SCST_CMD_STATE_RES_CONT_NEXT;
2165 cmd->state = SCST_CMD_STATE_XMIT_WAIT;
2167 TRACE_DBG("Calling xmit_response(%p)", cmd);
2169 #if defined(DEBUG) || defined(TRACING)
2172 struct scatterlist *sg = cmd->sg;
2173 TRACE(TRACE_SEND_BOT,
2174 "Xmitting %d S/G(s) at %p sg[0].page at %p",
2175 cmd->sg_cnt, sg, (void*)sg[0].page);
2176 for(i = 0; i < cmd->sg_cnt; ++i) {
2177 TRACE_BUFF_FLAG(TRACE_SEND_BOT,
2178 "Xmitting sg:", page_address(sg[i].page),
2185 if (((scst_random() % 100) == 77))
2186 rc = SCST_TGT_RES_QUEUE_FULL;
2189 rc = cmd->tgtt->xmit_response(cmd);
2190 TRACE_DBG("xmit_response() returned %d", rc);
2192 if (likely(rc == SCST_TGT_RES_SUCCESS))
2195 /* Restore the previous state */
2196 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2199 case SCST_TGT_RES_QUEUE_FULL:
2201 if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
2207 case SCST_TGT_RES_NEED_THREAD_CTX:
2209 TRACE_DBG("Target driver %s xmit_response() "
2210 "requested thread context, rescheduling",
2212 res = SCST_CMD_STATE_RES_NEED_THREAD;
2223 /* Caution: cmd can be already dead here */
2224 TRACE_EXIT_HRES(res);
2228 if (rc == SCST_TGT_RES_FATAL_ERROR) {
2229 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2230 "fatal error", cmd->tgtt->name);
2232 PRINT_ERROR_PR("Target driver %s xmit_response() returned "
2233 "invalid value %d", cmd->tgtt->name, rc);
2235 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
2236 cmd->state = SCST_CMD_STATE_FINISHED;
2237 res = SCST_CMD_STATE_RES_CONT_SAME;
2241 static int scst_finish_cmd(struct scst_cmd *cmd)
2247 if (cmd->mem_checked) {
2248 spin_lock_bh(&scst_cmd_mem_lock);
2249 scst_cur_cmd_mem -= cmd->bufflen;
2250 spin_unlock_bh(&scst_cmd_mem_lock);
2253 spin_lock_irq(&scst_list_lock);
2255 TRACE_DBG("Deleting cmd %p from cmd list", cmd);
2256 list_del(&cmd->cmd_list_entry);
2259 scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);
2261 if (likely(cmd->tgt_dev != NULL))
2262 cmd->tgt_dev->cmd_count--;
2264 cmd->sess->sess_cmd_count--;
2266 list_del(&cmd->search_cmd_list_entry);
2268 spin_unlock_irq(&scst_list_lock);
2272 res = SCST_CMD_STATE_RES_CONT_NEXT;
2274 TRACE_EXIT_HRES(res);
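/*
 * A hypothetical sketch of the target-driver side of the handshake below:
 * xmit_response() puts the response on the wire and, from its transmit
 * completion handler, reports back with scst_tgt_cmd_done().  The name
 * my_tx_complete() is a placeholder:
 *
 *	static void my_tx_complete(struct scst_cmd *cmd)
 *	{
 *		scst_tgt_cmd_done(cmd);
 *	}
 */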
2278 void scst_tgt_cmd_done(struct scst_cmd *cmd)
2281 unsigned long flags;
2286 BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
2289 context = SCST_CONTEXT_TASKLET;
2291 context = scst_get_context();
2293 TRACE_DBG("Context: %d", context);
2294 cmd->non_atomic_only = 0;
2295 cmd->state = SCST_CMD_STATE_FINISHED;
2298 case SCST_CONTEXT_DIRECT:
2299 case SCST_CONTEXT_DIRECT_ATOMIC:
2301 scst_check_retries(cmd->tgt, 0);
2302 res = __scst_process_active_cmd(cmd, context, 0);
2303 BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
2306 case SCST_CONTEXT_TASKLET:
2308 spin_lock_irqsave(&scst_list_lock, flags);
2309 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2310 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2311 spin_unlock_irqrestore(&scst_list_lock, flags);
2312 scst_schedule_tasklet();
2313 scst_check_retries(cmd->tgt, 0);
2327 * Returns 0 on success, > 0 when we need to wait for unblock,
2328 * < 0 if there is no device (lun) or device type handler.
2329 * Called under scst_list_lock and IRQs disabled
2331 static int scst_translate_lun(struct scst_cmd *cmd)
2333 struct scst_tgt_dev *tgt_dev = NULL;
2338 scst_inc_cmd_count();
2340 if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2342 TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
2343 (uint64_t)cmd->lun);
2344 list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
2345 sess_tgt_dev_list_entry)
2347 if (tgt_dev->acg_dev->lun == cmd->lun) {
2348 TRACE_DBG("tgt_dev %p found", tgt_dev);
2350 if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
2351 PRINT_INFO_PR("Dev handler for device "
2352 "%Ld is NULL, the device will not be "
2353 "visible remotely", (uint64_t)cmd->lun);
2357 if (cmd->state == SCST_CMD_STATE_REINIT) {
2358 cmd->tgt_dev_saved->cmd_count--;
2359 TRACE(TRACE_SCSI_SERIALIZING,
2360 "SCST_CMD_STATE_REINIT: "
2361 "incrementing expected_sn on tgt_dev_saved %p",
2362 cmd->tgt_dev_saved);
2363 scst_inc_expected_sn_unblock(
2364 cmd->tgt_dev_saved, cmd, 1);
2366 cmd->tgt_dev = tgt_dev;
2367 tgt_dev->cmd_count++;
2368 cmd->dev = tgt_dev->acg_dev->dev;
2370 /* ToDo: cmd->queue_type */
2372 /* scst_list_lock is enough to protect that */
2373 cmd->sn = tgt_dev->next_sn;
2376 TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
2377 "cmd->sn: %d", cmd->sn);
2384 TRACE_DBG("tgt_dev for lun %Ld not found, command to "
2385 "nonexistent LU?", (uint64_t)cmd->lun);
2386 scst_dec_cmd_count();
2389 if (!cmd->sess->waiting) {
2390 TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2392 list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
2393 &scst_dev_wait_sess_list);
2394 cmd->sess->waiting = 1;
2396 scst_dec_cmd_count();
2400 TRACE_EXIT_RES(res);
2404 /* Called under scst_list_lock and IRQs disabled */
2405 static int scst_process_init_cmd(struct scst_cmd *cmd)
2411 res = scst_translate_lun(cmd);
2412 if (likely(res == 0)) {
2413 cmd->state = SCST_CMD_STATE_DEV_PARSE;
2414 if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS) {
2415 TRACE(TRACE_RETRY, "Too many pending commands in "
2416 "session, returning BUSY to initiator \"%s\"",
2417 (cmd->sess->initiator_name[0] == '\0') ?
2418 "Anonymous" : cmd->sess->initiator_name);
2420 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2422 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2423 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2424 } else if (res < 0) {
2425 TRACE_DBG("Finishing cmd %p", cmd);
2426 scst_set_cmd_error(cmd,
2427 SCST_LOAD_SENSE(scst_sense_lun_not_supported));
2428 cmd->state = SCST_CMD_STATE_XMIT_RESP;
2429 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2430 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2433 TRACE_EXIT_RES(res);
2438 * Called under scst_list_lock and IRQs disabled
2439 * We don't drop it anywhere inside, because command execution
2440 * has to be serialized, i.e. commands must be executed in the order
2441 * of their arrival, and we set this order inside scst_translate_lun().
2443 static int scst_do_job_init(struct list_head *init_cmd_list)
2449 if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
2450 while (!list_empty(init_cmd_list)) {
2451 struct scst_cmd *cmd = list_entry(init_cmd_list->next,
2454 res = scst_process_init_cmd(cmd);
2460 TRACE_EXIT_RES(res);
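/*
 * The state machine in __scst_process_active_cmd() below normally walks a
 * command through DEV_PARSE -> PREPARE_SPACE -> RDY_TO_XFER (WRITEs only)
 * -> SEND_TO_MIDLEV -> DEV_DONE -> XMIT_RESP -> FINISHED; any step may
 * instead return RES_NEED_THREAD to reschedule into thread context or
 * RES_RESTART to push the command back onto the init list.
 */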
2464 /* Called with no locks held */
2465 static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,
2474 cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
2475 SCST_CONTEXT_DIRECT_ATOMIC);
2476 cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;
2479 switch (cmd->state) {
2480 case SCST_CMD_STATE_DEV_PARSE:
2481 res = scst_parse_cmd(cmd);
2484 case SCST_CMD_STATE_PREPARE_SPACE:
2485 res = scst_prepare_space(cmd);
2488 case SCST_CMD_STATE_RDY_TO_XFER:
2489 res = scst_rdy_to_xfer(cmd);
2492 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2493 res = scst_send_to_midlev(cmd);
2494 /* !! At this point cmd, sess & tgt_dev can be already freed !! */
2497 case SCST_CMD_STATE_DEV_DONE:
2498 res = scst_dev_done(cmd);
2501 case SCST_CMD_STATE_XMIT_RESP:
2502 res = scst_xmit_response(cmd);
2505 case SCST_CMD_STATE_FINISHED:
2506 res = scst_finish_cmd(cmd);
2510 PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
2513 res = SCST_CMD_STATE_RES_CONT_NEXT;
2516 } while(res == SCST_CMD_STATE_RES_CONT_SAME);
2518 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2520 spin_lock_irq(&scst_list_lock);
2521 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2522 spin_lock_irq(&scst_list_lock);
2524 switch (cmd->state) {
2525 case SCST_CMD_STATE_DEV_PARSE:
2526 case SCST_CMD_STATE_PREPARE_SPACE:
2527 case SCST_CMD_STATE_RDY_TO_XFER:
2528 case SCST_CMD_STATE_SEND_TO_MIDLEV:
2529 case SCST_CMD_STATE_DEV_DONE:
2530 case SCST_CMD_STATE_XMIT_RESP:
2531 case SCST_CMD_STATE_FINISHED:
2532 TRACE_DBG("Moving cmd %p to active cmd list", cmd);
2533 list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2536 /* states that are not valid at this point */
2537 case SCST_CMD_STATE_DEFAULT:
2538 case SCST_CMD_STATE_NEED_THREAD_CTX:
2539 PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
2540 "useful list (left on scst cmd list)", cmd,
2542 spin_unlock_irq(&scst_list_lock);
2544 spin_lock_irq(&scst_list_lock);
2550 cmd->non_atomic_only = 1;
2552 spin_unlock_irq(&scst_list_lock);
2553 wake_up(&scst_list_waitQ);
2554 } else if (res == SCST_CMD_STATE_RES_RESTART) {
2555 if (cmd->state == SCST_CMD_STATE_REINIT) {
2556 spin_lock_irq(&scst_list_lock);
2557 TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
2558 list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
2560 spin_unlock_irq(&scst_list_lock);
2566 TRACE_EXIT_RES(res);
2570 /* Called under scst_list_lock and IRQs disabled */
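/* Walks the active cmd list and processes each cmd in the given context; in atomic contexts, cmds marked non_atomic_only are skipped */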
2571 static void scst_do_job_active(struct list_head *active_cmd_list, int context)
2574 struct scst_cmd *cmd;
2575 int atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
2576 SCST_CONTEXT_DIRECT_ATOMIC);
2580 tm_dbg_check_released_cmds();
2583 list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
2584 if (atomic && cmd->non_atomic_only) {
2585 TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
2588 if (tm_dbg_check_cmd(cmd) != 0)
2590 res = scst_process_active_cmd(cmd, context, NULL, 1);
2591 if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
2593 } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
2595 } else if (res == SCST_CMD_STATE_RES_RESTART) {
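/* Wakeup condition for scst_cmd_thread(): active cmds are pending, init cmds are pending (unless activity is suspended), a shutdown is in progress, or threads are being shut down */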
2605 static inline int test_cmd_lists(void)
2607 int res = !list_empty(&scst_active_cmd_list) ||
2608 (!list_empty(&scst_init_cmd_list) &&
2609 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
2610 test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
2611 unlikely(scst_shut_threads_count > 0) ||
2612 tm_dbg_is_release();
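/* Main command processing kernel thread: sleeps on scst_list_waitQ and processes the init and active cmd lists in thread (blocking) context */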
2616 int scst_cmd_thread(void *arg)
2618 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2624 n = scst_thread_num++;
2626 daemonize("scsi_tgt%d", n);
2627 recalc_sigpending();
2628 set_user_nice(current, 10);
2629 current->flags |= PF_NOFREEZE;
2631 spin_lock_irq(&scst_list_lock);
2634 init_waitqueue_entry(&wait, current);
2636 if (!test_cmd_lists()) {
2637 add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2639 set_current_state(TASK_INTERRUPTIBLE);
2640 if (test_cmd_lists())
2642 spin_unlock_irq(&scst_list_lock);
2644 spin_lock_irq(&scst_list_lock);
2646 set_current_state(TASK_RUNNING);
2647 remove_wait_queue(&scst_list_waitQ, &wait);
2650 scst_do_job_init(&scst_init_cmd_list);
2651 scst_do_job_active(&scst_active_cmd_list,
2652 SCST_CONTEXT_THREAD|SCST_PROCESSIBLE_ENV);
2654 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2655 list_empty(&scst_cmd_list) &&
2656 list_empty(&scst_active_cmd_list) &&
2657 list_empty(&scst_init_cmd_list)) {
2661 if (unlikely(scst_shut_threads_count > 0)) {
2662 scst_shut_threads_count--;
2666 spin_unlock_irq(&scst_list_lock);
2668 if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2669 smp_mb__after_atomic_dec();
2670 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2671 up(scst_shutdown_mutex);
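/* Tasklet handler: processes the init and active cmd lists in atomic (SCST_CONTEXT_DIRECT_ATOMIC) context */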
2678 void scst_cmd_tasklet(long p)
2682 spin_lock_irq(&scst_list_lock);
2684 scst_do_job_init(&scst_init_cmd_list);
2685 scst_do_job_active(&scst_active_cmd_list,
2686 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2688 spin_unlock_irq(&scst_list_lock);
2695 * Returns 0 on success, < 0 if there is no device handler or
2696 * > 0 if SCST_FLAG_SUSPENDED is set.
2698 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2700 struct scst_tgt_dev *tgt_dev = NULL;
2705 TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2706 (uint64_t)mcmd->lun);
2708 spin_lock_irq(&scst_list_lock);
2709 scst_inc_cmd_count();
2710 if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2711 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2712 sess_tgt_dev_list_entry)
2714 if (tgt_dev->acg_dev->lun == mcmd->lun) {
2715 TRACE_DBG("tgt_dev %p found", tgt_dev);
2716 mcmd->mcmd_tgt_dev = tgt_dev;
2721 if (mcmd->mcmd_tgt_dev == NULL)
2722 scst_dec_cmd_count();
2724 if (!mcmd->sess->waiting) {
2725 TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2727 list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2728 &scst_dev_wait_sess_list);
2729 mcmd->sess->waiting = 1;
2731 scst_dec_cmd_count();
2734 spin_unlock_irq(&scst_list_lock);
2736 TRACE_EXIT_HRES(res);
2740 /* Called under scst_list_lock and IRQ off */
2741 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2742 struct scst_mgmt_cmd *mcmd)
2746 TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2747 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2748 mcmd->cmd_wait_count);
2750 cmd->mgmt_cmnd = NULL;
2753 mcmd->completed_cmd_count++;
2755 mcmd->cmd_wait_count--;
2756 if (mcmd->cmd_wait_count > 0) {
2757 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2758 mcmd->cmd_wait_count);
2762 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2764 if (mcmd->completed) {
2765 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2767 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2768 &scst_active_mgmt_cmd_list);
2771 wake_up(&scst_mgmt_cmd_list_waitQ);
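/* Invokes the device handler's task_mgmt_fn(), if any; when set_status is true, a completed result is translated into mcmd->status */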
2778 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2779 struct scst_tgt_dev *tgt_dev, int set_status)
2781 int res = SCST_DEV_TM_NOT_COMPLETED;
2782 if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2783 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2784 tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2785 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd,
2787 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2788 tgt_dev->acg_dev->dev->handler->name, res);
2789 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2790 mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ?
2791 SCST_MGMT_STATUS_SUCCESS :
2792 SCST_MGMT_STATUS_FAILED;
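/* Strict TM functions (ABORT TASK, ABORT TASK SET, CLEAR TASK SET) must report failure if any affected command had already completed, see scst_mgmt_cmd_send_done() */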
2798 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2801 case SCST_ABORT_TASK:
2802 case SCST_ABORT_TASK_SET:
2803 case SCST_CLEAR_TASK_SET:
2811 * Called under scst_list_lock and IRQ off (to protect cmd
2812 * from being destroyed).
2813 * If the cmd is being executed or xmitted, ABORT completion is deferred until the cmd finishes
2815 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2816 int other_ini, int call_dev_task_mgmt_fn)
2820 TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2823 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2824 smp_mb__after_set_bit();
2826 set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2827 smp_mb__after_set_bit();
2829 if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2830 scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2834 if (cmd->tgtt->tm_sync_reply)
2837 if (scst_is_strict_mgmt_fn(mcmd->fn))
2838 defer = test_bit(SCST_CMD_EXECUTING,
2841 defer = test_bit(SCST_CMD_XMITTING,
2847 * Delay the response until the command finishes in
2848 * order to guarantee that "no further responses from
2849 * the task are sent to the SCSI initiator port" after
2850 * the response to the TM function is sent (SAM)
2852 TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2853 "xmitted (state %d), deferring ABORT...", cmd,
2854 cmd->tag, cmd->state);
2856 if (cmd->mgmt_cmnd) {
2857 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2858 "has non-NULL mgmt_cmnd %p!!! Current "
2859 "mcmd %p\n", cmd, cmd->tag, cmd->state,
2860 cmd->mgmt_cmnd, mcmd);
2863 BUG_ON(cmd->mgmt_cmnd);
2864 mcmd->cmd_wait_count++;
2865 cmd->mgmt_cmnd = mcmd;
2869 tm_dbg_release_cmd(cmd);
2875 /* Called under scst_list_lock and IRQ off */
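/* Decides the next mcmd state: if aborted cmds are still outstanding (cmd_wait_count != 0) the mcmd waits in EXECUTING, otherwise it is marked completed and goes to DONE */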
2876 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2879 if (mcmd->cmd_wait_count != 0) {
2880 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2881 "wait", mcmd->cmd_wait_count);
2882 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2885 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2888 mcmd->completed = 1;
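/* Moves aborted cmds from the per-device blocked lists back onto the active cmd list so they can be completed, then wakes up the cmd threads */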
2892 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2894 struct scst_device *dev;
2899 if (!scst_mutex_held)
2902 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2903 struct scst_cmd *cmd, *tcmd;
2904 spin_lock_bh(&dev->dev_lock);
2905 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2906 blocked_cmd_list_entry) {
2907 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2908 list_del(&cmd->blocked_cmd_list_entry);
2909 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2910 "to active cmd list", cmd);
2911 spin_lock_irq(&scst_list_lock);
2912 list_move_tail(&cmd->cmd_list_entry,
2913 &scst_active_cmd_list);
2914 spin_unlock_irq(&scst_list_lock);
2918 spin_unlock_bh(&dev->dev_lock);
2921 if (!scst_mutex_held)
2925 wake_up(&scst_list_waitQ);
2931 /* Aborts all outstanding commands of the given tgt_dev found on the session's search list */
2932 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2933 struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2935 struct scst_cmd *cmd;
2936 struct scst_session *sess = tgt_dev->sess;
2940 spin_lock_irq(&scst_list_lock);
2942 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2943 list_for_each_entry(cmd, &sess->search_cmd_list,
2944 search_cmd_list_entry) {
2945 if ((cmd->tgt_dev == NULL) &&
2946 (cmd->lun == tgt_dev->acg_dev->lun))
2948 if (cmd->tgt_dev != tgt_dev)
2950 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2952 spin_unlock_irq(&scst_list_lock);
2954 scst_unblock_aborted_cmds(scst_mutex_held);
2960 /* Returns 0 if the command processing should be continued, <0 otherwise */
2961 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2964 struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2965 struct scst_device *dev = tgt_dev->acg_dev->dev;
2967 TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2968 tgt_dev->acg_dev->lun, mcmd);
2970 spin_lock_bh(&dev->dev_lock);
2971 __scst_block_dev(dev);
2972 spin_unlock_bh(&dev->dev_lock);
2974 __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2975 scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2977 res = scst_set_mcmd_next_state(mcmd);
2979 TRACE_EXIT_RES(res);
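/* Serializes TM processing: while another TM function is active, a new mgmt cmd is parked on the delayed mgmt cmd list until the active one finishes */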
2983 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2986 * No need for special protection for SCST_FLAG_TM_ACTIVE, since
2987 * we can be called from only one thread.
2989 if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2990 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2993 spin_lock_irq(&scst_list_lock);
2994 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2995 &scst_delayed_mgmt_cmd_list);
2997 spin_unlock_irq(&scst_list_lock);
3000 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3005 /* Returns 0 if the command processing should be continued,
3006 * >0, if it should be requeued, <0 otherwise */
3007 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
3013 res = scst_check_delay_mgmt_cmd(mcmd, 1);
3017 if (mcmd->fn == SCST_ABORT_TASK) {
3018 struct scst_session *sess = mcmd->sess;
3019 struct scst_cmd *cmd;
3021 spin_lock_irq(&scst_list_lock);
3022 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
3024 TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
3025 "tag %d not found", mcmd->tag);
3026 mcmd->status = SCST_MGMT_STATUS_FAILED;
3027 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3029 TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
3030 "aborting it", cmd, mcmd->tag, cmd->sn);
3031 mcmd->cmd_to_abort = cmd;
3032 scst_abort_cmd(cmd, mcmd, 0, 1);
3033 res = scst_set_mcmd_next_state(mcmd);
3034 mcmd->cmd_to_abort = NULL; /* just in case */
3036 spin_unlock_irq(&scst_list_lock);
3039 rc = scst_mgmt_translate_lun(mcmd);
3041 PRINT_ERROR_PR("Corresponding device for lun %Ld not "
3042 "found", (uint64_t)mcmd->lun);
3043 mcmd->status = SCST_MGMT_STATUS_FAILED;
3044 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
3046 mcmd->state = SCST_MGMT_CMD_STATE_READY;
3052 TRACE_EXIT_RES(res);
3056 /* Returns 0 if the command processing should be continued, <0 otherwise */
3057 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
3060 struct scst_device *dev, *d;
3061 struct scst_tgt_dev *tgt_dev;
3063 LIST_HEAD(host_devs);
3067 TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
3068 mcmd, mcmd->sess->sess_cmd_count);
3072 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3075 spin_lock_bh(&dev->dev_lock);
3076 __scst_block_dev(dev);
3077 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3078 spin_unlock_bh(&dev->dev_lock);
3082 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3083 dev_tgt_dev_list_entry)
3086 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3087 if (rc == SCST_DEV_TM_NOT_COMPLETED)
3089 else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3090 mcmd->status = SCST_MGMT_STATUS_FAILED;
3095 if (dev->scsi_dev == NULL)
3098 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
3099 if (dev->scsi_dev->host->host_no ==
3100 d->scsi_dev->host->host_no)
3107 list_add_tail(&dev->reset_dev_list_entry, &host_devs);
3111 * We assume here that for all commands already sent to the devices,
3112 * the completion callbacks will be called on/after scsi_reset_provider().
3115 list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
3116 /* dev->scsi_dev must be non-NULL here */
3117 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3118 dev->scsi_dev->host->host_no);
3119 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
3120 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
3121 dev->scsi_dev->host->host_no,
3122 (rc == SUCCESS) ? "SUCCESS" : "FAILED");
3123 if (rc != SUCCESS) {
3124 /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
3125 mcmd->status = SCST_MGMT_STATUS_FAILED;
3129 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3130 if (dev->scsi_dev != NULL)
3131 dev->scsi_dev->was_reset = 0;
3136 spin_lock_irq(&scst_list_lock);
3137 tm_dbg_task_mgmt("TARGET RESET");
3138 res = scst_set_mcmd_next_state(mcmd);
3139 spin_unlock_irq(&scst_list_lock);
3141 TRACE_EXIT_RES(res);
3145 /* Returns 0 if the command processing should be continued, <0 otherwise */
3146 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3149 struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3150 struct scst_device *dev = tgt_dev->acg_dev->dev;
3154 TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3157 spin_lock_bh(&dev->dev_lock);
3158 __scst_block_dev(dev);
3159 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3160 spin_unlock_bh(&dev->dev_lock);
3162 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3163 if (rc != SCST_DEV_TM_NOT_COMPLETED)
3166 if (dev->scsi_dev != NULL) {
3167 TRACE(TRACE_MGMT, "Resetting host %d bus ",
3168 dev->scsi_dev->host->host_no);
3169 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3171 mcmd->status = SCST_MGMT_STATUS_FAILED;
3172 dev->scsi_dev->was_reset = 0;
3176 spin_lock_irq(&scst_list_lock);
3177 tm_dbg_task_mgmt("LUN RESET");
3178 res = scst_set_mcmd_next_state(mcmd);
3179 spin_unlock_irq(&scst_list_lock);
3181 TRACE_EXIT_RES(res);
3185 /* Returns 0 if the command processing should be continued, <0 otherwise */
3186 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3190 struct scst_session *sess = mcmd->sess;
3191 struct scst_tgt_dev *tgt_dev;
3196 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3199 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3204 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3205 sess_tgt_dev_list_entry)
3207 struct scst_device *dev = tgt_dev->acg_dev->dev;
3210 spin_lock_bh(&dev->dev_lock);
3211 __scst_block_dev(dev);
3212 spin_unlock_bh(&dev->dev_lock);
3214 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3215 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3216 mcmd->status = SCST_MGMT_STATUS_FAILED;
3218 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3220 scst_reset_tgt_dev(tgt_dev, 1);
3224 spin_lock_irq(&scst_list_lock);
3225 res = scst_set_mcmd_next_state(mcmd);
3226 spin_unlock_irq(&scst_list_lock);
3228 TRACE_EXIT_RES(res);
3232 /* Returns 0 if the command processing should be continued, <0 otherwise */
3233 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3237 struct scst_tgt *tgt = mcmd->sess->tgt;
3238 struct scst_session *sess;
3239 struct scst_device *dev;
3240 struct scst_tgt_dev *tgt_dev;
3245 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3248 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3254 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3255 spin_lock_bh(&dev->dev_lock);
3256 __scst_block_dev(dev);
3257 spin_unlock_bh(&dev->dev_lock);
3260 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3261 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3262 sess_tgt_dev_list_entry)
3266 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3267 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3268 mcmd->status = SCST_MGMT_STATUS_FAILED;
3270 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3272 scst_reset_tgt_dev(tgt_dev, 1);
3278 spin_lock_irq(&scst_list_lock);
3279 res = scst_set_mcmd_next_state(mcmd);
3280 spin_unlock_irq(&scst_list_lock);
3282 TRACE_EXIT_RES(res);
3286 /* Returns 0 if the command processing should be continued, <0 otherwise */
3287 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3293 mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3296 case SCST_ABORT_TASK_SET:
3297 case SCST_CLEAR_TASK_SET:
3298 res = scst_abort_task_set(mcmd);
3301 case SCST_LUN_RESET:
3302 res = scst_lun_reset(mcmd);
3305 case SCST_TARGET_RESET:
3306 res = scst_target_reset(mcmd);
3309 case SCST_ABORT_ALL_TASKS_SESS:
3310 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3313 case SCST_NEXUS_LOSS_SESS:
3314 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3317 case SCST_ABORT_ALL_TASKS:
3318 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3321 case SCST_NEXUS_LOSS:
3322 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3325 case SCST_CLEAR_ACA:
3326 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3327 /* Nothing to do (yet) */
3331 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3332 mcmd->status = SCST_MGMT_STATUS_FAILED;
3336 TRACE_EXIT_RES(res);
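/* Final stage of a mgmt cmd: restarts a delayed TM cmd (if any), reports the result via the target's task_mgmt_fn_done() and unblocks the devices affected by the TM function */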
3340 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3342 struct scst_device *dev;
3343 struct scst_tgt_dev *tgt_dev;
3347 clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3348 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3349 struct scst_mgmt_cmd *m;
3350 spin_lock_irq(&scst_list_lock);
3351 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3352 mgmt_cmd_list_entry);
3353 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3355 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3356 spin_unlock_irq(&scst_list_lock);
3359 mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3360 if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3361 mcmd->status = SCST_MGMT_STATUS_FAILED;
3363 if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3364 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3365 mcmd->sess->tgt->tgtt->name);
3366 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3367 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn_done() returned",
3368 mcmd->sess->tgt->tgtt->name);
3372 case SCST_ABORT_TASK_SET:
3373 case SCST_CLEAR_TASK_SET:
3374 case SCST_LUN_RESET:
3375 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3378 case SCST_TARGET_RESET:
3379 case SCST_ABORT_ALL_TASKS:
3380 case SCST_NEXUS_LOSS:
3382 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3383 scst_unblock_dev(dev);
3388 case SCST_NEXUS_LOSS_SESS:
3389 case SCST_ABORT_ALL_TASKS_SESS:
3391 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3392 sess_tgt_dev_list_entry) {
3393 scst_unblock_dev(tgt_dev->acg_dev->dev);
3398 case SCST_CLEAR_ACA:
3403 mcmd->tgt_priv = NULL;
3409 /* Returns >0, if cmd should be requeued */
3410 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3416 TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3419 switch (mcmd->state) {
3420 case SCST_MGMT_CMD_STATE_INIT:
3421 res = scst_mgmt_cmd_init(mcmd);
3426 case SCST_MGMT_CMD_STATE_READY:
3427 if (scst_mgmt_cmd_exec(mcmd))
3431 case SCST_MGMT_CMD_STATE_DONE:
3432 scst_mgmt_cmd_send_done(mcmd);
3435 case SCST_MGMT_CMD_STATE_FINISHED:
3439 case SCST_MGMT_CMD_STATE_EXECUTING:
3444 PRINT_ERROR_PR("Unknown state %d of management command",
3452 TRACE_EXIT_RES(res);
3456 scst_free_mgmt_cmd(mcmd, 1);
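/* Wakeup condition for scst_mgmt_cmd_thread(): active mgmt cmds are pending (and activity is not suspended) or a shutdown is in progress */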
3460 static inline int test_mgmt_cmd_list(void)
3462 int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3463 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3464 test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
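/* Management command processing kernel thread: sleeps on scst_mgmt_cmd_list_waitQ and processes mgmt cmds from the active mgmt cmd list, requeueing them when requested */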
3468 int scst_mgmt_cmd_thread(void *arg)
3470 struct scst_mgmt_cmd *mcmd;
3474 daemonize("scsi_tgt_mc");
3475 recalc_sigpending();
3476 current->flags |= PF_NOFREEZE;
3478 spin_lock_irq(&scst_list_lock);
3481 init_waitqueue_entry(&wait, current);
3483 if (!test_mgmt_cmd_list()) {
3484 add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3487 set_current_state(TASK_INTERRUPTIBLE);
3488 if (test_mgmt_cmd_list())
3490 spin_unlock_irq(&scst_list_lock);
3492 spin_lock_irq(&scst_list_lock);
3494 set_current_state(TASK_RUNNING);
3495 remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3498 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3499 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3502 mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3503 typeof(*mcmd), mgmt_cmd_list_entry);
3504 TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3506 list_move_tail(&mcmd->mgmt_cmd_list_entry,
3507 &scst_mgmt_cmd_list);
3508 spin_unlock_irq(&scst_list_lock);
3509 rc = scst_process_mgmt_cmd(mcmd);
3510 spin_lock_irq(&scst_list_lock);
3512 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3513 "of active mgmt cmd list", mcmd);
3514 list_move(&mcmd->mgmt_cmd_list_entry,
3515 &scst_active_mgmt_cmd_list);
3519 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3520 list_empty(&scst_active_mgmt_cmd_list))
3525 spin_unlock_irq(&scst_list_lock);
3527 if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3528 smp_mb__after_atomic_dec();
3529 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3530 up(scst_shutdown_mutex);
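/* Allocates and initializes a new mgmt cmd; the target template must provide task_mgmt_fn_done() */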
3537 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3538 *sess, int fn, int atomic, void *tgt_priv)
3540 struct scst_mgmt_cmd *mcmd = NULL;
3544 if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3545 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3546 "(target %s)", sess->tgt->tgtt->name);
3550 mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3556 mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3557 mcmd->tgt_priv = tgt_priv;
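/* Queues the new mgmt cmd on the active mgmt cmd list (or defers it while the session is still initializing) and wakes up the mgmt cmd thread */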
3564 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3565 struct scst_mgmt_cmd *mcmd)
3567 unsigned long flags;
3572 scst_sess_get(sess);
3574 spin_lock_irqsave(&scst_list_lock, flags);
3576 sess->sess_cmd_count++;
3579 if (unlikely(sess->shutting_down)) {
3580 PRINT_ERROR_PR("%s",
3581 "New mgmt cmd while shutting down the session");
3586 if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3587 switch(sess->init_phase) {
3588 case SCST_SESS_IPH_INITING:
3589 TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
3591 list_add_tail(&mcmd->mgmt_cmd_list_entry,
3592 &sess->init_deferred_mcmd_list);
3594 case SCST_SESS_IPH_SUCCESS:
3596 case SCST_SESS_IPH_FAILED:
3604 TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3605 list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3607 spin_unlock_irqrestore(&scst_list_lock, flags);
3609 wake_up(&scst_mgmt_cmd_list_waitQ);
3616 spin_unlock_irqrestore(&scst_list_lock, flags);
3621 * Must not be called in parallel with scst_unregister_session() for the
3624 int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
3625 const uint8_t *lun, int lun_len, int atomic,
3629 struct scst_mgmt_cmd *mcmd = NULL;