 * Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/unistd.h>
#include <asm/string.h>

#include "scst_debug.h"
#include "scst_priv.h"
static int scst_do_job_init(struct list_head *init_cmd_list);

static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,

static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
	struct scst_mgmt_cmd *mcmd);

/* scst_list_lock assumed to be held */
static inline int scst_process_active_cmd(struct scst_cmd *cmd, int context,
	unsigned long *pflags, int left_locked)

	TRACE_DBG("Moving cmd %p to cmd list", cmd);
	list_move_tail(&cmd->cmd_list_entry, &scst_cmd_list);

	/* This is an inline func., so unneeded code will be optimized out */
	spin_unlock_irqrestore(&scst_list_lock, *pflags);
	spin_unlock_irq(&scst_list_lock);

	res = __scst_process_active_cmd(cmd, context, left_locked);
static inline void scst_schedule_tasklet(void)

	struct tasklet_struct *t = &scst_tasklets[smp_processor_id()];

#if 0 /* Looks like #else is better for performance */
	if ((!test_bit(TASKLET_STATE_SCHED, &t->state)) || (scst_num_cpus == 1))
	/*
	 * We suppose that other CPU(s) are rather idle, so we
	 * ask one of them to help
	 */
	TRACE_DBG("Tasklet on CPU %d busy, waking up the thread "
		"instead", smp_processor_id());
	wake_up(&scst_list_waitQ);
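
/*
 * Typical command submission flow in a target driver, shown as a sketch
 * only (driver-local names such as "tag" are hypothetical, and
 * scst_cmd_set_tag() is assumed to be the usual tag accessor):
 *
 *	cmd = scst_rx_cmd(sess, lun, lun_len, cdb, cdb_len, atomic);
 *	if (cmd == NULL)
 *		... report BUSY or similar to the initiator ...
 *	scst_cmd_set_tag(cmd, tag);
 *	scst_cmd_init_done(cmd, SCST_CONTEXT_DIRECT);
 */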
 * Must not be called in parallel with scst_unregister_session() for the
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
	const uint8_t *lun, int lun_len,
	const uint8_t *cdb, int cdb_len, int atomic)

	if (unlikely(sess->shutting_down)) {
		PRINT_ERROR_PR("%s", "New cmd while shutting down the session");

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);

	cmd->tgt = sess->tgt;
	cmd->tgtt = sess->tgt->tgtt;
	cmd->state = SCST_CMD_STATE_INIT_WAIT;

	 * For both a wrong LUN and a wrong CDB, defer the error reporting
	 * to scst_cmd_init_done()
	cmd->lun = scst_unpack_lun(lun, lun_len);

	if (cdb_len <= MAX_COMMAND_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;

	TRACE_DBG("cmd %p, sess %p", cmd, sess);
void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)

	unsigned long flags = 0;
	struct scst_session *sess = cmd->sess;

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%d, lun=%Ld, CDB len=%d", cmd->tag,
		(uint64_t)cmd->lun, cmd->cdb_len);
	TRACE_BUFF_FLAG(TRACE_SCSI|TRACE_RECV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);

	if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
			(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
		pref_context = SCST_CONTEXT_TASKLET;

	spin_lock_irqsave(&scst_list_lock, flags);

	/* Do it here to save us a lock or an atomic elsewhere */
	sess->sess_cmd_count++;

	list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);
	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		switch (sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			goto out_unlock_flags;
		case SCST_SESS_IPH_FAILED:
			cmd->state = SCST_CMD_STATE_XMIT_RESP;
			TRACE_DBG("Adding cmd %p to active cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&scst_active_cmd_list);

	if (unlikely(cmd->lun == (lun_t)-1)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);

	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		list_add_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);

	cmd->state = SCST_CMD_STATE_INIT;

	TRACE_DBG("Moving cmd %p to init cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
	switch (pref_context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		res = scst_do_job_init(&scst_init_cmd_list);
		goto out_unlock_flags;

	case SCST_CONTEXT_THREAD:
		goto out_thread_unlock_flags;

	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet();
		goto out_unlock_flags;

		PRINT_ERROR_PR("Context %x is undefined, using thread one",
		goto out_thread_unlock_flags;

	switch (pref_context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, pref_context, &flags, 0);

	case SCST_CONTEXT_THREAD:
		goto out_thread_unlock_flags;

	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet();
		goto out_unlock_flags;

		PRINT_ERROR_PR("Context %x is undefined, using thread one",
		goto out_thread_unlock_flags;

	spin_unlock_irqrestore(&scst_list_lock, flags);

out_thread_unlock_flags:
	cmd->non_atomic_only = 1;
	spin_unlock_irqrestore(&scst_list_lock, flags);
	wake_up(&scst_list_waitQ);
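
/*
 * Summary of the preferred contexts handled above (derived from this
 * file's usage, not a normative list): SCST_CONTEXT_DIRECT and
 * SCST_CONTEXT_DIRECT_ATOMIC process the command on the caller's stack
 * (the latter without sleeping), SCST_CONTEXT_THREAD defers it to an
 * scst_cmd_thread() worker, and SCST_CONTEXT_TASKLET schedules the
 * per-CPU tasklet. Callers in IRQ context must not use the DIRECT
 * variants, as enforced at the top of scst_cmd_init_done().
 */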
static int scst_parse_cmd(struct scst_cmd *cmd)

	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_tgt_dev *tgt_dev_saved = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;
	struct scst_info_cdb cdb_info;
	int atomic = scst_cmd_atomic(cmd);

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_DBG("ABORTED set, returning ABORTED "

	if (atomic && !dev->handler->parse_atomic) {
		TRACE_DBG("Dev handler %s parse() cannot be "
			"called in atomic context, rescheduling to the thread",
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from the CDB.
	 * Additionally, not all transports support supplying the expected

	if (unlikely(scst_get_cdb_info(cmd->cdb, dev->handler->type,

		PRINT_INFO_PR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);

		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_MINOR, "Using initiator supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;
			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore (most probably) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR_PR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_opcode));

			PRINT_ERROR_PR("Unknown opcode 0x%02x for %s, and "
				"target %s did not supply expected values. "
				"Returning INVALID OPCODE.", cmd->cdb[0],
				dev->handler->name, cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_invalid_opcode));

	TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
		"set %s), transfer_len=%d (expected len %d), flags=%d",
		cdb_info.op_name, cdb_info.direction,
		cmd->expected_data_direction,
		scst_cmd_is_expected_set(cmd) ? "yes" : "no",
		cdb_info.transfer_len, cmd->expected_transfer_len,

	/* Restore (most probably) lost CDB length */
	cmd->cdb_len = cdb_info.cdb_len;

	cmd->data_direction = cdb_info.direction;
	if (!(cdb_info.flags & SCST_UNKNOWN_LENGTH))
		cmd->bufflen = cdb_info.transfer_len;
	/* else cmd->bufflen remains 0, as it was initialized */
	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR_PR("NACA bit in the CDB control byte is not "
			"supported (opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR_PR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	if (likely(!scst_is_cmd_local(cmd))) {
		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd, &cdb_info);
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;

	if (state == SCST_CMD_STATE_DEFAULT)
		state = SCST_CMD_STATE_PREPARE_SPACE;
	state = SCST_CMD_STATE_PREPARE_SPACE;

	if (state != SCST_CMD_STATE_NEED_THREAD_CTX) {
		if (((cmd->data_direction == SCST_DATA_UNKNOWN) &&
		     (state != SCST_CMD_STATE_DEV_PARSE)) ||
		    ((cmd->bufflen != 0) &&
		     (cmd->data_direction == SCST_DATA_NONE)) ||
		    ((cmd->bufflen == 0) &&
		     (cmd->data_direction != SCST_DATA_NONE)) ||
		    ((cmd->bufflen != 0) && (cmd->sg == NULL) &&
		     (state > SCST_CMD_STATE_PREPARE_SPACE)))
			PRINT_ERROR_PR("Dev handler %s parse() returned "
				"invalid cmd data_direction %d, "
				"bufflen %zd or state %d (opcode 0x%x)",
				cmd->data_direction, cmd->bufflen,

	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		res = SCST_CMD_STATE_RES_CONT_SAME;

	case SCST_CMD_STATE_REINIT:
		cmd->tgt_dev_saved = tgt_dev_saved;
		res = SCST_CMD_STATE_RES_RESTART;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s parse() requested thread "
			"context, rescheduling", dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

		PRINT_ERROR_PR("Dev handler %s parse() returned "
			"invalid cmd state %d (opcode %d)",
			dev->handler->name, state, cmd->cdb[0]);
		PRINT_ERROR_PR("Dev handler %s parse() returned "
			"error %d (opcode %d)", dev->handler->name,

	if ((cmd->resp_data_len == -1) && set_dir) {
		if (cmd->data_direction == SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
		cmd->resp_data_len = 0;

	TRACE_EXIT_HRES(res);

	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;

	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;
static int scst_prepare_space(struct scst_cmd *cmd)

	int r, res = SCST_CMD_STATE_RES_CONT_SAME;

	if (cmd->data_direction == SCST_DATA_NONE) {
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	if (cmd->data_buf_tgt_alloc) {
		TRACE_MEM("%s", "Custom tgt data buf allocation requested");
		r = cmd->tgtt->alloc_data_buf(cmd);
		cmd->data_buf_alloced = (r == 0);
	r = scst_alloc_space(cmd);

	if (scst_cmd_atomic(cmd)) {
		TRACE_MEM("%s", "Atomic memory allocation failed, "
			"rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	switch (cmd->data_direction) {
	case SCST_DATA_WRITE:
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
	cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	TRACE_EXIT_HRES(res);

	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %zd), sending BUSY status", cmd->bufflen);
	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)

	struct scst_tgt *tgt = cmd->sess->tgt;

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	TRACE(TRACE_RETRY, "TGT QUEUE FULL: incrementing retry_cmds %d",
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		TRACE(TRACE_RETRY, "TGT QUEUE FULL, direct retry "
			"(finished_cmds=%d, tgt->finished_cmds=%d, "
			"retry_cmds=%d)", finished_cmds,
			atomic_read(&tgt->finished_cmds), tgt->retry_cmds);

	TRACE(TRACE_RETRY, "Moving cmd %p to retry cmd list", cmd);
	/* IRQ already off */
	spin_lock(&scst_list_lock);
	list_move_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
	spin_unlock(&scst_list_lock);

	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;

	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
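
/*
 * Note on the QUEUE FULL path above: a cmd parked on tgt->retry_cmd_list
 * is revived either by scst_check_retries() once another command
 * finishes (tgt->finished_cmds advances) or by the retry timer armed
 * above after SCST_TGT_RETRY_TIMEOUT. The 0/non-0 return convention is
 * inferred from the callers in scst_rdy_to_xfer() and
 * scst_xmit_response().
 */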
static int scst_rdy_to_xfer(struct scst_cmd *cmd)

	int atomic = scst_cmd_atomic(cmd);

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
		TRACE_DBG("ABORTED set, returning ABORTED for "

	if (atomic && !cmd->tgtt->rdy_to_xfer_atomic) {
		TRACE_DBG("%s", "rdy_to_xfer() cannot be "
			"called in atomic context, rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

	res = SCST_CMD_STATE_RES_CONT_NEXT;
	cmd->state = SCST_CMD_STATE_DATA_WAIT;

	TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
	if ((scst_random() % 100) == 75)
		rc = SCST_TGT_RES_QUEUE_FULL;
	rc = cmd->tgtt->rdy_to_xfer(cmd);
	TRACE_DBG("rdy_to_xfer() returned %d", rc);

	if (likely(rc == SCST_TGT_RES_SUCCESS))

	/* Restore the previous state */
	cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

	case SCST_TGT_RES_QUEUE_FULL:
		if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)

	case SCST_TGT_RES_NEED_THREAD_CTX:
		TRACE_DBG("Target driver %s "
			"rdy_to_xfer() requested thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	TRACE_EXIT_HRES(res);

	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned "
			"fatal error", cmd->tgtt->name);
		PRINT_ERROR_PR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", cmd->tgtt->name, rc);

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	cmd->state = SCST_CMD_STATE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)

	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "tag=%d status=%#x", scst_cmd_get_tag(cmd), status);
	cmd->non_atomic_only = 0;

	if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
			(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
		PRINT_ERROR_PR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
		pref_context = SCST_CONTEXT_TASKLET;

	case SCST_RX_STATUS_SUCCESS:
		cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	case SCST_RX_STATUS_ERROR_SENSE_SET:
		cmd->state = SCST_CMD_STATE_DEV_DONE;

	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);

	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_DEV_DONE;

		PRINT_ERROR_PR("scst_rx_data() received unknown status %x",

	switch (pref_context) {
	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_check_retries(cmd->tgt, 0);
		__scst_process_active_cmd(cmd, pref_context, 0);

		PRINT_ERROR_PR("Context %x is undefined, using thread one",
	case SCST_CONTEXT_THREAD:
		spin_lock_irqsave(&scst_list_lock, flags);
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
		cmd->non_atomic_only = 1;
		spin_unlock_irqrestore(&scst_list_lock, flags);
		scst_check_retries(cmd->tgt, 1);
		wake_up(&scst_list_waitQ);

	case SCST_CONTEXT_TASKLET:
		spin_lock_irqsave(&scst_list_lock, flags);
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
		spin_unlock_irqrestore(&scst_list_lock, flags);
		scst_schedule_tasklet();
		scst_check_retries(cmd->tgt, 0);
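
/*
 * Data-out handshake, as a sketch: for a WRITE-direction command SCST
 * calls the target driver's rdy_to_xfer() (see scst_rdy_to_xfer()
 * above); the driver fetches the data from the initiator and then
 * reports the outcome with
 *
 *	scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_DIRECT);
 *
 * or with one of the SCST_RX_STATUS_ERROR* codes handled above.
 */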
/* No locks supposed to be held */
static void scst_check_sense(struct scst_cmd *cmd, struct scsi_request *req,

	struct scst_device *dev = cmd->dev;
	int dbl_ua_possible, ua_sent = 0;

	/* If we had an internal bus reset behind us, set the command error UA */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(cmd->host_status == DID_RESET) &&
	    scst_is_ua_command(cmd))
		TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
			dev->scsi_dev->was_reset, cmd->host_status);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_reset_UA));

		/* It looks like it is safe to clear was_reset here */
		dev->scsi_dev->was_reset = 0;
	sense_valid = SCST_SENSE_VALID(req->sr_sense_buffer);
	memcpy(cmd->sense_buffer, req->sr_sense_buffer,
		sizeof(cmd->sense_buffer));
	sense_valid = SCST_SENSE_VALID(cmd->sense_buffer);

	dbl_ua_possible = dev->dev_double_ua_possible;
	TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
	if (unlikely(dbl_ua_possible)) {
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread dev_double_ua_possible */
		dbl_ua_possible = dev->dev_double_ua_possible;
		ua_sent = dev->dev_reset_ua_sent;
		spin_unlock_bh(&dev->dev_lock);

	TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
		sizeof(cmd->sense_buffer));
	/* Check Unit Attention Sense Key */
	if (cmd->sense_buffer[2] == UNIT_ATTENTION) {
		if (cmd->sense_buffer[12] == SCST_SENSE_ASC_UA_RESET) {
			TRACE(TRACE_MGMT, "%s",
				"Double UA detected");
			TRACE(TRACE_MGMT, "Retrying cmd %p "
				"(tag %d)", cmd, cmd->tag);
			cmd->masked_status = 0;
			cmd->host_status = DID_OK;
			cmd->driver_status = 0;
			memset(cmd->sense_buffer, 0,
				sizeof(cmd->sense_buffer));
			*next_state = SCST_CMD_STATE_SEND_TO_MIDLEV;
			 * Dev is still blocked by this cmd, so
			 * it's OK to clear SCST_DEV_SERIALIZED
			dev->dev_double_ua_possible = 0;
			dev->dev_serialized = 0;
			dev->dev_reset_ua_sent = 0;
			dev->dev_reset_ua_sent = 1;
	if (cmd->ua_ignore == 0) {
		if (unlikely(dbl_ua_possible)) {
			__scst_process_UA(dev, cmd,
				sizeof(cmd->sense_buffer), 0);
			scst_process_UA(dev, cmd,
				sizeof(cmd->sense_buffer), 0);

	if (unlikely(dbl_ua_possible)) {
		if (ua_sent && scst_is_ua_command(cmd)) {
			TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
			dev->dev_double_ua_possible = 0;
			dev->dev_serialized = 0;
			dev->dev_reset_ua_sent = 0;
		spin_unlock_bh(&dev->dev_lock);

	spin_unlock_bh(&dev->dev_lock);
static int scst_check_auto_sense(struct scst_cmd *cmd)

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    (!SCST_SENSE_VALID(cmd->sense_buffer) ||
	     SCST_NO_SENSE(cmd->sense_buffer)))
		TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
			"cmd->status=%x, cmd->masked_status=%x, "
			"cmd->msg_status=%x, cmd->host_status=%x, "
			"cmd->driver_status=%x", cmd->status, cmd->masked_status,
			cmd->msg_status, cmd->host_status, cmd->driver_status);
	} else if (unlikely(cmd->host_status)) {
		if ((cmd->host_status == DID_REQUEUE) ||
		    (cmd->host_status == DID_IMM_RETRY) ||
		    (cmd->host_status == DID_SOFT_ERROR)) {
			TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
				"received, returning HARDWARE ERROR instead",
			scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
static void scst_do_cmd_done(struct scst_cmd *cmd,
	struct scsi_request *req, int *next_state)

	cmd->status = req->sr_result & 0xff;
	cmd->masked_status = status_byte(req->sr_result);
	cmd->msg_status = msg_byte(req->sr_result);
	cmd->host_status = host_byte(req->sr_result);
	cmd->driver_status = driver_byte(req->sr_result);
	TRACE(TRACE_SCSI, "req->sr_result=%x, cmd->status=%x, "
		"cmd->masked_status=%x, cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x", req->sr_result, cmd->status,
		cmd->masked_status, cmd->msg_status, cmd->host_status,

	scst_check_sense(cmd, req, next_state);

	cmd->bufflen = req->sr_bufflen; //??

	/* Clear out request structure */
	req->sr_sglist_len = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown request blk */
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
	struct scsi_request **req)

	struct scst_cmd *cmd = NULL;

	if (scsi_cmd && (*req = scsi_cmd->sc_request))
		cmd = (struct scst_cmd *)(*req)->upper_private_data;

	PRINT_ERROR_PR("%s", "Request with NULL cmd");
	scsi_release_request(*req);
static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)

	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;

	 * We don't use scsi_cmd->resid, because:
	 * 1. Many low level initiator drivers don't use (set) this field
	 * 2. We determine the command's buffer size directly from the CDB,
	 *    so scsi_cmd->resid is not relevant for us, and target drivers
	 *    should know the residual, if necessary, by comparing expected
	 *    and actual transfer sizes.

	cmd = scst_get_cmd(scsi_cmd, &req);

	scst_dec_on_dev_cmd(cmd);

	type = cmd->dev->handler->type;
	if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
	    cmd->tgt_dev->acg_dev->rd_only_flag &&
	    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
	     type == TYPE_TAPE)) {
		length = scst_get_buf_first(cmd, &address);
		TRACE_DBG("length %d", length);
		if (unlikely(length <= 0)) {
		if (length > 2 && cmd->cdb[0] == MODE_SENSE) {
			address[2] |= 0x80; /* Write Protect */
		else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10) {
			address[3] |= 0x80; /* Write Protect */
		scst_put_buf(cmd, address);

	next_state = SCST_CMD_STATE_DEV_DONE;
	scst_do_cmd_done(cmd, req, &next_state);

	scst_release_request(cmd);

	cmd->state = next_state;
	cmd->non_atomic_only = 0;

	__scst_process_active_cmd(cmd, scst_get_context(), 0);
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)

	scst_dec_on_dev_cmd(cmd);

	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_DEV_DONE;

	if (next_state == SCST_CMD_STATE_DEV_DONE) {
#if defined(DEBUG) || defined(TRACING)
		struct scatterlist *sg = cmd->sg;
		TRACE(TRACE_RECV_TOP,
			"Exec'd %d S/G(s) at %p sg[0].page at %p",
			cmd->sg_cnt, sg, (void *)sg[0].page);
		for (i = 0; i < cmd->sg_cnt; ++i) {
			TRACE_BUFF_FLAG(TRACE_RECV_TOP,
				"Exec'd sg:", page_address(sg[i].page),

	if ((next_state != SCST_CMD_STATE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED))
		PRINT_ERROR_PR("scst_cmd_done_local() received invalid cmd "
			"state %d (opcode %d)", next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		next_state = SCST_CMD_STATE_DEV_DONE;

	if (scst_check_auto_sense(cmd)) {
		PRINT_ERROR_PR("CHECK_CONDITION, but no valid sense for "
			"opcode %d", cmd->cdb[0]);

	scst_check_sense(cmd, NULL, &next_state);

	cmd->state = next_state;
	cmd->non_atomic_only = 0;

	__scst_process_active_cmd(cmd, scst_get_context(), 0);
static int scst_report_luns_local(struct scst_cmd *cmd)

	int res = SCST_EXEC_COMPLETED;
	struct scst_tgt_dev *tgt_dev = NULL;

	cmd->masked_status = 0;
	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	/* ToDo: use full SG buffer, not only the first entry */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size <= 0))

	if (buffer_size < 16) {

	memset(buffer, 0, buffer_size);

	/* sess->sess_tgt_dev_list is protected by suspended activity */
	list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
			sess_tgt_dev_list_entry)
		if (8 + 8 * dev_cnt + 2 <= buffer_size) {
			buffer[8 + 8 * dev_cnt] = (tgt_dev->acg_dev->lun >> 8) & 0xff;
			buffer[8 + 8 * dev_cnt + 1] = tgt_dev->acg_dev->lun & 0xff;
		/* Temporary, until the ToDo above is done */
		if (dev_cnt >= ((PAGE_SIZE >> 3) - 2))

	/* Set the response header */
	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;

	scst_put_buf(cmd, buffer);

	if (buffer_size > dev_cnt)
		scst_set_resp_data_len(cmd, dev_cnt);

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	TRACE_EXIT_RES(res);

	scst_put_buf(cmd, buffer);

	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
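
/*
 * For reference, the REPORT LUNS parameter data built above follows the
 * SPC layout: bytes 0-3 hold the LUN list length in bytes, bytes 4-7
 * are reserved, and each LUN occupies an 8-byte entry starting at byte
 * 8, which is why the loop writes at offsets 8 + 8 * dev_cnt.
 */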
static int scst_pre_select(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;

	scst_block_dev(cmd->dev, 1);
	/* Device will be unblocked in scst_done_cmd_check() */

	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
		int rc = scst_set_pending_UA(cmd);
		res = SCST_EXEC_COMPLETED;
		/* Report the result */
		scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	TRACE_EXIT_RES(res);

static inline void scst_report_reserved(struct scst_cmd *cmd)

	scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
static int scst_reserve_local(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;

	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR_PR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%Ld)", (uint64_t)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
		res = SCST_EXEC_COMPLETED;

	scst_block_dev(dev, 1);
	/* Device will be unblocked in scst_done_cmd_check() */

	spin_lock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		scst_report_reserved(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		res = SCST_EXEC_COMPLETED;

	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry)
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
	dev->dev_reserved = 1;

	spin_unlock_bh(&dev->dev_lock);

	TRACE_EXIT_RES(res);
static int scst_release_local(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;

	scst_block_dev(dev, 1);
	TRACE_MGMT_DBG("Blocking cmd %p (tag %d)", cmd, cmd->tag);

	spin_lock_bh(&dev->dev_lock);

	 * The device could have been RELEASED behind us, if the RESERVING
	 * session was closed (see scst_free_tgt_dev()), but this actually
	 * doesn't matter, so take the lock and don't retest the
	 * DEV_RESERVED bits again
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;
		cmd->masked_status = 0;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;

		list_for_each_entry(tgt_dev_tmp,
				&dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry)
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		dev->dev_reserved = 0;

	spin_unlock_bh(&dev->dev_lock);

	if (res == SCST_EXEC_COMPLETED) {
		/* Report the result */
		scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	TRACE_EXIT_RES(res);
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
static int scst_pre_exec(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	/* Reserve check before Unit Attention */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) &&
	    (cmd->cdb[0] != INQUIRY) &&
	    (cmd->cdb[0] != REPORT_LUNS) &&
	    (cmd->cdb[0] != RELEASE) &&
	    (cmd->cdb[0] != RELEASE_10) &&
	    (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
	    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
	    (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE))
		scst_report_reserved(cmd);
		res = SCST_EXEC_COMPLETED;

	/* If we had an internal bus reset, set the command error unit attention */
	if ((cmd->dev->scsi_dev != NULL) &&
	    unlikely(cmd->dev->scsi_dev->was_reset) &&
	    scst_is_ua_command(cmd))
		struct scst_device *dev = cmd->dev;

		/* Prevent more than one cmd from being triggered by was_reset */
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread was_reset */
		if (dev->scsi_dev->was_reset) {
			TRACE(TRACE_MGMT, "was_reset is %d", 1);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_reset_UA));
			/* It looks like it is safe to clear was_reset here */
			dev->scsi_dev->was_reset = 0;

		spin_unlock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags) &&
	    scst_is_ua_command(cmd))
		rc = scst_set_pending_UA(cmd);

	/* Check READ_ONLY device status */
	if (tgt_dev->acg_dev->rd_only_flag &&
	    (cmd->cdb[0] == WRITE_6 || /* ToDo: full list of the modify cmds */
	     cmd->cdb[0] == WRITE_10 ||
	     cmd->cdb[0] == WRITE_12 ||
	     cmd->cdb[0] == WRITE_16 ||
	     cmd->cdb[0] == WRITE_VERIFY ||
	     cmd->cdb[0] == WRITE_VERIFY_12 ||
	     cmd->cdb[0] == WRITE_VERIFY_16 ||
	     (cmd->dev->handler->type == TYPE_TAPE &&
	      (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS))))
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));

	TRACE_EXIT_RES(res);

	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
static inline int scst_local_exec(struct scst_cmd *cmd)

	int res = SCST_EXEC_NOT_COMPLETED;

	 * When adding new commands here, don't forget to update
	 * scst_is_cmd_local() in scsi_tgt.h, if necessary

	switch (cmd->cdb[0]) {
	case MODE_SELECT_10:
		res = scst_pre_select(cmd);
		res = scst_reserve_local(cmd);
		res = scst_release_local(cmd);
		res = scst_report_luns_local(cmd);

	TRACE_EXIT_RES(res);
static int scst_do_send_to_midlev(struct scst_cmd *cmd)

	int rc = SCST_EXEC_NOT_COMPLETED;

	cmd->sent_to_midlev = 1;
	cmd->state = SCST_CMD_STATE_EXECUTING;
	cmd->scst_cmd_done = scst_cmd_done_local;

	set_bit(SCST_CMD_EXECUTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_DBG("ABORTED set, aborting cmd %p", cmd);

	rc = scst_pre_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
		else if (rc == SCST_EXEC_NEED_THREAD)

	rc = scst_local_exec(cmd);
	/* !! At this point cmd, sess & tgt_dev can be already freed !! */
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
		else if (rc == SCST_EXEC_NEED_THREAD)

	if (cmd->dev->handler->exec) {
		struct scst_device *dev = cmd->dev;
		TRACE_DBG("Calling dev handler %s exec(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SEND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
		cmd->scst_cmd_done = scst_cmd_done_local;
		rc = dev->handler->exec(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		TRACE_DBG("Dev handler %s exec() returned %d",
			dev->handler->name, rc);
		if (rc != SCST_EXEC_NOT_COMPLETED) {
			if (rc == SCST_EXEC_COMPLETED)
			else if (rc == SCST_EXEC_NEED_THREAD)

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);

	if (unlikely(cmd->dev->scsi_dev == NULL)) {
		PRINT_ERROR_PR("Command for virtual device must be "
			"processed by device handler (lun %Ld)!",
			(uint64_t)cmd->lun);

	if (scst_alloc_request(cmd) != 0) {
		PRINT_INFO_PR("%s", "Unable to allocate request, "
			"sending BUSY status");

	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,

	rc = SCST_EXEC_COMPLETED;

	/* Restore the state */
	cmd->sent_to_midlev = 0;
	cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	PRINT_ERROR_PR("Dev handler %s exec() or scst_local_exec() returned "
		"invalid code %d", cmd->dev->handler->name, rc);

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	cmd->state = SCST_CMD_STATE_DEV_DONE;
	rc = SCST_EXEC_COMPLETED;
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	rc = SCST_EXEC_COMPLETED;
	/* Report the result. The cmd is not completed */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
static int scst_send_to_midlev(struct scst_cmd *cmd)

	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;
	int atomic = scst_cmd_atomic(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	if (atomic && dev->handler->exec && !dev->handler->exec_atomic) {
		TRACE_DBG("Dev handler %s exec() cannot be "
			"called in atomic context, rescheduling to the thread",
			dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))

	scst_inc_cmd_count(); /* protect dev & tgt_dev */

	if (unlikely(cmd->internal) || unlikely(cmd->retry)) {
		rc = scst_do_send_to_midlev(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */
		if (rc == SCST_EXEC_NEED_THREAD) {
			TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
				"thread context, rescheduling");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			scst_dec_on_dev_cmd(cmd);
			goto out_dec_cmd_count;
		BUG_ON(rc != SCST_EXEC_COMPLETED);

	expected_sn = tgt_dev->expected_sn;
	if (cmd->sn != expected_sn) {
		spin_lock_bh(&tgt_dev->sn_lock);
		tgt_dev->def_cmd_count++;
		barrier(); /* to reread expected_sn */
		expected_sn = tgt_dev->expected_sn;
		if (cmd->sn != expected_sn) {
			scst_dec_on_dev_cmd(cmd);
			TRACE(TRACE_SCSI_SERIALIZING, "Delaying cmd %p (sn=%d, "
				"expected_sn=%d)", cmd, cmd->sn, expected_sn);
			list_add_tail(&cmd->sn_cmd_list_entry,
				&tgt_dev->deferred_cmd_list);
			spin_unlock_bh(&tgt_dev->sn_lock);
			/* !! At this point cmd can be already freed !! */
			goto out_dec_cmd_count;
		TRACE(TRACE_SCSI_SERIALIZING, "Somebody incremented "
			"expected_sn %d, continuing", expected_sn);
		tgt_dev->def_cmd_count--;
		spin_unlock_bh(&tgt_dev->sn_lock);

	rc = scst_do_send_to_midlev(cmd);
	if (rc == SCST_EXEC_NEED_THREAD) {
		TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
			"thread context, rescheduling");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		scst_dec_on_dev_cmd(cmd);
		goto out_dec_cmd_count;
	BUG_ON(rc != SCST_EXEC_COMPLETED);
	/* !! At this point cmd can be already freed !! */

	expected_sn = __scst_inc_expected_sn(tgt_dev);
	cmd = scst_check_deferred_commands(tgt_dev, expected_sn);
	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))

	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);

	scst_dec_cmd_count();
	/* !! At this point sess, dev and tgt_dev can be already freed !! */

	TRACE_EXIT_HRES(res);
static struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)

	struct scst_cmd *res;
	int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

	res = scst_alloc_cmd(gfp_mask);
	if (unlikely(res == NULL)) {

	res->sess = orig_cmd->sess;
	res->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
	res->atomic = scst_cmd_atomic(orig_cmd);
	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;

	res->bufflen = bufsize;
	if (scst_alloc_space(res) != 0)
		PRINT_ERROR("Unable to create buffer (size %d) for "
			"internal cmd", bufsize);

	TRACE_EXIT_HRES((unsigned long)res);

	scst_destroy_cmd(res);

static void scst_free_internal_cmd(struct scst_cmd *cmd)

	if (cmd->bufflen > 0)
		scst_release_space(cmd);
	scst_destroy_cmd(cmd);
static int scst_prepare_request_sense(struct scst_cmd *orig_cmd)

	int res = SCST_CMD_STATE_RES_RESTART;
#define sbuf_size 252
	static const unsigned char request_sense[6] =
		{ REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
	struct scst_cmd *rs_cmd;

	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);

	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;

	spin_lock_irq(&scst_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &scst_active_cmd_list);
	spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_RES(res);

static struct scst_cmd *scst_complete_request_sense(struct scst_cmd *cmd)

	struct scst_cmd *orig_cmd = cmd->orig_cmd;

	len = scst_get_buf_first(cmd, &buf);

	if ((cmd->status == 0) && SCST_SENSE_VALID(buf) &&
	    (!SCST_NO_SENSE(buf)))
		TRACE_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
		memcpy(orig_cmd->sense_buffer, buf,
			(sizeof(orig_cmd->sense_buffer) > len) ?
				len : sizeof(orig_cmd->sense_buffer));
		PRINT_ERROR_PR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	scst_put_buf(cmd, buf);

	scst_free_internal_cmd(cmd);

	TRACE_EXIT_HRES((unsigned long)orig_cmd);
static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)

	if (cmd->cdb[0] == REQUEST_SENSE) {
		cmd = scst_complete_request_sense(cmd);
	} else if (scst_check_auto_sense(cmd)) {
		PRINT_INFO_PR("Command finished with CHECK CONDITION, but "
			"without sense data (opcode 0x%x), issuing "
			"REQUEST SENSE", cmd->cdb[0]);
		rc = scst_prepare_request_sense(cmd);
		PRINT_ERROR_PR("%s", "Unable to issue REQUEST SENSE, "
			"returning HARDWARE ERROR");
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));

	type = cmd->dev->handler->type;
	if ((cmd->cdb[0] == MODE_SENSE || cmd->cdb[0] == MODE_SENSE_10) &&
	    cmd->tgt_dev->acg_dev->rd_only_flag &&
	    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
		length = scst_get_buf_first(cmd, &address);
		if (length > 2 && cmd->cdb[0] == MODE_SENSE)
			address[2] |= 0x80; /* Write Protect */
		else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
			address[3] |= 0x80; /* Write Protect */
		scst_put_buf(cmd, address);

	 * Check and clear NormACA option for the device, if necessary,
	 * since we don't support ACA
	if ((cmd->cdb[0] == INQUIRY) &&
	    !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
	    (cmd->resp_data_len > SCST_INQ_BYTE3))
		/* ToDo: all pages ?? */
		buflen = scst_get_buf_first(cmd, &buffer);
		if (buflen > SCST_INQ_BYTE3) {
			if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
				PRINT_INFO_PR("NormACA set for device: "
					"lun=%Ld, type 0x%02x",
					(uint64_t)cmd->lun, buffer[0]);
			buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
		scst_put_buf(cmd, buffer);

	if (unlikely((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10))) {
		if ((cmd->status != 0) && !test_bit(SCST_TGT_DEV_RESERVED,
				&cmd->tgt_dev->tgt_dev_flags)) {
			struct scst_tgt_dev *tgt_dev_tmp;
			TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, status=%x",
				(uint64_t)cmd->lun, cmd->masked_status);
			TRACE_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense_buffer,
				sizeof(cmd->sense_buffer));
			/* Clearing the reservation */
			list_for_each_entry(tgt_dev_tmp, &cmd->dev->dev_tgt_dev_list,
					dev_tgt_dev_list_entry) {
				clear_bit(SCST_TGT_DEV_RESERVED,
					&tgt_dev_tmp->tgt_dev_flags);
			cmd->dev->dev_reserved = 0;

		scst_unblock_dev(cmd->dev);

	if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
		     (cmd->cdb[0] == MODE_SELECT_10) ||
		     (cmd->cdb[0] == LOG_SELECT)))
		if (cmd->status == 0) {
			TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
				"setting the SELECT UA (lun=%Ld)",
				(uint64_t)cmd->lun);
			spin_lock_bh(&scst_temp_UA_lock);
			if (cmd->cdb[0] == LOG_SELECT) {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x02);
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x01);
			scst_process_UA(cmd->dev, cmd, scst_temp_UA,
				sizeof(scst_temp_UA), 1);
			spin_unlock_bh(&scst_temp_UA_lock);

		scst_unblock_dev(cmd->dev);

	TRACE_EXIT_RES(res);
static int scst_dev_done(struct scst_cmd *cmd)

	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int atomic = scst_cmd_atomic(cmd);

	if (atomic && !cmd->dev->handler->dev_done_atomic &&
	    cmd->dev->handler->dev_done)
		TRACE_DBG("Dev handler %s dev_done() cannot be "
			"called in atomic context, rescheduling to the thread",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	if (scst_done_cmd_check(cmd, &res))

	state = SCST_CMD_STATE_XMIT_RESP;
	if (likely(!scst_is_cmd_local(cmd)) &&
	    likely(cmd->dev->handler->dev_done != NULL))
		TRACE_DBG("Calling dev handler %s dev_done(%p)",
			cmd->dev->handler->name, cmd);
		rc = cmd->dev->handler->dev_done(cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
			cmd->dev->handler->name, rc);
		if (rc != SCST_CMD_STATE_DEFAULT)

	case SCST_CMD_STATE_REINIT:
		res = SCST_CMD_STATE_RES_RESTART;

	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		res = SCST_CMD_STATE_RES_CONT_SAME;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s dev_done() requested "
			"thread context, rescheduling",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

		PRINT_ERROR_PR("Dev handler %s dev_done() returned "
			"invalid cmd state %d",
			cmd->dev->handler->name, state);
		PRINT_ERROR_PR("Dev handler %s dev_done() returned "
			"error %d", cmd->dev->handler->name,
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		res = SCST_CMD_STATE_RES_CONT_SAME;

	TRACE_EXIT_HRES(res);
static int scst_xmit_response(struct scst_cmd *cmd)

	int atomic = scst_cmd_atomic(cmd);

	 * Check here also in order to avoid unnecessary delays of other
	if (unlikely(cmd->sent_to_midlev == 0) &&
	    (cmd->tgt_dev != NULL))
		TRACE(TRACE_SCSI_SERIALIZING,
			"cmd %p was not sent to mid-lev (sn %d)", cmd, cmd->sn);
		scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
		cmd->sent_to_midlev = 1;

	if (atomic && !cmd->tgtt->xmit_response_atomic) {
		TRACE_DBG("%s", "xmit_response() cannot be "
			"called in atomic context, rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	set_bit(SCST_CMD_XMITTING, &cmd->cmd_flags);
	smp_mb__after_set_bit();

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
			TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
				"(tag %d), returning TASK ABORTED", cmd, cmd->tag);
			scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);

	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %d), skipping",
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;

	if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		if (atomic && !cmd->tgtt->xmit_response_atomic) {
			TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
		TRACE_MGMT_DBG("Delaying cmd %p (tag %d) for 1 second",
		schedule_timeout_uninterruptible(HZ);

	int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

	res = SCST_CMD_STATE_RES_CONT_NEXT;
	cmd->state = SCST_CMD_STATE_XMIT_WAIT;

	TRACE_DBG("Calling xmit_response(%p)", cmd);

#if defined(DEBUG) || defined(TRACING)
	struct scatterlist *sg = cmd->sg;
	TRACE(TRACE_SEND_BOT,
		"Xmitting %d S/G(s) at %p sg[0].page at %p",
		cmd->sg_cnt, sg, (void *)sg[0].page);
	for (i = 0; i < cmd->sg_cnt; ++i) {
		TRACE_BUFF_FLAG(TRACE_SEND_BOT,
			"Xmitting sg:", page_address(sg[i].page),

	if ((scst_random() % 100) == 77)
		rc = SCST_TGT_RES_QUEUE_FULL;
	rc = cmd->tgtt->xmit_response(cmd);
	TRACE_DBG("xmit_response() returned %d", rc);

	if (likely(rc == SCST_TGT_RES_SUCCESS))

	/* Restore the previous state */
	cmd->state = SCST_CMD_STATE_XMIT_RESP;

	case SCST_TGT_RES_QUEUE_FULL:
		if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)

	case SCST_TGT_RES_NEED_THREAD_CTX:
		TRACE_DBG("Target driver %s xmit_response() "
			"requested thread context, rescheduling",
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	/* Caution: cmd can be already dead here */
	TRACE_EXIT_HRES(res);

	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);
		PRINT_ERROR_PR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;
static int scst_finish_cmd(struct scst_cmd *cmd)

	spin_lock_irq(&scst_list_lock);

	TRACE_DBG("Deleting cmd %p from cmd list", cmd);
	list_del(&cmd->cmd_list_entry);

	scst_complete_cmd_mgmt(cmd, cmd->mgmt_cmnd);

	if (likely(cmd->tgt_dev != NULL)) {
		struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
		tgt_dev->cmd_count--;
		if (!list_empty(&tgt_dev->thr_cmd_list)) {
			struct scst_cmd *t =
				list_entry(tgt_dev->thr_cmd_list.next,
					typeof(*t), cmd_list_entry);
			scst_unthrottle_cmd(t);
			if (!cmd->processible_env)
				wake_up(&scst_list_waitQ);

	cmd->sess->sess_cmd_count--;

	list_del(&cmd->search_cmd_list_entry);

	spin_unlock_irq(&scst_list_lock);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
void scst_tgt_cmd_done(struct scst_cmd *cmd)

	unsigned long flags;

	BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

	context = SCST_CONTEXT_TASKLET;
	context = scst_get_context();

	TRACE_DBG("Context: %d", context);
	cmd->non_atomic_only = 0;
	cmd->state = SCST_CMD_STATE_FINISHED;

	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_check_retries(cmd->tgt, 0);
		res = __scst_process_active_cmd(cmd, context, 0);
		BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);

	case SCST_CONTEXT_TASKLET:
		spin_lock_irqsave(&scst_list_lock, flags);
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
		spin_unlock_irqrestore(&scst_list_lock, flags);
		scst_schedule_tasklet();
		scst_check_retries(cmd->tgt, 0);
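
/*
 * Response-side flow, as a sketch: once the target driver's
 * xmit_response() has actually delivered the response, the driver calls
 * scst_tgt_cmd_done(cmd), which moves the command from XMIT_WAIT to
 * FINISHED and reschedules it in a context chosen by scst_get_context()
 * or via the tasklet, as implemented above.
 */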
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 * Called under scst_list_lock and IRQs disabled
static int scst_translate_lun(struct scst_cmd *cmd)

	struct scst_tgt_dev *tgt_dev = NULL;

	scst_inc_cmd_count();

	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
		TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
			(uint64_t)cmd->lun);
		list_for_each_entry(tgt_dev, &cmd->sess->sess_tgt_dev_list,
				sess_tgt_dev_list_entry)
			if (tgt_dev->acg_dev->lun == cmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);

				if (unlikely(tgt_dev->acg_dev->dev->handler == NULL)) {
					PRINT_INFO_PR("Dev handler for device "
						"%Ld is NULL, the device will not be "
						"visible remotely", (uint64_t)cmd->lun);

				if (cmd->state == SCST_CMD_STATE_REINIT) {
					cmd->tgt_dev_saved->cmd_count--;
					TRACE(TRACE_SCSI_SERIALIZING,
						"SCST_CMD_STATE_REINIT: "
						"incrementing expected_sn on tgt_dev_saved %p",
						cmd->tgt_dev_saved);
					scst_inc_expected_sn_unblock(
						cmd->tgt_dev_saved, cmd, 1);
				cmd->tgt_dev = tgt_dev;
				tgt_dev->cmd_count++;
				cmd->dev = tgt_dev->acg_dev->dev;

				/* ToDo: cmd->queue_type */

				/* scst_list_lock is enough to protect that */
				cmd->sn = tgt_dev->next_sn;

				TRACE(TRACE_DEBUG/*TRACE_SCSI_SERIALIZING*/,
					"cmd->sn: %d", cmd->sn);
	TRACE_DBG("tgt_dev for lun %Ld not found, command to "
		"nonexistent LU?", (uint64_t)cmd->lun);
	scst_dec_cmd_count();

	if (!cmd->sess->waiting) {
		TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
		list_add_tail(&cmd->sess->dev_wait_sess_list_entry,
			&scst_dev_wait_sess_list);
		cmd->sess->waiting = 1;
	scst_dec_cmd_count();

	TRACE_EXIT_RES(res);
/* Called under scst_list_lock and IRQs disabled */
static int scst_process_init_cmd(struct scst_cmd *cmd)

	res = scst_translate_lun(cmd);
	if (likely(res == 0)) {
		cmd->state = SCST_CMD_STATE_DEV_PARSE;
		if (cmd->tgt_dev->cmd_count > SCST_MAX_DEVICE_COMMANDS)
#if 0 /* not sure which is better */
			scst_throttle_cmd(cmd);
			BUG_ON(!list_empty(&cmd->tgt_dev->thr_cmd_list));
			TRACE_DBG("Moving cmd %p to active cmd list", cmd);
			list_move_tail(&cmd->cmd_list_entry,
				&scst_active_cmd_list);

			TRACE(TRACE_RETRY, "Too many pending commands in "
				"session, returning BUSY to initiator \"%s\"",
				(cmd->sess->initiator_name[0] == '\0') ?
					"Anonymous" : cmd->sess->initiator_name);
			cmd->state = SCST_CMD_STATE_XMIT_RESP;
			TRACE_DBG("Moving cmd %p to active cmd list", cmd);
			list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
	} else if (res < 0) {
		TRACE_DBG("Finishing cmd %p", cmd);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_XMIT_RESP;
		TRACE_DBG("Moving cmd %p to active cmd list", cmd);
		list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);

	TRACE_EXIT_RES(res);
 * Called under scst_list_lock and IRQs disabled
 * We don't drop it anywhere inside, because command execution
 * has to be serialized, i.e. commands must be executed in the order
 * of their arrival, and we set this order inside scst_translate_lun().
static int scst_do_job_init(struct list_head *init_cmd_list)

	if (!test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) {
		while (!list_empty(init_cmd_list)) {
			struct scst_cmd *cmd = list_entry(init_cmd_list->next,
			res = scst_process_init_cmd(cmd);

	TRACE_EXIT_RES(res);
/* Called with no locks held */
static int __scst_process_active_cmd(struct scst_cmd *cmd, int context,

	cmd->atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
		SCST_CONTEXT_DIRECT_ATOMIC);
	cmd->processible_env = (context & SCST_PROCESSIBLE_ENV) != 0;

	switch (cmd->state) {
	case SCST_CMD_STATE_DEV_PARSE:
		res = scst_parse_cmd(cmd);

	case SCST_CMD_STATE_PREPARE_SPACE:
		res = scst_prepare_space(cmd);

	case SCST_CMD_STATE_RDY_TO_XFER:
		res = scst_rdy_to_xfer(cmd);

	case SCST_CMD_STATE_SEND_TO_MIDLEV:
		res = scst_send_to_midlev(cmd);
		/* !! At this point cmd, sess & tgt_dev can be already freed !! */

	case SCST_CMD_STATE_DEV_DONE:
		res = scst_dev_done(cmd);

	case SCST_CMD_STATE_XMIT_RESP:
		res = scst_xmit_response(cmd);

	case SCST_CMD_STATE_FINISHED:
		res = scst_finish_cmd(cmd);

		PRINT_ERROR("cmd (%p) in state %d, but shouldn't be",
		res = SCST_CMD_STATE_RES_CONT_NEXT;
	} while (res == SCST_CMD_STATE_RES_CONT_SAME);

	if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		spin_lock_irq(&scst_list_lock);
	} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		spin_lock_irq(&scst_list_lock);

		switch (cmd->state) {
		case SCST_CMD_STATE_DEV_PARSE:
		case SCST_CMD_STATE_PREPARE_SPACE:
		case SCST_CMD_STATE_RDY_TO_XFER:
		case SCST_CMD_STATE_SEND_TO_MIDLEV:
		case SCST_CMD_STATE_DEV_DONE:
		case SCST_CMD_STATE_XMIT_RESP:
		case SCST_CMD_STATE_FINISHED:
			TRACE_DBG("Moving cmd %p to active cmd list", cmd);
			list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
		/* states that are not valid here */
		case SCST_CMD_STATE_DEFAULT:
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			PRINT_ERROR_PR("cmd %p is in state %d, not putting on "
				"useful list (left on scst cmd list)", cmd,
			spin_unlock_irq(&scst_list_lock);
			spin_lock_irq(&scst_list_lock);

		cmd->non_atomic_only = 1;

		spin_unlock_irq(&scst_list_lock);
		wake_up(&scst_list_waitQ);
	} else if (res == SCST_CMD_STATE_RES_RESTART) {
		if (cmd->state == SCST_CMD_STATE_REINIT) {
			spin_lock_irq(&scst_list_lock);
			TRACE_DBG("Moving cmd %p to head of init cmd list", cmd);
			list_move(&cmd->cmd_list_entry, &scst_init_cmd_list);
			spin_unlock_irq(&scst_list_lock);

	TRACE_EXIT_RES(res);
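
/*
 * The normal state progression driven by the switch above:
 * DEV_PARSE -> PREPARE_SPACE -> RDY_TO_XFER (writes only) ->
 * SEND_TO_MIDLEV -> DEV_DONE -> XMIT_RESP -> FINISHED, with
 * RES_NEED_THREAD bouncing the command to a worker thread and
 * RES_RESTART sending a REINIT'ed command back to the init cmd list.
 */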
/* Called under scst_list_lock and IRQs disabled */
static void scst_do_job_active(struct list_head *active_cmd_list, int context)

	struct scst_cmd *cmd;
	int atomic = ((context & ~SCST_PROCESSIBLE_ENV) ==
		SCST_CONTEXT_DIRECT_ATOMIC);

	tm_dbg_check_released_cmds();

	list_for_each_entry(cmd, active_cmd_list, cmd_list_entry) {
		if (atomic && cmd->non_atomic_only) {
			TRACE(TRACE_DEBUG, "Skipping non-atomic cmd %p", cmd);
		if (tm_dbg_check_cmd(cmd) != 0)
		res = scst_process_active_cmd(cmd, context, NULL, 1);
		if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		} else if (res == SCST_CMD_STATE_RES_RESTART) {

static inline int test_cmd_lists(void)

	int res = !list_empty(&scst_active_cmd_list) ||
		(!list_empty(&scst_init_cmd_list) &&
		 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
		test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) ||
		unlikely(scst_shut_threads_count > 0) ||
		tm_dbg_is_release();
2478 int scst_cmd_thread(void *arg)
2480 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2486 n = scst_thread_num++;
2488 daemonize("scsi_tgt%d", n);
2489 recalc_sigpending();
2490 set_user_nice(current, 10);
2491 current->flags |= PF_NOFREEZE;
2493 spin_lock_irq(&scst_list_lock);
2496 init_waitqueue_entry(&wait, current);
2498 if (!test_cmd_lists()) {
2499 add_wait_queue_exclusive(&scst_list_waitQ, &wait);
2501 set_current_state(TASK_INTERRUPTIBLE);
2502 if (test_cmd_lists())
2504 spin_unlock_irq(&scst_list_lock);
2506 spin_lock_irq(&scst_list_lock);
2508 set_current_state(TASK_RUNNING);
2509 remove_wait_queue(&scst_list_waitQ, &wait);
2512 scst_do_job_init(&scst_init_cmd_list);
2513 scst_do_job_active(&scst_active_cmd_list,
2514 SCST_CONTEXT_THREAD|SCST_PROCESSIBLE_ENV);
2516 if (unlikely(test_bit(SCST_FLAG_SHUTDOWN, &scst_flags)) &&
2517 list_empty(&scst_cmd_list) &&
2518 list_empty(&scst_active_cmd_list) &&
2519 list_empty(&scst_init_cmd_list)) {
2523 if (unlikely(scst_shut_threads_count > 0)) {
2524 scst_shut_threads_count--;
2528 spin_unlock_irq(&scst_list_lock);
2530 if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
2531 smp_mb__after_atomic_dec();
2532 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
2533 up(scst_shutdown_mutex);
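/*
 * Hypothetical sketch of the other side of this shutdown handshake
 * (illustration only, not code from this file): the shutdown path is
 * expected to publish a locked semaphore, wake the threads and wait
 * for the last exiting thread to release it via the up() above.
 */
#if 0
DECLARE_MUTEX_LOCKED(shut_sem);		/* hypothetical local name */
scst_shutdown_mutex = &shut_sem;
set_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
wake_up_all(&scst_list_waitQ);
wake_up_all(&scst_mgmt_cmd_list_waitQ);
down(&shut_sem);	/* up()'ed by the last thread to exit */
#endif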
2540 void scst_cmd_tasklet(long p)
2544 spin_lock_irq(&scst_list_lock);
2546 scst_do_job_init(&scst_init_cmd_list);
2547 scst_do_job_active(&scst_active_cmd_list,
2548 SCST_CONTEXT_DIRECT_ATOMIC|SCST_PROCESSIBLE_ENV);
2550 spin_unlock_irq(&scst_list_lock);
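/*
 * Note: the tasklet runs the same job functions as scst_cmd_thread(),
 * but in SCST_CONTEXT_DIRECT_ATOMIC, so cmds marked non_atomic_only are
 * skipped by scst_do_job_active() and later picked up by a thread.
 */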
2557 * Returns 0 on success, < 0 if there is no device handler or
2558 * > 0 if SCST_FLAG_SUSPENDED set.
2560 static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
2562 struct scst_tgt_dev *tgt_dev = NULL;
2567 TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
2568 (uint64_t)mcmd->lun);
2570 spin_lock_irq(&scst_list_lock);
2571 scst_inc_cmd_count();
2572 if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
2573 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
2574 sess_tgt_dev_list_entry)
2576 if (tgt_dev->acg_dev->lun == mcmd->lun) {
2577 TRACE_DBG("tgt_dev %p found", tgt_dev);
2578 mcmd->mcmd_tgt_dev = tgt_dev;
2583 if (mcmd->mcmd_tgt_dev == NULL)
2584 scst_dec_cmd_count();
2586 if (!mcmd->sess->waiting) {
2587 TRACE_DBG("Adding session %p to scst_dev_wait_sess_list",
2589 list_add_tail(&mcmd->sess->dev_wait_sess_list_entry,
2590 &scst_dev_wait_sess_list);
2591 mcmd->sess->waiting = 1;
2593 scst_dec_cmd_count();
2596 spin_unlock_irq(&scst_list_lock);
2598 TRACE_EXIT_HRES(res);
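/*
 * Note: when SCST_FLAG_SUSPENDED is set, the session is parked on
 * scst_dev_wait_sess_list and a positive result tells the caller to
 * requeue the mgmt cmd until activity is resumed.
 */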
2602 /* Called under scst_list_lock and IRQ off */
2603 static void scst_complete_cmd_mgmt(struct scst_cmd *cmd,
2604 struct scst_mgmt_cmd *mcmd)
2608 TRACE_MGMT_DBG("cmd %p completed (tag %d, mcmd %p, "
2609 "mcmd->cmd_wait_count %d)", cmd, cmd->tag, mcmd,
2610 mcmd->cmd_wait_count);
2612 cmd->mgmt_cmnd = NULL;
2615 mcmd->completed_cmd_count++;
2617 mcmd->cmd_wait_count--;
2618 if (mcmd->cmd_wait_count > 0) {
2619 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, skipping",
2620 mcmd->cmd_wait_count);
2624 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2626 if (mcmd->completed) {
2627 TRACE_MGMT_DBG("Moving mgmt cmd %p to active mgmt cmd list",
2629 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2630 &scst_active_mgmt_cmd_list);
2633 wake_up(&scst_mgmt_cmd_list_waitQ);
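/*
 * Lifecycle of mcmd->cmd_wait_count: it is incremented in
 * scst_abort_cmd() for every affected cmd whose completion must be
 * waited for, and decremented above as each such cmd finishes; only
 * when it drops to zero (and the mcmd is completed) is the mcmd moved
 * back to scst_active_mgmt_cmd_list to finish processing.
 */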
2640 static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2641 struct scst_tgt_dev *tgt_dev, int set_status)
2643 int res = SCST_DEV_TM_NOT_COMPLETED;
2644 if (tgt_dev->acg_dev->dev->handler->task_mgmt_fn) {
2645 TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
2646 tgt_dev->acg_dev->dev->handler->name, mcmd->fn);
2647 res = tgt_dev->acg_dev->dev->handler->task_mgmt_fn(mcmd,
2649 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
2650 tgt_dev->acg_dev->dev->handler->name, res);
2651 if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED)) {
2652 mcmd->status = (res == SCST_DEV_TM_COMPLETED_SUCCESS) ?
2653 SCST_MGMT_STATUS_SUCCESS :
2654 SCST_MGMT_STATUS_FAILED;
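/*
 * Hypothetical dev handler hook, for illustration only (assuming the
 * (mcmd, tgt_dev) signature suggested by the call above): a handler
 * that completes ABORT TASK itself and defers everything else to SCST.
 */
#if 0
static int my_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
	struct scst_tgt_dev *tgt_dev)
{
	if (mcmd->fn == SCST_ABORT_TASK)
		return SCST_DEV_TM_COMPLETED_SUCCESS;
	return SCST_DEV_TM_NOT_COMPLETED;	/* let SCST do the work */
}
#endif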
2660 static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
2663 case SCST_ABORT_TASK:
2664 case SCST_ABORT_TASK_SET:
2665 case SCST_CLEAR_TASK_SET:
2673 * Called under scst_list_lock and IRQ off (to protect cmd
2674 * from being destroyed). Returns nothing; if the cmd is still being
2675 * executed or xmitted, completion of the mgmt cmd is deferred instead.
2677 void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
2678 int other_ini, int call_dev_task_mgmt_fn)
2682 TRACE(TRACE_MGMT, "Aborting cmd %p (tag %d)", cmd, cmd->tag);
2685 set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
2686 smp_mb__after_set_bit();
2688 set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
2689 smp_mb__after_set_bit();
2691 if (test_bit(SCST_CMD_THROTTELED, &cmd->cmd_flags))
2692 scst_unthrottle_cmd(cmd);
2694 if (call_dev_task_mgmt_fn && cmd->tgt_dev)
2695 scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 0);
2699 if (cmd->tgtt->tm_sync_reply)
2702 if (scst_is_strict_mgmt_fn(mcmd->fn))
2703 defer = test_bit(SCST_CMD_EXECUTING,
2706 defer = test_bit(SCST_CMD_XMITTING,
2712 * Delay the response until the command finishes, in
2713 * order to guarantee that "no further responses from
2714 * the task are sent to the SCSI initiator port" after
2715 * the response from the TM function is sent (SAM)
2717 TRACE(TRACE_MGMT, "cmd %p (tag %d) being executed/"
2718 "xmitted (state %d), deferring ABORT...", cmd,
2719 cmd->tag, cmd->state);
2721 if (cmd->mgmt_cmnd) {
2722 printk(KERN_ALERT "cmd %p (tag %d, state %d) "
2723 "has non-NULL mgmt_cmnd %p!!! Current "
2724 "mcmd %p\n", cmd, cmd->tag, cmd->state,
2725 cmd->mgmt_cmnd, mcmd);
2728 BUG_ON(cmd->mgmt_cmnd);
2729 mcmd->cmd_wait_count++;
2730 cmd->mgmt_cmnd = mcmd;
2734 tm_dbg_release_cmd(cmd);
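/*
 * To summarize the deferral above: a cmd caught in execution or
 * transmission is linked to the mgmt cmd via cmd->mgmt_cmnd and counted
 * in mcmd->cmd_wait_count; scst_complete_cmd_mgmt() later drops the
 * count when the cmd actually finishes.
 */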
2740 /* Called under scst_list_lock and IRQ off */
2741 static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
2744 if (mcmd->cmd_wait_count != 0) {
2745 TRACE_MGMT_DBG("cmd_wait_count(%d) not 0, preparing to "
2746 "wait", mcmd->cmd_wait_count);
2747 mcmd->state = SCST_MGMT_CMD_STATE_EXECUTING;
2750 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2753 mcmd->completed = 1;
2757 static void scst_unblock_aborted_cmds(int scst_mutex_held)
2759 struct scst_device *dev;
2764 if (!scst_mutex_held)
2767 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2768 struct scst_cmd *cmd, *tcmd;
2769 spin_lock_bh(&dev->dev_lock);
2770 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2771 blocked_cmd_list_entry) {
2772 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2773 list_del(&cmd->blocked_cmd_list_entry);
2774 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
2775 "to active cmd list", cmd);
2776 spin_lock_irq(&scst_list_lock);
2777 list_move_tail(&cmd->cmd_list_entry,
2778 &scst_active_cmd_list);
2779 spin_unlock_irq(&scst_list_lock);
2783 spin_unlock_bh(&dev->dev_lock);
2786 if (!scst_mutex_held)
2790 wake_up(&scst_list_waitQ);
2796 /* Aborts all cmds of the given tgt_dev found on the session's search cmd list */
2797 static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
2798 struct scst_tgt_dev *tgt_dev, int other_ini, int scst_mutex_held)
2800 struct scst_cmd *cmd;
2801 struct scst_session *sess = tgt_dev->sess;
2805 spin_lock_irq(&scst_list_lock);
2807 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2808 list_for_each_entry(cmd, &sess->search_cmd_list,
2809 search_cmd_list_entry) {
2810 if ((cmd->tgt_dev == NULL) &&
2811 (cmd->lun == tgt_dev->acg_dev->lun))
2813 if (cmd->tgt_dev != tgt_dev)
2815 scst_abort_cmd(cmd, mcmd, other_ini, 0);
2817 spin_unlock_irq(&scst_list_lock);
2819 scst_unblock_aborted_cmds(scst_mutex_held);
2825 /* Returns 0 if the command processing should be continued, <0 otherwise */
2826 static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
2829 struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
2830 struct scst_device *dev = tgt_dev->acg_dev->dev;
2832 TRACE(TRACE_MGMT, "Aborting task set (lun=%d, mcmd=%p)",
2833 tgt_dev->acg_dev->lun, mcmd);
2835 spin_lock_bh(&dev->dev_lock);
2836 __scst_block_dev(dev);
2837 spin_unlock_bh(&dev->dev_lock);
2839 __scst_abort_task_set(mcmd, tgt_dev, 0, 0);
2840 scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2842 res = scst_set_mcmd_next_state(mcmd);
2844 TRACE_EXIT_RES(res);
2848 static int scst_check_delay_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int locked)
2851 * No need for special protection of SCST_FLAG_TM_ACTIVE, since
2852 * we are called from a single thread only.
2854 if (test_bit(SCST_FLAG_TM_ACTIVE, &scst_flags)) {
2855 TRACE_MGMT_DBG("Moving mgmt cmd %p to delayed mgmt cmd list",
2858 spin_lock_irq(&scst_list_lock);
2859 list_move_tail(&mcmd->mgmt_cmd_list_entry,
2860 &scst_delayed_mgmt_cmd_list);
2862 spin_unlock_irq(&scst_list_lock);
2865 set_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
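/*
 * Hence at most one TM function is processed at a time: later ones sit
 * on scst_delayed_mgmt_cmd_list until scst_mgmt_cmd_send_done() clears
 * SCST_FLAG_TM_ACTIVE and reactivates the first delayed one.
 */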
2870 /* Returns 0 if the command processing should be continued,
2871 * >0 if it should be requeued, <0 otherwise */
2872 static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
2878 res = scst_check_delay_mgmt_cmd(mcmd, 1);
2882 if (mcmd->fn == SCST_ABORT_TASK) {
2883 struct scst_session *sess = mcmd->sess;
2884 struct scst_cmd *cmd;
2886 spin_lock_irq(&scst_list_lock);
2887 cmd = __scst_find_cmd_by_tag(sess, mcmd->tag);
2889 TRACE(TRACE_MGMT, "ABORT TASK failed: command for "
2890 "tag %d not found", mcmd->tag);
2891 mcmd->status = SCST_MGMT_STATUS_FAILED;
2892 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2894 TRACE(TRACE_MGMT, "Cmd %p for tag %d (sn %d) found, "
2895 "aborting it", cmd, mcmd->tag, cmd->sn);
2896 mcmd->cmd_to_abort = cmd;
2897 scst_abort_cmd(cmd, mcmd, 0, 1);
2898 res = scst_set_mcmd_next_state(mcmd);
2899 mcmd->cmd_to_abort = NULL; /* just in case */
2901 spin_unlock_irq(&scst_list_lock);
2904 rc = scst_mgmt_translate_lun(mcmd);
2906 PRINT_ERROR_PR("Corresponding device for lun %Ld not "
2907 "found", (uint64_t)mcmd->lun);
2908 mcmd->status = SCST_MGMT_STATUS_FAILED;
2909 mcmd->state = SCST_MGMT_CMD_STATE_DONE;
2911 mcmd->state = SCST_MGMT_CMD_STATE_READY;
2917 TRACE_EXIT_RES(res);
2921 /* Returns 0 if the command processing should be continued, <0 otherwise */
2922 static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
2925 struct scst_device *dev, *d;
2926 struct scst_tgt_dev *tgt_dev;
2928 LIST_HEAD(host_devs);
2932 TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
2933 mcmd, mcmd->sess->sess_cmd_count);
2937 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2940 spin_lock_bh(&dev->dev_lock);
2941 __scst_block_dev(dev);
2942 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
2943 spin_unlock_bh(&dev->dev_lock);
2947 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2948 dev_tgt_dev_list_entry)
2951 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
2952 if (rc == SCST_DEV_TM_NOT_COMPLETED)
2954 else if (rc == SCST_DEV_TM_COMPLETED_FAILED)
2955 mcmd->status = SCST_MGMT_STATUS_FAILED;
2960 if (dev->scsi_dev == NULL)
2963 list_for_each_entry(d, &host_devs, reset_dev_list_entry) {
2964 if (dev->scsi_dev->host->host_no ==
2965 d->scsi_dev->host->host_no)
2972 list_add_tail(&dev->reset_dev_list_entry, &host_devs);
2976 * We assume here that completion callbacks will be called for all
2977 * commands already on the devices on/after scsi_reset_provider().
2980 list_for_each_entry(dev, &host_devs, reset_dev_list_entry) {
2981 /* dev->scsi_dev must be non-NULL here */
2982 TRACE(TRACE_MGMT, "Resetting host %d bus",
2983 dev->scsi_dev->host->host_no);
2984 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_BUS);
2985 TRACE(TRACE_MGMT, "Result of host %d bus reset: %s",
2986 dev->scsi_dev->host->host_no,
2987 (rc == SUCCESS) ? "SUCCESS" : "FAILED");
2988 if (rc != SUCCESS) {
2989 /* SCSI_TRY_RESET_BUS is also done by scsi_reset_provider() */
2990 mcmd->status = SCST_MGMT_STATUS_FAILED;
2994 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
2995 if (dev->scsi_dev != NULL)
2996 dev->scsi_dev->was_reset = 0;
3001 spin_lock_irq(&scst_list_lock);
3002 tm_dbg_task_mgmt("TARGET RESET");
3003 res = scst_set_mcmd_next_state(mcmd);
3004 spin_unlock_irq(&scst_list_lock);
3006 TRACE_EXIT_RES(res);
3010 /* Returns 0 if the command processing should be continued, <0 otherwise */
3011 static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
3014 struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
3015 struct scst_device *dev = tgt_dev->acg_dev->dev;
3019 TRACE(TRACE_MGMT, "Resetting lun %d (mcmd %p)", tgt_dev->acg_dev->lun,
3022 spin_lock_bh(&dev->dev_lock);
3023 __scst_block_dev(dev);
3024 scst_process_reset(dev, mcmd->sess, NULL, mcmd);
3025 spin_unlock_bh(&dev->dev_lock);
3027 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
3028 if (rc != SCST_DEV_TM_NOT_COMPLETED)
3031 if (dev->scsi_dev != NULL) {
3032 TRACE(TRACE_MGMT, "Resetting host %d bus",
3033 dev->scsi_dev->host->host_no);
3034 rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
3036 mcmd->status = SCST_MGMT_STATUS_FAILED;
3037 dev->scsi_dev->was_reset = 0;
3041 spin_lock_irq(&scst_list_lock);
3042 tm_dbg_task_mgmt("LUN RESET");
3043 res = scst_set_mcmd_next_state(mcmd);
3044 spin_unlock_irq(&scst_list_lock);
3046 TRACE_EXIT_RES(res);
3050 /* Returns 0 if the command processing should be continued, <0 otherwise */
3051 static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
3055 struct scst_session *sess = mcmd->sess;
3056 struct scst_tgt_dev *tgt_dev;
3061 TRACE(TRACE_MGMT, "Nexus loss for sess %p (mcmd %p)", sess,
3064 TRACE(TRACE_MGMT, "Aborting all from sess %p (mcmd %p)", sess,
3069 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3070 sess_tgt_dev_list_entry)
3072 struct scst_device *dev = tgt_dev->acg_dev->dev;
3075 spin_lock_bh(&dev->dev_lock);
3076 __scst_block_dev(dev);
3077 spin_unlock_bh(&dev->dev_lock);
3079 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3080 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3081 mcmd->status = SCST_MGMT_STATUS_FAILED;
3083 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3085 scst_reset_tgt_dev(tgt_dev, 1);
3089 spin_lock_irq(&scst_list_lock);
3090 res = scst_set_mcmd_next_state(mcmd);
3091 spin_unlock_irq(&scst_list_lock);
3093 TRACE_EXIT_RES(res);
3097 /* Returns 0 if the command processing should be continued, <0 otherwise */
3098 static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
3102 struct scst_tgt *tgt = mcmd->sess->tgt;
3103 struct scst_session *sess;
3104 struct scst_device *dev;
3105 struct scst_tgt_dev *tgt_dev;
3110 TRACE(TRACE_MGMT, "I_T Nexus loss (tgt %p, mcmd %p)", tgt,
3113 TRACE(TRACE_MGMT, "Aborting all from tgt %p (mcmd %p)", tgt,
3119 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3120 spin_lock_bh(&dev->dev_lock);
3121 __scst_block_dev(dev);
3122 spin_unlock_bh(&dev->dev_lock);
3125 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
3126 list_for_each_entry(tgt_dev, &sess->sess_tgt_dev_list,
3127 sess_tgt_dev_list_entry)
3131 rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
3132 if (rc == SCST_DEV_TM_COMPLETED_FAILED)
3133 mcmd->status = SCST_MGMT_STATUS_FAILED;
3135 __scst_abort_task_set(mcmd, tgt_dev, !nexus_loss, 1);
3137 scst_reset_tgt_dev(tgt_dev, 1);
3143 spin_lock_irq(&scst_list_lock);
3144 res = scst_set_mcmd_next_state(mcmd);
3145 spin_unlock_irq(&scst_list_lock);
3147 TRACE_EXIT_RES(res);
3151 /* Returns 0 if the command processing should be continued, <0 otherwise */
3152 static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
3158 mcmd->status = SCST_MGMT_STATUS_SUCCESS;
3161 case SCST_ABORT_TASK_SET:
3162 case SCST_CLEAR_TASK_SET:
3163 res = scst_abort_task_set(mcmd);
3166 case SCST_LUN_RESET:
3167 res = scst_lun_reset(mcmd);
3170 case SCST_TARGET_RESET:
3171 res = scst_target_reset(mcmd);
3174 case SCST_ABORT_ALL_TASKS_SESS:
3175 res = scst_abort_all_nexus_loss_sess(mcmd, 0);
3178 case SCST_NEXUS_LOSS_SESS:
3179 res = scst_abort_all_nexus_loss_sess(mcmd, 1);
3182 case SCST_ABORT_ALL_TASKS:
3183 res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
3186 case SCST_NEXUS_LOSS:
3187 res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
3190 case SCST_CLEAR_ACA:
3191 scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1);
3192 /* Nothing to do (yet) */
3196 PRINT_ERROR_PR("Unknown task management function %d", mcmd->fn);
3197 mcmd->status = SCST_MGMT_STATUS_FAILED;
3201 TRACE_EXIT_RES(res);
3205 static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
3207 struct scst_device *dev;
3208 struct scst_tgt_dev *tgt_dev;
3212 clear_bit(SCST_FLAG_TM_ACTIVE, &scst_flags);
3213 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
3214 struct scst_mgmt_cmd *m;
3215 spin_lock_irq(&scst_list_lock);
3216 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
3217 mgmt_cmd_list_entry);
3218 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to active mgmt "
3220 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3221 spin_unlock_irq(&scst_list_lock);
3224 mcmd->state = SCST_MGMT_CMD_STATE_FINISHED;
3225 if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
3226 mcmd->status = SCST_MGMT_STATUS_FAILED;
3228 if (mcmd->sess->tgt->tgtt->task_mgmt_fn_done) {
3229 TRACE_DBG("Calling target %s task_mgmt_fn_done()",
3230 mcmd->sess->tgt->tgtt->name);
3231 mcmd->sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
3232 TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn_done() returned",
3233 mcmd->sess->tgt->tgtt->name);
3237 case SCST_ABORT_TASK_SET:
3238 case SCST_CLEAR_TASK_SET:
3239 case SCST_LUN_RESET:
3240 scst_unblock_dev(mcmd->mcmd_tgt_dev->acg_dev->dev);
3243 case SCST_TARGET_RESET:
3244 case SCST_ABORT_ALL_TASKS:
3245 case SCST_NEXUS_LOSS:
3247 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
3248 scst_unblock_dev(dev);
3253 case SCST_NEXUS_LOSS_SESS:
3254 case SCST_ABORT_ALL_TASKS_SESS:
3256 list_for_each_entry(tgt_dev, &mcmd->sess->sess_tgt_dev_list,
3257 sess_tgt_dev_list_entry) {
3258 scst_unblock_dev(tgt_dev->acg_dev->dev);
3263 case SCST_CLEAR_ACA:
3268 mcmd->tgt_specific = NULL;
3274 /* Returns >0, if cmd should be requeued */
3275 static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
3281 TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
3284 switch (mcmd->state) {
3285 case SCST_MGMT_CMD_STATE_INIT:
3286 res = scst_mgmt_cmd_init(mcmd);
3291 case SCST_MGMT_CMD_STATE_READY:
3292 if (scst_mgmt_cmd_exec(mcmd))
3296 case SCST_MGMT_CMD_STATE_DONE:
3297 scst_mgmt_cmd_send_done(mcmd);
3300 case SCST_MGMT_CMD_STATE_FINISHED:
3304 case SCST_MGMT_CMD_STATE_EXECUTING:
3309 PRINT_ERROR_PR("Unknown state %d of management command",
3317 TRACE_EXIT_RES(res);
3321 scst_free_mgmt_cmd(mcmd, 1);
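/*
 * Mgmt cmd state machine driven above: INIT -> READY -> (optionally
 * EXECUTING, while aborted cmds are waited for) -> DONE -> FINISHED,
 * after which the mcmd is freed.
 */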
3325 static inline int test_mgmt_cmd_list(void)
3327 int res = (!list_empty(&scst_active_mgmt_cmd_list) &&
3328 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
3329 test_bit(SCST_FLAG_SHUTDOWN, &scst_flags);
3333 int scst_mgmt_cmd_thread(void *arg)
3335 struct scst_mgmt_cmd *mcmd;
3339 daemonize("scsi_tgt_mc");
3340 recalc_sigpending();
3341 current->flags |= PF_NOFREEZE;
3343 spin_lock_irq(&scst_list_lock);
3346 init_waitqueue_entry(&wait, current);
3348 if (!test_mgmt_cmd_list()) {
3349 add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
3352 set_current_state(TASK_INTERRUPTIBLE);
3353 if (test_mgmt_cmd_list())
3355 spin_unlock_irq(&scst_list_lock);
3357 spin_lock_irq(&scst_list_lock);
3359 set_current_state(TASK_RUNNING);
3360 remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
3363 while (!list_empty(&scst_active_mgmt_cmd_list) &&
3364 !test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
3367 mcmd = list_entry(scst_active_mgmt_cmd_list.next,
3368 typeof(*mcmd), mgmt_cmd_list_entry);
3369 TRACE_MGMT_DBG("Moving mgmt cmd %p to mgmt cmd list",
3371 list_move_tail(&mcmd->mgmt_cmd_list_entry,
3372 &scst_mgmt_cmd_list);
3373 spin_unlock_irq(&scst_list_lock);
3374 rc = scst_process_mgmt_cmd(mcmd);
3375 spin_lock_irq(&scst_list_lock);
3377 TRACE_MGMT_DBG("Moving mgmt cmd %p to head "
3378 "of active mgmt cmd list", mcmd);
3379 list_move(&mcmd->mgmt_cmd_list_entry,
3380 &scst_active_mgmt_cmd_list);
3384 if (test_bit(SCST_FLAG_SHUTDOWN, &scst_flags) &&
3385 list_empty(&scst_active_mgmt_cmd_list))
3390 spin_unlock_irq(&scst_list_lock);
3392 if (atomic_dec_and_test(&scst_threads_count) && scst_shutdown_mutex) {
3393 smp_mb__after_atomic_dec();
3394 TRACE_DBG("%s", "Releasing scst_shutdown_mutex");
3395 up(scst_shutdown_mutex);
3402 static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
3403 *sess, int fn, int atomic, void *tgt_specific)
3405 struct scst_mgmt_cmd *mcmd = NULL;
3409 if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
3410 PRINT_ERROR_PR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
3411 "(target %s)", sess->tgt->tgtt->name);
3415 mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
3421 mcmd->state = SCST_MGMT_CMD_STATE_INIT;
3422 mcmd->tgt_specific = tgt_specific;
3429 static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
3430 struct scst_mgmt_cmd *mcmd)
3432 unsigned long flags;
3437 scst_sess_get(sess);
3439 spin_lock_irqsave(&scst_list_lock, flags);
3441 sess->sess_cmd_count++;
3444 if (unlikely(sess->shutting_down)) {
3445 PRINT_ERROR_PR("%s",
3446 "New mgmt cmd while shutting down the session");
3451 if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
3452 switch (sess->init_phase) {
3453 case SCST_SESS_IPH_INITING:
3454 TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
3456 list_add_tail(&mcmd->mgmt_cmd_list_entry,
3457 &sess->init_deferred_mcmd_list);
3459 case SCST_SESS_IPH_SUCCESS:
3461 case SCST_SESS_IPH_FAILED:
3469 TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
3470 list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
3472 spin_unlock_irqrestore(&scst_list_lock, flags);
3474 wake_up(&scst_mgmt_cmd_list_waitQ);
3481 spin_unlock_irqrestore(&scst_list_lock, flags);
3486 * Must not be called in parallel with scst_unregister_session() for the
3489 int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
3490 const uint8_t *lun, int lun_len, int atomic,
3494 struct scst_mgmt_cmd *mcmd = NULL;
3498 if (unlikely(fn == SCST_ABORT_TASK)) {
3499 PRINT_ERROR_PR("%s() called for ABORT TASK; use scst_rx_mgmt_fn_tag() instead", __FUNCTION__);
3504 mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_specific);
3508 mcmd->lun = scst_unpack_lun(lun, lun_len);
3509 if (mcmd->lun == (lun_t)-1)
3512 TRACE(TRACE_MGMT, "sess=%p, lun=%Ld", sess, (uint64_t)mcmd->lun);
3514 if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
3520 TRACE_EXIT_RES(res);
3524 scst_free_mgmt_cmd(mcmd, 0);
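/*
 * Hypothetical usage sketch (illustration only; "tmf_cookie" is an
 * invented name): a target driver that received, e.g., a LUN RESET TMF
 * from the wire could report it like this, possibly from IRQ context:
 */
#if 0
if (scst_rx_mgmt_fn_lun(sess, SCST_LUN_RESET, lun_buf, lun_buf_len,
		1 /* atomic */, tmf_cookie) != 0)
	; /* reject the TMF: no task_mgmt_fn_done() callback will follow */
#endif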
3530 * Must not be called in parallel with scst_unregister_session() for the
3533 int scst_rx_mgmt_fn_tag(struct scst_session *sess, int fn, uint32_t tag,
3534 int atomic, void *tgt_specific)
3537 struct scst_mgmt_cmd *mcmd = NULL;
3541 if (unlikely(fn != SCST_ABORT_TASK)) {
3542 PRINT_ERROR_PR("%s(%d) called, but only ABORT TASK is allowed here", __FUNCTION__, fn);
3547 mcmd = scst_pre_rx_mgmt_cmd(sess, fn, atomic, tgt_specific);
3553 TRACE(TRACE_MGMT, "sess=%p, tag=%d", sess, mcmd->tag);
3555 if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
3561 TRACE_EXIT_RES(res);
3565 scst_free_mgmt_cmd(mcmd, 0);
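/*
 * Similarly, ABORT TASK is the only function accepted here, since it is
 * the only TM function addressed by tag. Hypothetical usage sketch
 * (illustration only; "tag_to_abort" and "tmf_cookie" are invented):
 */
#if 0
if (scst_rx_mgmt_fn_tag(sess, SCST_ABORT_TASK, tag_to_abort,
		0 /* process context */, tmf_cookie) != 0)
	; /* reject the TMF on the wire */
#endif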
3570 /* scst_mutex supposed to be held */
3571 static struct scst_acg *scst_find_acg(const char *initiator_name)
3573 struct scst_acg *acg, *res = NULL;
3578 list_for_each_entry(acg, &scst_acg_list, scst_acg_list_entry) {
3579 list_for_each_entry(n, &acg->acn_list,
3582 if (strcmp(n->name, initiator_name) == 0) {
3583 TRACE_DBG("Access control group %s found",
3592 TRACE_EXIT_HRES(res);
3596 static int scst_init_session(struct scst_session *sess)
3599 struct scst_acg *acg;
3600 struct scst_cmd *cmd;
3601 struct scst_mgmt_cmd *mcmd, *tm;
3608 if (sess->initiator_name) {
3609 acg = scst_find_acg(sess->initiator_name);
3611 PRINT_INFO_PR("Name %s not found, using default group",
3612 sess->initiator_name);
3613 acg = scst_default_acg;
3617 acg = scst_default_acg;
3620 TRACE_DBG("Assigning session %p to acg %s", sess, acg->acg_name);
3621 list_add_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);
3623 TRACE_DBG("Adding sess %p to tgt->sess_list", sess);
3624 list_add_tail(&sess->sess_list_entry, &sess->tgt->sess_list);
3626 res = scst_sess_alloc_tgt_devs(sess);