 * Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/unistd.h>
#include <asm/string.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "scst_priv.h"

static void scst_cmd_set_sn(struct scst_cmd *cmd);
static int __scst_init_cmd(struct scst_cmd *cmd);
static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
	struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];

	spin_lock_irqsave(&t->tasklet_lock, flags);
	TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
		smp_processor_id());
	list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
	spin_unlock_irqrestore(&t->tasklet_lock, flags);

	tasklet_schedule(&t->tasklet);
 * Must not be called in parallel with scst_unregister_session_ex() for the
struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
	const uint8_t *lun, int lun_len,
	const uint8_t *cdb, int cdb_len, int atomic)
	if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
		PRINT_CRIT_ERROR("%s", "New cmd while shutting down the session");

	cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);

	cmd->tgtt = sess->tgt->tgtt;
	cmd->start_time = jiffies;

	/*
	 * For both a wrong LUN and a wrong CDB, defer the error reporting to
	 * scst_cmd_init_done()
	 */
	cmd->lun = scst_unpack_lun(lun, lun_len);

	if (cdb_len <= SCST_MAX_CDB_SIZE) {
		memcpy(cmd->cdb, cdb, cdb_len);
		cmd->cdb_len = cdb_len;

	TRACE_DBG("cmd %p, sess %p", cmd, sess);
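
/*
 * Example: a minimal sketch of how a target driver is expected to feed a
 * newly received SCSI command into SCST via scst_rx_cmd() and
 * scst_cmd_init_done(). The "mytgt_recv()" function and its parameters are
 * hypothetical; scst_rx_cmd(), scst_cmd_set_tag() and scst_cmd_init_done()
 * are the real entry points used here.
 */
#if 0	/* illustration only, not compiled */
static void mytgt_recv(struct scst_session *sess, uint64_t tag,
	const uint8_t *lun, int lun_len, const uint8_t *cdb, int cdb_len)
{
	struct scst_cmd *cmd;

	/* Atomic allocation, since we may be called from SIRQ context */
	cmd = scst_rx_cmd(sess, lun, lun_len, cdb, cdb_len, 1);
	if (cmd == NULL)
		return;	/* out of memory: drop or requeue at transport level */

	scst_cmd_set_tag(cmd, tag);

	/* Hand the command over to SCST; in SIRQ, ask for tasklet context */
	scst_cmd_init_done(cmd, SCST_CONTEXT_TASKLET);
}
#endif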
static int scst_init_cmd(struct scst_cmd *cmd, int context)
	/* See the comment in scst_do_job_init() */
	if (unlikely(!list_empty(&scst_init_cmd_list))) {
		TRACE_MGMT_DBG("%s", "init cmd list busy");

	 * Memory barrier isn't necessary here, because CPU appears to
	rc = __scst_init_cmd(cmd);
	if (unlikely(rc > 0))
	else if (unlikely(rc != 0))

	/* Small context optimization */
	if (((context == SCST_CONTEXT_TASKLET) ||
	     (context == SCST_CONTEXT_DIRECT_ATOMIC)) &&
	    scst_cmd_is_expected_set(cmd)) {
		if (cmd->expected_data_direction == SCST_DATA_WRITE) {
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				context = SCST_CONTEXT_THREAD;
			if (!test_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				context = SCST_CONTEXT_THREAD;

	TRACE_EXIT_RES(context);
	if (cmd->preprocessing_only) {
		/*
		 * A poor man's solution for single threaded targets, where
		 * blocking the receiver at least sometimes means blocking all.
		 */
		sBUG_ON(context != SCST_CONTEXT_DIRECT);

		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
		/* Keep the initiator away from too many BUSY commands */
		if (!in_interrupt() && !in_atomic())

	spin_lock_irqsave(&scst_init_lock, flags);
	TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
		"%d)", cmd, atomic_read(&scst_cmd_count));
	list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
	if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
		scst_init_poll_cnt++;
	spin_unlock_irqrestore(&scst_init_lock, flags);
	wake_up(&scst_init_cmd_list_waitQ);
#ifdef MEASURE_LATENCY
static inline uint64_t scst_sec_to_nsec(time_t sec)
	return (uint64_t)sec * 1000000000;

void scst_cmd_init_done(struct scst_cmd *cmd, int pref_context)
	struct scst_session *sess = cmd->sess;

#ifdef MEASURE_LATENCY
	cmd->start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
	TRACE_DBG("cmd %p (sess %p): start %Ld (tv_sec %ld, "
		"tv_nsec %ld)", cmd, sess, cmd->start, ts.tv_sec,
		ts.tv_nsec);

	TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
	TRACE(TRACE_SCSI, "tag=%llu, lun=%Ld, CDB len=%d", cmd->tag,
		(uint64_t)cmd->lun, cmd->cdb_len);
	PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
		cmd->cdb, cmd->cdb_len);
	if (unlikely(in_irq()) && ((pref_context == SCST_CONTEXT_DIRECT) ||
			(pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_TASKLET;

	atomic_inc(&sess->sess_cmd_count);

	spin_lock_irqsave(&sess->sess_list_lock, flags);

	if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
		/*
		 * We have to always keep the command in the search list from
		 * the very beginning, because otherwise it can be missed during
		 * TM processing. This check is needed because there might be
		 * old, i.e. deferred, commands and new, i.e. just coming, ones.
		 */
		if (cmd->search_cmd_list_entry.next == NULL)
			list_add_tail(&cmd->search_cmd_list_entry,
				&sess->search_cmd_list);
		switch (sess->init_phase) {
		case SCST_SESS_IPH_SUCCESS:
		case SCST_SESS_IPH_INITING:
			TRACE_DBG("Adding cmd %p to init deferred cmd list", cmd);
			list_add_tail(&cmd->cmd_list_entry,
				&sess->init_deferred_cmd_list);
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
		case SCST_SESS_IPH_FAILED:
			spin_unlock_irqrestore(&sess->sess_list_lock, flags);
			cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;

	list_add_tail(&cmd->search_cmd_list_entry, &sess->search_cmd_list);

	spin_unlock_irqrestore(&sess->sess_list_lock, flags);
	if (unlikely(cmd->lun == (lun_t)-1)) {
		PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;

	if (unlikely(cmd->cdb_len == 0)) {
		PRINT_ERROR("Wrong CDB len %d, finishing cmd", 0);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;

	if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
		PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_message));
		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;

	cmd->state = SCST_CMD_STATE_INIT;
	/* cmd must be inited here to keep the order */
	pref_context = scst_init_cmd(cmd, pref_context);
	if (unlikely(pref_context < 0))

	/* Here cmd must not be in any cmd list, no locks */
	switch (pref_context) {
	case SCST_CONTEXT_TASKLET:
		scst_schedule_tasklet(cmd);

	case SCST_CONTEXT_DIRECT:
	case SCST_CONTEXT_DIRECT_ATOMIC:
		scst_process_active_cmd(cmd, pref_context);
		/* For *NEED_THREAD wake_up() is already done */

	default:
		PRINT_ERROR("Context %x is undefined, using the thread one",
			pref_context);
	case SCST_CONTEXT_THREAD:
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);
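
/*
 * Example: picking pref_context on the target driver side. A rough sketch
 * of the convention enforced above: SCST_CONTEXT_DIRECT* must not be used
 * from hard IRQ context (use SCST_CONTEXT_TASKLET there), while process
 * context may process the command in place. "mytgt_pick_context" is a
 * hypothetical helper, not part of this file.
 */
#if 0	/* illustration only, not compiled */
static int mytgt_pick_context(void)
{
	if (in_irq())
		return SCST_CONTEXT_TASKLET;	/* as required by the check above */
	if (in_atomic())
		return SCST_CONTEXT_DIRECT_ATOMIC;
	return SCST_CONTEXT_DIRECT;
}
#endif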
static int scst_pre_parse(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_device *dev = cmd->dev;

	cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
	    (!dev->has_own_order_mgmt &&
	     ((dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) ||
	      (cmd->queue_type == SCST_CMD_QUEUE_ORDERED)));

	sBUG_ON(cmd->internal);

	 * Expected transfer data supplied by the SCSI transport via the
	 * target driver are untrusted, so we prefer to fetch them from CDB.
	 * Additionally, not all transports support supplying the expected
	rc = scst_get_cdb_info(cmd);
	if (unlikely(rc != 0)) {
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
		PRINT_ERROR("Unknown opcode 0x%02x for %s. "
			"Should you update scst_scsi_op_table?",
			cmd->cdb[0], dev->handler->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
#ifdef USE_EXPECTED_VALUES
		if (scst_cmd_is_expected_set(cmd)) {
			TRACE(TRACE_SCSI, "Using initiator supplied values: "
				"direction %d, transfer_len %d",
				cmd->expected_data_direction,
				cmd->expected_transfer_len);
			cmd->data_direction = cmd->expected_data_direction;
			cmd->bufflen = cmd->expected_transfer_len;
			/* Restore the (likely) lost CDB length */
			cmd->cdb_len = scst_get_cdb_len(cmd->cdb);
			if (cmd->cdb_len == -1) {
				PRINT_ERROR("Unable to get CDB length for "
					"opcode 0x%02x. Returning INVALID "
					"OPCODE", cmd->cdb[0]);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_opcode));
			PRINT_ERROR("Unknown opcode 0x%02x for %s and "
				"target %s not supplied expected values",
				cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_invalid_opcode));
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_opcode));

	TRACE(TRACE_SCSI, "op_name <%s>, direction=%d (expected %d, "
		"set %s), transfer_len=%d (expected len %d), flags=%d",
		cmd->op_name, cmd->data_direction,
		cmd->expected_data_direction,
		scst_cmd_is_expected_set(cmd) ? "yes" : "no",
		cmd->bufflen, cmd->expected_transfer_len, cmd->op_flags);

	if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
		if (scst_cmd_is_expected_set(cmd)) {
			/*
			 * Command data length can't be easily
			 * determined from the CDB. ToDo: all such
			 * commands should be fixed. Until they are
			 * fixed, get it from the supplied expected
			 * value, but limit it to some reasonable
			cmd->bufflen = min(cmd->expected_transfer_len,
			cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
		PRINT_ERROR("NACA bit in the CDB control byte is not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
		PRINT_ERROR("Linked commands are not supported "
			"(opcode 0x%02x)", cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	cmd->state = SCST_CMD_STATE_DEV_PARSE;

	cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;
static int scst_parse_cmd(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	struct scst_device *dev = cmd->dev;
	int orig_bufflen = cmd->bufflen;

	if (likely(!scst_is_cmd_local(cmd))) {
		TRACE_DBG("Calling dev handler %s parse(%p)",
			dev->handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ", cmd->cdb, cmd->cdb_len);
		state = dev->handler->parse(cmd);
		/* Caution: cmd can be already dead here */
		TRACE_DBG("Dev handler %s parse() returned %d",
			dev->handler->name, state);

		case SCST_CMD_STATE_NEED_THREAD_CTX:
			TRACE_DBG("Dev handler %s parse() requested thread "
				"context, rescheduling", dev->handler->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;

		case SCST_CMD_STATE_STOP:
			TRACE_DBG("Dev handler %s parse() requested stop "
				"processing", dev->handler->name);
			res = SCST_CMD_STATE_RES_CONT_NEXT;

		if (state == SCST_CMD_STATE_DEFAULT)
			state = SCST_CMD_STATE_PREPARE_SPACE;
		state = SCST_CMD_STATE_PREPARE_SPACE;

	if (cmd->data_len == -1)
		cmd->data_len = cmd->bufflen;
	if (cmd->data_buf_alloced && unlikely((orig_bufflen > cmd->bufflen))) {
		PRINT_ERROR("Dev handler supplied data buffer (size %d) "
			"is less than required (size %d)", cmd->bufflen,
			orig_bufflen);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
	if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))

	if (unlikely((cmd->bufflen == 0) &&
		     (cmd->op_flags & SCST_UNKNOWN_LENGTH))) {
		PRINT_ERROR("Unknown data transfer length for opcode 0x%x "
			"(handler %s, target %s)", cmd->cdb[0],
			dev->handler->name, cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);

	if ((cmd->bufflen != 0) &&
	    ((cmd->data_direction == SCST_DATA_NONE) ||
	     ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
		PRINT_ERROR("Dev handler %s parse() returned "
			"invalid cmd data_direction %d, bufflen %d, state %d "
			"or sg %p (opcode 0x%x)", dev->handler->name,
			cmd->data_direction, cmd->bufflen, state, cmd->sg,
			cmd->cdb[0]);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);

	if (scst_cmd_is_expected_set(cmd)) {
#ifdef USE_EXPECTED_VALUES
		if ((cmd->data_direction != cmd->expected_data_direction) ||
		    (cmd->bufflen != cmd->expected_transfer_len)) {
			PRINT_ERROR("Expected values don't match decoded ones: "
				"data_direction %d, expected_data_direction %d, "
				"bufflen %d, expected_transfer_len %d",
				cmd->data_direction, cmd->expected_data_direction,
				cmd->bufflen, cmd->expected_transfer_len);
			PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);

		cmd->data_direction = cmd->expected_data_direction;
		cmd->bufflen = cmd->expected_transfer_len;
		if (unlikely(cmd->data_direction != cmd->expected_data_direction)) {
			if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
			     (cmd->bufflen != 0)) &&
			    /* Crazy VMware people sometimes do TUR with READ direction */
			    !(cmd->cdb[0] == TEST_UNIT_READY)) {
				PRINT_ERROR("Expected data direction %d for opcode "
					"0x%02x (handler %s, target %s) doesn't match "
					"decoded value %d", cmd->expected_data_direction,
					cmd->cdb[0], dev->handler->name,
					cmd->tgtt->name, cmd->data_direction);
				PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_invalid_message));

		if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
			TRACE(TRACE_MINOR, "Warning: expected transfer length "
				"%d for opcode 0x%02x (handler %s, target %s) "
				"doesn't match decoded value %d. Faulty "
				"initiator (e.g. VMware is known to be such) or "
				"scst_scsi_op_table should be updated?",
				cmd->expected_transfer_len, cmd->cdb[0],
				dev->handler->name, cmd->tgtt->name,
				cmd->bufflen);
			PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB", cmd->cdb,
				cmd->cdb_len);

	if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
		PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
			"target %s", cmd->cdb[0], dev->handler->name,
			cmd->tgtt->name);
		PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		res = SCST_CMD_STATE_RES_CONT_SAME;

		PRINT_ERROR("Dev handler %s parse() returned "
			"invalid cmd state %d (opcode %d)",
			dev->handler->name, state, cmd->cdb[0]);
		PRINT_ERROR("Dev handler %s parse() returned "
			"error %d (opcode %d)", dev->handler->name,

	if (cmd->resp_data_len == -1) {
		if (cmd->data_direction == SCST_DATA_READ)
			cmd->resp_data_len = cmd->bufflen;
		else
			cmd->resp_data_len = 0;

	TRACE_EXIT_HRES(res);

	/* dev_done() will be called as part of the regular cmd's finish */
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

#ifndef USE_EXPECTED_VALUES

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
static int scst_prepare_space(struct scst_cmd *cmd)
	int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;

	if (cmd->data_direction == SCST_DATA_NONE)

	if (cmd->data_buf_tgt_alloc) {
		int orig_bufflen = cmd->bufflen;

		TRACE_MEM("%s", "Custom tgt data buf allocation requested");

		r = cmd->tgtt->alloc_data_buf(cmd);

		if (unlikely(cmd->bufflen == 0)) {
			/* See the comment in scst_alloc_space() */

		cmd->data_buf_alloced = 1;
		if (unlikely(orig_bufflen < cmd->bufflen)) {
			PRINT_ERROR("Target driver allocated data "
				"buffer (size %d) is less than "
				"required (size %d)", orig_bufflen,
				cmd->bufflen);

		TRACE_MEM("%s", "data_buf_alloced, returning");

	if (!cmd->data_buf_alloced)
		r = scst_alloc_space(cmd);
	else
		TRACE_MEM("%s", "data_buf_alloced set, returning");

	if (scst_cmd_atomic(cmd)) {
		TRACE_MEM("%s", "Atomic memory allocation failed, "
			"rescheduling to the thread");
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	if (cmd->preprocessing_only) {
		if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
			TRACE_MGMT_DBG("ABORTED set, returning ABORTED for "
				"cmd %p", cmd);
			cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
			res = SCST_CMD_STATE_RES_CONT_SAME;

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_PREPROCESS_DONE;

		TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
		cmd->tgtt->preprocessing_done(cmd);
		TRACE_DBG("%s", "preprocessing_done() returned");

	switch (cmd->data_direction) {
	case SCST_DATA_WRITE:
		cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
		break;
	default:
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		break;

	TRACE_EXIT_HRES(res);

	TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
		"(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
void scst_restart_cmd(struct scst_cmd *cmd, int status, int pref_context)
	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE_DBG("tag=%llu, status=%#x", scst_cmd_get_tag(cmd), status);

	if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
			 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_TASKLET;

	case SCST_PREPROCESS_STATUS_SUCCESS:
		switch (cmd->data_direction) {
		case SCST_DATA_WRITE:
			cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
			break;
		default:
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
			break;
		}
		if (cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
			if (cmd->data_direction == SCST_DATA_WRITE) {
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;
				if (!test_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
						&cmd->tgt_dev->tgt_dev_flags))
					pref_context = SCST_CONTEXT_THREAD;

	case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
		cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	case SCST_PREPROCESS_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
	case SCST_PREPROCESS_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

		PRINT_ERROR("%s() received unknown status %x", __func__,
			status);
		cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd, pref_context, 1);
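
/*
 * Example: the preprocessing_done()/scst_restart_cmd() handshake. For a
 * command with preprocessing_only set, scst_prepare_space() calls the target
 * driver's preprocessing_done() callback; once the driver has finished its
 * own preparation it gives the command back via scst_restart_cmd(). A hedged
 * sketch; "mytgt_preprocessing_done" is a hypothetical callback name.
 */
#if 0	/* illustration only, not compiled */
static void mytgt_preprocessing_done(struct scst_cmd *cmd)
{
	/* ... driver-private preparation of the command ... */

	/* Resume SCST processing in thread context */
	scst_restart_cmd(cmd, SCST_PREPROCESS_STATUS_SUCCESS,
		SCST_CONTEXT_THREAD);
}
#endif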
static int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
	struct scst_tgt *tgt = cmd->sess->tgt;

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
		tgt->retry_cmds);
	if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
		/* At least one cmd finished, so try again */
		TRACE_RETRY("Some command(s) finished, direct retry "
			"(finished_cmds=%d, tgt->finished_cmds=%d, "
			"retry_cmds=%d)", finished_cmds,
			atomic_read(&tgt->finished_cmds), tgt->retry_cmds);

	TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
	list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);

	if (!tgt->retry_timer_active) {
		tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
		add_timer(&tgt->retry_timer);
		tgt->retry_timer_active = 1;

	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
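
/*
 * Example: how the retry machinery above is triggered. When a target
 * driver's rdy_to_xfer() or xmit_response() callback cannot queue the
 * command, it returns SCST_TGT_RES_QUEUE_FULL, and SCST parks the command
 * on tgt->retry_cmd_list until another command finishes or the retry timer
 * fires. Hedged sketch; the "mytgt_*" helpers are hypothetical.
 */
#if 0	/* illustration only, not compiled */
static int mytgt_rdy_to_xfer(struct scst_cmd *cmd)
{
	if (mytgt_hw_queue_full(cmd))		/* hypothetical HW check */
		return SCST_TGT_RES_QUEUE_FULL;	/* scst_queue_retry_cmd() path */

	mytgt_start_data_transfer(cmd);		/* hypothetical */
	return SCST_TGT_RES_SUCCESS;
}
#endif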
static int scst_rdy_to_xfer(struct scst_cmd *cmd)
	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);

	if (cmd->tgtt->rdy_to_xfer == NULL) {
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		res = SCST_CMD_STATE_RES_CONT_SAME;

	int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

	res = SCST_CMD_STATE_RES_CONT_NEXT;
	cmd->state = SCST_CMD_STATE_DATA_WAIT;

	TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
	if ((scst_random() % 100) == 75)
		rc = SCST_TGT_RES_QUEUE_FULL;
	else
		rc = cmd->tgtt->rdy_to_xfer(cmd);
	TRACE_DBG("rdy_to_xfer() returned %d", rc);

	if (likely(rc == SCST_TGT_RES_SUCCESS))

	/* Restore the previous state */
	cmd->state = SCST_CMD_STATE_RDY_TO_XFER;

	case SCST_TGT_RES_QUEUE_FULL:
		if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)

	case SCST_TGT_RES_NEED_THREAD_CTX:
		TRACE_DBG("Target driver %s "
			"rdy_to_xfer() requested thread "
			"context, rescheduling", cmd->tgtt->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;

	TRACE_EXIT_HRES(res);

	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
			"value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
	res = SCST_CMD_STATE_RES_CONT_SAME;
/* No locks, but might be in IRQ */
void scst_proccess_redirect_cmd(struct scst_cmd *cmd, int context,
	int check_retries)
	TRACE_DBG("Context: %x", context);

	switch (context & ~SCST_CONTEXT_PROCESSABLE) {
	case SCST_CONTEXT_DIRECT_ATOMIC:
		context &= ~SCST_CONTEXT_PROCESSABLE;
	case SCST_CONTEXT_DIRECT:
		scst_check_retries(cmd->tgt);
		scst_process_active_cmd(cmd, context);

	default:
		PRINT_ERROR("Context %x is unknown, using the thread one",
			context);
	case SCST_CONTEXT_THREAD:
		scst_check_retries(cmd->tgt);
		spin_lock_irqsave(&cmd->cmd_lists->cmd_list_lock, flags);
		TRACE_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irqrestore(&cmd->cmd_lists->cmd_list_lock, flags);

	case SCST_CONTEXT_TASKLET:
		scst_check_retries(cmd->tgt);
		scst_schedule_tasklet(cmd);
void scst_rx_data(struct scst_cmd *cmd, int status, int pref_context)
	TRACE_DBG("Preferred context: %d", pref_context);
	TRACE(TRACE_SCSI, "tag=%llu status=%#x", scst_cmd_get_tag(cmd), status);

	if (in_irq() && ((pref_context == SCST_CONTEXT_DIRECT) ||
			 (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)))
		PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
			"SCST_CONTEXT_TASKLET instead\n", pref_context,
			cmd->tgtt->name);
		pref_context = SCST_CONTEXT_TASKLET;

	case SCST_RX_STATUS_SUCCESS:
		cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
		/* Small context optimization */
		if ((pref_context == SCST_CONTEXT_TASKLET) ||
		    (pref_context == SCST_CONTEXT_DIRECT_ATOMIC)) {
			if (!test_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
					&cmd->tgt_dev->tgt_dev_flags))
				pref_context = SCST_CONTEXT_THREAD;

	case SCST_RX_STATUS_ERROR_SENSE_SET:
		cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	case SCST_RX_STATUS_ERROR_FATAL:
		set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
	case SCST_RX_STATUS_ERROR:
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

		PRINT_ERROR("scst_rx_data() received unknown status %x",
			status);
		cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd, pref_context, 1);
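
/*
 * Example: completing a WRITE data transfer. After rdy_to_xfer() the target
 * driver owns the command until all data has arrived, then it reports the
 * outcome with scst_rx_data(). A hedged sketch with a hypothetical transport
 * completion handler:
 */
#if 0	/* illustration only, not compiled */
static void mytgt_data_received(struct scst_cmd *cmd, int error)
{
	scst_rx_data(cmd,
		error ? SCST_RX_STATUS_ERROR : SCST_RX_STATUS_SUCCESS,
		SCST_CONTEXT_DIRECT);
}
#endif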
static int scst_tgt_pre_exec(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_SAME, rc;

	cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	if (cmd->tgtt->pre_exec == NULL)

	TRACE_DBG("Calling pre_exec(%p)", cmd);
	rc = cmd->tgtt->pre_exec(cmd);
	TRACE_DBG("pre_exec() returned %d", rc);

	if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
		case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
			cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
		case SCST_PREPROCESS_STATUS_ERROR_FATAL:
			set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
		case SCST_PREPROCESS_STATUS_ERROR:
			scst_set_cmd_error(cmd,
				SCST_LOAD_SENSE(scst_sense_hardw_error));
			cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
		case SCST_PREPROCESS_STATUS_NEED_THREAD:
			TRACE_DBG("Target driver's %s pre_exec() requested "
				"thread context, rescheduling", cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;

	TRACE_EXIT_RES(res);
static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
	const uint8_t *rq_sense, int rq_sense_len, int resid)
#ifdef MEASURE_LATENCY
	getnstimeofday(&ts);
	cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
	TRACE_DBG("cmd %p (sess %p): post_exec_start %Ld (tv_sec %ld, "
		"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start, ts.tv_sec,
		ts.tv_nsec);

	cmd->status = result & 0xff;
	cmd->msg_status = msg_byte(result);
	cmd->host_status = host_byte(result);
	cmd->driver_status = driver_byte(result);
	if (unlikely(resid != 0)) {
		if ((resid < 0) || (resid > cmd->resp_data_len)) {
			PRINT_ERROR("Wrong resid %d (cmd->resp_data_len=%d, "
				"op %x)", resid, cmd->resp_data_len,
				cmd->cdb[0]);
		} else
			scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);

	if (cmd->status == SAM_STAT_CHECK_CONDITION)
		scst_alloc_set_sense(cmd, in_irq(), rq_sense, rq_sense_len);

	TRACE(TRACE_SCSI, "result=%x, cmd->status=%x, resid=%d, "
		"cmd->msg_status=%x, cmd->host_status=%x, "
		"cmd->driver_status=%x", result, cmd->status, resid,
		cmd->msg_status, cmd->host_status, cmd->driver_status);
/* For small context optimization */
static inline int scst_optimize_post_exec_context(struct scst_cmd *cmd,
	if ((context == SCST_CONTEXT_TASKLET) ||
	    (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
		if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
				&cmd->tgt_dev->tgt_dev_flags))
			context = SCST_CONTEXT_THREAD;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline struct scst_cmd *scst_get_cmd(struct scsi_cmnd *scsi_cmd,
	struct scsi_request **req)
	struct scst_cmd *cmd = NULL;

	if (scsi_cmd && (*req = scsi_cmd->sc_request))
		cmd = (struct scst_cmd *)(*req)->upper_private_data;

		PRINT_ERROR("%s", "Request with NULL cmd");
		scsi_release_request(*req);

static void scst_cmd_done(struct scsi_cmnd *scsi_cmd)
	struct scsi_request *req = NULL;
	struct scst_cmd *cmd;

	cmd = scst_get_cmd(scsi_cmd, &req);

	scst_do_cmd_done(cmd, req->sr_result, req->sr_sense_buffer,
		sizeof(req->sr_sense_buffer), scsi_cmd->resid);

	/* Clear out the request structure */
	req->sr_sglist_len = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_underflow = 0;
	req->sr_request->rq_disk = NULL; /* disown request blk */

	scst_release_request(cmd);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_get_context()), 0);

#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
static void scst_cmd_done(void *data, char *sense, int result, int resid)
	struct scst_cmd *cmd;

	cmd = (struct scst_cmd *)data;

	scst_do_cmd_done(cmd, result, sense, SCST_SENSE_BUFFERSIZE, resid);

	cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;

	scst_proccess_redirect_cmd(cmd,
		scst_optimize_post_exec_context(cmd, scst_get_context()), 0);

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state)
#ifdef MEASURE_LATENCY
	getnstimeofday(&ts);
	cmd->post_exec_start = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
	TRACE_DBG("cmd %p (sess %p): post_exec_start %Ld (tv_sec %ld, "
		"tv_nsec %ld)", cmd, cmd->sess, cmd->post_exec_start, ts.tv_sec,
		ts.tv_nsec);

	if (next_state == SCST_CMD_STATE_DEFAULT)
		next_state = SCST_CMD_STATE_PRE_DEV_DONE;

	if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
		struct scatterlist *sg = cmd->sg;
		TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
			"%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
		for (i = 0; i < cmd->sg_cnt; ++i) {
			TRACE_BUFF_FLAG(TRACE_RCV_TOP,
				"Exec'd sg", sg_virt(&sg[i]),
				sg[i].length);

	if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
	    (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
	    (next_state != SCST_CMD_STATE_FINISHED))
		PRINT_ERROR("scst_cmd_done_local() received invalid cmd "
			"state %d (opcode %d)", next_state, cmd->cdb[0]);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		next_state = SCST_CMD_STATE_PRE_DEV_DONE;

	cmd->state = next_state;

	context = scst_optimize_post_exec_context(cmd, scst_get_context());
	if (cmd->context_processable)
		context |= SCST_CONTEXT_PROCESSABLE;
	scst_proccess_redirect_cmd(cmd, context, 0);
static int scst_report_luns_local(struct scst_cmd *cmd)
	struct scst_tgt_dev *tgt_dev = NULL;
	int offs, overflow = 0;

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))

	cmd->msg_status = 0;
	cmd->host_status = DID_OK;
	cmd->driver_status = 0;

	if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
		PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
			"LUNS command", cmd->cdb[2]);

	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size <= 0))

	if (buffer_size < 16)

	memset(buffer, 0, buffer_size);

	/* sess->sess_tgt_dev_list_hash is protected by suspended activity */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (offs >= buffer_size) {
				scst_put_buf(cmd, buffer);
				buffer_size = scst_get_buf_next(cmd, &buffer);
				if (buffer_size > 0) {
					memset(buffer, 0, buffer_size);

			if ((buffer_size - offs) < 8) {
				PRINT_ERROR("Buffer allocated for REPORT "
					"LUNS command doesn't allow fitting an "
					"8 byte entry (buffer_size=%d)",
					buffer_size);
				goto out_put_hw_err;
			buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
			buffer[offs+1] = tgt_dev->lun & 0xff;

	scst_put_buf(cmd, buffer);

	/* Set the response header */
	buffer_size = scst_get_buf_first(cmd, &buffer);
	if (unlikely(buffer_size <= 0))

	buffer[0] = (dev_cnt >> 24) & 0xff;
	buffer[1] = (dev_cnt >> 16) & 0xff;
	buffer[2] = (dev_cnt >> 8) & 0xff;
	buffer[3] = dev_cnt & 0xff;
	scst_put_buf(cmd, buffer);

	if (dev_cnt < cmd->resp_data_len)
		scst_set_resp_data_len(cmd, dev_cnt);

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);

	return SCST_EXEC_COMPLETED;

	scst_put_buf(cmd, buffer);
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	scst_put_buf(cmd, buffer);
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
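
/*
 * For reference, the REPORT LUNS response built above follows the standard
 * SPC layout: a 4-byte big-endian LUN LIST LENGTH, 4 reserved bytes, then
 * one 8-byte LUN entry per device, of which only the first two bytes are
 * filled here (flat addressing). A hedged decoding sketch of that header:
 */
#if 0	/* illustration only, not compiled */
uint32_t lun_list_len = (buffer[0] << 24) | (buffer[1] << 16) |
			(buffer[2] << 8) | buffer[3];
int entries = lun_list_len / 8;	/* 8 bytes per reported LUN */
#endif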
static int scst_pre_select(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;

	if (cmd->local_exec_done)

	cmd->local_exec_done = 1;

	scst_block_dev_cmd(cmd, 1);

	/* The check for local events will be done when the cmd is executed */

	TRACE_EXIT_RES(res);
static int scst_reserve_local(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_device *dev;
	struct scst_tgt_dev *tgt_dev_tmp;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;

	if (cmd->local_exec_done)

	cmd->local_exec_done = 1;

	if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
		PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
			"(lun=%Ld)", (uint64_t)cmd->lun);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))

	spin_lock_bh(&dev->dev_lock);

	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		spin_unlock_bh(&dev->dev_lock);
		scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);

	list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (cmd->tgt_dev != tgt_dev_tmp)
			set_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
	}
	dev->dev_reserved = 1;

	spin_unlock_bh(&dev->dev_lock);

	TRACE_EXIT_RES(res);

	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
	res = SCST_EXEC_COMPLETED;
static int scst_release_local(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED, rc;
	struct scst_tgt_dev *tgt_dev_tmp;
	struct scst_device *dev;

	if (scst_cmd_atomic(cmd)) {
		res = SCST_EXEC_NEED_THREAD;

	if (cmd->local_exec_done)

	cmd->local_exec_done = 1;

	if (dev->tst == SCST_CONTR_MODE_ONE_TASK_SET)
		scst_block_dev_cmd(cmd, 1);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))

	spin_lock_bh(&dev->dev_lock);

	/*
	 * The device could be RELEASED behind us, if the RESERVING session
	 * is closed (see scst_free_tgt_dev()), but this actually doesn't
	 * matter, so take the lock and don't retest the DEV_RESERVED bits.
	 */
	if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
		res = SCST_EXEC_COMPLETED;
		cmd->msg_status = 0;
		cmd->host_status = DID_OK;
		cmd->driver_status = 0;

	list_for_each_entry(tgt_dev_tmp,
			&dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		clear_bit(SCST_TGT_DEV_RESERVED,
			&tgt_dev_tmp->tgt_dev_flags);
	}
	dev->dev_reserved = 0;

	spin_unlock_bh(&dev->dev_lock);

	if (res == SCST_EXEC_COMPLETED)

	TRACE_EXIT_RES(res);

	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
/* No locks, no IRQ or IRQ-safe context allowed */
int scst_check_local_events(struct scst_cmd *cmd)
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
		goto out_uncomplete;

	/* Reservation check before Unit Attention */
	if (unlikely(test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags))) {
		if ((cmd->cdb[0] != INQUIRY) && (cmd->cdb[0] != REPORT_LUNS) &&
		    (cmd->cdb[0] != RELEASE) && (cmd->cdb[0] != RELEASE_10) &&
		    (cmd->cdb[0] != REPORT_DEVICE_IDENTIFIER) &&
		    (cmd->cdb[0] != ALLOW_MEDIUM_REMOVAL || (cmd->cdb[4] & 3)) &&
		    (cmd->cdb[0] != LOG_SENSE) && (cmd->cdb[0] != REQUEST_SENSE)) {
			scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);

	/* If we had an internal bus reset, set the command error unit attention */
	if ((cmd->dev->scsi_dev != NULL) &&
	    unlikely(cmd->dev->scsi_dev->was_reset)) {
		if (scst_is_ua_command(cmd)) {
			struct scst_device *dev = cmd->dev;

			/* Prevent more than one cmd from being triggered by was_reset */
			spin_lock_bh(&dev->dev_lock);
			barrier(); /* to reread was_reset */
			if (dev->scsi_dev->was_reset) {
				TRACE(TRACE_MGMT, "was_reset is %d", 1);
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_reset_UA));
				/* It looks like it is safe to clear was_reset here */
				dev->scsi_dev->was_reset = 0;

			spin_unlock_bh(&dev->dev_lock);

	if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
			&cmd->tgt_dev->tgt_dev_flags))) {
		if (scst_is_ua_command(cmd)) {
			rc = scst_set_pending_UA(cmd);

	TRACE_EXIT_RES(res);

	sBUG_ON(!cmd->completed);
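
/*
 * Example: scst_check_local_events() is also intended for dev handlers that
 * implement exec() themselves; they should call it before touching the
 * media so reservations and pending UAs are honored. A hedged sketch (the
 * handler name is hypothetical; scst_check_local_events(), scst_cmd_done
 * via cmd->scst_cmd_done and the SCST_EXEC_* codes are as used in this
 * file):
 */
#if 0	/* illustration only, not compiled */
static int myhandler_exec(struct scst_cmd *cmd)
{
	int rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0)) {
		/* Sense/status already set, just complete the command */
		cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
		return SCST_EXEC_COMPLETED;
	}

	/* ... actually execute the command ... */
	return SCST_EXEC_COMPLETED;
}
#endif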
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
static int scst_pre_exec(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	/*
	 * This function can be called several times for the same cmd, so it
	 * can't change any state in a non-reentrant way or use something
	 * like local_exec_done!!
	 */

	/* Check READ_ONLY device status */
	if (((tgt_dev->acg_dev->rd_only_flag) || cmd->dev->swp) &&
	    (cmd->cdb[0] == WRITE_6 ||	/* ToDo: full list of the modify cmds */
	     cmd->cdb[0] == WRITE_10 ||
	     cmd->cdb[0] == WRITE_12 ||
	     cmd->cdb[0] == WRITE_16 ||
	     cmd->cdb[0] == WRITE_VERIFY ||
	     cmd->cdb[0] == WRITE_VERIFY_12 ||
	     cmd->cdb[0] == WRITE_VERIFY_16 ||
	     (cmd->dev->handler->type == TYPE_TAPE &&
	      (cmd->cdb[0] == ERASE || cmd->cdb[0] == WRITE_FILEMARKS)))) {
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_data_protect));

	TRACE_EXIT_RES(res);

	res = SCST_EXEC_COMPLETED;
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
 * The result of cmd execution, if any, should be reported
 * via scst_cmd_done_local()
static inline int scst_local_exec(struct scst_cmd *cmd)
	int res = SCST_EXEC_NOT_COMPLETED;

	/*
	 * When adding new commands here, don't forget to update
	 * scst_is_cmd_local() in scst.h, if necessary.
	 */

	switch (cmd->cdb[0]) {
	case MODE_SELECT_10:
		res = scst_pre_select(cmd);
		res = scst_reserve_local(cmd);
		res = scst_release_local(cmd);
		res = scst_report_luns_local(cmd);

	TRACE_EXIT_RES(res);
/* cmd must be additionally referenced to not die inside */
static int scst_do_send_to_midlev(struct scst_cmd *cmd)
	int rc = SCST_EXEC_NOT_COMPLETED;
	struct scst_device *dev = cmd->dev;
	struct scst_dev_type *handler = dev->handler;

	cmd->sent_to_midlev = 1;
	cmd->state = SCST_CMD_STATE_EXECUTING;
	cmd->scst_cmd_done = scst_cmd_done_local;

	rc = scst_pre_exec(cmd);
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
		else if (rc == SCST_EXEC_NEED_THREAD)

	rc = scst_local_exec(cmd);
	if (rc != SCST_EXEC_NOT_COMPLETED) {
		if (rc == SCST_EXEC_COMPLETED)
		else if (rc == SCST_EXEC_NEED_THREAD)

	if (!handler->exec_sync)
		cmd->context_processable = 0;

	if (handler->exec) {
		TRACE_DBG("Calling dev handler %s exec(%p)",
			handler->name, cmd);
		TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb, cmd->cdb_len);
		cmd->scst_cmd_done = scst_cmd_done_local;
		rc = handler->exec(cmd);
		TRACE_DBG("Dev handler %s exec() returned %d",
			handler->name, rc);
		if (rc == SCST_EXEC_COMPLETED)
		else if (rc == SCST_EXEC_NEED_THREAD)
		else if (rc != SCST_EXEC_NOT_COMPLETED)

	TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);

	if (unlikely(dev->scsi_dev == NULL)) {
		PRINT_ERROR("Command for virtual device must be "
			"processed by device handler (lun %Ld)!",
			(uint64_t)cmd->lun);

	rc = scst_check_local_events(cmd);
	if (unlikely(rc != 0))

#ifndef ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
	if (scst_cmd_atomic(cmd)) {
		TRACE_DBG("Pass-through exec() cannot be called in atomic "
			"context, rescheduling to the thread (handler %s)",
			handler->name);
		rc = SCST_EXEC_NEED_THREAD;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	if (unlikely(scst_alloc_request(cmd) != 0)) {
		if (scst_cmd_atomic(cmd)) {
			rc = SCST_EXEC_NEED_THREAD;
		PRINT_INFO("%s", "Unable to allocate request, "
			"sending BUSY status");

	scst_do_req(cmd->scsi_req, (void *)cmd->cdb,
		(void *)cmd->scsi_req->sr_buffer,
		cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
#else
	rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
		cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
		cmd->timeout, cmd->retries, cmd, scst_cmd_done,
		scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(rc != 0)) {
		if (scst_cmd_atomic(cmd)) {
			rc = SCST_EXEC_NEED_THREAD;
		PRINT_INFO("scst_exec_req() failed: %d", rc);

	rc = SCST_EXEC_COMPLETED;

	/* Restore the state */
	cmd->sent_to_midlev = 0;
	cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;

	PRINT_ERROR("Dev handler %s exec() or scst_local_exec() returned "
		"invalid code %d", handler->name, rc);

	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)

	rc = SCST_EXEC_COMPLETED;
	/* Report the result */
	scst_cmd_done_local(cmd, SCST_CMD_STATE_DEFAULT);
void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
	/* Optimized for lockless fast path */

	TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
		atomic_read(slot));

	if (!atomic_dec_and_test(slot))

	TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
		tgt_dev->num_free_sn_slots);
	if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
		spin_lock_irq(&tgt_dev->sn_lock);
		if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
			if (tgt_dev->num_free_sn_slots < 0)
				tgt_dev->cur_sn_slot = slot;
			/* To be in-sync with the SIMPLE case in scst_cmd_set_sn() */
			smp_mb();
			tgt_dev->num_free_sn_slots++;
			TRACE_SN("Incremented num_free_sn_slots (%d)",
				tgt_dev->num_free_sn_slots);

		spin_unlock_irq(&tgt_dev->sn_lock);
	/*
	 * No locks are needed, because only one thread at a time can be here
	 * (serialized by sn). Also it is supposed that there could not be
	 * half-incremented halves.
	 */
	tgt_dev->expected_sn++;
	smp_mb(); /* write must be before def_cmd_count read */
	TRACE_SN("Next expected_sn: %ld", tgt_dev->expected_sn);
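
/*
 * The slot scheme above in a nutshell: commands in SIMPLE queues share a
 * small array of atomic counters (sn_slots); a whole slot maps to one SN,
 * so expected_sn only advances when the last command of a slot drops its
 * reference. A minimal standalone model of that invariant (illustration
 * only, plain atomics, no SCST types):
 */
#if 0	/* illustration only, not compiled */
static atomic_t slot = ATOMIC_INIT(3);	/* 3 commands share one SN */
static unsigned long expected_sn;

static void cmd_finished(void)
{
	if (atomic_dec_and_test(&slot))	/* last one out advances the SN */
		expected_sn++;
}
#endif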
static int scst_process_internal_cmd(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_NEXT, rc;

	__scst_cmd_get(cmd);

	rc = scst_do_send_to_midlev(cmd);
	if (rc == SCST_EXEC_NEED_THREAD) {
		TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
			"thread context, rescheduling");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
	} else {
		struct scst_device *dev = cmd->dev;
		sBUG_ON(rc != SCST_EXEC_COMPLETED);
		if (dev->scsi_dev != NULL)
			generic_unplug_device(dev->scsi_dev->request_queue);
	}

	__scst_cmd_put(cmd);

	TRACE_EXIT_RES(res);
static int scst_send_to_midlev(struct scst_cmd **active_cmd)
	struct scst_cmd *cmd = *active_cmd;
	struct scst_cmd *ref_cmd;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	struct scst_device *dev = cmd->dev;
	typeof(tgt_dev->expected_sn) expected_sn;

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	if (unlikely(cmd->internal || cmd->retry)) {
		res = scst_process_internal_cmd(cmd);

#ifdef MEASURE_LATENCY
	if (cmd->pre_exec_finish == 0) {
		getnstimeofday(&ts);
		cmd->pre_exec_finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;
		TRACE_DBG("cmd %p (sess %p): pre_exec_finish %Ld (tv_sec %ld, "
			"tv_nsec %ld)", cmd, sess, cmd->pre_exec_finish, ts.tv_sec,
			ts.tv_nsec);

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))

	__scst_cmd_get(ref_cmd);

	if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))

	sBUG_ON(!cmd->sn_set);

	expected_sn = tgt_dev->expected_sn;
	/* Optimized for lockless fast path */
	if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
		spin_lock_irq(&tgt_dev->sn_lock);

		tgt_dev->def_cmd_count++;

		expected_sn = tgt_dev->expected_sn;
		if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
			/* We are under an IRQ lock, but dev->dev_lock is a BH one */
			int cmd_blocking = scst_pre_dec_on_dev_cmd(cmd);
			if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
				/* Necessary to allow aborting out of sn cmds */
				TRACE_MGMT_DBG("Aborting out of sn cmd %p (tag %llu)",
					cmd, cmd->tag);
				tgt_dev->def_cmd_count--;
				cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
				res = SCST_CMD_STATE_RES_CONT_SAME;
			} else {
				TRACE_SN("Deferring cmd %p (sn=%ld, set %d, "
					"expected_sn=%ld)", cmd, cmd->sn,
					cmd->sn_set, expected_sn);
				list_add_tail(&cmd->sn_cmd_list_entry,
					&tgt_dev->deferred_cmd_list);
			}
			spin_unlock_irq(&tgt_dev->sn_lock);

			__scst_dec_on_dev_cmd(dev, cmd_blocking);
		} else {
			TRACE_SN("Somebody incremented expected_sn %ld, "
				"continuing", expected_sn);
			tgt_dev->def_cmd_count--;
			spin_unlock_irq(&tgt_dev->sn_lock);
		}

	atomic_t *slot = cmd->sn_slot;
	/* For HQ commands SN is not set */
	int inc_expected_sn = !cmd->inc_expected_sn_on_done &&

	rc = scst_do_send_to_midlev(cmd);
	if (rc == SCST_EXEC_NEED_THREAD) {
		TRACE_DBG("%s", "scst_do_send_to_midlev() requested "
			"thread context, rescheduling");
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		scst_dec_on_dev_cmd(cmd);

	sBUG_ON(rc != SCST_EXEC_COMPLETED);

	if (inc_expected_sn)
		scst_inc_expected_sn(tgt_dev, slot);

	cmd = scst_check_deferred_commands(tgt_dev);

	if (unlikely(scst_inc_on_dev_cmd(cmd) != 0))

	__scst_cmd_put(ref_cmd);
	__scst_cmd_get(ref_cmd);

	if (dev->scsi_dev != NULL)
		generic_unplug_device(dev->scsi_dev->request_queue);

	__scst_cmd_put(ref_cmd);
	/* !! At this point sess, dev and tgt_dev can be already freed !! */

	TRACE_EXIT_HRES(res);
/* No locks supposed to be held */
static int scst_check_sense(struct scst_cmd *cmd)
	struct scst_device *dev = cmd->dev;
	int dbl_ua_possible, ua_sent = 0;

	/* If we had an internal bus reset behind us, set the command error UA */
	if ((dev->scsi_dev != NULL) &&
	    unlikely(cmd->host_status == DID_RESET) &&
	    scst_is_ua_command(cmd)) {
		TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
			dev->scsi_dev->was_reset, cmd->host_status);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_reset_UA));

		/* It looks like it is safe to clear was_reset here */
		dev->scsi_dev->was_reset = 0;

	dbl_ua_possible = dev->dev_double_ua_possible;
	TRACE_DBG("cmd %p dbl_ua_possible %d", cmd, dbl_ua_possible);
	if (unlikely(dbl_ua_possible)) {
		spin_lock_bh(&dev->dev_lock);
		barrier(); /* to reread dev_double_ua_possible */
		dbl_ua_possible = dev->dev_double_ua_possible;
		if (dbl_ua_possible)
			ua_sent = dev->dev_reset_ua_sent;
		else
			spin_unlock_bh(&dev->dev_lock);

	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    SCST_SENSE_VALID(cmd->sense)) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
			SCST_SENSE_BUFFERSIZE);
		/* Check the Unit Attention Sense Key */
		if (scst_is_ua_sense(cmd->sense)) {
			if (cmd->sense[12] == SCST_SENSE_ASC_UA_RESET) {
				if (dbl_ua_possible) {
					TRACE(TRACE_MGMT_MINOR, "%s",
						"Double UA detected");
					TRACE(TRACE_MGMT_MINOR, "Retrying cmd %p "
						"(tag %llu)", cmd, cmd->tag);

					cmd->msg_status = 0;
					cmd->host_status = DID_OK;
					cmd->driver_status = 0;

					mempool_free(cmd->sense, scst_sense_mempool);

					cmd->state = SCST_CMD_STATE_SEND_TO_MIDLEV;
					/*
					 * Dev is still blocked by this cmd, so
					 * it's OK to clear SCST_DEV_SERIALIZED
					 */
					dev->dev_double_ua_possible = 0;
					dev->dev_serialized = 0;
					dev->dev_reset_ua_sent = 0;
				} else
					dev->dev_reset_ua_sent = 1;

			if (cmd->ua_ignore == 0) {
				if (unlikely(dbl_ua_possible)) {
					__scst_dev_check_set_UA(dev, cmd,
						cmd->sense,
						SCST_SENSE_BUFFERSIZE);
				} else
					scst_dev_check_set_UA(dev, cmd,
						cmd->sense,
						SCST_SENSE_BUFFERSIZE);

	if (unlikely(dbl_ua_possible)) {
		if (ua_sent && scst_is_ua_command(cmd)) {
			TRACE_MGMT_DBG("%s", "Clearing dbl_ua_possible flag");
			dev->dev_double_ua_possible = 0;
			dev->dev_serialized = 0;
			dev->dev_reset_ua_sent = 0;

		spin_unlock_bh(&dev->dev_lock);

	TRACE_EXIT_RES(res);

	spin_unlock_bh(&dev->dev_lock);
static int scst_check_auto_sense(struct scst_cmd *cmd)
	if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    (!SCST_SENSE_VALID(cmd->sense) ||
	     SCST_NO_SENSE(cmd->sense))) {
		TRACE(TRACE_SCSI|TRACE_MINOR, "CHECK_CONDITION, but no sense: "
			"cmd->status=%x, cmd->msg_status=%x, "
			"cmd->host_status=%x, cmd->driver_status=%x", cmd->status,
			cmd->msg_status, cmd->host_status, cmd->driver_status);
	} else if (unlikely(cmd->host_status)) {
		if ((cmd->host_status == DID_REQUEUE) ||
		    (cmd->host_status == DID_IMM_RETRY) ||
		    (cmd->host_status == DID_SOFT_ERROR) ||
		    (cmd->host_status == DID_ABORT)) {
		} else {
			TRACE(TRACE_SCSI|TRACE_MINOR, "Host status %x "
				"received, returning HARDWARE ERROR instead",
				cmd->host_status);
			scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
		}

	TRACE_EXIT_RES(res);
static int scst_done_cmd_check(struct scst_cmd *cmd, int *pres)
	if (unlikely(cmd->cdb[0] == REQUEST_SENSE)) {
		cmd = scst_complete_request_sense(cmd);
	} else if (unlikely(scst_check_auto_sense(cmd))) {
		PRINT_INFO("Command finished with CHECK CONDITION, but "
			"without sense data (opcode 0x%x), issuing "
			"REQUEST SENSE", cmd->cdb[0]);
		rc = scst_prepare_request_sense(cmd);

		PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
			"returning HARDWARE ERROR");
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
	} else if (unlikely(scst_check_sense(cmd))) {
		*pres = SCST_CMD_STATE_RES_CONT_SAME;
	if (likely(scsi_status_is_good(cmd->status))) {
		unsigned char type = cmd->dev->handler->type;
		if (unlikely((cmd->cdb[0] == MODE_SENSE ||
			      cmd->cdb[0] == MODE_SENSE_10)) &&
		    cmd->tgt_dev->acg_dev->rd_only_flag &&
		    (type == TYPE_DISK || type == TYPE_WORM || type == TYPE_MOD ||
		     type == TYPE_TAPE)) {

			length = scst_get_buf_first(cmd, &address);
			if (length > 2 && cmd->cdb[0] == MODE_SENSE)
				address[2] |= 0x80;	/* Write Protect */
			else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
				address[3] |= 0x80;	/* Write Protect */
			scst_put_buf(cmd, address);

		/*
		 * Check and clear the NormACA option for the device, if
		 * necessary, since we don't support ACA
		 */
		if (unlikely((cmd->cdb[0] == INQUIRY)) &&
		    !(cmd->cdb[1] & SCST_INQ_EVPD/* Std INQUIRY data (no EVPD) */) &&
		    (cmd->resp_data_len > SCST_INQ_BYTE3)) {

			/* ToDo: all pages ?? */
			buflen = scst_get_buf_first(cmd, &buffer);
			if (buflen > SCST_INQ_BYTE3) {
				if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
					PRINT_INFO("NormACA set for device: "
						"lun=%Ld, type 0x%02x",
						(uint64_t)cmd->lun, buffer[0]);
				}
				buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
			} else {
				PRINT_ERROR("%s", "Unable to get INQUIRY "
					"buffer");
				scst_set_cmd_error(cmd,
					SCST_LOAD_SENSE(scst_sense_hardw_error));
			}
			scst_put_buf(cmd, buffer);

		if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
			     (cmd->cdb[0] == MODE_SELECT_10) ||
			     (cmd->cdb[0] == LOG_SELECT))) {
			TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded (LUN %Ld)",
				(uint64_t)cmd->lun);
			cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
			*pres = SCST_CMD_STATE_RES_CONT_SAME;

	if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
		if (!test_bit(SCST_TGT_DEV_RESERVED,
				&cmd->tgt_dev->tgt_dev_flags)) {
			struct scst_tgt_dev *tgt_dev_tmp;
			struct scst_device *dev = cmd->dev;

			TRACE(TRACE_SCSI, "Real RESERVE failed lun=%Ld, "
				"status=%x", (uint64_t)cmd->lun, cmd->status);
			PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
				SCST_SENSE_BUFFERSIZE);

			/* Clearing the reservation */
			spin_lock_bh(&dev->dev_lock);
			list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
					dev_tgt_dev_list_entry) {
				clear_bit(SCST_TGT_DEV_RESERVED,
					&tgt_dev_tmp->tgt_dev_flags);
			}
			dev->dev_reserved = 0;
			spin_unlock_bh(&dev->dev_lock);

	/* Check for MODE PARAMETERS CHANGED UA */
	if ((cmd->dev->scsi_dev != NULL) &&
	    (cmd->status == SAM_STAT_CHECK_CONDITION) &&
	    SCST_SENSE_VALID(cmd->sense) &&
	    scst_is_ua_sense(cmd->sense) &&
	    (cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) {
		TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun %Ld)",
			(uint64_t)cmd->lun);
		cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
		*pres = SCST_CMD_STATE_RES_CONT_SAME;

	TRACE_EXIT_RES(res);
static int scst_pre_dev_done(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_SAME, rc;

	rc = scst_done_cmd_check(cmd, &res);

	cmd->state = SCST_CMD_STATE_DEV_DONE;

	TRACE_EXIT_HRES(res);
static int scst_mode_select_checks(struct scst_cmd *cmd)
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int atomic = scst_cmd_atomic(cmd);

	if (likely(scsi_status_is_good(cmd->status))) {
		if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
			     (cmd->cdb[0] == MODE_SELECT_10) ||
			     (cmd->cdb[0] == LOG_SELECT))) {
			struct scst_device *dev = cmd->dev;
			if (atomic && (dev->scsi_dev != NULL)) {
				TRACE_DBG("%s", "MODE/LOG SELECT: thread "
					"context required");
				res = SCST_CMD_STATE_RES_NEED_THREAD;

			TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
				"setting the SELECT UA (lun=%Ld)",
				(uint64_t)cmd->lun);

			spin_lock_bh(&dev->dev_lock);
			spin_lock(&scst_temp_UA_lock);
			if (cmd->cdb[0] == LOG_SELECT) {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x02);
			} else {
				scst_set_sense(scst_temp_UA,
					sizeof(scst_temp_UA),
					UNIT_ATTENTION, 0x2a, 0x01);
			}
			scst_dev_check_set_local_UA(dev, cmd, scst_temp_UA,
				sizeof(scst_temp_UA));
			spin_unlock(&scst_temp_UA_lock);
			spin_unlock_bh(&dev->dev_lock);

			if (dev->scsi_dev != NULL)
				scst_obtain_device_parameters(dev);
	} else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
		   SCST_SENSE_VALID(cmd->sense) &&
		   scst_is_ua_sense(cmd->sense) &&
		   (((cmd->sense[12] == 0x2a) && (cmd->sense[13] == 0x01)) ||
		    (cmd->sense[12] == 0x29) /* reset */ ||
		    (cmd->sense[12] == 0x28) /* medium changed */ ||
		    (cmd->sense[12] == 0x2F) /* cleared by another initiator (just in case) */)) {
		if (atomic) {
			TRACE_DBG("Possible parameters changed UA %x: "
				"thread context required", cmd->sense[12]);
			res = SCST_CMD_STATE_RES_NEED_THREAD;

		TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
			"(lun %Ld): getting new parameters", cmd->sense[12],
			(uint64_t)cmd->lun);

		scst_obtain_device_parameters(cmd->dev);

	cmd->state = SCST_CMD_STATE_DEV_DONE;

	TRACE_EXIT_HRES(res);
static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
	if (likely(cmd->sn_set))
		scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);

	scst_make_deferred_commands_active(cmd->tgt_dev, cmd);
static int scst_dev_done(struct scst_cmd *cmd)
{
	int res = SCST_CMD_STATE_RES_CONT_SAME;
	int state;

	TRACE_ENTRY();

	state = SCST_CMD_STATE_PRE_XMIT_RESP;
	if (likely(!scst_is_cmd_local(cmd)) &&
	    likely(cmd->dev->handler->dev_done != NULL)) {
		int rc;
		TRACE_DBG("Calling dev handler %s dev_done(%p)",
			cmd->dev->handler->name, cmd);
		rc = cmd->dev->handler->dev_done(cmd);
		TRACE_DBG("Dev handler %s dev_done() returned %d",
			cmd->dev->handler->name, rc);
		if (rc != SCST_CMD_STATE_DEFAULT)
			state = rc;
	}

	switch (state) {
	case SCST_CMD_STATE_PRE_XMIT_RESP:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_TO_MIDLEV:
	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_MODE_SELECT_CHECKS:
	case SCST_CMD_STATE_DEV_DONE:
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
		cmd->state = state;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;

	case SCST_CMD_STATE_NEED_THREAD_CTX:
		TRACE_DBG("Dev handler %s dev_done() requested "
			"thread context, rescheduling",
			cmd->dev->handler->name);
		res = SCST_CMD_STATE_RES_NEED_THREAD;
		break;

	default:
		if (state >= 0) {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"invalid cmd state %d",
				cmd->dev->handler->name, state);
		} else {
			PRINT_ERROR("Dev handler %s dev_done() returned "
				"error %d", cmd->dev->handler->name,
				state);
		}
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		break;
	}

	if (cmd->needs_unblocking)
		scst_unblock_dev_cmd(cmd);

	if (likely(cmd->dec_on_dev_needed))
		scst_dec_on_dev_cmd(cmd);

	if (cmd->inc_expected_sn_on_done && cmd->sent_to_midlev)
		scst_inc_check_expected_sn(cmd);

	TRACE_EXIT_HRES(res);
	return res;
}
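/*
 * Illustrative sketch (not part of this file): the minimal shape of a
 * device handler's dev_done() callback as consumed by scst_dev_done()
 * above. The handler may inspect the result and either return
 * SCST_CMD_STATE_DEFAULT to keep the default flow or a specific
 * SCST_CMD_STATE_* value to redirect it. The name example_dev_done is
 * hypothetical.
 */
#if 0
static int example_dev_done(struct scst_cmd *cmd)
{
	if (unlikely(cmd->status != SAM_STAT_GOOD)) {
		/* e.g. translate or log handler-specific errors here */
	}
	return SCST_CMD_STATE_DEFAULT; /* let SCST pick the next state */
}
#endif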
static int scst_pre_xmit_response(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

#ifdef DEBUG_TM
	if (cmd->tm_dbg_delayed && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
		if (scst_cmd_atomic(cmd)) {
			TRACE_MGMT_DBG("%s", "DEBUG_TM delayed cmd needs a thread");
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			return res;
		}
		TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
			cmd, cmd->tag);
		schedule_timeout_uninterruptible(HZ);
	}
#endif

	if (likely(cmd->tgt_dev != NULL)) {
		atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
		atomic_dec(&cmd->dev->dev_cmd_count);
		/* If expected values are not set, the expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_dec(&cmd->dev->write_cmd_count);

		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			scst_on_hq_cmd_response(cmd);

		if (unlikely(!cmd->sent_to_midlev)) {
			TRACE_SN("cmd %p was not sent to mid-lev (sn %ld, set %d)",
				cmd, cmd->sn, cmd->sn_set);
			scst_unblock_deferred(cmd->tgt_dev, cmd);
			cmd->sent_to_midlev = 1;
		}
	}

	/*
	 * If we don't remove cmd from the search list here, before
	 * submitting it for transmission, we will have a race, when for
	 * some reason cmd's release is delayed after transmission and
	 * the initiator sends a cmd with the same tag => it is possible
	 * that a wrong cmd will be found by the find() functions.
	 */
	spin_lock_irq(&cmd->sess->sess_list_lock);
	list_del(&cmd->search_cmd_list_entry);
	spin_unlock_irq(&cmd->sess->sess_list_lock);

	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
		scst_xmit_process_aborted_cmd(cmd);

	if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), skipping",
			cmd, cmd->tag);
		cmd->state = SCST_CMD_STATE_FINISHED;
		res = SCST_CMD_STATE_RES_CONT_SAME;
		goto out;
	}

	cmd->state = SCST_CMD_STATE_XMIT_RESP;
	res = SCST_CMD_STATE_RES_CONT_SAME;

out:
#ifdef MEASURE_LATENCY
	{
		struct timespec ts;
		uint64_t finish, scst_time, proc_time;
		struct scst_session *sess = cmd->sess;

		getnstimeofday(&ts);
		finish = scst_sec_to_nsec(ts.tv_sec) + ts.tv_nsec;

		spin_lock_bh(&sess->meas_lock);

		scst_time = cmd->pre_exec_finish - cmd->start;
		scst_time += finish - cmd->post_exec_start;
		proc_time = finish - cmd->start;

		sess->scst_time += scst_time;
		sess->processing_time += proc_time;
		sess->processed_cmds++;

		spin_unlock_bh(&sess->meas_lock);

		TRACE_DBG("cmd %p (sess %p): finish %Ld (tv_sec %ld, "
			"tv_nsec %ld), scst_time %Ld, proc_time %Ld", cmd, sess,
			finish, ts.tv_sec, ts.tv_nsec, scst_time, proc_time);
	}
#endif
	TRACE_EXIT_HRES(res);
	return res;
}
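/*
 * Timeline behind the MEASURE_LATENCY arithmetic above:
 *
 *   start        pre_exec_finish      post_exec_start       finish
 *     |----- SCST -----|---- mid-level / device ----|-- SCST --|
 *
 * scst_time counts only the two SCST-owned segments, i.e. it excludes
 * the time the command spent executing below SCST, while proc_time is
 * the whole start-to-finish span.
 */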
static int scst_xmit_response(struct scst_cmd *cmd)
{
	int res, rc;

	TRACE_ENTRY();

	while (1) {
		int finished_cmds = atomic_read(&cmd->sess->tgt->finished_cmds);

		res = SCST_CMD_STATE_RES_CONT_NEXT;
		cmd->state = SCST_CMD_STATE_XMIT_WAIT;

		TRACE_DBG("Calling xmit_response(%p)", cmd);

		if (cmd->sg != NULL) {
			int i;
			struct scatterlist *sg = cmd->sg;
			TRACE_SEND_BOT("Xmitting %d S/G(s) at %p sg[0].page at %p",
				cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
			for (i = 0; i < cmd->sg_cnt; ++i) {
				TRACE_BUFF_FLAG(TRACE_SND_BOT,
					"Xmitting sg", sg_virt(&sg[i]),
					sg[i].length);
			}
		}

#ifdef DEBUG_RETRY
		if (((scst_random() % 100) == 77))
			rc = SCST_TGT_RES_QUEUE_FULL;
		else
#endif
			rc = cmd->tgtt->xmit_response(cmd);
		TRACE_DBG("xmit_response() returned %d", rc);

		if (likely(rc == SCST_TGT_RES_SUCCESS))
			goto out;

		/* Restore the previous state */
		cmd->state = SCST_CMD_STATE_XMIT_RESP;

		switch (rc) {
		case SCST_TGT_RES_QUEUE_FULL:
			if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
				break;
			else
				continue;

		case SCST_TGT_RES_NEED_THREAD_CTX:
			TRACE_DBG("Target driver %s xmit_response() "
				"requested thread context, rescheduling",
				cmd->tgtt->name);
			res = SCST_CMD_STATE_RES_NEED_THREAD;
			break;

		default:
			goto out_error;
		}
		break;
	}

out:
	/* Caution: cmd can be already dead here */
	TRACE_EXIT_HRES(res);
	return res;

out_error:
	if (rc == SCST_TGT_RES_FATAL_ERROR) {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"fatal error", cmd->tgtt->name);
	} else {
		PRINT_ERROR("Target driver %s xmit_response() returned "
			"invalid value %d", cmd->tgtt->name, rc);
	}
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
	cmd->state = SCST_CMD_STATE_FINISHED;
	res = SCST_CMD_STATE_RES_CONT_SAME;
	goto out;
}
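/*
 * Illustrative sketch (not part of this file): the contract
 * scst_xmit_response() above expects from a target driver's
 * xmit_response() hook. Names prefixed with example_ are hypothetical.
 */
#if 0
static int example_xmit_response(struct scst_cmd *cmd)
{
	if (example_hw_queue_full())
		return SCST_TGT_RES_QUEUE_FULL; /* SCST will retry later */

	example_start_sending_response(cmd);
	/*
	 * Transmission is asynchronous: the driver must call
	 * scst_tgt_cmd_done(cmd) once the response has actually been
	 * sent (see below).
	 */
	return SCST_TGT_RES_SUCCESS;
}
#endif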
void scst_tgt_cmd_done(struct scst_cmd *cmd)
{
	TRACE_ENTRY();

	sBUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);

	cmd->state = SCST_CMD_STATE_FINISHED;
	scst_proccess_redirect_cmd(cmd, scst_get_context(), 1);

	TRACE_EXIT();
	return;
}
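/*
 * Note: as the sBUG_ON() above enforces, scst_tgt_cmd_done() may be
 * called only for a command whose xmit_response() has returned
 * SCST_TGT_RES_SUCCESS and which is therefore in XMIT_WAIT state, and
 * only once per command.
 */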
static int scst_finish_cmd(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	atomic_dec(&cmd->sess->sess_cmd_count);

	smp_mb(); /* to sync with scst_abort_cmd() */

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
			"scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
			atomic_read(&scst_cmd_count));

		scst_finish_cmd_mgmt(cmd);
	}

	if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
		if ((cmd->tgt_dev != NULL) &&
		    scst_is_ua_sense(cmd->sense)) {
			/* This UA delivery failed, so requeue it */
			TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd "
				"%p", cmd);
			scst_check_set_UA(cmd->tgt_dev, cmd->sense,
				SCST_SENSE_BUFFERSIZE, 1);
		}
	}

	__scst_cmd_put(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
	return res;
}
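/*
 * The requeue above matters because unit attentions are stateful: if
 * the sense carrying a UA never reached the initiator, the UA condition
 * must be re-armed so that a later command from that nexus still
 * reports it.
 */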
/*
 * No locks, but it must be externally serialized (see comment for
 * scst_cmd_init_done() in scst.h)
 */
static void scst_cmd_set_sn(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
	unsigned long flags;

	if (scst_is_implicit_hq(cmd)) {
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "Implicit HQ cmd %p", cmd);
		cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	}

	EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);

	/* Optimized for lockless fast path */

	scst_check_debug_sn(cmd);

	if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
		/*
		 * Not the best way, but good enough until it becomes
		 * possible to specify the queue type when submitting
		 * pass-through commands.
		 */
		cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
	}

	switch (cmd->queue_type) {
	case SCST_CMD_QUEUE_SIMPLE:
	case SCST_CMD_QUEUE_UNTAGGED:
#if 1 /* temporary, ToDo */
		if (scst_cmd_is_expected_set(cmd)) {
			if ((cmd->expected_data_direction == SCST_DATA_READ) &&
			    (atomic_read(&cmd->dev->write_cmd_count) == 0))
				goto ordered;
		} else
			goto ordered;
#endif
		if (likely(tgt_dev->num_free_sn_slots >= 0)) {
			/*
			 * atomic_inc_return() implies memory barrier to sync
			 * with scst_inc_expected_sn()
			 */
			if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
				tgt_dev->curr_sn++;
				TRACE_SN("Incremented curr_sn %ld",
					tgt_dev->curr_sn);
			}
			cmd->sn_slot = tgt_dev->cur_sn_slot;
			cmd->sn = tgt_dev->curr_sn;

			tgt_dev->prev_cmd_ordered = 0;
		} else {
			TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
				"%zd", ARRAY_SIZE(tgt_dev->sn_slots));
			goto ordered;
		}
		break;

	case SCST_CMD_QUEUE_ORDERED:
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "ORDERED cmd %p "
			"(op %x)", cmd, cmd->cdb[0]);
ordered:
		if (!tgt_dev->prev_cmd_ordered) {
			spin_lock_irqsave(&tgt_dev->sn_lock, flags);
			if (tgt_dev->num_free_sn_slots >= 0) {
				tgt_dev->num_free_sn_slots--;
				if (tgt_dev->num_free_sn_slots >= 0) {
					int i = 0;
					/*
					 * Commands can finish in any order, so
					 * we don't know which slot is empty.
					 */
					while (1) {
						tgt_dev->cur_sn_slot++;
						if (tgt_dev->cur_sn_slot == tgt_dev->sn_slots +
						    ARRAY_SIZE(tgt_dev->sn_slots))
							tgt_dev->cur_sn_slot = tgt_dev->sn_slots;

						if (atomic_read(tgt_dev->cur_sn_slot) == 0)
							break;

						i++;
						sBUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
					}
					TRACE_SN("New cur SN slot %zd",
						tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
				}
			}
			spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		}
		tgt_dev->prev_cmd_ordered = 1;
		tgt_dev->curr_sn++;
		cmd->sn = tgt_dev->curr_sn;
		break;

	case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
		TRACE(TRACE_SCSI|TRACE_SCSI_SERIALIZING, "HQ cmd %p "
			"(op %x)", cmd, cmd->cdb[0]);
		spin_lock_irqsave(&tgt_dev->sn_lock, flags);
		tgt_dev->hq_cmd_count++;
		spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
		cmd->hq_cmd_inced = 1;
		goto out;

	default:
		sBUG();
	}

	TRACE_SN("cmd(%p)->sn: %ld (tgt_dev %p, *cur_sn_slot %d, "
		"num_free_sn_slots %d, prev_cmd_ordered %ld, "
		"cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
		atomic_read(tgt_dev->cur_sn_slot),
		tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
		tgt_dev->cur_sn_slot-tgt_dev->sn_slots);

	cmd->sn_set = 1;

out:
	TRACE_EXIT();
	return;
}
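/*
 * SN bookkeeping in a nutshell (derived from the code above):
 * tgt_dev->sn_slots[] is a small ring of atomic counters. Consecutive
 * SIMPLE/UNTAGGED commands share the current slot, and hence the same
 * SN, so they may run in parallel; an ORDERED command moves on to a
 * free slot (counter == 0) and bumps curr_sn, which is what
 * scst_inc_expected_sn() later uses to decide when the next SN may be
 * released. HQ commands bypass SN assignment entirely and are only
 * counted in hq_cmd_count.
 */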
/*
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 *
 * No locks, but might be on IRQ, protection is done by the
 * suspended activity.
 */
static int scst_translate_lun(struct scst_cmd *cmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	int res;

	TRACE_ENTRY();

	if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
		struct list_head *sess_tgt_dev_list_head =
			&cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
		TRACE_DBG("Finding tgt_dev for cmd %p (lun %Ld)", cmd,
			(uint64_t)cmd->lun);
		res = -1;
		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			if (tgt_dev->lun == cmd->lun) {
				TRACE_DBG("tgt_dev %p found", tgt_dev);

				if (unlikely(tgt_dev->dev->handler == &scst_null_devtype)) {
					PRINT_INFO("Dev handler for device "
						"%Ld is NULL, the device will not be "
						"visible remotely", (uint64_t)cmd->lun);
					break;
				}

				cmd->cmd_lists = tgt_dev->dev->p_cmd_lists;
				cmd->tgt_dev = tgt_dev;
				cmd->dev = tgt_dev->dev;

				res = 0;
				break;
			}
		}
		if (res != 0) {
			TRACE(TRACE_MINOR, "tgt_dev for lun %Ld not found, command to "
				"nonexistent LU?", (uint64_t)cmd->lun);
		}
	} else {
		TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
		res = 1;
	}

	TRACE_EXIT_RES(res);
	return res;
}
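/*
 * The LUN-to-tgt_dev translation above is a plain hash-bucket walk:
 * sess_tgt_dev_list_hash is indexed by HASH_VAL(lun) and each bucket
 * holds the session's tgt_devs whose LUNs hash there, so the lookup is
 * O(1) on average and needs no lock thanks to the suspended-activity
 * protection described in the function comment.
 */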
/*
 * No locks, but might be on IRQ.
 *
 * Returns 0 on success, > 0 when we need to wait for unblock,
 * < 0 if there is no device (lun) or device type handler.
 */
static int __scst_init_cmd(struct scst_cmd *cmd)
{
	int res = 0;

	TRACE_ENTRY();

	res = scst_translate_lun(cmd);
	if (likely(res == 0)) {
		int cnt;
		bool failure = false;

		cmd->state = SCST_CMD_STATE_PRE_PARSE;

		cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
			TRACE(TRACE_MGMT_MINOR, "Too many pending commands (%d) in "
				"session, returning BUSY to initiator \"%s\"",
				cnt, (cmd->sess->initiator_name[0] == '\0') ?
					"Anonymous" : cmd->sess->initiator_name);
			failure = true;
		}

		cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
		if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
			if (!failure) {
				TRACE(TRACE_MGMT_MINOR, "Too many pending device "
					"commands (%d), returning BUSY to "
					"initiator \"%s\"", cnt,
					(cmd->sess->initiator_name[0] == '\0') ?
						"Anonymous" : cmd->sess->initiator_name);
				failure = true;
			}
		}

		/* If expected values are not set, the expected direction is UNKNOWN */
		if (cmd->expected_data_direction == SCST_DATA_WRITE)
			atomic_inc(&cmd->dev->write_cmd_count);

		if (unlikely(failure))
			goto out_busy;

		if (!cmd->set_sn_on_restart_cmd)
			scst_cmd_set_sn(cmd);
	} else if (res < 0) {
		TRACE_DBG("Finishing cmd %p", cmd);
		scst_set_cmd_error(cmd,
			SCST_LOAD_SENSE(scst_sense_lun_not_supported));
		cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_busy:
	scst_set_busy(cmd);
	cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
	goto out;
}
/* Called under scst_init_lock and IRQs disabled */
static void scst_do_job_init(void)
{
	struct scst_cmd *cmd;
	int susp;

	TRACE_ENTRY();

restart:
	/*
	 * No read barrier is needed here, because we don't care where
	 * this check is done.
	 */
	susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	if (scst_init_poll_cnt > 0)
		scst_init_poll_cnt--;

	list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
		int rc;
		if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
			continue;
		if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			spin_unlock_irq(&scst_init_lock);
			rc = __scst_init_cmd(cmd);
			spin_lock_irq(&scst_init_lock);
			if (rc > 0) {
				TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, restarting");
				goto restart;
			}
		} else {
			TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
				cmd, cmd->tag);
			cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
		}

		/*
		 * Deleting cmd from the init cmd list after __scst_init_cmd()
		 * is necessary to keep the check in scst_init_cmd() correct
		 * and so preserve the order of the commands.
		 *
		 * We don't care about the race when the init cmd list is
		 * empty and one command has just seen it non-empty and is
		 * inserting itself there, while another command at the same
		 * time sees it empty and goes directly: this could affect
		 * only commands from the same initiator to the same tgt_dev,
		 * and init_cmd_done() doesn't guarantee the order of such
		 * simultaneous calls anyway.
		 */
		TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
		list_del(&cmd->cmd_list_entry);
		spin_unlock(&scst_init_lock);

		spin_lock(&cmd->cmd_lists->cmd_list_lock);
		TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
		if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		else
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock(&cmd->cmd_lists->cmd_list_lock);

		spin_lock(&scst_init_lock);
		goto restart;
	}

	/* It isn't really needed, but let's keep it */
	if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
		goto restart;

	TRACE_EXIT();
	return;
}
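/*
 * Note on the restart logic above: scst_init_lock is dropped both
 * around __scst_init_cmd() and while a command is moved to its active
 * list, so scst_init_cmd_list may change under us; restarting the list
 * walk from scratch after every re-acquisition is the simple, safe
 * answer to that.
 */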
static inline int test_init_cmd_list(void)
{
	int res = (!list_empty(&scst_init_cmd_list) &&
		   !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
		  unlikely(kthread_should_stop()) ||
		  (scst_init_poll_cnt > 0);
	return res;
}
int scst_init_cmd_thread(void *arg)
{
	TRACE_ENTRY();

	PRINT_INFO("Init thread started, PID %d", current->pid);

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -10);

	spin_lock_irq(&scst_init_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_init_cmd_list()) {
			add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
				&wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_init_cmd_list())
					break;
				spin_unlock_irq(&scst_init_lock);
				schedule();
				spin_lock_irq(&scst_init_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
		}

		scst_do_job_init();
	}
	spin_unlock_irq(&scst_init_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be in the
	 * middle of module unload, so scst_init_cmd_list must be empty.
	 */
	sBUG_ON(!list_empty(&scst_init_cmd_list));

	PRINT_INFO("Init thread PID %d finished", current->pid);

	TRACE_EXIT();
	return 0;
}
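/*
 * The wait loop above is the classic open-coded equivalent of
 * wait_event_interruptible(), spelled out by hand because the condition
 * must be sampled under scst_init_lock. A minimal sketch of the same
 * pattern, with placeholder names (my_lock, my_waitq, my_cond), for
 * reference:
 */
#if 0
spin_lock_irq(&my_lock);
while (!my_cond()) {
	wait_queue_t wait;
	init_waitqueue_entry(&wait, current);
	add_wait_queue_exclusive(&my_waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_cond())
			break;
		spin_unlock_irq(&my_lock); /* sleep without the lock */
		schedule();
		spin_lock_irq(&my_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_waitq, &wait);
}
spin_unlock_irq(&my_lock);
#endif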
/* Called with no locks held */
void scst_process_active_cmd(struct scst_cmd *cmd, int context)
{
	int res;

	TRACE_ENTRY();

	EXTRACHECKS_BUG_ON(in_irq());

	cmd->context_processable = context & SCST_CONTEXT_PROCESSABLE;
	context &= ~SCST_CONTEXT_PROCESSABLE;
	cmd->atomic = (context == SCST_CONTEXT_DIRECT_ATOMIC);

	TRACE_DBG("cmd %p, context_processable %d, atomic %d", cmd,
		cmd->context_processable, cmd->atomic);

	do {
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
			res = scst_pre_parse(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_DEV_PARSE:
			res = scst_parse_cmd(cmd);
			break;

		case SCST_CMD_STATE_PREPARE_SPACE:
			res = scst_prepare_space(cmd);
			break;

		case SCST_CMD_STATE_RDY_TO_XFER:
			res = scst_rdy_to_xfer(cmd);
			break;

		case SCST_CMD_STATE_TGT_PRE_EXEC:
			res = scst_tgt_pre_exec(cmd);
			break;

		case SCST_CMD_STATE_SEND_TO_MIDLEV:
			if (tm_dbg_check_cmd(cmd) != 0) {
				res = SCST_CMD_STATE_RES_CONT_NEXT;
				TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
					"because of TM DBG delay", cmd,
					cmd->tag);
				break;
			}
			res = scst_send_to_midlev(&cmd);
			/* !! At this point cmd, sess & tgt_dev can be already freed !! */
			break;

		case SCST_CMD_STATE_PRE_DEV_DONE:
			res = scst_pre_dev_done(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
			res = scst_mode_select_checks(cmd);
			break;

		case SCST_CMD_STATE_DEV_DONE:
			res = scst_dev_done(cmd);
			break;

		case SCST_CMD_STATE_PRE_XMIT_RESP:
			res = scst_pre_xmit_response(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		case SCST_CMD_STATE_XMIT_RESP:
			res = scst_xmit_response(cmd);
			break;

		case SCST_CMD_STATE_FINISHED:
			res = scst_finish_cmd(cmd);
			EXTRACHECKS_BUG_ON(res ==
				SCST_CMD_STATE_RES_NEED_THREAD);
			break;

		default:
			PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
				"be", cmd, cmd->state);
			sBUG();
			res = SCST_CMD_STATE_RES_CONT_NEXT;
			break;
		}
	} while (res == SCST_CMD_STATE_RES_CONT_SAME);

	if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
		/* None */
	} else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
		spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
		switch (cmd->state) {
		case SCST_CMD_STATE_PRE_PARSE:
		case SCST_CMD_STATE_DEV_PARSE:
		case SCST_CMD_STATE_PREPARE_SPACE:
		case SCST_CMD_STATE_RDY_TO_XFER:
		case SCST_CMD_STATE_TGT_PRE_EXEC:
		case SCST_CMD_STATE_SEND_TO_MIDLEV:
		case SCST_CMD_STATE_PRE_DEV_DONE:
		case SCST_CMD_STATE_MODE_SELECT_CHECKS:
		case SCST_CMD_STATE_DEV_DONE:
		case SCST_CMD_STATE_PRE_XMIT_RESP:
		case SCST_CMD_STATE_XMIT_RESP:
		case SCST_CMD_STATE_FINISHED:
			TRACE_DBG("Adding cmd %p to head of active cmd list", cmd);
			list_add(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
			break;

		/* not very valid commands */
		case SCST_CMD_STATE_DEFAULT:
		case SCST_CMD_STATE_NEED_THREAD_CTX:
			PRINT_CRIT_ERROR("cmd %p is in state %d, not putting on "
				"useful list (left on scst cmd list)", cmd,
				cmd->state);
			spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
			sBUG();
			spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
			break;

		default:
			sBUG();
			break;
		}
		wake_up(&cmd->cmd_lists->cmd_list_waitQ);
		spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
	} else
		sBUG();

	TRACE_EXIT();
	return;
}
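/*
 * Summary of the state-machine loop above: every state handler returns
 * one of three results. RES_CONT_SAME means "run the next state right
 * here" (the do/while spins again), RES_CONT_NEXT means the command has
 * been handed off or finished (caution: it may already be freed), and
 * RES_NEED_THREAD re-queues the command to the head of its active list
 * so a kernel thread re-runs the same state in process context.
 */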
/* Called under cmd_list_lock and IRQs disabled */
static void scst_do_job_active(struct list_head *cmd_list,
	spinlock_t *cmd_list_lock, int context)
{
	TRACE_ENTRY();

	{
		int c = context & ~SCST_CONTEXT_PROCESSABLE;
		sBUG_ON((c != SCST_CONTEXT_DIRECT_ATOMIC) &&
			(c != SCST_CONTEXT_DIRECT));
	}

	while (!list_empty(cmd_list)) {
		struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
					cmd_list_entry);
		TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
		list_del(&cmd->cmd_list_entry);
		spin_unlock_irq(cmd_list_lock);
		scst_process_active_cmd(cmd, context);
		spin_lock_irq(cmd_list_lock);
	}

	TRACE_EXIT();
	return;
}
static inline int test_cmd_lists(struct scst_cmd_lists *p_cmd_lists)
{
	int res = !list_empty(&p_cmd_lists->active_cmd_list) ||
		  unlikely(kthread_should_stop()) ||
		  tm_dbg_is_release();
	return res;
}
int scst_cmd_thread(void *arg)
{
	struct scst_cmd_lists *p_cmd_lists = (struct scst_cmd_lists *)arg;

	TRACE_ENTRY();

	PRINT_INFO("Processing thread started, PID %d", current->pid);

	set_user_nice(current, 10);
	current->flags |= PF_NOFREEZE;

	spin_lock_irq(&p_cmd_lists->cmd_list_lock);
	while (!kthread_should_stop()) {
		wait_queue_t wait;
		init_waitqueue_entry(&wait, current);

		if (!test_cmd_lists(p_cmd_lists)) {
			add_wait_queue_exclusive(&p_cmd_lists->cmd_list_waitQ,
				&wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (test_cmd_lists(p_cmd_lists))
					break;
				spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
				schedule();
				spin_lock_irq(&p_cmd_lists->cmd_list_lock);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&p_cmd_lists->cmd_list_waitQ, &wait);
		}

		if (tm_dbg_is_release()) {
			spin_unlock_irq(&p_cmd_lists->cmd_list_lock);
			tm_dbg_check_released_cmds();
			spin_lock_irq(&p_cmd_lists->cmd_list_lock);
		}

		scst_do_job_active(&p_cmd_lists->active_cmd_list,
			&p_cmd_lists->cmd_list_lock, SCST_CONTEXT_DIRECT |
			SCST_CONTEXT_PROCESSABLE);
	}
	spin_unlock_irq(&p_cmd_lists->cmd_list_lock);

	/*
	 * If kthread_should_stop() is true, we are guaranteed to be either
	 * in the middle of module unload or to have at least one other
	 * thread left to process the command lists.
	 */
	if (p_cmd_lists == &scst_main_cmd_lists) {
		sBUG_ON((scst_threads_info.nr_cmd_threads == 1) &&
			!list_empty(&scst_main_cmd_lists.active_cmd_list));
	}

	PRINT_INFO("Processing thread PID %d finished", current->pid);

	TRACE_EXIT();
	return 0;
}

void scst_cmd_tasklet(long p)
{
	struct scst_tasklet *t = (struct scst_tasklet *)p;

	TRACE_ENTRY();

	spin_lock_irq(&t->tasklet_lock);
	scst_do_job_active(&t->tasklet_cmd_list, &t->tasklet_lock,
		SCST_CONTEXT_DIRECT_ATOMIC);
	spin_unlock_irq(&t->tasklet_lock);

	TRACE_EXIT();
	return;
}
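/*
 * The tasklet variant above runs in softirq context, which is why it
 * drains its private list with SCST_CONTEXT_DIRECT_ATOMIC: any state
 * handler that cannot proceed atomically will answer RES_NEED_THREAD
 * and the command will migrate to a thread instead.
 */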
/*
 * Returns 0 on success, < 0 if there is no device handler or
 * > 0 if SCST_FLAG_SUSPENDED set and SCST_FLAG_SUSPENDING - not.
 * No locks, protection is done by the suspended activity.
 */
static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
{
	struct scst_tgt_dev *tgt_dev = NULL;
	struct list_head *sess_tgt_dev_list_head;
	int res = 0;

	TRACE_ENTRY();

	TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %Ld)", mcmd,
		(uint64_t)mcmd->lun);

	if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
		     !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
		TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
		res = 1;
		goto out;
	}

	sess_tgt_dev_list_head =
		&mcmd->sess->sess_tgt_dev_list_hash[HASH_VAL(mcmd->lun)];
	list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
			sess_tgt_dev_list_entry) {
		if (tgt_dev->lun == mcmd->lun) {
			TRACE_DBG("tgt_dev %p found", tgt_dev);
			mcmd->mcmd_tgt_dev = tgt_dev;
			break;
		}
	}

	if (mcmd->mcmd_tgt_dev == NULL)
		res = -1;

out:
	TRACE_EXIT_HRES(res);
	return res;
}
void scst_done_cmd_mgmt(struct scst_cmd *cmd)
{
	struct scst_mgmt_cmd_stub *mstb;
	int wake = 0;
	unsigned long flags;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("cmd %p done (tag %llu)", cmd, cmd->tag);

	spin_lock_irqsave(&scst_mcmd_lock, flags);

	list_for_each_entry(mstb, &cmd->mgmt_cmd_list,
			cmd_mgmt_cmd_list_entry) {
		struct scst_mgmt_cmd *mcmd = mstb->mcmd;

		TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_done_wait_count %d",
			mcmd, mcmd->cmd_done_wait_count);

		mcmd->cmd_done_wait_count--;
		if (mcmd->cmd_done_wait_count > 0) {
			TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
				"skipping", mcmd->cmd_done_wait_count);
			continue;
		}

		if (mcmd->completed) {
			sBUG_ON(mcmd->nexus_loss_check_done);
			mcmd->completed = 0;
			mcmd->state = SCST_MGMT_CMD_STATE_CHECK_NEXUS_LOSS;
			TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
				"list", mcmd);
			list_add_tail(&mcmd->mgmt_cmd_list_entry,
				&scst_active_mgmt_cmd_list);
			wake = 1;
		}
	}

	spin_unlock_irqrestore(&scst_mcmd_lock, flags);

	if (wake)
		wake_up(&scst_mgmt_cmd_list_waitQ);

	TRACE_EXIT();
	return;
}
static void scst_finish_cmd_mgmt(struct scst_cmd *cmd)
{
	struct scst_mgmt_cmd_stub *mstb, *t;
	int wake = 0;
	unsigned long flags;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("cmd %p finished (tag %llu)", cmd, cmd->tag);

	spin_lock_irqsave(&scst_mcmd_lock, flags);

	list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
			cmd_mgmt_cmd_list_entry) {
		struct scst_mgmt_cmd *mcmd = mstb->mcmd;

		TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_finish_wait_count %d",
			mcmd, mcmd->cmd_finish_wait_count);

		list_del(&mstb->cmd_mgmt_cmd_list_entry);
		mempool_free(mstb, scst_mgmt_stub_mempool);

		mcmd->completed_cmd_count++;

		mcmd->cmd_finish_wait_count--;
		if (mcmd->cmd_finish_wait_count > 0) {
			TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
				"skipping", mcmd->cmd_finish_wait_count);
			continue;
		}

		if (mcmd->completed) {
			mcmd->state = SCST_MGMT_CMD_STATE_DONE;
			TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
				"list", mcmd);
			list_add_tail(&mcmd->mgmt_cmd_list_entry,
				&scst_active_mgmt_cmd_list);
			wake = 1;
		}
	}

	spin_unlock_irqrestore(&scst_mcmd_lock, flags);

	if (wake)
		wake_up(&scst_mgmt_cmd_list_waitQ);

	TRACE_EXIT();
	return;
}
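/*
 * Relationship between the two hooks above: scst_done_cmd_mgmt() fires
 * when an affected command completes (cmd_done_wait_count), while
 * scst_finish_cmd_mgmt() fires when it is finally released
 * (cmd_finish_wait_count). A task management command therefore first
 * passes through CHECK_NEXUS_LOSS once all its commands are done, and
 * reaches DONE only after they have all been freed.
 */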
static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
	struct scst_tgt_dev *tgt_dev, int set_status)
{
	int res = SCST_DEV_TM_NOT_COMPLETED;
	struct scst_dev_type *h = tgt_dev->dev->handler;

	if (h->task_mgmt_fn) {
		TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
			h->name, mcmd->fn);
		EXTRACHECKS_BUG_ON(in_irq());
		res = h->task_mgmt_fn(mcmd, tgt_dev);
		TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
			h->name, res);
		if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
			mcmd->status = res;
	}
	return res;
}
static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
{
	switch (mgmt_fn) {
#ifdef ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
	case SCST_ABORT_TASK:
#endif
	case SCST_ABORT_TASK_SET:
	case SCST_CLEAR_TASK_SET:
		return 1;
	default:
		return 0;
	}
}
/*
 * Might be called under sess_list_lock and IRQ off + BHs also off.
 */
void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
	int other_ini, int call_dev_task_mgmt_fn)
{
	unsigned long flags;
	static DEFINE_SPINLOCK(other_ini_lock);

	TRACE_ENTRY();

	TRACE(((mcmd != NULL) && (mcmd->fn == SCST_ABORT_TASK)) ? TRACE_MGMT_MINOR : TRACE_MGMT,
		"Aborting cmd %p (tag %llu, op %x)", cmd, cmd->tag, cmd->cdb[0]);

	/* To protect from concurrent aborts */
	spin_lock_irqsave(&other_ini_lock, flags);

	if (other_ini) {
		/* Needed because a command may be aborted several times */
		if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
			smp_mb__after_set_bit();
		}
	} else {
		/* Needed because a command may be aborted several times */
		clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
	}

	set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);

	spin_unlock_irqrestore(&other_ini_lock, flags);