4 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2009 ID7 Ltd.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/kthread.h>
27 #include <linux/cdrom.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <asm/kmap_types.h>
33 #include "scst_priv.h"
36 #include "scst_cdbprobe.h"
38 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
39 static void scst_check_internal_sense(struct scst_device *dev, int result,
40 uint8_t *sense, int sense_len);
41 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
43 static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
44 const uint8_t *sense, int sense_len, int flags);
45 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
46 const uint8_t *sense, int sense_len, int flags);
47 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
48 static void scst_release_space(struct scst_cmd *cmd);
49 static void scst_sess_free_tgt_devs(struct scst_session *sess);
50 static void scst_unblock_cmds(struct scst_device *dev);
52 #ifdef CONFIG_SCST_DEBUG_TM
53 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
54 struct scst_acg_dev *acg_dev);
55 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
57 static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
58 struct scst_acg_dev *acg_dev) {}
59 static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
60 #endif /* CONFIG_SCST_DEBUG_TM */
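/*
 * scst_alloc_sense() - allocate the sense buffer of a command.
 *
 * Allocates cmd->sense from scst_sense_mempool (GFP_ATOMIC when called
 * from atomic context, GFP_KERNEL|__GFP_NOFAIL otherwise) and zeroes it.
 */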
62 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
65 gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
69 if (cmd->sense != NULL)
72 cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
73 if (cmd->sense == NULL) {
74 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
75 "The sense data will be lost!!", cmd->cdb[0]);
81 cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
82 memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);
88 EXPORT_SYMBOL(scst_alloc_sense);
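/*
 * scst_alloc_set_sense() - allocate the sense buffer of a command and fill
 * it with the supplied sense data, truncated to cmd->sense_bufflen if
 * necessary.
 */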
90 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
91 const uint8_t *sense, unsigned int len)
97 res = scst_alloc_sense(cmd, atomic);
99 PRINT_BUFFER("Lost sense", sense, len);
103 memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
104 TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);
110 EXPORT_SYMBOL(scst_alloc_set_sense);
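/*
 * scst_set_cmd_error_status() - set a SAM status on a command and turn it
 * into a data-less response; the original response length and data
 * direction are saved in the dbl_ua_orig_* fields first.
 */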
112 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
116 cmd->status = status;
117 cmd->host_status = DID_OK;
119 cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
120 cmd->dbl_ua_orig_data_direction = cmd->data_direction;
122 cmd->data_direction = SCST_DATA_NONE;
123 cmd->resp_data_len = 0;
124 cmd->is_send_status = 1;
131 EXPORT_SYMBOL(scst_set_cmd_error_status);
133 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
139 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
141 rc = scst_alloc_sense(cmd, 1);
143 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
148 scst_set_sense(cmd->sense, cmd->sense_bufflen,
149 scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
150 TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);
156 EXPORT_SYMBOL(scst_set_cmd_error);
158 void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
159 int key, int asc, int ascq)
163 memset(buffer, 0, len);
166 /* Descriptor format */
168 PRINT_ERROR("Length %d of sense buffer too small to "
169 "fit sense %x:%x:%x", len, key, asc, ascq);
172 buffer[0] = 0x72; /* Response Code */
174 buffer[1] = key; /* Sense Key */
176 buffer[2] = asc; /* ASC */
178 buffer[3] = ascq; /* ASCQ */
182 PRINT_ERROR("Length %d of sense buffer too small to "
183 "fit sense %x:%x:%x", len, key, asc, ascq);
186 buffer[0] = 0x70; /* Response Code */
188 buffer[2] = key; /* Sense Key */
190 buffer[7] = 0x0a; /* Additional Sense Length */
192 buffer[12] = asc; /* ASC */
194 buffer[13] = ascq; /* ASCQ */
197 TRACE_BUFFER("Sense set", buffer, len);
200 EXPORT_SYMBOL(scst_set_sense);
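/*
 * Sense data offsets relied upon below (as laid out by scst_set_sense()):
 *
 *   Fixed format (0x70/0x71):      key at byte 2, ASC at 12, ASCQ at 13
 *   Descriptor format (0x72/0x73): key at byte 1, ASC at 2,  ASCQ at 3
 */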
202 bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
203 int key, int asc, int ascq)
208 if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
212 PRINT_ERROR("Sense too small to analyze (%d, "
218 if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
222 if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
226 if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
228 } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
229 /* Descriptor format */
232 PRINT_ERROR("Sense too small to analyze (%d, "
233 "type descriptor)", len);
238 if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
242 if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
246 if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
254 TRACE_EXIT_RES((int)res);
257 EXPORT_SYMBOL(scst_analyze_sense);
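/*
 * scst_check_convert_sense() - convert the sense of a CHECK CONDITION
 * response between fixed and descriptor format, so that it matches the
 * D_SENSE setting of the device (scst_get_cmd_dev_d_sense()).
 */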
259 void scst_check_convert_sense(struct scst_cmd *cmd)
265 if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
268 d_sense = scst_get_cmd_dev_d_sense(cmd);
269 if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
270 TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
272 if (cmd->sense_bufflen < 14) {
273 PRINT_ERROR("Sense too small to convert (%d, "
274 "type fixed)", cmd->sense_bufflen);
277 scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
278 cmd->sense[2], cmd->sense[12], cmd->sense[13]);
279 } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
280 (cmd->sense[0] == 0x73))) {
281 TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
283 if (cmd->sense_bufflen < 4) {
284 PRINT_ERROR("Sense too small to convert (%d, "
285 "type descryptor)", cmd->sense_bufflen);
288 scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
289 cmd->sense[1], cmd->sense[2], cmd->sense[3]);
296 EXPORT_SYMBOL(scst_check_convert_sense);
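/*
 * scst_set_cmd_error_sense() - set CHECK CONDITION on a command and attach
 * the supplied, already formatted, sense data.
 */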
298 static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
303 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
304 scst_alloc_set_sense(cmd, 1, sense, len);
310 void scst_set_busy(struct scst_cmd *cmd)
312 int c = atomic_read(&cmd->sess->sess_cmd_count);
316 if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
317 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
318 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
319 "(cmds count %d, queue_type %x, sess->init_phase %d)",
320 cmd->sess->initiator_name, c,
321 cmd->queue_type, cmd->sess->init_phase);
323 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
324 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
325 "initiator %s (cmds count %d, queue_type %x, "
326 "sess->init_phase %d)", cmd->sess->initiator_name, c,
327 cmd->queue_type, cmd->sess->init_phase);
333 EXPORT_SYMBOL(scst_set_busy);
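/*
 * scst_set_initial_UA() - replace the initial RESET Unit Attention queued
 * on each tgt_dev of the session with the supplied key/asc/ascq; the UA at
 * the head of each UA_list is expected to be the RESET UA.
 */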
335 void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
341 TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
344 /* Protect sess_tgt_dev_list_hash */
345 mutex_lock(&scst_mutex);
347 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
348 struct list_head *sess_tgt_dev_list_head =
349 &sess->sess_tgt_dev_list_hash[i];
350 struct scst_tgt_dev *tgt_dev;
352 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
353 sess_tgt_dev_list_entry) {
354 spin_lock_bh(&tgt_dev->tgt_dev_lock);
355 if (!list_empty(&tgt_dev->UA_list)) {
356 struct scst_tgt_dev_UA *ua;
358 ua = list_entry(tgt_dev->UA_list.next,
359 typeof(*ua), UA_list_entry);
360 if (scst_analyze_sense(ua->UA_sense_buffer,
361 sizeof(ua->UA_sense_buffer),
362 SCST_SENSE_ALL_VALID,
363 SCST_LOAD_SENSE(scst_sense_reset_UA))) {
364 scst_set_sense(ua->UA_sense_buffer,
365 sizeof(ua->UA_sense_buffer),
366 tgt_dev->dev->d_sense,
370 "The first UA isn't RESET UA");
372 PRINT_ERROR("%s", "There's no RESET UA to "
374 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
378 mutex_unlock(&scst_mutex);
383 EXPORT_SYMBOL(scst_set_initial_UA);
385 static struct scst_aen *scst_alloc_aen(struct scst_tgt_dev *tgt_dev)
387 struct scst_aen *aen;
391 aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
393 PRINT_ERROR("AEN memory allocation failed. Corresponding "
394 "event notification will not be performed (initiator "
395 "%s)", tgt_dev->sess->initiator_name);
398 memset(aen, 0, sizeof(*aen));
400 aen->sess = tgt_dev->sess;
401 scst_sess_get(aen->sess);
403 aen->lun = scst_pack_lun(tgt_dev->lun);
406 TRACE_EXIT_HRES((unsigned long)aen);
410 static void scst_free_aen(struct scst_aen *aen)
414 scst_sess_put(aen->sess);
415 mempool_free(aen, scst_aen_mempool);
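/*
 * scst_capacity_data_changed() - notify all sessions of a disk device that
 * its capacity has changed: a CAPACITY DATA CHANGED AEN is delivered where
 * the target driver implements report_aen(), otherwise the corresponding
 * Unit Attention is queued.
 */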
422 void scst_capacity_data_changed(struct scst_device *dev)
424 struct scst_tgt_dev *tgt_dev;
425 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
429 if (dev->type != TYPE_DISK) {
430 TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
431 "CHANGED UA", dev->type);
435 TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);
437 mutex_lock(&scst_mutex);
439 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
440 dev_tgt_dev_list_entry) {
441 struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
443 if (tgtt->report_aen != NULL) {
444 struct scst_aen *aen;
447 aen = scst_alloc_aen(tgt_dev);
451 aen->event_fn = SCST_AEN_SCSI;
452 aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
453 scst_set_sense(aen->aen_sense, aen->aen_sense_len,
454 tgt_dev->dev->d_sense,
455 SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
457 TRACE_DBG("Calling target's %s report_aen(%p)",
459 rc = tgtt->report_aen(aen);
460 TRACE_DBG("Target's %s report_aen(%p) returned %d",
461 tgtt->name, aen, rc);
462 if (rc == SCST_AEN_RES_SUCCESS)
468 TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED UA (tgt_dev %p)",
470 scst_set_sense(sense_buffer, sizeof(sense_buffer),
471 tgt_dev->dev->d_sense,
472 SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
473 scst_check_set_UA(tgt_dev, sense_buffer,
474 sizeof(sense_buffer), 0);
477 mutex_unlock(&scst_mutex);
483 EXPORT_SYMBOL(scst_capacity_data_changed);
485 static inline bool scst_is_report_luns_changed_type(int type)
496 case TYPE_MEDIUM_CHANGER:
505 /* scst_mutex supposed to be held */
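/*
 * All tgt_dev locks of the session are taken first, the REPORTED LUNS DATA
 * CHANGED UA is then set on every suitable tgt_dev with the GLOBAL flag,
 * and the locks are dropped in reverse order, so the UA becomes visible on
 * all LUNs together.
 */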
506 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
509 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
510 struct list_head *shead;
511 struct scst_tgt_dev *tgt_dev;
516 TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
521 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
522 shead = &sess->sess_tgt_dev_list_hash[i];
524 list_for_each_entry(tgt_dev, shead,
525 sess_tgt_dev_list_entry) {
526 /* Lockdep triggers a false positive here */
527 spin_lock(&tgt_dev->tgt_dev_lock);
531 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
532 shead = &sess->sess_tgt_dev_list_hash[i];
534 list_for_each_entry(tgt_dev, shead,
535 sess_tgt_dev_list_entry) {
536 if (!scst_is_report_luns_changed_type(
540 scst_set_sense(sense_buffer, sizeof(sense_buffer),
541 tgt_dev->dev->d_sense,
542 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
544 __scst_check_set_UA(tgt_dev, sense_buffer,
545 sizeof(sense_buffer),
546 flags | SCST_SET_UA_FLAG_GLOBAL);
550 for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
551 shead = &sess->sess_tgt_dev_list_hash[i];
553 list_for_each_entry_reverse(tgt_dev,
554 shead, sess_tgt_dev_list_entry) {
555 spin_unlock(&tgt_dev->tgt_dev_lock);
565 /* The activity supposed to be suspended and scst_mutex held */
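/*
 * scst_report_luns_changed() - notify every session of the ACG that its
 * set of LUNs has changed: a REPORTED LUNS DATA CHANGED AEN is delivered
 * where the target driver implements report_aen(), otherwise the
 * corresponding UA is queued via scst_queue_report_luns_changed_UA().
 */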
566 void scst_report_luns_changed(struct scst_acg *acg)
568 struct scst_session *sess;
572 TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);
574 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
576 struct list_head *shead;
577 struct scst_tgt_dev *tgt_dev;
578 struct scst_tgt_template *tgtt = sess->tgt->tgtt;
580 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
581 shead = &sess->sess_tgt_dev_list_hash[i];
583 list_for_each_entry(tgt_dev, shead,
584 sess_tgt_dev_list_entry) {
585 if (scst_is_report_luns_changed_type(
590 TRACE_MGMT_DBG("Not found a device capable REPORTED "
591 "LUNS DATA CHANGED UA (sess %p)", sess);
594 if (tgtt->report_aen != NULL) {
595 struct scst_aen *aen;
598 aen = scst_alloc_aen(tgt_dev);
602 aen->event_fn = SCST_AEN_SCSI;
603 aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
604 scst_set_sense(aen->aen_sense, aen->aen_sense_len,
605 tgt_dev->dev->d_sense,
606 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
608 TRACE_DBG("Calling target's %s report_aen(%p)",
610 rc = tgtt->report_aen(aen);
611 TRACE_DBG("Target's %s report_aen(%p) returned %d",
612 tgtt->name, aen, rc);
613 if (rc == SCST_AEN_RES_SUCCESS)
620 scst_queue_report_luns_changed_UA(sess, 0);
627 void scst_aen_done(struct scst_aen *aen)
631 TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
632 aen->event_fn, aen->sess->initiator_name);
634 if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
637 if (aen->event_fn != SCST_AEN_SCSI)
640 TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
641 aen->sess->initiator_name);
643 if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
644 SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
645 scst_sense_reported_luns_data_changed))) {
646 mutex_lock(&scst_mutex);
647 scst_queue_report_luns_changed_UA(aen->sess,
648 SCST_SET_UA_FLAG_AT_HEAD);
649 mutex_unlock(&scst_mutex);
650 } else if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
651 SCST_SENSE_ALL_VALID,
652 SCST_LOAD_SENSE(scst_sense_capacity_data_changed))) {
653 /* tgt_dev might have gone away, so we need to look it up again */
654 struct list_head *shead;
655 struct scst_tgt_dev *tgt_dev;
658 lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));
660 mutex_lock(&scst_mutex);
662 shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
663 list_for_each_entry(tgt_dev, shead,
664 sess_tgt_dev_list_entry) {
665 if (tgt_dev->lun == lun) {
666 TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED "
667 "UA (tgt_dev %p)", tgt_dev);
668 scst_check_set_UA(tgt_dev, aen->aen_sense,
670 SCST_SET_UA_FLAG_AT_HEAD);
675 mutex_unlock(&scst_mutex);
677 PRINT_ERROR("%s", "Unknown SCSI AEN");
685 EXPORT_SYMBOL(scst_aen_done);
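/*
 * scst_get_cmd_abnormal_done_state() - return the state a command must be
 * switched to for abnormal (failed/aborted) completion, depending on how
 * far it has progressed through the processing pipeline.
 */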
687 int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
693 switch (cmd->state) {
694 case SCST_CMD_STATE_INIT_WAIT:
695 case SCST_CMD_STATE_INIT:
696 case SCST_CMD_STATE_PRE_PARSE:
697 case SCST_CMD_STATE_DEV_PARSE:
698 case SCST_CMD_STATE_DEV_DONE:
700 res = SCST_CMD_STATE_FINISHED_INTERNAL;
702 res = SCST_CMD_STATE_PRE_XMIT_RESP;
705 case SCST_CMD_STATE_PRE_DEV_DONE:
706 case SCST_CMD_STATE_MODE_SELECT_CHECKS:
707 res = SCST_CMD_STATE_DEV_DONE;
710 case SCST_CMD_STATE_PRE_XMIT_RESP:
711 res = SCST_CMD_STATE_XMIT_RESP;
714 case SCST_CMD_STATE_PREPROCESS_DONE:
715 case SCST_CMD_STATE_PREPARE_SPACE:
716 case SCST_CMD_STATE_RDY_TO_XFER:
717 case SCST_CMD_STATE_DATA_WAIT:
718 case SCST_CMD_STATE_TGT_PRE_EXEC:
719 case SCST_CMD_STATE_SEND_FOR_EXEC:
720 case SCST_CMD_STATE_LOCAL_EXEC:
721 case SCST_CMD_STATE_REAL_EXEC:
722 case SCST_CMD_STATE_REAL_EXECUTING:
723 res = SCST_CMD_STATE_PRE_DEV_DONE;
727 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
728 cmd->state, cmd, cmd->cdb[0]);
730 /* Invalid state to suppress compiler warning */
731 res = SCST_CMD_STATE_LAST_ACTIVE;
737 EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
739 void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
743 #ifdef CONFIG_SCST_EXTRACHECKS
744 switch (cmd->state) {
745 case SCST_CMD_STATE_XMIT_RESP:
746 case SCST_CMD_STATE_FINISHED:
747 case SCST_CMD_STATE_FINISHED_INTERNAL:
748 case SCST_CMD_STATE_XMIT_WAIT:
749 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
750 cmd->state, cmd, cmd->cdb[0]);
755 cmd->state = scst_get_cmd_abnormal_done_state(cmd);
757 #ifdef CONFIG_SCST_EXTRACHECKS
758 if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
759 (cmd->tgt_dev == NULL) && !cmd->internal) {
760 PRINT_CRIT_ERROR("Wrong not inited cmd state %d (cmd %p, "
761 "op %x)", cmd->state, cmd, cmd->cdb[0]);
769 EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
771 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
777 scst_check_restore_sg_buff(cmd);
778 cmd->resp_data_len = resp_data_len;
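/*
 * Unless resp_data_len already covers the whole buffer, the loop below
 * cuts the sg vector off at resp_data_len bytes, saving the original
 * entry count and length so that the buffer can later be restored by
 * scst_check_restore_sg_buff().
 */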
780 if (resp_data_len == cmd->bufflen)
784 for (i = 0; i < cmd->sg_cnt; i++) {
785 l += cmd->sg[i].length;
786 if (l >= resp_data_len) {
787 int left = resp_data_len - (l - cmd->sg[i].length);
788 #ifdef CONFIG_SCST_DEBUG
789 TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
790 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
792 cmd, (long long unsigned int)cmd->tag,
794 cmd->sg[i].length, left);
796 cmd->orig_sg_cnt = cmd->sg_cnt;
797 cmd->orig_sg_entry = i;
798 cmd->orig_entry_len = cmd->sg[i].length;
799 cmd->sg_cnt = (left > 0) ? i+1 : i;
800 cmd->sg[i].length = left;
801 cmd->sg_buff_modified = 1;
810 EXPORT_SYMBOL(scst_set_resp_data_len);
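/*
 * scst_queue_retry_cmd() - handle a target QUEUE FULL condition: if some
 * commands have finished since the caller sampled finished_cmds, request an
 * immediate retry; otherwise put the command on the target's retry_cmd_list
 * and arm the retry timer.
 */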
813 int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
815 struct scst_tgt *tgt = cmd->tgt;
821 spin_lock_irqsave(&tgt->tgt_lock, flags);
824 * A memory barrier is needed here to order the write to retry_cmds
825 * against the read of finished_cmds, so we don't miss the case when a
826 * command finishes while we are queuing this one for retry after the
827 * finished_cmds check.
830 TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
832 if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
833 /* At least one cmd finished, so try again */
835 TRACE_RETRY("Some command(s) finished, direct retry "
836 "(finished_cmds=%d, tgt->finished_cmds=%d, "
837 "retry_cmds=%d)", finished_cmds,
838 atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
843 TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
844 list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
846 if (!tgt->retry_timer_active) {
847 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
848 add_timer(&tgt->retry_timer);
849 tgt->retry_timer_active = 1;
853 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
859 /* Returns 0 to continue, >0 to restart, <0 to break */
860 static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
861 unsigned long cur_time, unsigned long max_time,
862 struct scst_session *sess, unsigned long *flags,
863 struct scst_tgt_template *tgtt)
865 int res = -1; /* break */
867 TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
868 "pending time %ld", cmd, cmd->cmd_hw_pending,
869 (long)(cur_time - cmd->start_time) / HZ,
870 (long)(cur_time - cmd->hw_pending_start) / HZ);
872 if (time_before_eq(cur_time, cmd->start_time + max_time)) {
873 /* Cmds are ordered, so no need to check more */
877 if (!cmd->cmd_hw_pending) {
878 res = 0; /* continue */
882 if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
883 /* Cmds are ordered, so no need to check more */
887 TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
888 cmd, (cur_time - cmd->hw_pending_start) / HZ,
891 cmd->cmd_hw_pending = 0;
893 spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
894 tgtt->on_hw_pending_cmd_timeout(cmd);
895 spin_lock_irqsave(&sess->sess_list_lock, *flags);
897 res = 1; /* restart */
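/*
 * scst_hw_pending_work_fn() - delayed work that scans the session's command
 * lists and, via the target's on_hw_pending_cmd_timeout() callback, times
 * out commands that have been pending in the hardware for longer than
 * max_hw_pending_time.
 */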
904 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
905 static void scst_hw_pending_work_fn(void *p)
907 static void scst_hw_pending_work_fn(struct delayed_work *work)
910 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
911 struct scst_session *sess = (struct scst_session *)p;
913 struct scst_session *sess = container_of(work, struct scst_session,
916 struct scst_tgt_template *tgtt = sess->tgt->tgtt;
917 struct scst_cmd *cmd;
918 unsigned long cur_time = jiffies;
920 unsigned long max_time = tgtt->max_hw_pending_time * HZ;
924 TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);
926 clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
928 spin_lock_irqsave(&sess->sess_list_lock, flags);
931 list_for_each_entry(cmd, &sess->search_cmd_list,
932 sess_cmd_list_entry) {
935 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
946 list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
947 sess_cmd_list_entry) {
950 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
960 if (!list_empty(&sess->search_cmd_list) ||
961 !list_empty(&sess->after_pre_xmit_cmd_list)) {
963 * For stuck cmds, if there is no activity, we might need one more
964 * run to release them, so reschedule once again.
966 TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
967 sess, tgtt->max_hw_pending_time);
968 set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
969 schedule_delayed_work(&sess->hw_pending_work,
970 tgtt->max_hw_pending_time * HZ);
973 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
979 /* Called under scst_mutex and suspended activity */
980 int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
982 struct scst_device *dev;
984 static int dev_num; /* protected by scst_mutex */
988 dev = kzalloc(sizeof(*dev), gfp_mask);
990 TRACE(TRACE_OUT_OF_MEM, "%s",
991 "Allocation of scst_device failed");
996 dev->handler = &scst_null_devtype;
997 dev->p_cmd_lists = &scst_main_cmd_lists;
998 atomic_set(&dev->dev_cmd_count, 0);
999 atomic_set(&dev->write_cmd_count, 0);
1000 scst_init_mem_lim(&dev->dev_mem_lim);
1001 spin_lock_init(&dev->dev_lock);
1002 atomic_set(&dev->on_dev_count, 0);
1003 INIT_LIST_HEAD(&dev->blocked_cmd_list);
1004 INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
1005 INIT_LIST_HEAD(&dev->dev_acg_dev_list);
1006 INIT_LIST_HEAD(&dev->threads_list);
1007 init_waitqueue_head(&dev->on_dev_waitQ);
1008 dev->dev_double_ua_possible = 1;
1009 dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
1010 dev->dev_num = dev_num++;
1012 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1013 #if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
1014 dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1015 if (dev->dev_io_ctx == NULL) {
1016 TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
1027 TRACE_EXIT_RES(res);
1031 /* Called under scst_mutex and suspended activity */
1032 void scst_free_device(struct scst_device *dev)
1036 #ifdef CONFIG_SCST_EXTRACHECKS
1037 if (!list_empty(&dev->dev_tgt_dev_list) ||
1038 !list_empty(&dev->dev_acg_dev_list)) {
1039 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
1040 "is not empty!", __func__);
1045 __exit_io_context(dev->dev_io_ctx);
1053 void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
1055 atomic_set(&mem_lim->alloced_pages, 0);
1056 mem_lim->max_allowed_pages =
1057 ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
1059 EXPORT_SYMBOL(scst_init_mem_lim);
1061 static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
1062 struct scst_device *dev, uint64_t lun)
1064 struct scst_acg_dev *res;
1068 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1069 res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
1071 res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
1074 TRACE(TRACE_OUT_OF_MEM,
1075 "%s", "Allocation of scst_acg_dev failed");
1078 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1079 memset(res, 0, sizeof(*res));
1087 TRACE_EXIT_HRES(res);
1091 /* The activity supposed to be suspended and scst_mutex held */
1092 static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
1096 TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
1098 list_del(&acg_dev->acg_dev_list_entry);
1099 list_del(&acg_dev->dev_acg_dev_list_entry);
1101 kmem_cache_free(scst_acgd_cachep, acg_dev);
1107 /* The activity supposed to be suspended and scst_mutex held */
1108 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
1110 struct scst_acg *acg;
1114 acg = kzalloc(sizeof(*acg), GFP_KERNEL);
1116 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
1120 INIT_LIST_HEAD(&acg->acg_dev_list);
1121 INIT_LIST_HEAD(&acg->acg_sess_list);
1122 INIT_LIST_HEAD(&acg->acn_list);
1123 acg->acg_name = acg_name;
1125 TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
1126 list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
1129 TRACE_EXIT_HRES(acg);
1133 /* The activity supposed to be suspended and scst_mutex held */
1134 int scst_destroy_acg(struct scst_acg *acg)
1136 struct scst_acn *n, *nn;
1137 struct scst_acg_dev *acg_dev, *acg_dev_tmp;
1142 if (!list_empty(&acg->acg_sess_list)) {
1143 PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
1148 TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
1149 list_del(&acg->scst_acg_list_entry);
1151 /* Freeing acg_devs */
1152 list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
1153 acg_dev_list_entry) {
1154 struct scst_tgt_dev *tgt_dev, *tt;
1155 list_for_each_entry_safe(tgt_dev, tt,
1156 &acg_dev->dev->dev_tgt_dev_list,
1157 dev_tgt_dev_list_entry) {
1158 if (tgt_dev->acg_dev == acg_dev)
1159 scst_free_tgt_dev(tgt_dev);
1161 scst_free_acg_dev(acg_dev);
1165 list_for_each_entry_safe(n, nn, &acg->acn_list,
1167 list_del(&n->acn_list_entry);
1171 INIT_LIST_HEAD(&acg->acn_list);
1175 TRACE_EXIT_RES(res);
1180 * scst_mutex supposed to be held, there must not be parallel activity in this
1183 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
1184 struct scst_acg_dev *acg_dev)
1186 int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
1187 struct scst_tgt_dev *tgt_dev, *t = NULL;
1188 struct scst_device *dev = acg_dev->dev;
1189 struct list_head *sess_tgt_dev_list_head;
1190 struct scst_tgt_template *vtt = sess->tgt->tgtt;
1192 bool share_io_ctx = false;
1193 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1197 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1198 tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
1200 tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
1202 if (tgt_dev == NULL) {
1203 TRACE(TRACE_OUT_OF_MEM, "%s",
1204 "Allocation of scst_tgt_dev failed");
1207 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1208 memset(tgt_dev, 0, sizeof(*tgt_dev));
1212 tgt_dev->lun = acg_dev->lun;
1213 tgt_dev->acg_dev = acg_dev;
1214 tgt_dev->sess = sess;
1215 atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
1217 scst_sgv_pool_use_norm(tgt_dev);
1219 if (dev->scsi_dev != NULL) {
1220 ini_sg = dev->scsi_dev->host->sg_tablesize;
1221 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
1222 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
1225 ini_sg = (1 << 15) /* infinite */;
1226 ini_unchecked_isa_dma = 0;
1227 ini_use_clustering = 0;
1229 tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
1231 if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
1232 !sess->tgt->tgtt->no_clustering)
1233 scst_sgv_pool_use_norm_clust(tgt_dev);
1235 if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
1236 scst_sgv_pool_use_dma(tgt_dev);
1238 if (dev->scsi_dev != NULL) {
1239 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
1240 "SCST lun=%lld", dev->scsi_dev->host->host_no,
1241 dev->scsi_dev->channel, dev->scsi_dev->id,
1243 (long long unsigned int)tgt_dev->lun);
1245 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
1246 dev->virt_name, (long long unsigned int)tgt_dev->lun);
1249 spin_lock_init(&tgt_dev->tgt_dev_lock);
1250 INIT_LIST_HEAD(&tgt_dev->UA_list);
1251 spin_lock_init(&tgt_dev->thr_data_lock);
1252 INIT_LIST_HEAD(&tgt_dev->thr_data_list);
1253 spin_lock_init(&tgt_dev->sn_lock);
1254 INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
1255 INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
1256 tgt_dev->expected_sn = 1;
1257 tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
1258 tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
1259 for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
1260 atomic_set(&tgt_dev->sn_slots[i], 0);
1262 if (dev->handler->parse_atomic &&
1263 (sess->tgt->tgtt->preprocessing_done == NULL)) {
1264 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1265 __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
1266 &tgt_dev->tgt_dev_flags);
1267 if (dev->handler->exec_atomic)
1268 __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
1269 &tgt_dev->tgt_dev_flags);
1271 if (dev->handler->exec_atomic) {
1272 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1273 __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
1274 &tgt_dev->tgt_dev_flags);
1275 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
1276 &tgt_dev->tgt_dev_flags);
1277 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1278 &tgt_dev->tgt_dev_flags);
1280 if (dev->handler->dev_done_atomic &&
1281 sess->tgt->tgtt->xmit_response_atomic) {
1282 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1283 &tgt_dev->tgt_dev_flags);
1286 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1287 dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
1288 scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);
1290 tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
1292 if (tgt_dev->sess->initiator_name != NULL) {
1293 spin_lock_bh(&dev->dev_lock);
1294 list_for_each_entry(t, &dev->dev_tgt_dev_list,
1295 dev_tgt_dev_list_entry) {
1296 TRACE_DBG("t name %s (tgt_dev name %s)",
1297 t->sess->initiator_name,
1298 tgt_dev->sess->initiator_name);
1299 if (t->sess->initiator_name == NULL)
1301 if (strcmp(t->sess->initiator_name,
1302 tgt_dev->sess->initiator_name) == 0) {
1303 share_io_ctx = true;
1307 spin_unlock_bh(&dev->dev_lock);
1311 TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
1312 t->tgt_dev_io_ctx, tgt_dev,
1313 tgt_dev->sess->initiator_name);
1314 tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
1316 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1317 #if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
1318 tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1319 if (tgt_dev->tgt_dev_io_ctx == NULL) {
1320 TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
1321 "context for dev %s (initiator %s)",
1322 dev->virt_name, sess->initiator_name);
1329 if (vtt->threads_num > 0) {
1331 if (dev->handler->threads_num > 0)
1332 rc = scst_add_dev_threads(dev, vtt->threads_num);
1333 else if (dev->handler->threads_num == 0)
1334 rc = scst_add_global_threads(vtt->threads_num);
1339 if (dev->handler && dev->handler->attach_tgt) {
1340 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1342 rc = dev->handler->attach_tgt(tgt_dev);
1343 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1345 PRINT_ERROR("Device handler's %s attach_tgt() "
1346 "failed: %d", dev->handler->name, rc);
1351 spin_lock_bh(&dev->dev_lock);
1352 list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
1353 if (dev->dev_reserved)
1354 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
1355 spin_unlock_bh(&dev->dev_lock);
1357 sess_tgt_dev_list_head =
1358 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
1359 list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
1360 sess_tgt_dev_list_head);
1367 if (vtt->threads_num > 0) {
1368 if (dev->handler->threads_num > 0)
1369 scst_del_dev_threads(dev, vtt->threads_num);
1370 else if (dev->handler->threads_num == 0)
1371 scst_del_global_threads(vtt->threads_num);
1375 scst_free_all_UA(tgt_dev);
1376 __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1378 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1383 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
1385 /* No locks supposed to be held, scst_mutex - held */
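/*
 * On nexus loss the reservation of the tgt_dev is cleared, all pending UAs
 * and the stored sense are dropped and, if requested, a NEXUS LOSS UA is
 * queued.
 */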
1386 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1390 scst_clear_reservation(tgt_dev);
1392 /* With activity suspended the lock isn't needed, but let's be safe */
1393 spin_lock_bh(&tgt_dev->tgt_dev_lock);
1394 scst_free_all_UA(tgt_dev);
1395 memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1396 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1399 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1400 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1401 tgt_dev->dev->d_sense,
1402 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1403 scst_check_set_UA(tgt_dev, sense_buffer,
1404 sizeof(sense_buffer), 0);
1412 * scst_mutex supposed to be held, there must not be parallel activity in this
1415 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1417 struct scst_device *dev = tgt_dev->dev;
1418 struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1422 tm_dbg_deinit_tgt_dev(tgt_dev);
1424 spin_lock_bh(&dev->dev_lock);
1425 list_del(&tgt_dev->dev_tgt_dev_list_entry);
1426 spin_unlock_bh(&dev->dev_lock);
1428 list_del(&tgt_dev->sess_tgt_dev_list_entry);
1430 scst_clear_reservation(tgt_dev);
1431 scst_free_all_UA(tgt_dev);
1433 if (dev->handler && dev->handler->detach_tgt) {
1434 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1436 dev->handler->detach_tgt(tgt_dev);
1437 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1440 if (vtt->threads_num > 0) {
1441 if (dev->handler->threads_num > 0)
1442 scst_del_dev_threads(dev, vtt->threads_num);
1443 else if (dev->handler->threads_num == 0)
1444 scst_del_global_threads(vtt->threads_num);
1447 __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1449 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1455 /* scst_mutex supposed to be held */
1456 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1459 struct scst_acg_dev *acg_dev;
1460 struct scst_tgt_dev *tgt_dev;
1464 list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1465 acg_dev_list_entry) {
1466 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1467 if (tgt_dev == NULL) {
1478 scst_sess_free_tgt_devs(sess);
1483 * scst_mutex supposed to be held, there must not be parallel activity in this
1486 static void scst_sess_free_tgt_devs(struct scst_session *sess)
1489 struct scst_tgt_dev *tgt_dev, *t;
1493 /* The session is going down, no users, so no locks */
1494 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1495 struct list_head *sess_tgt_dev_list_head =
1496 &sess->sess_tgt_dev_list_hash[i];
1497 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1498 sess_tgt_dev_list_entry) {
1499 scst_free_tgt_dev(tgt_dev);
1501 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1508 /* The activity supposed to be suspended and scst_mutex held */
1509 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1510 uint64_t lun, int read_only)
1513 struct scst_acg_dev *acg_dev;
1514 struct scst_tgt_dev *tgt_dev;
1515 struct scst_session *sess;
1516 LIST_HEAD(tmp_tgt_dev_list);
1520 INIT_LIST_HEAD(&tmp_tgt_dev_list);
1522 #ifdef CONFIG_SCST_EXTRACHECKS
1523 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
1524 if (acg_dev->dev == dev) {
1525 PRINT_ERROR("Device is already in group %s",
1533 acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1534 if (acg_dev == NULL) {
1538 acg_dev->rd_only = read_only;
1540 TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1542 list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1543 list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1545 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1546 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1547 if (tgt_dev == NULL) {
1551 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1555 scst_report_luns_changed(acg);
1557 if (dev->virt_name != NULL) {
1558 PRINT_INFO("Added device %s to group %s (LUN %lld, "
1559 "rd_only %d)", dev->virt_name, acg->acg_name,
1560 (long long unsigned int)lun,
1563 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
1564 "%lld, rd_only %d)",
1565 dev->scsi_dev->host->host_no,
1566 dev->scsi_dev->channel, dev->scsi_dev->id,
1567 dev->scsi_dev->lun, acg->acg_name,
1568 (long long unsigned int)lun,
1573 TRACE_EXIT_RES(res);
1577 list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1578 extra_tgt_dev_list_entry) {
1579 scst_free_tgt_dev(tgt_dev);
1581 scst_free_acg_dev(acg_dev);
1585 /* The activity supposed to be suspended and scst_mutex held */
1586 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
1589 struct scst_acg_dev *acg_dev = NULL, *a;
1590 struct scst_tgt_dev *tgt_dev, *tt;
1594 list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1595 if (a->dev == dev) {
1601 if (acg_dev == NULL) {
1602 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
1607 list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1608 dev_tgt_dev_list_entry) {
1609 if (tgt_dev->acg_dev == acg_dev)
1610 scst_free_tgt_dev(tgt_dev);
1612 scst_free_acg_dev(acg_dev);
1614 scst_report_luns_changed(acg);
1616 if (dev->virt_name != NULL) {
1617 PRINT_INFO("Removed device %s from group %s",
1618 dev->virt_name, acg->acg_name);
1620 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
1621 dev->scsi_dev->host->host_no,
1622 dev->scsi_dev->channel, dev->scsi_dev->id,
1623 dev->scsi_dev->lun, acg->acg_name);
1627 TRACE_EXIT_RES(res);
1631 /* scst_mutex supposed to be held */
1632 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1641 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
1643 if (strcmp(n->name, name) == 0) {
1644 PRINT_ERROR("Name %s already exists in group %s",
1645 name, acg->acg_name);
1651 n = kmalloc(sizeof(*n), GFP_KERNEL);
1653 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1659 nm = kmalloc(len + 1, GFP_KERNEL);
1661 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1669 list_add_tail(&n->acn_list_entry, &acg->acn_list);
1673 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1675 TRACE_EXIT_RES(res);
1683 /* scst_mutex supposed to be held */
1684 int scst_acg_remove_name(struct scst_acg *acg, const char *name)
1691 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
1693 if (strcmp(n->name, name) == 0) {
1694 list_del(&n->acn_list_entry);
1703 PRINT_INFO("Removed name %s from group %s", name,
1706 PRINT_ERROR("Unable to find name %s in group %s", name,
1710 TRACE_EXIT_RES(res);
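/*
 * scst_create_prepare_internal_cmd() - allocate an internal command that
 * inherits session, target, device and LUN from orig_cmd and is processed
 * at the head of the queue (SCST_CMD_QUEUE_HEAD_OF_QUEUE).
 */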
1714 static struct scst_cmd *scst_create_prepare_internal_cmd(
1715 struct scst_cmd *orig_cmd, int bufsize)
1717 struct scst_cmd *res;
1718 gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1722 res = scst_alloc_cmd(gfp_mask);
1726 res->cmd_lists = orig_cmd->cmd_lists;
1727 res->sess = orig_cmd->sess;
1728 res->atomic = scst_cmd_atomic(orig_cmd);
1730 res->tgtt = orig_cmd->tgtt;
1731 res->tgt = orig_cmd->tgt;
1732 res->dev = orig_cmd->dev;
1733 res->tgt_dev = orig_cmd->tgt_dev;
1734 res->lun = orig_cmd->lun;
1735 res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1736 res->data_direction = SCST_DATA_UNKNOWN;
1737 res->orig_cmd = orig_cmd;
1738 res->bufflen = bufsize;
1740 scst_sess_get(res->sess);
1741 if (res->tgt_dev != NULL)
1744 res->state = SCST_CMD_STATE_PRE_PARSE;
1747 TRACE_EXIT_HRES((unsigned long)res);
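/*
 * scst_prepare_request_sense() - queue an internal REQUEST SENSE command to
 * fetch the sense of orig_cmd from the device; any sense buffer already
 * allocated for orig_cmd is released first.
 */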
1751 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1754 static const uint8_t request_sense[6] =
1755 { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
1756 struct scst_cmd *rs_cmd;
1760 if (orig_cmd->sense != NULL) {
1761 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
1762 orig_cmd->sense, orig_cmd);
1763 mempool_free(orig_cmd->sense, scst_sense_mempool);
1764 orig_cmd->sense = NULL;
1767 rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
1768 SCST_SENSE_BUFFERSIZE);
1772 memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1773 rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
1774 rs_cmd->cdb_len = sizeof(request_sense);
1775 rs_cmd->data_direction = SCST_DATA_READ;
1776 rs_cmd->expected_data_direction = rs_cmd->data_direction;
1777 rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
1778 rs_cmd->expected_values_set = 1;
1780 TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
1781 "cmd list", rs_cmd);
1782 spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1783 list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
1784 wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
1785 spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1788 TRACE_EXIT_RES(res);
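/*
 * scst_complete_request_sense() - copy the sense returned by the internal
 * REQUEST SENSE command into orig_cmd (or set HARDWARE ERROR if nothing
 * usable came back) and requeue orig_cmd at the head of the active list.
 */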
1796 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
1798 struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
1804 sBUG_ON(orig_cmd == NULL);
1806 len = scst_get_buf_first(req_cmd, &buf);
1808 if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
1809 SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
1810 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
1812 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
1815 PRINT_ERROR("%s", "Unable to get the sense via "
1816 "REQUEST SENSE, returning HARDWARE ERROR");
1817 scst_set_cmd_error(orig_cmd,
1818 SCST_LOAD_SENSE(scst_sense_hardw_error));
1822 scst_put_buf(req_cmd, buf);
1824 TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
1825 "cmd list", orig_cmd);
1826 spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
1827 list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
1828 wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
1829 spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
1835 int scst_finish_internal_cmd(struct scst_cmd *cmd)
1841 sBUG_ON(!cmd->internal);
1843 if (cmd->cdb[0] == REQUEST_SENSE)
1844 scst_complete_request_sense(cmd);
1846 __scst_cmd_put(cmd);
1848 res = SCST_CMD_STATE_RES_CONT_NEXT;
1850 TRACE_EXIT_HRES(res);
1854 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1855 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
1857 struct scsi_request *req;
1861 if (scsi_cmd && (req = scsi_cmd->sc_request)) {
1863 if (req->sr_bufflen)
1864 kfree(req->sr_buffer);
1865 scsi_release_request(req);
1873 static void scst_send_release(struct scst_device *dev)
1875 struct scsi_request *req;
1876 struct scsi_device *scsi_dev;
1881 if (dev->scsi_dev == NULL)
1884 scsi_dev = dev->scsi_dev;
1886 req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
1888 PRINT_ERROR("Allocation of scsi_request failed: unable "
1889 "to RELEASE device %d:%d:%d:%d",
1890 scsi_dev->host->host_no, scsi_dev->channel,
1891 scsi_dev->id, scsi_dev->lun);
1895 memset(cdb, 0, sizeof(cdb));
1897 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1898 ((scsi_dev->lun << 5) & 0xe0) : 0;
1899 memcpy(req->sr_cmnd, cdb, sizeof(cdb));
1900 req->sr_cmd_len = sizeof(cdb);
1901 req->sr_data_direction = SCST_DATA_NONE;
1903 req->sr_bufflen = 0;
1904 req->sr_buffer = NULL;
1905 req->sr_request->rq_disk = dev->rq_disk;
1906 req->sr_sense_buffer[0] = 0;
1908 TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
1910 scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
1911 scst_req_done, 15, 3);
1917 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1918 static void scst_send_release(struct scst_device *dev)
1920 struct scsi_device *scsi_dev;
1921 unsigned char cdb[6];
1922 uint8_t sense[SCSI_SENSE_BUFFERSIZE];
1927 if (dev->scsi_dev == NULL)
1930 scsi_dev = dev->scsi_dev;
1932 for (i = 0; i < 5; i++) {
1933 memset(cdb, 0, sizeof(cdb));
1935 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1936 ((scsi_dev->lun << 5) & 0xe0) : 0;
1938 memset(sense, 0, sizeof(sense));
1940 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
1942 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
1944 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
1948 TRACE_DBG("MODE_SENSE done: %x", rc);
1950 if (scsi_status_is_good(rc)) {
1953 PRINT_ERROR("RELEASE failed: %d", rc);
1954 PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
1955 scst_check_internal_sense(dev, rc, sense,
1964 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
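/*
 * If this tgt_dev holds the device reservation, the RESERVED flag is
 * cleared on all tgt_devs of the device and a RELEASE is sent to the
 * underlying SCSI device via scst_send_release().
 */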
1966 /* scst_mutex supposed to be held */
1967 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
1969 struct scst_device *dev = tgt_dev->dev;
1974 spin_lock_bh(&dev->dev_lock);
1975 if (dev->dev_reserved &&
1976 !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
1977 /* This is the one who holds the reservation */
1978 struct scst_tgt_dev *tgt_dev_tmp;
1979 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1980 dev_tgt_dev_list_entry) {
1981 clear_bit(SCST_TGT_DEV_RESERVED,
1982 &tgt_dev_tmp->tgt_dev_flags);
1984 dev->dev_reserved = 0;
1987 spin_unlock_bh(&dev->dev_lock);
1990 scst_send_release(dev);
1996 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
1997 const char *initiator_name)
1999 struct scst_session *sess;
2006 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2007 sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2009 sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2012 TRACE(TRACE_OUT_OF_MEM, "%s",
2013 "Allocation of scst_session failed");
2016 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2017 memset(sess, 0, sizeof(*sess));
2020 sess->init_phase = SCST_SESS_IPH_INITING;
2021 sess->shut_phase = SCST_SESS_SPH_READY;
2022 atomic_set(&sess->refcnt, 0);
2023 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2024 struct list_head *sess_tgt_dev_list_head =
2025 &sess->sess_tgt_dev_list_hash[i];
2026 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2028 spin_lock_init(&sess->sess_list_lock);
2029 INIT_LIST_HEAD(&sess->search_cmd_list);
2030 INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2032 INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2033 INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2034 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2035 INIT_DELAYED_WORK(&sess->hw_pending_work,
2036 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2038 INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2041 #ifdef CONFIG_SCST_MEASURE_LATENCY
2042 spin_lock_init(&sess->meas_lock);
2045 len = strlen(initiator_name);
2046 nm = kmalloc(len + 1, gfp_mask);
2048 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2052 strcpy(nm, initiator_name);
2053 sess->initiator_name = nm;
2060 kmem_cache_free(scst_sess_cachep, sess);
2065 void scst_free_session(struct scst_session *sess)
2069 mutex_lock(&scst_mutex);
2071 TRACE_DBG("Removing sess %p from the list", sess);
2072 list_del(&sess->sess_list_entry);
2073 TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2074 list_del(&sess->acg_sess_list_entry);
2076 scst_sess_free_tgt_devs(sess);
2078 wake_up_all(&sess->tgt->unreg_waitQ);
2080 mutex_unlock(&scst_mutex);
2082 kfree(sess->initiator_name);
2083 kmem_cache_free(scst_sess_cachep, sess);
2089 void scst_free_session_callback(struct scst_session *sess)
2091 struct completion *c;
2095 TRACE_DBG("Freeing session %p", sess);
2097 cancel_delayed_work_sync(&sess->hw_pending_work);
2099 c = sess->shutdown_compl;
2101 if (sess->unreg_done_fn) {
2102 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2103 sess->unreg_done_fn(sess);
2104 TRACE_DBG("%s", "unreg_done_fn() returned");
2106 scst_free_session(sess);
2115 void scst_sched_session_free(struct scst_session *sess)
2117 unsigned long flags;
2121 if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2122 PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
2123 "shut phase %lx", sess, sess->shut_phase);
2127 spin_lock_irqsave(&scst_mgmt_lock, flags);
2128 TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2129 list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2130 spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2132 wake_up(&scst_mgmt_waitQ);
2138 void scst_cmd_get(struct scst_cmd *cmd)
2140 __scst_cmd_get(cmd);
2142 EXPORT_SYMBOL(scst_cmd_get);
2144 void scst_cmd_put(struct scst_cmd *cmd)
2146 __scst_cmd_put(cmd);
2148 EXPORT_SYMBOL(scst_cmd_put);
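/*
 * scst_alloc_cmd() - allocate a command from scst_cmd_cachep and fill it
 * with the defaults (SIMPLE queue type, default timeout, one reference,
 * global cmd lists).
 */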
2150 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2152 struct scst_cmd *cmd;
2156 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2157 cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2159 cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2162 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2165 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2166 memset(cmd, 0, sizeof(*cmd));
2169 cmd->state = SCST_CMD_STATE_INIT_WAIT;
2170 cmd->start_time = jiffies;
2171 atomic_set(&cmd->cmd_ref, 1);
2172 cmd->cmd_lists = &scst_main_cmd_lists;
2173 INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2174 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2175 cmd->timeout = SCST_DEFAULT_TIMEOUT;
2178 cmd->is_send_status = 1;
2179 cmd->resp_data_len = -1;
2181 cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2182 cmd->dbl_ua_orig_resp_data_len = -1;
2189 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2191 scst_sess_put(cmd->sess);
2194 * At this point tgt_dev can be dead, but the pointer remains non-NULL
2196 if (likely(cmd->tgt_dev != NULL))
2199 scst_destroy_cmd(cmd);
2203 /* No locks supposed to be held */
2204 void scst_free_cmd(struct scst_cmd *cmd)
2210 TRACE_DBG("Freeing cmd %p (tag %llu)",
2211 cmd, (long long unsigned int)cmd->tag);
2213 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2214 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2215 cmd, atomic_read(&scst_cmd_count));
2218 sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2219 cmd->dec_on_dev_needed);
2221 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2222 #if defined(CONFIG_SCST_EXTRACHECKS)
2223 if (cmd->scsi_req) {
2224 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2226 scst_release_request(cmd);
2232 * The target driver could have already freed the sg buffer before
2233 * calling scst_tgt_cmd_done(). E.g., scst_local has to do that.
2235 if (!cmd->tgt_data_buf_alloced)
2236 scst_check_restore_sg_buff(cmd);
2238 if (cmd->tgtt->on_free_cmd != NULL) {
2239 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2240 cmd->tgtt->on_free_cmd(cmd);
2241 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2244 if (likely(cmd->dev != NULL)) {
2245 struct scst_dev_type *handler = cmd->dev->handler;
2246 if (handler->on_free_cmd != NULL) {
2247 TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2248 handler->name, cmd);
2249 handler->on_free_cmd(cmd);
2250 TRACE_DBG("Dev handler %s on_free_cmd() returned",
2255 scst_release_space(cmd);
2257 if (unlikely(cmd->sense != NULL)) {
2258 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2259 mempool_free(cmd->sense, scst_sense_mempool);
2263 if (likely(cmd->tgt_dev != NULL)) {
2264 #ifdef CONFIG_SCST_EXTRACHECKS
2265 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2266 PRINT_ERROR("Finishing not executed cmd %p (opcode "
2267 "%d, target %s, LUN %lld, sn %ld, expected_sn %ld)",
2268 cmd, cmd->cdb[0], cmd->tgtt->name,
2269 (long long unsigned int)cmd->lun,
2270 cmd->sn, cmd->tgt_dev->expected_sn);
2271 scst_unblock_deferred(cmd->tgt_dev, cmd);
2275 if (unlikely(cmd->out_of_sn)) {
2276 TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2278 (long long unsigned int)cmd->tag,
2280 destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2285 if (likely(destroy))
2286 scst_destroy_put_cmd(cmd);
2292 /* No locks supposed to be held. */
2293 void scst_check_retries(struct scst_tgt *tgt)
2295 int need_wake_up = 0;
2300 * We don't worry about overflow of finished_cmds, because we check
2301 * only for its change.
2303 atomic_inc(&tgt->finished_cmds);
2304 /* See comment in scst_queue_retry_cmd() */
2305 smp_mb__after_atomic_inc();
2306 if (unlikely(tgt->retry_cmds > 0)) {
2307 struct scst_cmd *c, *tc;
2308 unsigned long flags;
2310 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2313 spin_lock_irqsave(&tgt->tgt_lock, flags);
2314 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2318 TRACE_RETRY("Moving retry cmd %p to head of active "
2319 "cmd list (retry_cmds left %d)",
2320 c, tgt->retry_cmds);
2321 spin_lock(&c->cmd_lists->cmd_list_lock);
2322 list_move(&c->cmd_list_entry,
2323 &c->cmd_lists->active_cmd_list);
2324 wake_up(&c->cmd_lists->cmd_list_waitQ);
2325 spin_unlock(&c->cmd_lists->cmd_list_lock);
2328 if (need_wake_up >= 2) /* "slow start" */
2331 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2338 void scst_tgt_retry_timer_fn(unsigned long arg)
2340 struct scst_tgt *tgt = (struct scst_tgt *)arg;
2341 unsigned long flags;
2343 TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2345 spin_lock_irqsave(&tgt->tgt_lock, flags);
2346 tgt->retry_timer_active = 0;
2347 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2349 scst_check_retries(tgt);
2355 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2357 struct scst_mgmt_cmd *mcmd;
2361 mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2363 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2364 "failed, some commands and their data could leak");
2367 memset(mcmd, 0, sizeof(*mcmd));
2374 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2376 unsigned long flags;
2380 spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2381 atomic_dec(&mcmd->sess->sess_cmd_count);
2382 spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2384 scst_sess_put(mcmd->sess);
2386 if (mcmd->mcmd_tgt_dev != NULL)
2389 mempool_free(mcmd, scst_mgmt_mempool);
2395 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2396 int scst_alloc_request(struct scst_cmd *cmd)
2399 struct scsi_request *req;
2400 int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2404 /* cmd->dev->scsi_dev must be non-NULL here */
2405 req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2407 TRACE(TRACE_OUT_OF_MEM, "%s",
2408 "Allocation of scsi_request failed");
2413 cmd->scsi_req = req;
2415 memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2416 req->sr_cmd_len = cmd->cdb_len;
2417 req->sr_data_direction = cmd->data_direction;
2418 req->sr_use_sg = cmd->sg_cnt;
2419 req->sr_bufflen = cmd->bufflen;
2420 req->sr_buffer = cmd->sg;
2421 req->sr_request->rq_disk = cmd->dev->rq_disk;
2422 req->sr_sense_buffer[0] = 0;
2424 cmd->scsi_req->upper_private_data = cmd;
2431 void scst_release_request(struct scst_cmd *cmd)
2433 scsi_release_request(cmd->scsi_req);
2434 cmd->scsi_req = NULL;
2438 static bool is_report_sg_limitation(void)
2440 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2441 return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2447 int scst_alloc_space(struct scst_cmd *cmd)
2451 int atomic = scst_cmd_atomic(cmd);
2453 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2458 gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2460 flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2462 flags |= SCST_POOL_ALLOC_NO_CACHED;
2464 cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2465 &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2466 if (cmd->sg == NULL)
2469 if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2470 if ((ll < 10) || is_report_sg_limitation()) {
2471 PRINT_INFO("Unable to complete command due to "
2472 "SG IO count limitation (requested %d, "
2473 "available %d, tgt lim %d)", cmd->sg_cnt,
2474 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2480 if (cmd->data_direction != SCST_DATA_BIDI)
2483 cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2484 flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2485 &cmd->dev->dev_mem_lim, NULL);
2486 if (cmd->in_sg == NULL)
2489 if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2490 if ((ll < 10) || is_report_sg_limitation()) {
2491 PRINT_INFO("Unable to complete command due to "
2492 "SG IO count limitation (IN buffer, requested "
2493 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2494 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2497 goto out_in_sg_free;
2508 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2514 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2521 static void scst_release_space(struct scst_cmd *cmd)
2525 if (cmd->sgv == NULL)
2528 if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2529 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2533 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2540 if (cmd->in_sgv != NULL) {
2541 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2545 cmd->in_bufflen = 0;
2553 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2556 * Can switch to the next dst_sg element; hence, to copy into strictly
2557 * one dst_sg element, it must either be the last in the chain, or
2558 * copy_len must be equal to dst_sg->length.
2560 static int __sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2561 size_t *pdst_offs, struct scatterlist *src_sg,
2563 enum km_type d_km_type, enum km_type s_km_type)
2566 struct scatterlist *dst_sg;
2567 size_t src_len, dst_len, src_offs, dst_offs;
2568 struct page *src_page, *dst_page;
2571 copy_len = 0x7FFFFFFF; /* copy all */
2574 dst_len = *pdst_len;
2575 dst_offs = *pdst_offs;
2576 dst_page = sg_page(dst_sg);
2578 src_page = sg_page(src_sg);
2579 src_len = src_sg->length;
2580 src_offs = src_sg->offset;
2583 void *saddr, *daddr;
2586 saddr = kmap_atomic(src_page +
2587 (src_offs >> PAGE_SHIFT), s_km_type) +
2588 (src_offs & ~PAGE_MASK);
2589 daddr = kmap_atomic(dst_page +
2590 (dst_offs >> PAGE_SHIFT), d_km_type) +
2591 (dst_offs & ~PAGE_MASK);
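/*
 * Copy a whole page at once when both sides are page aligned and at
 * least a full page remains to be copied; otherwise copy the largest
 * run that stays within a single page on both sides.
 */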
2593 if (((src_offs & ~PAGE_MASK) == 0) &&
2594 ((dst_offs & ~PAGE_MASK) == 0) &&
2595 (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2596 (copy_len >= PAGE_SIZE)) {
2597 copy_page(daddr, saddr);
2600 n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2601 PAGE_SIZE - (src_offs & ~PAGE_MASK));
2602 n = min(n, src_len);
2603 n = min(n, dst_len);
2604 n = min_t(size_t, n, copy_len);
2605 memcpy(daddr, saddr, n);
2610 kunmap_atomic(saddr, s_km_type);
2611 kunmap_atomic(daddr, d_km_type);
2621 dst_sg = sg_next(dst_sg);
2624 dst_page = sg_page(dst_sg);
2625 dst_len = dst_sg->length;
2626 dst_offs = dst_sg->offset;
2628 } while (src_len > 0);
2632 *pdst_len = dst_len;
2633 *pdst_offs = dst_offs;
2638 * sg_copy_elem - copy one SG element to another
2639 * @dst_sg: destination SG element
2640 * @src_sg: source SG element
2641 * @copy_len: maximum amount of data to copy. If 0, then copy all.
2642 * @d_km_type: kmap_atomic type for the destination SG
2643 * @s_km_type: kmap_atomic type for the source SG
2646 * Data from the source SG element will be copied to the destination SG
2647  * element. Returns the number of bytes copied. Can switch to the next dst_sg
2648  * element, so, to copy strictly into only one dst_sg element, it must be
2649  * either the last one in the chain, or copy_len == dst_sg->length.
2651 int sg_copy_elem(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2652 size_t copy_len, enum km_type d_km_type,
2653 enum km_type s_km_type)
2655 size_t dst_len = dst_sg->length, dst_offs = dst_sg->offset;
2657 return __sg_copy_elem(&dst_sg, &dst_len, &dst_offs, src_sg,
2658 copy_len, d_km_type, s_km_type);
2663 * sg_copy - copy one SG vector to another
2664 * @dst_sg: destination SG
2665 * @src_sg: source SG
2666 * @copy_len: maximum amount of data to copy. If 0, then copy all.
2667 * @d_km_type: kmap_atomic type for the destination SG
2668 * @s_km_type: kmap_atomic type for the source SG
2671 * Data from the source SG vector will be copied to the destination SG
2672 * vector. End of the vectors will be determined by sg_next() returning
2673 * NULL. Returns number of bytes copied.
2675 int sg_copy(struct scatterlist *dst_sg,
2676 struct scatterlist *src_sg, size_t copy_len,
2677 enum km_type d_km_type, enum km_type s_km_type)
2680 size_t dst_len, dst_offs;
2683 copy_len = 0x7FFFFFFF; /* copy all */
2685 dst_len = dst_sg->length;
2686 dst_offs = dst_sg->offset;
2689 copy_len -= __sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2690 src_sg, copy_len, d_km_type, s_km_type);
2691 if ((copy_len == 0) || (dst_sg == NULL))
2694 src_sg = sg_next(src_sg);
2695 } while (src_sg != NULL);
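/*
 * Minimal usage sketch for sg_copy()/sg_copy_elem() (illustrative only;
 * the buffer sizes and KM types below are assumptions for the example
 * and error handling is omitted):
 *
 *	struct scatterlist src[2], dst[1];
 *	char *a = kmalloc(512, GFP_KERNEL);
 *	char *b = kmalloc(512, GFP_KERNEL);
 *	char *c = kzalloc(1024, GFP_KERNEL);
 *
 *	sg_init_table(src, 2);
 *	sg_set_buf(&src[0], a, 512);
 *	sg_set_buf(&src[1], b, 512);
 *
 *	sg_init_table(dst, 1);
 *	sg_set_buf(&dst[0], c, 1024);
 *
 *	sg_copy(dst, src, 0, KM_USER0, KM_USER1);
 *
 * With copy_len == 0 everything held by the source vector (1024 bytes
 * here) is linearized into the single destination element.
 */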
2700 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
2702 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2703 #include <linux/pfn.h>
2705 struct blk_kern_sg_hdr {
2706 struct scatterlist *orig_sgp;
2708 struct sg_table new_sg_table;
2709 struct scatterlist *saved_sg;
2714 #define BLK_KERN_SG_HDR_ENTRIES (1 + (sizeof(struct blk_kern_sg_hdr) - 1) / \
2715 sizeof(struct scatterlist))
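/*
 * Worked example (the sizes are illustrative, not taken from any
 * particular build): if sizeof(struct blk_kern_sg_hdr) were 40 bytes
 * and sizeof(struct scatterlist) were 32 bytes, then
 * BLK_KERN_SG_HDR_ENTRIES = 1 + (40 - 1) / 32 = 2, i.e. the first two
 * scatterlist slots of each allocation below are reserved for the
 * header and the payload elements start right after them.
 */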
2718 * blk_rq_unmap_kern_sg - "unmaps" data buffers in the request
2719 * @req: request to unmap
2720  * @do_copy:	whether to copy data between the buffers, if needed
2723 * It frees all additional buffers allocated for SG->BIO mapping.
2725 void blk_rq_unmap_kern_sg(struct request *req, int do_copy)
2727 struct blk_kern_sg_hdr *hdr = (struct blk_kern_sg_hdr *)req->end_io_data;
2732 if (hdr->tail_only) {
2733 /* Tail element only was copied */
2734 struct scatterlist *saved_sg = hdr->saved_sg;
2735 struct scatterlist *tail_sg = hdr->orig_sgp;
2737 if ((rq_data_dir(req) == READ) && do_copy)
2738 sg_copy_elem(saved_sg, tail_sg, tail_sg->length,
2739 KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
2741 __free_pages(sg_page(tail_sg), get_order(tail_sg->length));
2742 *tail_sg = *saved_sg;
2745 /* The whole SG was copied */
2746 struct sg_table new_sg_table = hdr->new_sg_table;
2747 struct scatterlist *new_sgl = new_sg_table.sgl +
2748 BLK_KERN_SG_HDR_ENTRIES;
2749 struct scatterlist *orig_sgl = hdr->orig_sgp;
2751 if ((rq_data_dir(req) == READ) && do_copy)
2752 sg_copy(orig_sgl, new_sgl, 0, KM_BIO_DST_IRQ,
2755 sg_free_table(&new_sg_table);
2762 static int blk_rq_handle_align_tail_only(struct request *rq,
2763 struct scatterlist *sg_to_copy,
2764 gfp_t gfp, gfp_t page_gfp)
2767 struct scatterlist *tail_sg = sg_to_copy;
2768 struct scatterlist *saved_sg;
2769 struct blk_kern_sg_hdr *hdr;
2773 saved_sg_nents = 1 + BLK_KERN_SG_HDR_ENTRIES;
2775 saved_sg = kmalloc(sizeof(*saved_sg) * saved_sg_nents, gfp);
2776 if (saved_sg == NULL)
2779 sg_init_table(saved_sg, saved_sg_nents);
2781 hdr = (struct blk_kern_sg_hdr *)saved_sg;
2782 saved_sg += BLK_KERN_SG_HDR_ENTRIES;
2783 saved_sg_nents -= BLK_KERN_SG_HDR_ENTRIES;
2785 hdr->tail_only = true;
2786 hdr->orig_sgp = tail_sg;
2787 hdr->saved_sg = saved_sg;
2789 *saved_sg = *tail_sg;
2791 pg = alloc_pages(page_gfp, get_order(tail_sg->length));
2793 goto err_free_saved_sg;
2795 sg_assign_page(tail_sg, pg);
2796 tail_sg->offset = 0;
2798 if (rq_data_dir(rq) == WRITE)
2799 sg_copy_elem(tail_sg, saved_sg, saved_sg->length,
2800 KM_USER1, KM_USER0);
2802 rq->end_io_data = hdr;
2803 rq->cmd_flags |= REQ_COPY_USER;
2816 static int blk_rq_handle_align(struct request *rq, struct scatterlist **psgl,
2817 int *pnents, struct scatterlist *sgl_to_copy,
2818 int nents_to_copy, gfp_t gfp, gfp_t page_gfp)
2821 struct scatterlist *sgl = *psgl;
2822 int nents = *pnents;
2823 struct sg_table sg_table;
2824 struct scatterlist *sg;
2825 struct scatterlist *new_sgl;
2826 size_t len = 0, to_copy;
2828 struct blk_kern_sg_hdr *hdr;
2830 if (sgl != sgl_to_copy) {
2831 /* copy only the last element */
2832 res = blk_rq_handle_align_tail_only(rq, sgl_to_copy,
2836 	/* else fall through and realign the whole SG vector */
2839 for_each_sg(sgl, sg, nents, i)
2843 new_sgl_nents = PFN_UP(len) + BLK_KERN_SG_HDR_ENTRIES;
2845 res = sg_alloc_table(&sg_table, new_sgl_nents, gfp);
2849 new_sgl = sg_table.sgl;
2850 hdr = (struct blk_kern_sg_hdr *)new_sgl;
2851 new_sgl += BLK_KERN_SG_HDR_ENTRIES;
2852 new_sgl_nents -= BLK_KERN_SG_HDR_ENTRIES;
2854 hdr->tail_only = false;
2855 hdr->orig_sgp = sgl;
2856 hdr->new_sg_table = sg_table;
2858 for_each_sg(new_sgl, sg, new_sgl_nents, i) {
2861 pg = alloc_page(page_gfp);
2863 goto err_free_new_sgl;
2865 sg_assign_page(sg, pg);
2866 sg->length = min_t(size_t, PAGE_SIZE, len);
2871 if (rq_data_dir(rq) == WRITE) {
2873 		 * We need to limit the amount of copied data to to_copy, because
2874 		 * sgl might have its last element not marked as the last one in
2877 sg_copy(new_sgl, sgl, to_copy, KM_USER0, KM_USER1);
2880 rq->end_io_data = hdr;
2881 rq->cmd_flags |= REQ_COPY_USER;
2884 *pnents = new_sgl_nents;
2890 for_each_sg(new_sgl, sg, new_sgl_nents, i) {
2891 struct page *pg = sg_page(sg);
2896 sg_free_table(&sg_table);
2902 static void bio_map_kern_endio(struct bio *bio, int err)
2907 static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
2908 int nents, gfp_t gfp, struct scatterlist **sgl_to_copy,
2912 struct request_queue *q = rq->q;
2913 int rw = rq_data_dir(rq);
2917 struct scatterlist *sg, *prev_sg = NULL;
2918 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
2920 *sgl_to_copy = NULL;
2922 	if (unlikely((sgl == NULL) || (nents <= 0))) {
2929 * Let's keep each bio allocation inside a single page to decrease
2930 * probability of failure.
2932 max_nr_vecs = min_t(size_t,
2933 ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
2936 need_new_bio = true;
2938 for_each_sg(sgl, sg, nents, i) {
2939 struct page *page = sg_page(sg);
2940 void *page_addr = page_address(page);
2941 size_t len = sg->length, l;
2942 size_t offset = sg->offset;
2948 * Each segment must be aligned on DMA boundary and
2949 * not on stack. The last one may have unaligned
2950 * length as long as the total length is aligned to
2951 * DMA padding alignment.
2957 if (((sg->offset | l) & queue_dma_alignment(q)) ||
2958 (page_addr && object_is_on_stack(page_addr + sg->offset))) {
2968 bio = bio_kmalloc(gfp, max_nr_vecs);
2975 bio->bi_rw |= 1 << BIO_RW;
2977 bio->bi_end_io = bio_map_kern_endio;
2982 tbio = tbio->bi_next = bio;
2985 bytes = min_t(size_t, len, PAGE_SIZE - offset);
2987 rc = bio_add_pc_page(q, bio, page, bytes, offset);
2989 if (unlikely(need_new_bio || (rc < 0))) {
2996 need_new_bio = true;
3003 need_new_bio = false;
3006 page = nth_page(page, 1);
3015 /* Total length must be aligned on DMA padding alignment */
3016 if ((tot_len & q->dma_pad_mask) &&
3017 !(rq->cmd_flags & REQ_COPY_USER)) {
3019 if (sgl->offset == 0) {
3020 *sgl_to_copy = prev_sg;
3027 while (hbio != NULL) {
3029 hbio = hbio->bi_next;
3030 bio->bi_next = NULL;
3032 blk_queue_bounce(q, &bio);
3034 res = blk_rq_append_bio(q, rq, bio);
3035 if (unlikely(res != 0)) {
3036 bio->bi_next = hbio;
3042 rq->buffer = rq->data = NULL;
3049 *nents_to_copy = nents;
3052 while (hbio != NULL) {
3054 hbio = hbio->bi_next;
3061 * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
3062 * @rq: request to fill
3064 * @nents: number of elements in @sgl
3065 * @gfp: memory allocation flags
3068 * Data will be mapped directly if possible. Otherwise a bounce
3069 * buffer will be used.
3071 int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3072 int nents, gfp_t gfp)
3075 struct scatterlist *sg_to_copy = NULL;
3076 int nents_to_copy = 0;
3078 	if (unlikely((sgl == NULL) || (sgl->length == 0) ||
3079 (nents <= 0) || (rq->end_io_data != NULL))) {
3085 res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3087 if (unlikely(res != 0)) {
3088 if (sg_to_copy == NULL)
3091 res = blk_rq_handle_align(rq, &sgl, &nents, sg_to_copy,
3092 nents_to_copy, gfp, rq->q->bounce_gfp | gfp);
3093 if (unlikely(res != 0))
3096 res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3099 blk_rq_unmap_kern_sg(rq, 0);
3104 rq->buffer = rq->data = NULL;
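/*
 * Minimal usage sketch (illustrative only; q, sg and nents are assumed
 * to be prepared by the caller, and the CDB setup plus error handling
 * are omitted):
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	if (blk_rq_map_kern_sg(rq, sg, nents, GFP_KERNEL) == 0) {
 *		blk_execute_rq(q, NULL, rq, 0);
 *		blk_rq_unmap_kern_sg(rq, 1);
 *	}
 *	blk_put_request(rq);
 *
 * blk_rq_unmap_kern_sg() must be called after completion to free any
 * realignment buffers and, on reads, copy the data back.
 */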
3110 struct scsi_io_context {
3113 void (*done)(void *data, char *sense, int result, int resid);
3114 char sense[SCSI_SENSE_BUFFERSIZE];
3117 static void scsi_end_async(struct request *req, int error)
3119 struct scsi_io_context *sioc = req->end_io_data;
3121 req->end_io_data = sioc->blk_data;
3122 blk_rq_unmap_kern_sg(req, (error == 0));
3125 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3128 __blk_put_request(req->q, req);
3132 * scsi_execute_async - insert request
3133 * @sdev: scsi device
3134 * @cmd: scsi command
3135 * @cmd_len: length of scsi cdb
3136 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
3137 * @sgl: data buffer scatterlist
3138 * @nents: number of elements in the sgl
3139  * @timeout: request timeout in jiffies (copied to req->timeout)
3140 * @retries: number of times to retry request
3141 * @privdata: data passed to done()
3142 * @done: callback function when done
3143 * @gfp: memory allocation flags
3144 * @flags: one or more SCSI_ASYNC_EXEC_FLAG_* flags
3146 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
3147 int cmd_len, int data_direction, struct scatterlist *sgl,
3148 int nents, int timeout, int retries, void *privdata,
3149 void (*done)(void *, char *, int, int), gfp_t gfp,
3152 struct request *req;
3153 struct scsi_io_context *sioc;
3155 int write = (data_direction == DMA_TO_DEVICE);
3157 sioc = kzalloc(sizeof(*sioc), gfp);
3159 return DRIVER_ERROR << 24;
3161 req = blk_get_request(sdev->request_queue, write, gfp);
3164 req->cmd_type = REQ_TYPE_BLOCK_PC;
3165 req->cmd_flags |= REQ_QUIET;
3167 if (flags & SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING)
3168 req->cmd_flags |= REQ_COPY_USER;
3171 err = blk_rq_map_kern_sg(req, sgl, nents, gfp);
3176 sioc->blk_data = req->end_io_data;
3177 sioc->data = privdata;
3180 req->cmd_len = cmd_len;
3181 memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3182 memcpy(req->cmd, cmd, req->cmd_len);
3183 req->sense = sioc->sense;
3185 req->timeout = timeout;
3186 req->retries = retries;
3187 req->end_io_data = sioc;
3189 blk_execute_rq_nowait(req->q, NULL, req,
3190 flags & SCSI_ASYNC_EXEC_FLAG_AT_HEAD, scsi_end_async);
3194 blk_put_request(req);
3198 return DRIVER_ERROR << 24;
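/*
 * Usage sketch (illustrative only): an asynchronous READ(10) issued
 * through scsi_execute_async(). The callback, CDB contents, sg/nents
 * and the timeout/retries values are assumptions for the example.
 *
 *	static void my_done(void *data, char *sense, int result, int resid)
 *	{
 *		complete(data);
 *	}
 *
 *	In the caller:
 *
 *	unsigned char cdb[10] = { READ_10, };
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	if (scsi_execute_async(sdev, cdb, sizeof(cdb), DMA_FROM_DEVICE,
 *			       sg, nents, 60 * HZ, 3, &done, my_done,
 *			       GFP_KERNEL, 0) == 0)
 *		wait_for_completion(&done);
 */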
3200 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
3202 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3204 struct scatterlist *src_sg, *dst_sg;
3205 unsigned int to_copy;
3206 int atomic = scst_cmd_atomic(cmd);
3210 if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3211 if (cmd->data_direction != SCST_DATA_BIDI) {
3212 src_sg = cmd->tgt_sg;
3214 to_copy = cmd->bufflen;
3216 TRACE_MEM("BIDI cmd %p", cmd);
3217 src_sg = cmd->tgt_in_sg;
3218 dst_sg = cmd->in_sg;
3219 to_copy = cmd->in_bufflen;
3223 dst_sg = cmd->tgt_sg;
3224 to_copy = cmd->resp_data_len;
3227 TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, "
3228 "to_copy %d", cmd, copy_dir, src_sg, dst_sg, to_copy);
3230 if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3232 		 * It can happen, e.g., with scst_user for a cmd with delayed
3233 		 * alloc that failed with a Check Condition.
3238 sg_copy(dst_sg, src_sg, to_copy, atomic ? KM_SOFTIRQ0 : KM_USER0,
3239 atomic ? KM_SOFTIRQ1 : KM_USER1);
3246 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3248 #define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
3249 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
3251 int scst_get_cdb_len(const uint8_t *cdb)
3253 return SCST_GET_CDB_LEN(cdb[0]);
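/*
 * Example: for READ(10) the opcode is 0x28, so SCST_CDB_GROUP(0x28) =
 * (0x28 >> 5) & 0x7 = 1 and scst_get_cdb_len() returns
 * SCST_CDB_LENGTH[1] = 10. The -1 entries correspond to the reserved
 * and vendor-specific CDB groups.
 */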
3256 /* get_trans_len_x extracts x bytes from the cdb as the length, starting at off */
3258 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3265 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3271 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3273 cmd->bufflen = READ_CAP_LEN;
3277 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3283 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3284 cmd->op_name = "READ CAPACITY(16)";
3285 cmd->bufflen = READ_CAP16_LEN;
3286 cmd->op_flags |= SCST_IMPLICIT_HQ;
3288 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3290 TRACE_EXIT_RES(res);
3294 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3300 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3302 uint8_t *p = (uint8_t *)cmd->cdb + off;
3306 cmd->bufflen |= ((u32)p[0]) << 8;
3307 cmd->bufflen |= ((u32)p[1]);
3309 switch (cmd->cdb[1] & 0x1f) {
3313 if (cmd->bufflen != 0) {
3314 PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3315 "allocation length for service action %x",
3316 cmd->bufflen, cmd->cdb[1] & 0x1f);
3322 switch (cmd->cdb[1] & 0x1f) {
3331 cmd->bufflen = max(28, cmd->bufflen);
3334 PRINT_ERROR("READ POSITION: Invalid service action %x",
3335 cmd->cdb[1] & 0x1f);
3343 scst_set_cmd_error(cmd,
3344 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3349 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3351 cmd->bufflen = (u32)cmd->cdb[off];
3355 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3357 cmd->bufflen = (u32)cmd->cdb[off];
3358 if (cmd->bufflen == 0)
3363 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3365 const uint8_t *p = cmd->cdb + off;
3368 cmd->bufflen |= ((u32)p[0]) << 8;
3369 cmd->bufflen |= ((u32)p[1]);
3374 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3376 const uint8_t *p = cmd->cdb + off;
3379 cmd->bufflen |= ((u32)p[0]) << 16;
3380 cmd->bufflen |= ((u32)p[1]) << 8;
3381 cmd->bufflen |= ((u32)p[2]);
3386 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3388 const uint8_t *p = cmd->cdb + off;
3391 cmd->bufflen |= ((u32)p[0]) << 24;
3392 cmd->bufflen |= ((u32)p[1]) << 16;
3393 cmd->bufflen |= ((u32)p[2]) << 8;
3394 cmd->bufflen |= ((u32)p[3]);
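/*
 * Worked example: READ(12) carries its transfer length in CDB bytes
 * 6-9, big-endian, so with off = 6 and cdb[6..9] = 00 00 10 00 the
 * helper above sets cmd->bufflen = 0x1000. For opcodes flagged
 * SCST_TRANSFER_LEN_TYPE_FIXED this value is later scaled by the block
 * size in the *_generic_parse() routines below.
 */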
3399 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3405 int scst_get_cdb_info(struct scst_cmd *cmd)
3407 int dev_type = cmd->dev->type;
3410 const struct scst_sdbops *ptr = NULL;
3414 op = cmd->cdb[0]; /* get clear opcode */
3416 TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3417 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3420 i = scst_scsi_op_list[op];
3421 while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3422 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3423 ptr = &scst_scsi_op_table[i];
3424 TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3425 ptr->ops, ptr->devkey[0], /* disk */
3426 ptr->devkey[1], /* tape */
3427 ptr->devkey[2], /* printer */
3428 ptr->devkey[3], /* cpu */
3429 ptr->devkey[4], /* cdr */
3430 ptr->devkey[5], /* cdrom */
3431 ptr->devkey[6], /* scanner */
3432 ptr->devkey[7], /* worm */
3433 ptr->devkey[8], /* changer */
3434 ptr->devkey[9], /* commdev */
3436 TRACE_DBG("direction=%d flags=%d off=%d",
3445 if (unlikely(ptr == NULL)) {
3446 		/* opcode not found or not currently used */
3447 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3450 cmd->op_flags = SCST_INFO_NOT_FOUND;
3454 cmd->cdb_len = SCST_GET_CDB_LEN(op);
3455 cmd->op_name = ptr->op_name;
3456 cmd->data_direction = ptr->direction;
3457 cmd->op_flags = ptr->flags;
3458 res = (*ptr->get_trans_len)(cmd, ptr->off);
3461 TRACE_EXIT_RES(res);
3464 EXPORT_SYMBOL(scst_get_cdb_info);
3466 /* Packs an SCST LUN back into SCSI form using the peripheral device addressing method */
3467 uint64_t scst_pack_lun(const uint64_t lun)
3470 uint16_t *p = (uint16_t *)&res;
3473 *p = cpu_to_be16(*p);
3475 TRACE_EXIT_HRES((unsigned long)res);
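/*
 * Example (assuming a little-endian host, where the byte swap above
 * matters): scst_pack_lun(5) returns a value whose in-memory bytes are
 * 00 05 00 00 00 00 00 00, i.e. LUN 5 encoded with the peripheral
 * device addressing method (address method bits of byte 0 left zero).
 */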
3480 * Routine to extract a lun number from an 8-byte LUN structure
3481 * in network byte order (BE).
3482 * (see SAM-2, Section 4.12.3 page 40)
3483  * Supports the peripheral, flat space and logical unit addressing methods.
3485 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3487 uint64_t res = NO_SUCH_LUN;
3492 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3494 if (unlikely(len < 2)) {
3495 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3503 if ((*((uint64_t *)lun) &
3504 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3508 if (*((uint16_t *)&lun[2]) != 0)
3512 if (*((uint32_t *)&lun[2]) != 0)
3520 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
3521 switch (address_method) {
3522 case 0: /* peripheral device addressing method */
3525 PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
3526 "peripheral device addressing method 0x%02x, "
3527 "expected 0", *lun);
3534 * Looks like it's legal to use it as flat space addressing
3541 case 1: /* flat space addressing method */
3542 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3545 case 2: /* logical unit addressing method */
3547 PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
3548 "addressing method 0x%02x, expected 0",
3552 if (*(lun + 1) & 0xe0) {
3553 PRINT_ERROR("Illegal TARGET in LUN logical unit "
3554 "addressing method 0x%02x, expected 0",
3555 (*(lun + 1) & 0xf8) >> 5);
3558 res = *(lun + 1) & 0x1f;
3561 case 3: /* extended logical unit addressing method */
3563 PRINT_ERROR("Unimplemented LUN addressing method %u",
3569 TRACE_EXIT_RES((int)res);
3573 PRINT_ERROR("%s", "Multi-level LUN unimplemented");
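/*
 * Example: the 8-byte LUN 40 05 00 00 00 00 00 00 selects address
 * method (0x40 >> 6) = 1 (flat space), so scst_unpack_lun() returns
 * ((0x40 & 0x3f) << 8) | 0x05 = 5. The peripheral-addressing form
 * 00 05 00 00 00 00 00 00 unpacks to the same value.
 */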
3577 int scst_calc_block_shift(int sector_size)
3579 int block_shift = 0;
3582 if (sector_size == 0)
3592 if (block_shift < 9) {
3593 PRINT_ERROR("Wrong sector size %d", sector_size);
3597 TRACE_EXIT_RES(block_shift);
3600 EXPORT_SYMBOL(scst_calc_block_shift);
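/*
 * Example: scst_calc_block_shift(512) returns 9 and
 * scst_calc_block_shift(4096) returns 12; any sector size that would
 * yield a shift below 9 (i.e. smaller than 512 bytes) is rejected as
 * invalid.
 */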
3602 int scst_sbc_generic_parse(struct scst_cmd *cmd,
3603 int (*get_block_shift)(struct scst_cmd *cmd))
3610 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3611 * therefore change them only if necessary
3614 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3615 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3617 switch (cmd->cdb[0]) {
3622 if ((cmd->cdb[1] & BYTCHK) == 0) {
3623 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3634 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
3636 		 * No need for locks here, since *_detach() cannot be
3637 		 * called while there are existing commands.
3639 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3643 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3644 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
3645 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3646 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
3647 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3648 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
3650 TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
3651 res, cmd->bufflen, cmd->data_len, cmd->data_direction);
3653 TRACE_EXIT_RES(res);
3656 EXPORT_SYMBOL(scst_sbc_generic_parse);
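/*
 * Worked example for the scaling above (assuming READ(10) is marked
 * SCST_TRANSFER_LEN_TYPE_FIXED with neither small nor long timeout in
 * the op table): a READ(10) for 8 blocks on a device whose
 * get_block_shift() returns 9 (512-byte sectors) arrives with
 * cmd->bufflen = 8 and leaves the parse routine with
 * cmd->bufflen = 8 << 9 = 4096 bytes and
 * cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT.
 */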
3658 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
3659 int (*get_block_shift)(struct scst_cmd *cmd))
3666 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3667 * therefore change them only if necessary
3670 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3671 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3673 cmd->cdb[1] &= 0x1f;
3675 switch (cmd->cdb[0]) {
3680 if ((cmd->cdb[1] & BYTCHK) == 0) {
3681 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3691 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
3692 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3695 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3696 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
3697 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3698 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
3699 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3700 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
3702 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
3703 cmd->data_direction);
3708 EXPORT_SYMBOL(scst_cdrom_generic_parse);
3710 int scst_modisk_generic_parse(struct scst_cmd *cmd,
3711 int (*get_block_shift)(struct scst_cmd *cmd))
3718 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3719 * therefore change them only if necessary
3722 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3723 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3725 cmd->cdb[1] &= 0x1f;
3727 switch (cmd->cdb[0]) {
3732 if ((cmd->cdb[1] & BYTCHK) == 0) {
3733 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
3743 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
3744 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
3747 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3748 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
3749 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3750 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
3751 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3752 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
3754 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
3755 cmd->data_direction);
3757 TRACE_EXIT_RES(res);
3760 EXPORT_SYMBOL(scst_modisk_generic_parse);
3762 int scst_tape_generic_parse(struct scst_cmd *cmd,
3763 int (*get_block_size)(struct scst_cmd *cmd))
3770 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3771 * therefore change them only if necessary
3774 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3775 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3777 if (cmd->cdb[0] == READ_POSITION) {
3778 int tclp = cmd->cdb[1] & 4;
3779 int long_bit = cmd->cdb[1] & 2;
3780 int bt = cmd->cdb[1] & 1;
3782 if ((tclp == long_bit) && (!bt || !long_bit)) {
3784 tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
3785 cmd->data_direction = SCST_DATA_READ;
3788 cmd->data_direction = SCST_DATA_NONE;
3792 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
3793 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
3795 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
3796 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
3797 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
3798 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
3799 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
3800 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
3802 TRACE_EXIT_RES(res);
3805 EXPORT_SYMBOL(scst_tape_generic_parse);
3807 static int scst_null_parse(struct scst_cmd *cmd)
3814 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
3815 * therefore change them only if necessary
3818 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
3819 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
3821 switch (cmd->cdb[0]) {