 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2007 - 2009 ID7 Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst_priv.h"

#include "scst_cdbprobe.h"
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
	uint8_t *sense, int sense_len);
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
	int flags);
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);
#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
	int res = 0;
	gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

	if (cmd->sense != NULL)
		goto memzero;

	cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
	if (cmd->sense == NULL) {
		PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
			"The sense data will be lost!!", cmd->cdb[0]);
		res = -ENOMEM;
		goto out;
	}

memzero:
	cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
	memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
	return res;
}
EXPORT_SYMBOL(scst_alloc_sense);
int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
	const uint8_t *sense, unsigned int len)
{
	int res;

	res = scst_alloc_sense(cmd, atomic);
	if (res != 0) {
		PRINT_BUFFER("Lost sense", sense, len);
		goto out;
	}

	memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
	TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);

out:
	return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);
void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
	cmd->status = status;
	cmd->host_status = DID_OK;

	cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
	cmd->dbl_ua_orig_data_direction = cmd->data_direction;

	cmd->data_direction = SCST_DATA_NONE;
	cmd->resp_data_len = 0;
	cmd->is_send_status = 1;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
	int rc;

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

	rc = scst_alloc_sense(cmd, 1);
	if (rc != 0) {
		PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
			key, asc, ascq);
		return;
	}

	scst_set_sense(cmd->sense, cmd->sense_bufflen,
		scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
	TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);
}
EXPORT_SYMBOL(scst_set_cmd_error);
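/*
 * Usage sketch (illustrative only): a dev handler that detects an
 * unsupported field in a CDB can fail the command with
 *
 *	scst_set_cmd_error(cmd,
 *		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
 *
 * which sets CHECK CONDITION via scst_set_cmd_error_status() and fills
 * cmd->sense in the device's current (fixed or descriptor) sense format.
 * scst_sense_invalid_field_in_cdb is the same sense definition used by
 * get_trans_len_read_pos() below.
 */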
void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
	int key, int asc, int ascq)
{
	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			PRINT_ERROR("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}
		buffer[0] = 0x72;		/* Response Code */
		if (len > 1)
			buffer[1] = key;	/* Sense Key */
		if (len > 2)
			buffer[2] = asc;	/* ASC */
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ */
	} else {
		/* Fixed format */
		if (len < 14) {
			PRINT_ERROR("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}
		buffer[0] = 0x70;		/* Response Code */
		if (len > 2)
			buffer[2] = key;	/* Sense Key */
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC */
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ */
	}

	TRACE_BUFFER("Sense set", buffer, len);
}
EXPORT_SYMBOL(scst_set_sense);
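/*
 * Worked example of the layouts above, for key/asc/ascq = 5/0x24/0x00
 * (ILLEGAL REQUEST, INVALID FIELD IN CDB). Fixed format (d_sense false):
 * buffer[0] = 0x70, buffer[2] = 0x05, buffer[7] = 0x0a, buffer[12] = 0x24,
 * buffer[13] = 0x00. Descriptor format (d_sense true): buffer[0] = 0x72,
 * buffer[1] = 0x05, buffer[2] = 0x24, buffer[3] = 0x00. All other bytes
 * stay zero from the initial memset().
 */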
bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
	int key, int asc, int ascq)
{
	bool res = false;

	if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
		/* Fixed format */

		if (len < 14) {
			PRINT_ERROR("Sense too small to analyze (%d, "
				"type fixed)", len);
			goto out;
		}

		/* Sense Key */
		if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
			goto out;

		/* ASC */
		if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
			goto out;

		/* ASCQ */
		if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
			goto out;
	} else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
		/* Descriptor format */

		if (len < 8) {
			PRINT_ERROR("Sense too small to analyze (%d, "
				"type descriptor)", len);
			goto out;
		}

		/* Sense Key */
		if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
			goto out;

		/* ASC */
		if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
			goto out;

		/* ASCQ */
		if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
			goto out;
	} else
		goto out;

	res = true;

out:
	TRACE_EXIT_RES((int)res);
	return res;
}
EXPORT_SYMBOL(scst_analyze_sense);
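/*
 * Usage sketch (illustrative only): checking whether a sense buffer
 * carries a reset UA, matching key, ASC and ASCQ at once:
 *
 *	if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ALL_VALID,
 *			SCST_LOAD_SENSE(scst_sense_reset_UA)))
 *		... it is a reset UA ...
 *
 * A narrower valid_mask, e.g. only SCST_SENSE_KEY_VALID, compares just
 * the sense key and ignores ASC/ASCQ.
 */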
void scst_check_convert_sense(struct scst_cmd *cmd)
{
	bool d_sense;

	if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
		goto out;

	d_sense = scst_get_cmd_dev_d_sense(cmd);
	if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
		TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
			cmd);
		if (cmd->sense_bufflen < 14) {
			PRINT_ERROR("Sense too small to convert (%d, "
				"type fixed)", cmd->sense_bufflen);
			goto out;
		}
		scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
			cmd->sense[2], cmd->sense[12], cmd->sense[13]);
	} else if (!d_sense && ((cmd->sense[0] == 0x72) ||
			(cmd->sense[0] == 0x73))) {
		TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
			cmd);
		if (cmd->sense_bufflen < 4) {
			PRINT_ERROR("Sense too small to convert (%d, "
				"type descriptor)", cmd->sense_bufflen);
			goto out;
		}
		scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
			cmd->sense[1], cmd->sense[2], cmd->sense[3]);
	}

out:
	return;
}
EXPORT_SYMBOL(scst_check_convert_sense);
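/*
 * Example of the conversion above: a fixed format buffer carrying
 * key/ASC/ASCQ in bytes 2/12/13 is rewritten in place by scst_set_sense()
 * as a descriptor format buffer carrying the same triple in bytes 1/2/3,
 * and vice versa, so initiators always see the format selected by the
 * device's D_SENSE setting.
 */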
static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
	unsigned int len)
{
	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
	scst_alloc_set_sense(cmd, 1, sense, len);
}
void scst_set_busy(struct scst_cmd *cmd)
{
	int c = atomic_read(&cmd->sess->sess_cmd_count);

	if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
		scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
		TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
			"(cmds count %d, queue_type %x, sess->init_phase %d)",
			cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	} else {
		scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
		TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
			"initiator %s (cmds count %d, queue_type %x, "
			"sess->init_phase %d)", cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	}
}
EXPORT_SYMBOL(scst_set_busy);
void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
	int i;

	TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
		asc, ascq);

	/* Protect sess_tgt_dev_list_hash */
	mutex_lock(&scst_mutex);

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		struct scst_tgt_dev *tgt_dev;

		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			spin_lock_bh(&tgt_dev->tgt_dev_lock);
			if (!list_empty(&tgt_dev->UA_list)) {
				struct scst_tgt_dev_UA *ua;

				ua = list_entry(tgt_dev->UA_list.next,
					typeof(*ua), UA_list_entry);
				if (scst_analyze_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						SCST_SENSE_ALL_VALID,
						SCST_LOAD_SENSE(scst_sense_reset_UA))) {
					scst_set_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						tgt_dev->dev->d_sense,
						key, asc, ascq);
				} else
					PRINT_ERROR("%s",
						"The first UA isn't RESET UA");
			} else
				PRINT_ERROR("%s", "There's no RESET UA to "
					"modify");
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}

	mutex_unlock(&scst_mutex);
}
EXPORT_SYMBOL(scst_set_initial_UA);
static struct scst_aen *scst_alloc_aen(struct scst_tgt_dev *tgt_dev)
{
	struct scst_aen *aen;

	aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
	if (aen == NULL) {
		PRINT_ERROR("AEN memory allocation failed. Corresponding "
			"event notification will not be performed (initiator "
			"%s)", tgt_dev->sess->initiator_name);
		goto out;
	}
	memset(aen, 0, sizeof(*aen));

	aen->sess = tgt_dev->sess;
	scst_sess_get(aen->sess);

	aen->lun = scst_pack_lun(tgt_dev->lun);

out:
	TRACE_EXIT_HRES((unsigned long)aen);
	return aen;
}

static void scst_free_aen(struct scst_aen *aen)
{
	scst_sess_put(aen->sess);
	mempool_free(aen, scst_aen_mempool);
}
void scst_capacity_data_changed(struct scst_device *dev)
{
	struct scst_tgt_dev *tgt_dev;
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

	if (dev->type != TYPE_DISK) {
		TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
			"CHANGED UA", dev->type);
		goto out;
	}

	TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);

	mutex_lock(&scst_mutex);

	list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;

		if (tgtt->report_aen != NULL) {
			struct scst_aen *aen;
			int rc;

			aen = scst_alloc_aen(tgt_dev);
			if (aen == NULL)
				goto queue_ua;

			aen->event_fn = SCST_AEN_SCSI;
			aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
			scst_set_sense(aen->aen_sense, aen->aen_sense_len,
				tgt_dev->dev->d_sense,
				SCST_LOAD_SENSE(scst_sense_capacity_data_changed));

			TRACE_DBG("Calling target's %s report_aen(%p)",
				tgtt->name, aen);
			rc = tgtt->report_aen(aen);
			TRACE_DBG("Target's %s report_aen(%p) returned %d",
				tgtt->name, aen, rc);
			if (rc == SCST_AEN_RES_SUCCESS)
				continue;

			scst_free_aen(aen);
		}

queue_ua:
		TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED UA (tgt_dev %p)",
			tgt_dev);
		scst_set_sense(sense_buffer, sizeof(sense_buffer),
			tgt_dev->dev->d_sense,
			SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
		scst_check_set_UA(tgt_dev, sense_buffer,
			sizeof(sense_buffer), 0);
	}

	mutex_unlock(&scst_mutex);

out:
	return;
}
EXPORT_SYMBOL(scst_capacity_data_changed);
static inline bool scst_is_report_luns_changed_type(int type)
{
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	case TYPE_PRINTER:
	case TYPE_PROCESSOR:
	case TYPE_WORM:
	case TYPE_ROM:
	case TYPE_SCANNER:
	case TYPE_MOD:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_RAID:
	case TYPE_ENCLOSURE:
		return true;
	default:
		return false;
	}
}
/* scst_mutex is supposed to be held */
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
	int flags)
{
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
	struct list_head *shead;
	struct scst_tgt_dev *tgt_dev;
	int i;

	TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
		"(sess %p)", sess);

	local_bh_disable();

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			/* Lockdep triggers a false positive here. */
			spin_lock(&tgt_dev->tgt_dev_lock);
		}
	}

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			if (!scst_is_report_luns_changed_type(
					tgt_dev->dev->type))
				continue;

			scst_set_sense(sense_buffer, sizeof(sense_buffer),
				tgt_dev->dev->d_sense,
				SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

			__scst_check_set_UA(tgt_dev, sense_buffer,
				sizeof(sense_buffer),
				flags | SCST_SET_UA_FLAG_GLOBAL);
		}
	}

	for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry_reverse(tgt_dev,
				shead, sess_tgt_dev_list_entry) {
			spin_unlock(&tgt_dev->tgt_dev_lock);
		}
	}

	local_bh_enable();
}
/* The activity is supposed to be suspended and scst_mutex held */
void scst_report_luns_changed(struct scst_acg *acg)
{
	struct scst_session *sess;

	TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		int i;
		struct list_head *shead;
		struct scst_tgt_dev *tgt_dev;
		struct scst_tgt_template *tgtt = sess->tgt->tgtt;

		for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
			shead = &sess->sess_tgt_dev_list_hash[i];

			list_for_each_entry(tgt_dev, shead,
					sess_tgt_dev_list_entry) {
				if (scst_is_report_luns_changed_type(
						tgt_dev->dev->type))
					goto found;
			}
		}
		TRACE_MGMT_DBG("No device capable of REPORTED "
			"LUNS DATA CHANGED UA found (sess %p)", sess);
		continue;
found:
		if (tgtt->report_aen != NULL) {
			struct scst_aen *aen;
			int rc;

			aen = scst_alloc_aen(tgt_dev);
			if (aen == NULL)
				goto queue_ua;

			aen->event_fn = SCST_AEN_SCSI;
			aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
			scst_set_sense(aen->aen_sense, aen->aen_sense_len,
				tgt_dev->dev->d_sense,
				SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

			TRACE_DBG("Calling target's %s report_aen(%p)",
				tgtt->name, aen);
			rc = tgtt->report_aen(aen);
			TRACE_DBG("Target's %s report_aen(%p) returned %d",
				tgtt->name, aen, rc);
			if (rc == SCST_AEN_RES_SUCCESS)
				continue;

			scst_free_aen(aen);
		}

queue_ua:
		scst_queue_report_luns_changed_UA(sess, 0);
	}
}
void scst_aen_done(struct scst_aen *aen)
{
	TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
		aen->event_fn, aen->sess->initiator_name);

	if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
		goto out;

	if (aen->event_fn != SCST_AEN_SCSI)
		goto out;

	TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
		aen->sess->initiator_name);

	if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
			SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
				scst_sense_reported_luns_data_changed))) {
		mutex_lock(&scst_mutex);
		scst_queue_report_luns_changed_UA(aen->sess,
			SCST_SET_UA_FLAG_AT_HEAD);
		mutex_unlock(&scst_mutex);
	} else if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
			SCST_SENSE_ALL_VALID,
			SCST_LOAD_SENSE(scst_sense_capacity_data_changed))) {
		/* tgt_dev may have gone away, so we need to look it up again */
		struct list_head *shead;
		struct scst_tgt_dev *tgt_dev;
		uint64_t lun;

		lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));

		mutex_lock(&scst_mutex);

		shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			if (tgt_dev->lun == lun) {
				TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED "
					"UA (tgt_dev %p)", tgt_dev);
				scst_check_set_UA(tgt_dev, aen->aen_sense,
					aen->aen_sense_len,
					SCST_SET_UA_FLAG_AT_HEAD);
				break;
			}
		}

		mutex_unlock(&scst_mutex);
	} else
		PRINT_ERROR("%s", "Unknown SCSI AEN");

out:
	scst_free_aen(aen);
}
EXPORT_SYMBOL(scst_aen_done);
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
	int res;

	switch (cmd->state) {
	case SCST_CMD_STATE_INIT_WAIT:
	case SCST_CMD_STATE_INIT:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_DEV_DONE:
		if (cmd->internal)
			res = SCST_CMD_STATE_FINISHED_INTERNAL;
		else
			res = SCST_CMD_STATE_PRE_XMIT_RESP;
		break;

	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_MODE_SELECT_CHECKS:
		res = SCST_CMD_STATE_DEV_DONE;
		break;

	case SCST_CMD_STATE_PRE_XMIT_RESP:
		res = SCST_CMD_STATE_XMIT_RESP;
		break;

	case SCST_CMD_STATE_PREPROCESS_DONE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_DATA_WAIT:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_REAL_EXECUTING:
		res = SCST_CMD_STATE_PRE_DEV_DONE;
		break;

	default:
		PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
		/* Invalid state to suppress a compiler warning */
		res = SCST_CMD_STATE_LAST_ACTIVE;
	}

	return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
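/*
 * Example of the mapping above: a command failing in
 * SCST_CMD_STATE_REAL_EXEC is routed to SCST_CMD_STATE_PRE_DEV_DONE, so
 * the dev handler's completion path still runs, while one failing in
 * SCST_CMD_STATE_PRE_XMIT_RESP goes straight to SCST_CMD_STATE_XMIT_RESP.
 */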
void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
#ifdef CONFIG_SCST_EXTRACHECKS
	switch (cmd->state) {
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_FINISHED_INTERNAL:
	case SCST_CMD_STATE_XMIT_WAIT:
		PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}
#endif

	cmd->state = scst_get_cmd_abnormal_done_state(cmd);

#ifdef CONFIG_SCST_EXTRACHECKS
	if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
	    (cmd->tgt_dev == NULL) && !cmd->internal) {
		PRINT_CRIT_ERROR("Wrong not inited cmd state %d (cmd %p, "
			"op %x)", cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}
#endif
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
	int i, l;

	scst_check_restore_sg_buff(cmd);
	cmd->resp_data_len = resp_data_len;

	if (resp_data_len == cmd->bufflen)
		goto out;

	l = 0;
	for (i = 0; i < cmd->sg_cnt; i++) {
		l += cmd->sg[i].length;
		if (l >= resp_data_len) {
			int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
			TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
				"resp_data_len %d, i %d, cmd->sg[i].length %d, "
				"left %d",
				cmd, (long long unsigned int)cmd->tag,
				resp_data_len, i,
				cmd->sg[i].length, left);
#endif
			cmd->orig_sg_cnt = cmd->sg_cnt;
			cmd->orig_sg_entry = i;
			cmd->orig_entry_len = cmd->sg[i].length;
			cmd->sg_cnt = (left > 0) ? i+1 : i;
			cmd->sg[i].length = left;
			cmd->sg_buff_modified = 1;
			break;
		}
	}

out:
	return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
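/*
 * Worked example of the loop above: with three SG entries of 4096 bytes
 * each and resp_data_len == 6144, the loop stops at i == 1 (l == 8192),
 * left == 6144 - 4096 == 2048, so sg_cnt becomes 2 and sg[1].length is
 * cut to 2048. The original values are saved in orig_sg_* so that
 * scst_check_restore_sg_buff() can restore the buffer later.
 */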
/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
	struct scst_device *dev;
	int res = 0;
	static int dev_num;	/* protected by scst_mutex */

	dev = kzalloc(sizeof(*dev), gfp_mask);
	if (dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_device failed");
		res = -ENOMEM;
		goto out;
	}

	dev->handler = &scst_null_devtype;
	dev->p_cmd_lists = &scst_main_cmd_lists;
	atomic_set(&dev->dev_cmd_count, 0);
	atomic_set(&dev->write_cmd_count, 0);
	scst_init_mem_lim(&dev->dev_mem_lim);
	spin_lock_init(&dev->dev_lock);
	atomic_set(&dev->on_dev_count, 0);
	INIT_LIST_HEAD(&dev->blocked_cmd_list);
	INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
	INIT_LIST_HEAD(&dev->dev_acg_dev_list);
	INIT_LIST_HEAD(&dev->threads_list);
	init_waitqueue_head(&dev->on_dev_waitQ);
	dev->dev_double_ua_possible = 1;
	dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
	dev->dev_num = dev_num++;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
	dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
	if (dev->dev_io_ctx == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
		res = -ENOMEM;
		kfree(dev);
		goto out;
	}
#endif
#endif

	*out_dev = dev;

out:
	return res;
}
/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
#ifdef CONFIG_SCST_EXTRACHECKS
	if (!list_empty(&dev->dev_tgt_dev_list) ||
	    !list_empty(&dev->dev_acg_dev_list)) {
		PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
			"is not empty!", __func__);
		sBUG();
	}
#endif

	__exit_io_context(dev->dev_io_ctx);

	kfree(dev);
}
void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
	atomic_set(&mem_lim->alloced_pages, 0);
	mem_lim->max_allowed_pages =
		((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
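/*
 * Example of the arithmetic above, assuming scst_max_dev_cmd_mem is in
 * megabytes (the << 10 converts it to kilobytes): with 4 KB pages
 * (PAGE_SHIFT == 12) and scst_max_dev_cmd_mem == 2048, max_allowed_pages =
 * (2048 << 10) >> 2 = 524288 pages, i.e. 2 GB per device.
 */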
static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
	struct scst_device *dev, uint64_t lun)
{
	struct scst_acg_dev *res;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
	res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
	if (res == NULL) {
		TRACE(TRACE_OUT_OF_MEM,
			"%s", "Allocation of scst_acg_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(res, 0, sizeof(*res));
#endif

	res->dev = dev;
	res->acg = acg;
	res->lun = lun;

out:
	TRACE_EXIT_HRES(res);
	return res;
}
/* The activity is supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
	TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_del(&acg_dev->acg_dev_list_entry);
	list_del(&acg_dev->dev_acg_dev_list_entry);

	kmem_cache_free(scst_acgd_cachep, acg_dev);
}
/* The activity is supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
	struct scst_acg *acg;

	acg = kzalloc(sizeof(*acg), GFP_KERNEL);
	if (acg == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
		goto out;
	}

	INIT_LIST_HEAD(&acg->acg_dev_list);
	INIT_LIST_HEAD(&acg->acg_sess_list);
	INIT_LIST_HEAD(&acg->acn_list);
	acg->acg_name = acg_name;

	TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
	list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
	TRACE_EXIT_HRES(acg);
	return acg;
}
/* The activity is supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
	struct scst_acn *n, *nn;
	struct scst_acg_dev *acg_dev, *acg_dev_tmp;
	int res = 0;

	if (!list_empty(&acg->acg_sess_list)) {
		PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
		res = -EBUSY;
		goto out;
	}

	TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
	list_del(&acg->scst_acg_list_entry);

	/* Freeing acg_devs */
	list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
			acg_dev_list_entry) {
		struct scst_tgt_dev *tgt_dev, *tt;
		list_for_each_entry_safe(tgt_dev, tt,
				&acg_dev->dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			if (tgt_dev->acg_dev == acg_dev)
				scst_free_tgt_dev(tgt_dev);
		}
		scst_free_acg_dev(acg_dev);
	}

	/* Freeing names */
	list_for_each_entry_safe(n, nn, &acg->acn_list,
			acn_list_entry) {
		list_del(&n->acn_list_entry);
		kfree(n->name);
		kfree(n);
	}
	INIT_LIST_HEAD(&acg->acn_list);

	kfree(acg);

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * scst_mutex is supposed to be held; there must not be parallel activity
 * in this function.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
	struct scst_acg_dev *acg_dev)
{
	int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
	struct scst_tgt_dev *tgt_dev, *t = NULL;
	struct scst_device *dev = acg_dev->dev;
	struct list_head *sess_tgt_dev_list_head;
	struct scst_tgt_template *vtt = sess->tgt->tgtt;
	int rc, i;
	bool share_io_ctx = false;
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
	tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
	if (tgt_dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_tgt_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

	tgt_dev->dev = dev;
	tgt_dev->lun = acg_dev->lun;
	tgt_dev->acg_dev = acg_dev;
	tgt_dev->sess = sess;
	atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

	scst_sgv_pool_use_norm(tgt_dev);

	if (dev->scsi_dev != NULL) {
		ini_sg = dev->scsi_dev->host->sg_tablesize;
		ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
		ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
			ENABLE_CLUSTERING);
	} else {
		ini_sg = (1 << 15) /* infinite */;
		ini_unchecked_isa_dma = 0;
		ini_use_clustering = 0;
	}
	tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

	if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
	    !sess->tgt->tgtt->no_clustering)
		scst_sgv_pool_use_norm_clust(tgt_dev);

	if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
		scst_sgv_pool_use_dma(tgt_dev);

	if (dev->scsi_dev != NULL) {
		TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
			"SCST lun=%lld", dev->scsi_dev->host->host_no,
			dev->scsi_dev->channel, dev->scsi_dev->id,
			dev->scsi_dev->lun,
			(long long unsigned int)tgt_dev->lun);
	} else {
		TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
			dev->virt_name, (long long unsigned int)tgt_dev->lun);
	}

	spin_lock_init(&tgt_dev->tgt_dev_lock);
	INIT_LIST_HEAD(&tgt_dev->UA_list);
	spin_lock_init(&tgt_dev->thr_data_lock);
	INIT_LIST_HEAD(&tgt_dev->thr_data_list);
	spin_lock_init(&tgt_dev->sn_lock);
	INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
	INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
	tgt_dev->expected_sn = 1;
	tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
	tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
	for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
		atomic_set(&tgt_dev->sn_slots[i], 0);

	if (dev->handler->parse_atomic &&
	    (sess->tgt->tgtt->preprocessing_done == NULL)) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		if (dev->handler->exec_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->exec_atomic) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
			&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->dev_done_atomic &&
	    sess->tgt->tgtt->xmit_response_atomic) {
		__set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}

	scst_set_sense(sense_buffer, sizeof(sense_buffer),
		dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
	scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);

	tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

	if (tgt_dev->sess->initiator_name != NULL) {
		spin_lock_bh(&dev->dev_lock);
		list_for_each_entry(t, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			TRACE_DBG("t name %s (tgt_dev name %s)",
				t->sess->initiator_name,
				tgt_dev->sess->initiator_name);
			if (t->sess->initiator_name == NULL)
				continue;
			if (strcmp(t->sess->initiator_name,
					tgt_dev->sess->initiator_name) == 0) {
				share_io_ctx = true;
				break;
			}
		}
		spin_unlock_bh(&dev->dev_lock);
	}

	if (share_io_ctx) {
		TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
			t->tgt_dev_io_ctx, tgt_dev,
			tgt_dev->sess->initiator_name);
		tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
	} else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
		tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
		if (tgt_dev->tgt_dev_io_ctx == NULL) {
			TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
				"context for dev %s (initiator %s)",
				dev->virt_name, sess->initiator_name);
			goto out_free;
		}
#endif
#endif
	}

	if (vtt->threads_num > 0) {
		rc = 0;
		if (dev->handler->threads_num > 0)
			rc = scst_add_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			rc = scst_add_global_threads(vtt->threads_num);
		if (rc != 0)
			goto out_free;
	}

	if (dev->handler && dev->handler->attach_tgt) {
		TRACE_DBG("Calling dev handler's attach_tgt(%p)",
			tgt_dev);
		rc = dev->handler->attach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
		if (rc != 0) {
			PRINT_ERROR("Device handler's %s attach_tgt() "
				"failed: %d", dev->handler->name, rc);
			goto out_thr_free;
		}
	}

	spin_lock_bh(&dev->dev_lock);
	list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
	if (dev->dev_reserved)
		__set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
	spin_unlock_bh(&dev->dev_lock);

	sess_tgt_dev_list_head =
		&sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
	list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
		sess_tgt_dev_list_head);

out:
	return tgt_dev;

out_thr_free:
	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_global_threads(vtt->threads_num);
	}

out_free:
	scst_free_all_UA(tgt_dev);
	__exit_io_context(tgt_dev->tgt_dev_io_ctx);

	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
	tgt_dev = NULL;
	goto out;
}
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
/* No other locks supposed to be held; scst_mutex must be held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
{
	scst_clear_reservation(tgt_dev);

	/* With activity suspended the lock isn't needed, but let's be safe */
	spin_lock_bh(&tgt_dev->tgt_dev_lock);
	scst_free_all_UA(tgt_dev);
	memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);

	if (queue_UA) {
		uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
		scst_set_sense(sense_buffer, sizeof(sense_buffer),
			tgt_dev->dev->d_sense,
			SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
		scst_check_set_UA(tgt_dev, sense_buffer,
			sizeof(sense_buffer), 0);
	}
}
/*
 * scst_mutex is supposed to be held; there must not be parallel activity
 * in this function.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

	tm_dbg_deinit_tgt_dev(tgt_dev);

	spin_lock_bh(&dev->dev_lock);
	list_del(&tgt_dev->dev_tgt_dev_list_entry);
	spin_unlock_bh(&dev->dev_lock);

	list_del(&tgt_dev->sess_tgt_dev_list_entry);

	scst_clear_reservation(tgt_dev);
	scst_free_all_UA(tgt_dev);

	if (dev->handler && dev->handler->detach_tgt) {
		TRACE_DBG("Calling dev handler's detach_tgt(%p)",
			tgt_dev);
		dev->handler->detach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
	}

	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_global_threads(vtt->threads_num);
	}

	__exit_io_context(tgt_dev->tgt_dev_io_ctx);

	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
}
/* scst_mutex is supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;

	list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
			acg_dev_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
	}

out:
	return res;

out_free:
	scst_sess_free_tgt_devs(sess);
	goto out;
}
/*
 * scst_mutex is supposed to be held; there must not be parallel activity
 * in this function.
 */
static void scst_sess_free_tgt_devs(struct scst_session *sess)
{
	int i;
	struct scst_tgt_dev *tgt_dev, *t;

	/* The session is going down, no users, so no locks */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			scst_free_tgt_dev(tgt_dev);
		}
		INIT_LIST_HEAD(sess_tgt_dev_list_head);
	}
}
/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
	uint64_t lun, int read_only)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;
	struct scst_session *sess;
	LIST_HEAD(tmp_tgt_dev_list);

	INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef CONFIG_SCST_EXTRACHECKS
	list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
		if (acg_dev->dev == dev) {
			PRINT_ERROR("Device is already in group %s",
				acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}
#endif

	acg_dev = scst_alloc_acg_dev(acg, dev, lun);
	if (acg_dev == NULL) {
		res = -ENOMEM;
		goto out;
	}
	acg_dev->rd_only_flag = read_only;

	TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
	list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
			&tmp_tgt_dev_list);
	}

	scst_report_luns_changed(acg);

	if (dev->virt_name != NULL) {
		PRINT_INFO("Added device %s to group %s (LUN %lld, "
			"rd_only %d)", dev->virt_name, acg->acg_name,
			(long long unsigned int)lun,
			read_only);
	} else {
		PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
			"%lld, rd_only %d)",
			dev->scsi_dev->host->host_no,
			dev->scsi_dev->channel, dev->scsi_dev->id,
			dev->scsi_dev->lun, acg->acg_name,
			(long long unsigned int)lun,
			read_only);
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
			extra_tgt_dev_list_entry) {
		scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);
	goto out;
}
/* The activity is supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
	int res = 0;
	struct scst_acg_dev *acg_dev = NULL, *a;
	struct scst_tgt_dev *tgt_dev, *tt;

	list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
		if (a->dev == dev) {
			acg_dev = a;
			break;
		}
	}

	if (acg_dev == NULL) {
		PRINT_ERROR("Device is not found in group %s", acg->acg_name);
		res = -EINVAL;
		goto out;
	}

	list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (tgt_dev->acg_dev == acg_dev)
			scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);

	scst_report_luns_changed(acg);

	if (dev->virt_name != NULL) {
		PRINT_INFO("Removed device %s from group %s",
			dev->virt_name, acg->acg_name);
	} else {
		PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
			dev->scsi_dev->host->host_no,
			dev->scsi_dev->channel, dev->scsi_dev->id,
			dev->scsi_dev->lun, acg->acg_name);
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
/* scst_mutex is supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
	int res = 0;
	struct scst_acn *n;
	int len;
	char *nm;

	list_for_each_entry(n, &acg->acn_list, acn_list_entry)
	{
		if (strcmp(n->name, name) == 0) {
			PRINT_ERROR("Name %s already exists in group %s",
				name, acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (n == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn");
		res = -ENOMEM;
		goto out;
	}

	len = strlen(name);
	nm = kmalloc(len + 1, GFP_KERNEL);
	if (nm == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
		res = -ENOMEM;
		goto out_free;
	}

	strcpy(nm, name);
	n->name = nm;

	list_add_tail(&n->acn_list_entry, &acg->acn_list);

	PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	kfree(n);
	goto out;
}
/* scst_mutex is supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
	int res = -EINVAL;
	struct scst_acn *n;

	list_for_each_entry(n, &acg->acn_list, acn_list_entry)
	{
		if (strcmp(n->name, name) == 0) {
			list_del(&n->acn_list_entry);
			kfree(n->name);
			kfree(n);
			res = 0;
			break;
		}
	}

	if (res == 0) {
		PRINT_INFO("Removed name %s from group %s", name,
			acg->acg_name);
	} else {
		PRINT_ERROR("Unable to find name %s in group %s", name,
			acg->acg_name);
	}

	TRACE_EXIT_RES(res);
	return res;
}
static struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)
{
	struct scst_cmd *res;
	gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

	res = scst_alloc_cmd(gfp_mask);
	if (res == NULL)
		goto out;

	res->cmd_lists = orig_cmd->cmd_lists;
	res->sess = orig_cmd->sess;
	res->atomic = scst_cmd_atomic(orig_cmd);
	res->internal = 1;
	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;
	res->bufflen = bufsize;

	scst_sess_get(res->sess);
	if (res->tgt_dev != NULL)
		__scst_get(0);

	res->state = SCST_CMD_STATE_PRE_PARSE;

out:
	TRACE_EXIT_HRES((unsigned long)res);
	return res;
}
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
	int res = 0;
	static const uint8_t request_sense[6] =
	    { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
	struct scst_cmd *rs_cmd;

	if (orig_cmd->sense != NULL) {
		TRACE_MEM("Releasing sense %p (orig_cmd %p)",
			orig_cmd->sense, orig_cmd);
		mempool_free(orig_cmd->sense, scst_sense_mempool);
		orig_cmd->sense = NULL;
	}

	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
			SCST_SENSE_BUFFERSIZE);
	if (rs_cmd == NULL)
		goto out_error;

	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;
	rs_cmd->expected_data_direction = rs_cmd->data_direction;
	rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
	rs_cmd->expected_values_set = 1;

	TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
		"cmd list", rs_cmd);
	spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
	wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
	spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_error:
	res = -1;
	goto out;
}
static void scst_complete_request_sense(struct scst_cmd *req_cmd)
{
	struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
	uint8_t *buf;
	int len;

	sBUG_ON(orig_cmd == NULL);

	len = scst_get_buf_first(req_cmd, &buf);

	if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
	    SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
			buf, len);
		scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
			len);
	} else {
		PRINT_ERROR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(orig_cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
	}

	if (len > 0)
		scst_put_buf(req_cmd, buf);

	TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
		"cmd list", orig_cmd);
	spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
	list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
	wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
	spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
}
int scst_finish_internal_cmd(struct scst_cmd *cmd)
{
	int res;

	sBUG_ON(!cmd->internal);

	if (cmd->cdb[0] == REQUEST_SENSE)
		scst_complete_request_sense(cmd);

	__scst_cmd_put(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
	return res;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req;

	if (scsi_cmd && (req = scsi_cmd->sc_request)) {
		if (req) {
			if (req->sr_bufflen)
				kfree(req->sr_buffer);
			scsi_release_request(req);
		}
	}
}
static void scst_send_release(struct scst_device *dev)
{
	struct scsi_request *req;
	struct scsi_device *scsi_dev;
	uint8_t cdb[6];

	if (dev->scsi_dev == NULL)
		goto out;

	scsi_dev = dev->scsi_dev;

	req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
	if (req == NULL) {
		PRINT_ERROR("Allocation of scsi_request failed: unable "
			"to RELEASE device %d:%d:%d:%d",
			scsi_dev->host->host_no, scsi_dev->channel,
			scsi_dev->id, scsi_dev->lun);
		goto out;
	}

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = RELEASE;
	cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
		((scsi_dev->lun << 5) & 0xe0) : 0;
	memcpy(req->sr_cmnd, cdb, sizeof(cdb));
	req->sr_cmd_len = sizeof(cdb);
	req->sr_data_direction = SCST_DATA_NONE;
	req->sr_use_sg = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_request->rq_disk = dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
		"mid-level", req);
	scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
		scst_req_done, 15, 3);

out:
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
	struct scsi_device *scsi_dev;
	unsigned char cdb[6];
	uint8_t sense[SCSI_SENSE_BUFFERSIZE];
	int rc, i;

	if (dev->scsi_dev == NULL)
		goto out;

	scsi_dev = dev->scsi_dev;

	for (i = 0; i < 5; i++) {
		memset(cdb, 0, sizeof(cdb));
		cdb[0] = RELEASE;
		cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
			((scsi_dev->lun << 5) & 0xe0) : 0;

		memset(sense, 0, sizeof(sense));

		TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
			"SCSI mid-level");
		rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
				sense, 15, 0, 0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
				, NULL
#endif
				);
		TRACE_DBG("RELEASE done: %x", rc);

		if (scsi_status_is_good(rc)) {
			break;
		} else {
			PRINT_ERROR("RELEASE failed: %d", rc);
			PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
			scst_check_internal_sense(dev, rc, sense,
				sizeof(sense));
		}
	}

out:
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
/* scst_mutex is supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	int release = 0;

	spin_lock_bh(&dev->dev_lock);
	if (dev->dev_reserved &&
	    !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
		/* This is the one who holds the reservation */
		struct scst_tgt_dev *tgt_dev_tmp;
		list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
		release = 1;
	}
	spin_unlock_bh(&dev->dev_lock);

	if (release)
		scst_send_release(dev);
}
struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
	const char *initiator_name)
{
	struct scst_session *sess;
	int i;
	int len;
	char *nm;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
	sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
	if (sess == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_session failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(sess, 0, sizeof(*sess));
#endif

	sess->init_phase = SCST_SESS_IPH_INITING;
	sess->shut_phase = SCST_SESS_SPH_READY;
	atomic_set(&sess->refcnt, 0);
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		INIT_LIST_HEAD(sess_tgt_dev_list_head);
	}
	spin_lock_init(&sess->sess_list_lock);
	INIT_LIST_HEAD(&sess->search_cmd_list);
	sess->tgt = tgt;
	INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
	INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
	spin_lock_init(&sess->meas_lock);
#endif

	len = strlen(initiator_name);
	nm = kmalloc(len + 1, gfp_mask);
	if (nm == NULL) {
		PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
		goto out_free;
	}

	strcpy(nm, initiator_name);
	sess->initiator_name = nm;

out:
	return sess;

out_free:
	kmem_cache_free(scst_sess_cachep, sess);
	sess = NULL;
	goto out;
}
void scst_free_session(struct scst_session *sess)
{
	mutex_lock(&scst_mutex);

	TRACE_DBG("Removing sess %p from the list", sess);
	list_del(&sess->sess_list_entry);
	TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
	list_del(&sess->acg_sess_list_entry);

	scst_sess_free_tgt_devs(sess);

	wake_up_all(&sess->tgt->unreg_waitQ);

	mutex_unlock(&scst_mutex);

	kfree(sess->initiator_name);
	kmem_cache_free(scst_sess_cachep, sess);
}
void scst_free_session_callback(struct scst_session *sess)
{
	struct completion *c;

	TRACE_DBG("Freeing session %p", sess);

	c = sess->shutdown_compl;

	if (sess->unreg_done_fn) {
		TRACE_DBG("Calling unreg_done_fn(%p)", sess);
		sess->unreg_done_fn(sess);
		TRACE_DBG("%s", "unreg_done_fn() returned");
	}
	scst_free_session(sess);

	if (c)
		complete_all(c);
}
void scst_sched_session_free(struct scst_session *sess)
{
	unsigned long flags;

	if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
		PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
			"shut phase %lx", sess, sess->shut_phase);
		sBUG();
	}

	spin_lock_irqsave(&scst_mgmt_lock, flags);
	TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
	list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
	spin_unlock_irqrestore(&scst_mgmt_lock, flags);

	wake_up(&scst_mgmt_waitQ);
}
void scst_cmd_get(struct scst_cmd *cmd)
{
	__scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
	__scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);
struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
	struct scst_cmd *cmd;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
	cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
	if (cmd == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(cmd, 0, sizeof(*cmd));
#endif

	cmd->state = SCST_CMD_STATE_INIT_WAIT;
	cmd->start_time = jiffies;
	atomic_set(&cmd->cmd_ref, 1);
	cmd->cmd_lists = &scst_main_cmd_lists;
	INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
	cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
	cmd->timeout = SCST_DEFAULT_TIMEOUT;
	cmd->is_send_status = 1;
	cmd->resp_data_len = -1;

	cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
	cmd->dbl_ua_orig_resp_data_len = -1;

out:
	return cmd;
}
static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
	scst_sess_put(cmd->sess);

	/*
	 * At this point tgt_dev can be dead, but the pointer remains non-NULL
	 */
	if (likely(cmd->tgt_dev != NULL))
		__scst_put();

	scst_destroy_cmd(cmd);
}
/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
	int destroy = 1;

	TRACE_DBG("Freeing cmd %p (tag %llu)",
		cmd, (long long unsigned int)cmd->tag);

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
			cmd, atomic_read(&scst_cmd_count));
	}

	sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
		cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
	if (cmd->scsi_req) {
		PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
			"scsi_req!");
		scst_release_request(cmd);
	}
#endif
#endif

	/*
	 * Target driver can already free sg buffer before calling
	 * scst_tgt_cmd_done(). E.g., scst_local has to do that.
	 */
	if (!cmd->tgt_data_buf_alloced)
		scst_check_restore_sg_buff(cmd);

	if (cmd->tgtt->on_free_cmd != NULL) {
		TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
		cmd->tgtt->on_free_cmd(cmd);
		TRACE_DBG("%s", "Target's on_free_cmd() returned");
	}

	if (likely(cmd->dev != NULL)) {
		struct scst_dev_type *handler = cmd->dev->handler;
		if (handler->on_free_cmd != NULL) {
			TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
				handler->name, cmd);
			handler->on_free_cmd(cmd);
			TRACE_DBG("Dev handler %s on_free_cmd() returned",
				handler->name);
		}
	}

	scst_release_space(cmd);

	if (unlikely(cmd->sense != NULL)) {
		TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
		mempool_free(cmd->sense, scst_sense_mempool);
		cmd->sense = NULL;
	}

	if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
		if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
			PRINT_ERROR("Finishing not executed cmd %p (opcode "
				"%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
				cmd, cmd->cdb[0], cmd->tgtt->name,
				(long long unsigned int)cmd->lun,
				cmd->sn, cmd->tgt_dev->expected_sn);
			scst_unblock_deferred(cmd->tgt_dev, cmd);
		}
#endif

		if (unlikely(cmd->out_of_sn)) {
			TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
				"destroy=%d", cmd,
				(long long unsigned int)cmd->tag,
				cmd->sn, destroy);
			destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
				&cmd->cmd_flags);
		}
	}

	if (likely(destroy))
		scst_destroy_put_cmd(cmd);
}
/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
	int need_wake_up = 0;

	/*
	 * We don't worry about overflow of finished_cmds, because we check
	 * only for its change.
	 */
	atomic_inc(&tgt->finished_cmds);
	/* See comment in scst_queue_retry_cmd() */
	smp_mb__after_atomic_inc();
	if (unlikely(tgt->retry_cmds > 0)) {
		struct scst_cmd *c, *tc;
		unsigned long flags;

		TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
			tgt->retry_cmds);

		spin_lock_irqsave(&tgt->tgt_lock, flags);
		list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
				cmd_list_entry) {
			tgt->retry_cmds--;

			TRACE_RETRY("Moving retry cmd %p to head of active "
				"cmd list (retry_cmds left %d)",
				c, tgt->retry_cmds);
			spin_lock(&c->cmd_lists->cmd_list_lock);
			list_move(&c->cmd_list_entry,
				&c->cmd_lists->active_cmd_list);
			wake_up(&c->cmd_lists->cmd_list_waitQ);
			spin_unlock(&c->cmd_lists->cmd_list_lock);

			need_wake_up++;
			if (need_wake_up >= 2) /* "slow start" */
				break;
		}
		spin_unlock_irqrestore(&tgt->tgt_lock, flags);
	}
}
void scst_tgt_retry_timer_fn(unsigned long arg)
{
	struct scst_tgt *tgt = (struct scst_tgt *)arg;
	unsigned long flags;

	TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_timer_active = 0;
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	scst_check_retries(tgt);
}
struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
{
	struct scst_mgmt_cmd *mcmd;

	mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
	if (mcmd == NULL) {
		PRINT_CRIT_ERROR("%s", "Allocation of management command "
			"failed, some commands and their data could leak");
		goto out;
	}
	memset(mcmd, 0, sizeof(*mcmd));

out:
	return mcmd;
}
void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
	unsigned long flags;

	spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
	atomic_dec(&mcmd->sess->sess_cmd_count);
	spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

	scst_sess_put(mcmd->sess);

	if (mcmd->mcmd_tgt_dev != NULL)
		__scst_put();

	mempool_free(mcmd, scst_mgmt_mempool);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
	int res = 0;
	struct scsi_request *req;
	int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

	/* cmd->dev->scsi_dev must be non-NULL here */
	req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
	if (req == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scsi_request failed");
		res = -ENOMEM;
		goto out;
	}

	cmd->scsi_req = req;

	memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
	req->sr_cmd_len = cmd->cdb_len;
	req->sr_data_direction = cmd->data_direction;
	req->sr_use_sg = cmd->sg_cnt;
	req->sr_bufflen = cmd->bufflen;
	req->sr_buffer = cmd->sg;
	req->sr_request->rq_disk = cmd->dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	cmd->scsi_req->upper_private_data = cmd;

out:
	return res;
}
void scst_release_request(struct scst_cmd *cmd)
{
	scsi_release_request(cmd->scsi_req);
	cmd->scsi_req = NULL;
}
#endif
int scst_alloc_space(struct scst_cmd *cmd)
{
	gfp_t gfp_mask;
	int res = -ENOMEM;
	int atomic = scst_cmd_atomic(cmd);
	int flags;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);

	flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
	if (cmd->no_sgv)
		flags |= SCST_POOL_ALLOC_NO_CACHED;

	cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
			&cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
	if (cmd->sg == NULL)
		goto out;

	if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
		PRINT_INFO("Unable to complete command due to "
			"SG IO count limitation (requested %d, "
			"available %d, tgt lim %d)", cmd->sg_cnt,
			tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
		goto out_sg_free;
	}

	if (cmd->data_direction != SCST_DATA_BIDI)
		goto success;

	cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
			flags, &cmd->in_sg_cnt, &cmd->in_sgv,
			&cmd->dev->dev_mem_lim, NULL);
	if (cmd->in_sg == NULL)
		goto out_sg_free;

	if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
		PRINT_INFO("Unable to complete command due to "
			"SG IO count limitation (IN buffer, requested "
			"%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
			tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
		goto out_in_sg_free;
	}

success:
	res = 0;

out:
	return res;

out_in_sg_free:
	sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
	cmd->in_sg = NULL;
	cmd->in_sg_cnt = 0;
	cmd->in_sgv = NULL;

out_sg_free:
	sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->sgv = NULL;
	goto out;
}
static void scst_release_space(struct scst_cmd *cmd)
{
	if (cmd->sgv == NULL)
		goto out;

	if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
		TRACE_MEM("%s", "*data_buf_alloced set, returning");
		goto out;
	}

	sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
	cmd->sgv = NULL;
	cmd->sg_cnt = 0;
	cmd->sg = NULL;
	cmd->bufflen = 0;

	if (cmd->in_sgv != NULL) {
		sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
		cmd->in_sgv = NULL;
		cmd->in_sg_cnt = 0;
		cmd->in_sg = NULL;
		cmd->in_bufflen = 0;
	}

out:
	return;
}
void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
{
	struct scatterlist *src_sg, *dst_sg;
	unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
	struct page *src, *dst;
	unsigned int s, d, to_copy;

	if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
		if (cmd->data_direction != SCST_DATA_BIDI) {
			src_sg = cmd->tgt_sg;
			src_sg_cnt = cmd->tgt_sg_cnt;
			dst_sg = cmd->sg;
			to_copy = cmd->bufflen;
		} else {
			TRACE_MEM("BIDI cmd %p", cmd);
			src_sg = cmd->tgt_in_sg;
			src_sg_cnt = cmd->tgt_in_sg_cnt;
			dst_sg = cmd->in_sg;
			to_copy = cmd->in_bufflen;
		}
	} else {
		src_sg = cmd->sg;
		src_sg_cnt = cmd->sg_cnt;
		dst_sg = cmd->tgt_sg;
		to_copy = cmd->resp_data_len;
	}

	TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
		"to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
		to_copy);

	dst = sg_page(dst_sg);
	dst_len = dst_sg->length;
	dst_offs = dst_sg->offset;

	s = 0;
	d = 0;
	src_offs = 0;
	while (s < src_sg_cnt) {
		src = sg_page(&src_sg[s]);
		src_len = src_sg[s].length;
		src_offs += src_sg[s].offset;

		do {
			unsigned int n;

			/*
			 * Himem pages are not allowed here, see the
			 * corresponding #warning in scst_main.c. Correct
			 * your target driver or dev handler to not alloc
			 * such pages!
			 */
			EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
				PageHighMem(src));

			TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
				"src_offs %d, dst %p, dst_len %d, dst_offs %d",
				cmd, to_copy, src, src_len, src_offs, dst,
				dst_len, dst_offs);

			if ((src_offs == 0) && (dst_offs == 0) &&
			    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
				copy_page(page_address(dst), page_address(src));
				n = PAGE_SIZE;
			} else {
				n = min(PAGE_SIZE - dst_offs,
					PAGE_SIZE - src_offs);
				n = min(n, src_len);
				n = min(n, dst_len);
				memcpy(page_address(dst) + dst_offs,
					page_address(src) + src_offs, n);
				dst_offs -= min(n, dst_offs);
				src_offs -= min(n, src_offs);
			}

			TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);

			to_copy -= n;
			if (to_copy <= 0)
				goto out;

			src_len -= n;
			dst_len -= n;
			if (dst_len == 0) {
				d++;
				dst = sg_page(&dst_sg[d]);
				dst_len = dst_sg[d].length;
				dst_offs += dst_sg[d].offset;
			}
		} while (src_len > 0);

		s++;
	}

out:
	return;
}
static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };

#define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
#define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]

int scst_get_cdb_len(const uint8_t *cdb)
{
	return SCST_GET_CDB_LEN(cdb[0]);
}
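/*
 * Example of the macros above: for WRITE(10) (opcode 0x2a) the group is
 * (0x2a >> 5) & 0x7 == 1, so SCST_GET_CDB_LEN(0x2a) == SCST_CDB_LENGTH[1]
 * == 10 bytes; for READ(16) (opcode 0x88) the group is 4 and the length
 * is 16 bytes.
 */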
/* get_trans_len_x extracts x bytes from the cdb as length, starting at off */

/* for special commands */
static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = 6;
	return 0;
}

static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = READ_CAP_LEN;
	return 0;
}

static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
{
	int res = 0;

	if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
		cmd->op_name = "READ CAPACITY(16)";
		cmd->bufflen = READ_CAP16_LEN;
		cmd->op_flags |= SCST_IMPLICIT_HQ;
	} else
		cmd->op_flags |= SCST_UNKNOWN_LENGTH;

	TRACE_EXIT_RES(res);
	return res;
}

static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = 1;
	return 0;
}

static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
{
	uint8_t *p = (uint8_t *)cmd->cdb + off;
	int res = 0;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 8;
	cmd->bufflen |= ((u32)p[1]);

	switch (cmd->cdb[1] & 0x1f) {
	case 0:
	case 1:
	case 6:
		if (cmd->bufflen != 0) {
			PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
				"allocation length for service action %x",
				cmd->bufflen, cmd->cdb[1] & 0x1f);
			goto out_inval;
		}
		break;
	}

	switch (cmd->cdb[1] & 0x1f) {
	case 0:
	case 1:
		cmd->bufflen = 20;
		break;
	case 6:
		cmd->bufflen = 32;
		break;
	case 8:
		cmd->bufflen = max(28, cmd->bufflen);
		break;
	default:
		PRINT_ERROR("READ POSITION: Invalid service action %x",
			cmd->cdb[1] & 0x1f);
		goto out_inval;
	}

out:
	return res;

out_inval:
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	res = 1;
	goto out;
}

static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = (u32)cmd->cdb[off];
	return 0;
}

static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = (u32)cmd->cdb[off];
	if (cmd->bufflen == 0)
		cmd->bufflen = 256;
	return 0;
}

static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
{
	const uint8_t *p = cmd->cdb + off;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 8;
	cmd->bufflen |= ((u32)p[1]);

	return 0;
}

static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
{
	const uint8_t *p = cmd->cdb + off;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 16;
	cmd->bufflen |= ((u32)p[1]) << 8;
	cmd->bufflen |= ((u32)p[2]);

	return 0;
}

static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
{
	const uint8_t *p = cmd->cdb + off;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 24;
	cmd->bufflen |= ((u32)p[1]) << 16;
	cmd->bufflen |= ((u32)p[2]) << 8;
	cmd->bufflen |= ((u32)p[3]);

	return 0;
}
static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = 0;
	return 0;
}
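/*
 * Worked example of the extractors above: for a READ(10) CDB with bytes
 * 7..8 equal to 0x00 0x10, get_trans_len_2(cmd, 7) sets cmd->bufflen to
 * 16. For commands marked SCST_TRANSFER_LEN_TYPE_FIXED this is a count
 * of blocks, which scst_sbc_generic_parse() below converts to bytes by
 * shifting by the device's block shift.
 */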
int scst_get_cdb_info(struct scst_cmd *cmd)
{
	int dev_type = cmd->dev->type;
	int i, res = 0;
	uint8_t op;
	const struct scst_sdbops *ptr = NULL;

	op = cmd->cdb[0];	/* get clear opcode */

	TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
		"dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
		dev_type);

	i = scst_scsi_op_list[op];
	while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
		if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
			ptr = &scst_scsi_op_table[i];
			TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
				ptr->ops, ptr->devkey[0],	/* disk    */
				ptr->devkey[1],	/* tape    */
				ptr->devkey[2],	/* printer */
				ptr->devkey[3],	/* cpu     */
				ptr->devkey[4],	/* cdr     */
				ptr->devkey[5],	/* cdrom   */
				ptr->devkey[6],	/* scanner */
				ptr->devkey[7],	/* worm    */
				ptr->devkey[8],	/* changer */
				ptr->devkey[9],	/* commdev */
				ptr->op_name);
			TRACE_DBG("direction=%d flags=%d off=%d",
				ptr->direction,
				ptr->flags,
				ptr->off);
			break;
		}
		i++;
	}

	if (unlikely(ptr == NULL)) {
		/* opcode not found or not currently used */
		TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
			dev_type);
		res = -1;
		cmd->op_flags = SCST_INFO_NOT_FOUND;
		goto out;
	}

	cmd->cdb_len = SCST_GET_CDB_LEN(op);
	cmd->op_name = ptr->op_name;
	cmd->data_direction = ptr->direction;
	cmd->op_flags = ptr->flags;
	res = (*ptr->get_trans_len)(cmd, ptr->off);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_get_cdb_info);
/* Packs SCST LUN back to SCSI form using peripheral device addressing method */
uint64_t scst_pack_lun(const uint64_t lun)
{
	uint64_t res;
	uint16_t *p = (uint16_t *)&res;

	res = lun;
	*p = cpu_to_be16(*p);

	TRACE_EXIT_HRES((unsigned long)res);
	return res;
}

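/*
 * Example (assuming a little-endian host): for lun = 5 only the first
 * 16-bit word of res is byte-swapped, so the returned value carries the
 * SCSI LUN field bytes 0x00 0x05 in bytes 0-1 (single-level peripheral
 * addressing, remaining bytes zero).
 */
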
/*
 * Routine to extract a lun number from an 8-byte LUN structure
 * in network byte order (BE).
 * (see SAM-2, Section 4.12.3 page 40)
 * Supports 2 types of lun unpacking: peripheral and logical unit.
 */
uint64_t scst_unpack_lun(const uint8_t *lun, int len)
{
	uint64_t res = NO_SUCH_LUN;
	int address_method;

	TRACE_ENTRY();

	TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);

	if (unlikely(len < 2)) {
		PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
			"more", len);
		goto out;
	}

	if (len > 2) {
		switch (len) {
		case 8:
			if ((*((uint64_t *)lun) &
			    __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
				goto out_err;
			break;
		case 4:
			if (*((uint16_t *)&lun[2]) != 0)
				goto out_err;
			break;
		case 6:
			if (*((uint32_t *)&lun[2]) != 0)
				goto out_err;
			break;
		default:
			goto out_err;
		}
	}

	address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
	switch (address_method) {
	case 0: /* peripheral device addressing method */
#if 0
		if (*lun) {
			PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
				"peripheral device addressing method 0x%02x, "
				"expected 0", *lun);
			break;
		}
		res = *(lun + 1);
		break;
#else
		/*
		 * Looks like it's legal to use it as flat space addressing
		 * method as well.
		 */

		/* fall through */
#endif
	case 1: /* flat space addressing method */
		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
		break;

	case 2: /* logical unit addressing method */
		if (*lun & 0x3f) {
			PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
				"addressing method 0x%02x, expected 0",
				*lun & 0x3f);
			break;
		}
		if (*(lun + 1) & 0xe0) {
			PRINT_ERROR("Illegal TARGET in LUN logical unit "
				"addressing method 0x%02x, expected 0",
				(*(lun + 1) & 0xe0) >> 5);
			break;
		}
		res = *(lun + 1) & 0x1f;
		break;

	case 3: /* extended logical unit addressing method */
	default:
		PRINT_ERROR("Unimplemented LUN addressing method %u",
			address_method);
		break;
	}

out:
	TRACE_EXIT_RES((int)res);
	return res;

out_err:
	PRINT_ERROR("%s", "Multi-level LUN unimplemented");
	goto out;
}

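/*
 * Example: a flat space addressing LUN with lun[0] = 0x40 and
 * lun[1] = 0x05 has address_method 1, so
 * res = 0x05 | ((0x40 & 0x3f) << 8) = 5.
 */
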
int scst_calc_block_shift(int sector_size)
{
	int block_shift = 0;
	int t;

	if (sector_size == 0)
		sector_size = 512;

	t = sector_size;
	while ((t & 1) == 0) {
		t >>= 1;
		block_shift++;
	}
	if (block_shift < 9) {
		PRINT_ERROR("Wrong sector size %d", sector_size);
		block_shift = -1;
	}

	TRACE_EXIT_RES(block_shift);
	return block_shift;
}
EXPORT_SYMBOL(scst_calc_block_shift);

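/*
 * Usage examples: scst_calc_block_shift(512) returns 9 and
 * scst_calc_block_shift(4096) returns 12, while a size whose lowest set
 * bit is below 512 (e.g. 520) is rejected with -1.
 */
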
int scst_sbc_generic_parse(struct scst_cmd *cmd,
	int (*get_block_shift)(struct scst_cmd *cmd))
{
	int res = 0;

	TRACE_ENTRY();

	/*
	 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
	 * therefore change them only if necessary
	 */

	TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
		cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);

	switch (cmd->cdb[0]) {
	case VERIFY_6:
	case VERIFY:
	case VERIFY_12:
	case VERIFY_16:
		if ((cmd->cdb[1] & BYTCHK) == 0) {
			cmd->data_len = cmd->bufflen << get_block_shift(cmd);
			cmd->bufflen = 0;
			goto set_timeout;
		} else
			cmd->data_len = 0;
		break;
	default:
		/* It's all good */
		break;
	}

	if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
		/*
		 * No need for locks here, since *_detach() can not be
		 * called, when there are existing commands.
		 */
		cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
	}

set_timeout:
	if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
		cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
	else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
		cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
	else if (cmd->op_flags & SCST_LONG_TIMEOUT)
		cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;

	TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
		res, cmd->bufflen, cmd->data_len, cmd->data_direction);

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_sbc_generic_parse);

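/*
 * A minimal usage sketch for a dev handler (hypothetical my_dev/my_parse
 * names, assuming the handler caches its block shift in dev->dh_priv):
 */
#if 0
static int my_get_block_shift(struct scst_cmd *cmd)
{
	struct my_dev *d = (struct my_dev *)cmd->dev->dh_priv;

	return d->block_shift; /* e.g. 9 for 512-byte sectors */
}

static int my_parse(struct scst_cmd *cmd)
{
	return scst_sbc_generic_parse(cmd, my_get_block_shift);
}
#endif
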
int scst_cdrom_generic_parse(struct scst_cmd *cmd,
	int (*get_block_shift)(struct scst_cmd *cmd))
{
	int res = 0;

	TRACE_ENTRY();

	/*
	 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
	 * therefore change them only if necessary
	 */

	TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
		cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);

	cmd->cdb[1] &= 0x1f;

	switch (cmd->cdb[0]) {
	case VERIFY_6:
	case VERIFY:
	case VERIFY_12:
	case VERIFY_16:
		if ((cmd->cdb[1] & BYTCHK) == 0) {
			cmd->data_len = cmd->bufflen << get_block_shift(cmd);
			cmd->bufflen = 0;
			goto set_timeout;
		}
		break;
	default:
		/* It's all good */
		break;
	}

	if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
		cmd->bufflen = cmd->bufflen << get_block_shift(cmd);

set_timeout:
	if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
		cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
	else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
		cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
	else if (cmd->op_flags & SCST_LONG_TIMEOUT)
		cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;

	TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
		cmd->data_direction);

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_cdrom_generic_parse);

int scst_modisk_generic_parse(struct scst_cmd *cmd,
	int (*get_block_shift)(struct scst_cmd *cmd))
{
	int res = 0;

	TRACE_ENTRY();

	/*
	 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
	 * therefore change them only if necessary
	 */

	TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
		cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);

	cmd->cdb[1] &= 0x1f;

	switch (cmd->cdb[0]) {
	case VERIFY_6:
	case VERIFY:
	case VERIFY_12:
	case VERIFY_16:
		if ((cmd->cdb[1] & BYTCHK) == 0) {
			cmd->data_len = cmd->bufflen << get_block_shift(cmd);
			cmd->bufflen = 0;
			goto set_timeout;
		}
		break;
	default:
		/* It's all good */
		break;
	}

	if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
		cmd->bufflen = cmd->bufflen << get_block_shift(cmd);

set_timeout:
	if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
		cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
	else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
		cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
	else if (cmd->op_flags & SCST_LONG_TIMEOUT)
		cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;

	TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
		cmd->data_direction);

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_modisk_generic_parse);

int scst_tape_generic_parse(struct scst_cmd *cmd,
	int (*get_block_size)(struct scst_cmd *cmd))
{
	int res = 0;

	TRACE_ENTRY();

	/*
	 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
	 * therefore change them only if necessary
	 */

	TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
		cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);

	if (cmd->cdb[0] == READ_POSITION) {
		int tclp = cmd->cdb[1] & 4;
		int long_bit = cmd->cdb[1] & 2;
		int bt = cmd->cdb[1] & 1;
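
		/*
		 * Per SSC, TCLP and LONG must be equal, and BT together with
		 * LONG is invalid; any other combination reports no data
		 * below. Note that tclp and long_bit hold different bit
		 * masks (4 and 2), hence the !! normalization before
		 * comparing them.
		 */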
		if ((!!tclp == !!long_bit) && (!bt || !long_bit)) {
			cmd->bufflen =
				tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
			cmd->data_direction = SCST_DATA_READ;
		} else {
			cmd->bufflen = 0;
			cmd->data_direction = SCST_DATA_NONE;
		}
	}

	if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
		cmd->bufflen = cmd->bufflen * get_block_size(cmd);

	if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
		cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
	else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
		cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
	else if (cmd->op_flags & SCST_LONG_TIMEOUT)
		cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;

	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_tape_generic_parse);

static int scst_null_parse(struct scst_cmd *cmd)
{
	int res = 0;

	TRACE_ENTRY();

	/*
	 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
	 * therefore change them only if necessary
	 */

	TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
		cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);

	switch (cmd->cdb[0]) {
	default:
		/* It's all good */
		break;
	}

	TRACE_DBG("res %d bufflen %d direct %d",
		res, cmd->bufflen, cmd->data_direction);

	TRACE_EXIT_RES(res);
	return res;
}

int scst_changer_generic_parse(struct scst_cmd *cmd,
	int (*nothing)(struct scst_cmd *cmd))
{
	int res = scst_null_parse(cmd);

	if (cmd->op_flags & SCST_LONG_TIMEOUT)
		cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
	else
		cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;

	return res;
}
EXPORT_SYMBOL(scst_changer_generic_parse);

int scst_processor_generic_parse(struct scst_cmd *cmd,
	int (*nothing)(struct scst_cmd *cmd))
{
	int res = scst_null_parse(cmd);

	if (cmd->op_flags & SCST_LONG_TIMEOUT)
		cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
	else
		cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;

	return res;
}
EXPORT_SYMBOL(scst_processor_generic_parse);

int scst_raid_generic_parse(struct scst_cmd *cmd,
	int (*nothing)(struct scst_cmd *cmd))
{
	int res = scst_null_parse(cmd);

	if (cmd->op_flags & SCST_LONG_TIMEOUT)
		cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
	else
		cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;

	return res;
}
EXPORT_SYMBOL(scst_raid_generic_parse);

int scst_block_generic_dev_done(struct scst_cmd *cmd,
	void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
{
	int opcode = cmd->cdb[0];
	int status = cmd->status;
	int res = SCST_CMD_STATE_DEFAULT;

	TRACE_ENTRY();

	/*
	 * SCST sets good defaults for cmd->is_send_status and
	 * cmd->resp_data_len based on cmd->status and cmd->data_direction,
	 * therefore change them only if necessary
	 */

	if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
		switch (opcode) {
		case READ_CAPACITY:
		{
			/* Always keep track of disk capacity */
			int buffer_size, sector_size, sh;
			uint8_t *buffer;

			buffer_size = scst_get_buf_first(cmd, &buffer);
			if (unlikely(buffer_size <= 0)) {
				if (buffer_size < 0) {
					PRINT_ERROR("%s: Unable to get the"
						" buffer (%d)", __func__,
						buffer_size);
				}
				goto out;
			}

			/* READ CAPACITY: block length is in bytes 4-7 (BE) */
			sector_size =
				((buffer[4] << 24) | (buffer[5] << 16) |
				 (buffer[6] << 8) | (buffer[7] << 0));
			scst_put_buf(cmd, buffer);
			if (sector_size != 0)
				sh = scst_calc_block_shift(sector_size);
			else
				sh = 0;
			set_block_shift(cmd, sh);
			TRACE_DBG("block_shift %d", sh);
			break;
		}
		default:
			/* It's all good */
			break;
		}
	}

	TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
		"res=%d", cmd->is_send_status, cmd->resp_data_len, res);

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_block_generic_dev_done);

int scst_tape_generic_dev_done(struct scst_cmd *cmd,
	void (*set_block_size)(struct scst_cmd *cmd, int block_size))
{
	int opcode = cmd->cdb[0];
	int res = SCST_CMD_STATE_DEFAULT;
	int buffer_size, bs;
	uint8_t *buffer = NULL;

	TRACE_ENTRY();

	/*
	 * SCST sets good defaults for cmd->is_send_status and
	 * cmd->resp_data_len based on cmd->status and cmd->data_direction,
	 * therefore change them only if necessary
	 */

	switch (opcode) {
	case MODE_SENSE:
	case MODE_SELECT:
		buffer_size = scst_get_buf_first(cmd, &buffer);
		if (unlikely(buffer_size <= 0)) {
			if (buffer_size < 0) {
				PRINT_ERROR("%s: Unable to get the buffer (%d)",
					__func__, buffer_size);
			}
			goto out;
		}
		break;
	}

	switch (opcode) {
	case MODE_SENSE:
		TRACE_DBG("%s", "MODE_SENSE");
		if ((cmd->cdb[2] & 0xC0) == 0) {
			if (buffer[3] == 8) {
				bs = (buffer[9] << 16) |
					(buffer[10] << 8) | buffer[11];
				set_block_size(cmd, bs);
			}
		}
		break;
	case MODE_SELECT:
		TRACE_DBG("%s", "MODE_SELECT");
		if (buffer[3] == 8) {
			bs = (buffer[9] << 16) | (buffer[10] << 8) |
				(buffer[11]);
			set_block_size(cmd, bs);
		}
		break;
	default:
		/* It's all good */
		break;
	}

	switch (opcode) {
	case MODE_SENSE:
	case MODE_SELECT:
		scst_put_buf(cmd, buffer);
		break;
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_tape_generic_dev_done);

static void scst_check_internal_sense(struct scst_device *dev, int result,
	uint8_t *sense, int sense_len)
{
	TRACE_ENTRY();

	if (host_byte(result) == DID_RESET) {
		TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
			"reset UA");
		scst_set_sense(sense, sense_len, dev->d_sense,
			SCST_LOAD_SENSE(scst_sense_reset_UA));
		scst_dev_check_set_UA(dev, NULL, sense, sense_len);
	} else if ((status_byte(result) == CHECK_CONDITION) &&
		   SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
		scst_dev_check_set_UA(dev, NULL, sense, sense_len);

	TRACE_EXIT();
	return;
}

enum dma_data_direction scst_to_dma_dir(int scst_dir)
{
	static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
		DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };

	return tr_tbl[scst_dir];
}
EXPORT_SYMBOL(scst_to_dma_dir);

enum dma_data_direction scst_to_tgt_dma_dir(int scst_dir)
{
	static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
		DMA_FROM_DEVICE, DMA_TO_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };

	return tr_tbl[scst_dir];
}
EXPORT_SYMBOL(scst_to_tgt_dma_dir);

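/*
 * The two tables above differ because they answer the question from
 * opposite sides: for the backend device a WRITE moves data to the
 * device (DMA_TO_DEVICE), while for the target HBA the same WRITE means
 * receiving the data from the initiator, i.e. DMA_FROM_DEVICE.
 */
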
int scst_obtain_device_parameters(struct scst_device *dev)
{
	int res = 0, i;
	uint8_t cmd[16];
	uint8_t buffer[4+0x0A];
	uint8_t sense_buffer[SCSI_SENSE_BUFFERSIZE];

	TRACE_ENTRY();

	EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);

	for (i = 0; i < 5; i++) {
		/* Get control mode page */
		memset(cmd, 0, sizeof(cmd));
		cmd[0] = MODE_SENSE;
		cmd[1] = 8; /* DBD */
		cmd[2] = 0x0A; /* control mode page */
		cmd[4] = sizeof(buffer);

		memset(buffer, 0, sizeof(buffer));
		memset(sense_buffer, 0, sizeof(sense_buffer));

		TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
		res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
				sizeof(buffer), sense_buffer, 15, 0, 0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
				, NULL
#endif
				);

		TRACE_DBG("MODE_SENSE done: %x", res);

		if (scsi_status_is_good(res)) {
			int q;

			PRINT_BUFF_FLAG(TRACE_SCSI,
				"Returned control mode page data",
				buffer, sizeof(buffer));
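
			/*
			 * The 4-byte mode parameter header precedes the page,
			 * so the control mode page starts at buffer[4]:
			 * byte 2 holds TST (bits 7-5) and D_SENSE (bit 2),
			 * byte 3 QUEUE ALGORITHM MODIFIER (bits 7-4),
			 * byte 4 SWP (bit 3), byte 5 TAS (bit 6).
			 */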
			dev->tst = buffer[4+2] >> 5;
			q = buffer[4+3] >> 4;
			if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
				PRINT_ERROR("Too big QUEUE ALG %x, dev "
					"%d:%d:%d:%d", q,
					dev->scsi_dev->host->host_no,
					dev->scsi_dev->channel,
					dev->scsi_dev->id, dev->scsi_dev->lun);
			}
			dev->queue_alg = q;
			dev->swp = (buffer[4+4] & 0x8) >> 3;
			dev->tas = (buffer[4+5] & 0x40) >> 6;
			dev->d_sense = (buffer[4+2] & 0x4) >> 2;

			/*
			 * Unfortunately, SCSI ML doesn't provide a way to
			 * specify commands task attribute, so we can rely on
			 * device's restricted reordering only.
			 */
			dev->has_own_order_mgmt = !dev->queue_alg;

			TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
				"Device %d:%d:%d:%d: TST %x, "
				"QUEUE ALG %x, SWP %x, TAS %x, D_SENSE %d, "
				"has_own_order_mgmt %d",
				dev->scsi_dev->host->host_no,
				dev->scsi_dev->channel, dev->scsi_dev->id,
				dev->scsi_dev->lun, dev->tst, dev->queue_alg,
				dev->swp, dev->tas, dev->d_sense,
				dev->has_own_order_mgmt);

			goto out;
		} else {
#if 0
			if ((status_byte(res) == CHECK_CONDITION) &&
			    SCST_SENSE_VALID(sense_buffer)) {
#else
			/*
			 * 3ware controller is buggy and returns CONDITION_GOOD
			 * instead of CHECK_CONDITION
			 */
			if (SCST_SENSE_VALID(sense_buffer)) {
#endif
				if (scst_analyze_sense(sense_buffer,
						sizeof(sense_buffer),
						SCST_SENSE_KEY_VALID,
						ILLEGAL_REQUEST, 0, 0)) {
					TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
						"Device %d:%d:%d:%d doesn't "
						"support control mode page, "
						"using defaults: TST %x, "
						"QUEUE ALG %x, SWP %x, "
						"TAS %x, D_SENSE %d, "
						"has_own_order_mgmt %d",
						dev->scsi_dev->host->host_no,
						dev->scsi_dev->channel,
						dev->scsi_dev->id,
						dev->scsi_dev->lun,
						dev->tst, dev->queue_alg,
						dev->swp, dev->tas,
						dev->d_sense,
						dev->has_own_order_mgmt);
					res = 0;
					goto out;
				} else if (scst_analyze_sense(sense_buffer,
						sizeof(sense_buffer),
						SCST_SENSE_KEY_VALID,
						NOT_READY, 0, 0)) {
					TRACE(TRACE_SCSI,
						"Device %d:%d:%d:%d not ready",
						dev->scsi_dev->host->host_no,
						dev->scsi_dev->channel,
						dev->scsi_dev->id,
						dev->scsi_dev->lun);
					res = 0;
					goto out;
				}
			} else {
				TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
					"Internal MODE SENSE to "
					"device %d:%d:%d:%d failed: %x",
					dev->scsi_dev->host->host_no,
					dev->scsi_dev->channel,
					dev->scsi_dev->id,
					dev->scsi_dev->lun, res);
				PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
					"MODE SENSE sense",
					sense_buffer, sizeof(sense_buffer));
			}
			scst_check_internal_sense(dev, res, sense_buffer,
					sizeof(sense_buffer));
		}
	}
	res = -ENODEV;

out:
	TRACE_EXIT_RES(res);
	return res;
}
EXPORT_SYMBOL(scst_obtain_device_parameters);

/* Called under dev_lock and BH off */
void scst_process_reset(struct scst_device *dev,
	struct scst_session *originator, struct scst_cmd *exclude_cmd,
	struct scst_mgmt_cmd *mcmd, bool setUA)
{
	struct scst_tgt_dev *tgt_dev;
	struct scst_cmd *cmd, *tcmd;

	TRACE_ENTRY();

	/* Clear RESERVE'ation, if necessary */
	if (dev->dev_reserved) {
		list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			TRACE(TRACE_MGMT_MINOR, "Clearing RESERVE'ation for "
				"tgt_dev lun %lld",
				(long long unsigned int)tgt_dev->lun);
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
		/*
		 * There is no need to send RELEASE, since the device is going
		 * to be reset. Actually, since we can be in RESET TM
		 * function, it might be dangerous.
		 */
	}

	dev->dev_double_ua_possible = 1;

	list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		struct scst_session *sess = tgt_dev->sess;

		spin_lock_bh(&tgt_dev->tgt_dev_lock);

		scst_free_all_UA(tgt_dev);

		memset(tgt_dev->tgt_dev_sense, 0,
			sizeof(tgt_dev->tgt_dev_sense));

		spin_unlock_bh(&tgt_dev->tgt_dev_lock);

		spin_lock_irq(&sess->sess_list_lock);

		TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
		list_for_each_entry(cmd, &sess->search_cmd_list,
				search_cmd_list_entry) {
			if (cmd == exclude_cmd)
				continue;
			if ((cmd->tgt_dev == tgt_dev) ||
			    ((cmd->tgt_dev == NULL) &&
			     (cmd->lun == tgt_dev->lun))) {
				scst_abort_cmd(cmd, mcmd,
					(tgt_dev->sess != originator), 0);
			}
		}
		spin_unlock_irq(&sess->sess_list_lock);
	}

	list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
				blocked_cmd_list_entry) {
		if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
			list_del(&cmd->blocked_cmd_list_entry);
			TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
				"to active cmd list", cmd);
			spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
			list_add_tail(&cmd->cmd_list_entry,
				&cmd->cmd_lists->active_cmd_list);
			wake_up(&cmd->cmd_lists->cmd_list_waitQ);
			spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
		}
	}

	if (setUA) {
		uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
		scst_set_sense(sense_buffer, sizeof(sense_buffer),
			dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
		scst_dev_check_set_local_UA(dev, exclude_cmd, sense_buffer,
			sizeof(sense_buffer));
	}

	TRACE_EXIT();
	return;
}

int scst_set_pending_UA(struct scst_cmd *cmd)
{
	int res = 0, i;
	struct scst_tgt_dev_UA *UA_entry;
	bool first = true, global_unlock = false;
	struct scst_session *sess = cmd->sess;

	TRACE_ENTRY();

	TRACE(TRACE_MGMT_MINOR, "Setting pending UA cmd %p", cmd);

	spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);

again:
	/* UA list could be cleared behind us, so retest */
	if (list_empty(&cmd->tgt_dev->UA_list)) {
		TRACE_DBG("%s",
			"SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
		res = -1;
		goto out_unlock;
	}

	UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
			UA_list_entry);

	TRACE_DBG("next %p UA_entry %p",
		cmd->tgt_dev->UA_list.next, UA_entry);

	if (UA_entry->global_UA && first) {
		TRACE_MGMT_DBG("Global UA %p detected", UA_entry);

		spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);

		mutex_lock(&scst_mutex);

		local_bh_disable();

		for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
			struct list_head *sess_tgt_dev_list_head =
				&sess->sess_tgt_dev_list_hash[i];
			struct scst_tgt_dev *tgt_dev;
			list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
					sess_tgt_dev_list_entry) {
				/* Lockdep triggers here a false positive.. */
				spin_lock(&tgt_dev->tgt_dev_lock);
			}
		}

		first = false;
		global_unlock = true;
		goto again;
	}

	scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
		sizeof(UA_entry->UA_sense_buffer));

	list_del(&UA_entry->UA_list_entry);

	if (UA_entry->global_UA) {
		for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
			struct list_head *sess_tgt_dev_list_head =
				&sess->sess_tgt_dev_list_hash[i];
			struct scst_tgt_dev *tgt_dev;

			list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
					sess_tgt_dev_list_entry) {
				struct scst_tgt_dev_UA *ua;
				list_for_each_entry(ua, &tgt_dev->UA_list,
						UA_list_entry) {
					if (ua->global_UA &&
					    memcmp(ua->UA_sense_buffer,
						UA_entry->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer)) == 0) {
						TRACE_MGMT_DBG("Freeing not "
							"needed global UA %p",
							ua);
						list_del(&ua->UA_list_entry);
						mempool_free(ua,
							scst_ua_mempool);
						break;
					}
				}
			}
		}
	}

	mempool_free(UA_entry, scst_ua_mempool);

	if (list_empty(&cmd->tgt_dev->UA_list)) {
		clear_bit(SCST_TGT_DEV_UA_PENDING,
			&cmd->tgt_dev->tgt_dev_flags);
	}

out_unlock:
	if (global_unlock) {
		for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
			struct list_head *sess_tgt_dev_list_head =
				&sess->sess_tgt_dev_list_hash[i];
			struct scst_tgt_dev *tgt_dev;
			list_for_each_entry_reverse(tgt_dev,
					sess_tgt_dev_list_head,
					sess_tgt_dev_list_entry) {
				spin_unlock(&tgt_dev->tgt_dev_lock);
			}
		}

		local_bh_enable();
		mutex_unlock(&scst_mutex);

		spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
	}

	spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);

	TRACE_EXIT_RES(res);
	return res;
}

/* Called under tgt_dev_lock and BH off */
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags)
{
	struct scst_tgt_dev_UA *UA_entry = NULL;

	TRACE_ENTRY();

	UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
	if (UA_entry == NULL) {
		PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
			"allocation failed. The UNIT ATTENTION "
			"on some sessions will be missed");
		PRINT_BUFFER("Lost UA", sense, sense_len);
		goto out;
	}
	memset(UA_entry, 0, sizeof(*UA_entry));

	UA_entry->global_UA = (flags & SCST_SET_UA_FLAG_GLOBAL) != 0;
	if (UA_entry->global_UA)
		TRACE_MGMT_DBG("Queuing global UA %p", UA_entry);

	if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
		sense_len = sizeof(UA_entry->UA_sense_buffer);
	memcpy(UA_entry->UA_sense_buffer, sense, sense_len);

	set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);

	TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);

	if (flags & SCST_SET_UA_FLAG_AT_HEAD)
		list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
	else
		list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);

out:
	TRACE_EXIT();
	return;
}

/* tgt_dev_lock supposed to be held and BH off */
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags)
{
	int skip_UA = 0;
	struct scst_tgt_dev_UA *UA_entry_tmp;
	int len = min((int)sizeof(UA_entry_tmp->UA_sense_buffer), sense_len);

	TRACE_ENTRY();

	list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
			UA_list_entry) {
		if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, len) == 0) {
			TRACE_MGMT_DBG("%s", "UA already exists");
			skip_UA = 1;
			break;
		}
	}

	if (skip_UA == 0)
		scst_alloc_set_UA(tgt_dev, sense, len, flags);

	TRACE_EXIT();
	return;
}

void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags)
{
	TRACE_ENTRY();

	spin_lock_bh(&tgt_dev->tgt_dev_lock);
	__scst_check_set_UA(tgt_dev, sense, sense_len, flags);
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);

	TRACE_EXIT();
	return;
}

/* Called under dev_lock and BH off */
void scst_dev_check_set_local_UA(struct scst_device *dev,
	struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
{
	struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;

	TRACE_ENTRY();

	if (exclude != NULL)
		exclude_tgt_dev = exclude->tgt_dev;

	list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (tgt_dev != exclude_tgt_dev)
			scst_check_set_UA(tgt_dev, sense, sense_len, 0);
	}

	TRACE_EXIT();
	return;
}

/* Called under dev_lock and BH off */
void __scst_dev_check_set_UA(struct scst_device *dev,
	struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
{
	TRACE_ENTRY();

	TRACE(TRACE_MGMT_MINOR, "Processing UA dev %p", dev);

	/* Check for reset UA */
	if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ASC_VALID,
			0, SCST_SENSE_ASC_UA_RESET, 0))
		scst_process_reset(dev,
			(exclude != NULL) ? exclude->sess : NULL,
			exclude, NULL, false);

	scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);

	TRACE_EXIT();
	return;
}

/* Called under tgt_dev_lock or when tgt_dev is unused */
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
{
	struct scst_tgt_dev_UA *UA_entry, *t;

	TRACE_ENTRY();

	list_for_each_entry_safe(UA_entry, t,
			&tgt_dev->UA_list, UA_list_entry) {
		TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
			(long long unsigned int)tgt_dev->lun);
		list_del(&UA_entry->UA_list_entry);
		mempool_free(UA_entry, scst_ua_mempool);
	}
	INIT_LIST_HEAD(&tgt_dev->UA_list);
	clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);

	TRACE_EXIT();
	return;
}

struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
{
	struct scst_cmd *res = NULL, *cmd, *t;
	typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;

	spin_lock_irq(&tgt_dev->sn_lock);

	if (unlikely(tgt_dev->hq_cmd_count != 0))