/*
 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst_priv.h"
#include "scst_cdbprobe.h"
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
static void scst_check_internal_sense(struct scst_device *dev, int result,
	uint8_t *sense, int sense_len);
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
	int flags);
static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags);
static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
	const uint8_t *sense, int sense_len, int flags);
static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
static void scst_release_space(struct scst_cmd *cmd);
static void scst_sess_free_tgt_devs(struct scst_session *sess);
static void scst_unblock_cmds(struct scst_device *dev);

#ifdef CONFIG_SCST_DEBUG_TM
static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev);
static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
#else
static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
	struct scst_acg_dev *acg_dev) {}
static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
#endif /* CONFIG_SCST_DEBUG_TM */
int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
{
	int res = 0;
	gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);

	if (cmd->sense != NULL)
		goto memzero;

	cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
	if (cmd->sense == NULL) {
		PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
			"The sense data will be lost!!", cmd->cdb[0]);
		res = -ENOMEM;
		goto out;
	}

memzero:
	memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);

out:
	return res;
}
EXPORT_SYMBOL(scst_alloc_sense);
int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
	const uint8_t *sense, unsigned int len)
{
	int res;

	res = scst_alloc_sense(cmd, atomic);
	if (res != 0) {
		PRINT_BUFFER("Lost sense", sense, len);
		goto out;
	}

	memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
	TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
	return res;
}
EXPORT_SYMBOL(scst_alloc_set_sense);
void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
{
	cmd->status = status;
	cmd->host_status = DID_OK;

	cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
	cmd->dbl_ua_orig_data_direction = cmd->data_direction;

	cmd->data_direction = SCST_DATA_NONE;
	cmd->resp_data_len = 0;
	cmd->is_send_status = 1;

	cmd->completed = 1;
}
EXPORT_SYMBOL(scst_set_cmd_error_status);
void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
{
	int rc;

	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);

	rc = scst_alloc_sense(cmd, 1);
	if (rc != 0) {
		PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
			key, asc, ascq);
		goto out;
	}

	scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE,
		scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
	TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);

out:
	return;
}
EXPORT_SYMBOL(scst_set_cmd_error);
void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
	int key, int asc, int ascq)
{
	sBUG_ON(len == 0);

	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			PRINT_ERROR("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x72;		/* Response Code */
		if (len > 1)
			buffer[1] = key;	/* Sense Key */
		if (len > 2)
			buffer[2] = asc;	/* ASC */
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ */
	} else {
		/* Fixed format */
		if (len < 14) {
			PRINT_ERROR("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x70;		/* Response Code */
		if (len > 2)
			buffer[2] = key;	/* Sense Key */
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC */
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ */
	}

	TRACE_BUFFER("Sense set", buffer, len);
	return;
}
EXPORT_SYMBOL(scst_set_sense);
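
/*
 * Usage sketch (an illustration added here, not part of the original SCST
 * source): building the same MEDIUM NOT PRESENT sense (key 0x02, ASC 0x3a,
 * ASCQ 0x00) in both formats. In fixed format the result is buf[0] = 0x70,
 * buf[2] = 0x02, buf[7] = 0x0a, buf[12] = 0x3a, buf[13] = 0x00; in
 * descriptor format it is buf[0] = 0x72, buf[1] = 0x02, buf[2] = 0x3a,
 * buf[3] = 0x00.
 */
static inline void scst_example_build_sense(uint8_t *buf, int len)
{
	/* Fixed format (d_sense == false) */
	scst_set_sense(buf, len, false, 0x02, 0x3a, 0x00);

	/* Descriptor format (d_sense == true) */
	scst_set_sense(buf, len, true, 0x02, 0x3a, 0x00);
}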
bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
	int key, int asc, int ascq)
{
	bool res = false;

	/* Response Code */
	if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
		/* Fixed format */

		/* Sense Key */
		if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
			goto out;

		/* ASC */
		if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
			goto out;

		/* ASCQ */
		if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
			goto out;
	} else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
		/* Descriptor format */

		if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
			goto out;
		if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
			goto out;
		if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
			goto out;
	} else
		goto out;

	res = true;

out:
	TRACE_EXIT_RES((int)res);
	return res;
}
EXPORT_SYMBOL(scst_analyze_sense);
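
/*
 * Usage sketch (an illustration added here, not part of the original SCST
 * source): testing a sense buffer for a POWER ON/RESET unit attention
 * (key UNIT ATTENTION, ASC 0x29), ignoring the ASCQ, in either sense format.
 */
static inline bool scst_example_is_reset_sense(const uint8_t *sense, int len)
{
	return scst_analyze_sense(sense, len,
		SCST_SENSE_KEY_VALID | SCST_SENSE_ASC_VALID,
		UNIT_ATTENTION, 0x29, 0);
}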
static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
	unsigned int len)
{
	scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
	scst_alloc_set_sense(cmd, 1, sense, len);
}

void scst_set_busy(struct scst_cmd *cmd)
{
	int c = atomic_read(&cmd->sess->sess_cmd_count);

	if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
		scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
		TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
			"(cmds count %d, queue_type %x, sess->init_phase %d)",
			cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	} else {
		scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
		TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
			"initiator %s (cmds count %d, queue_type %x, "
			"sess->init_phase %d)", cmd->sess->initiator_name, c,
			cmd->queue_type, cmd->sess->init_phase);
	}
}
EXPORT_SYMBOL(scst_set_busy);
void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
{
	int i;

	TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
		asc, ascq);

	/* Protect sess_tgt_dev_list_hash */
	mutex_lock(&scst_mutex);

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		struct scst_tgt_dev *tgt_dev;

		list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			spin_lock_bh(&tgt_dev->tgt_dev_lock);
			if (!list_empty(&tgt_dev->UA_list)) {
				struct scst_tgt_dev_UA *ua;

				ua = list_entry(tgt_dev->UA_list.next,
					typeof(*ua), UA_list_entry);
				if (scst_analyze_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						SCST_SENSE_ALL_VALID,
						SCST_LOAD_SENSE(scst_sense_reset_UA))) {
					scst_set_sense(ua->UA_sense_buffer,
						sizeof(ua->UA_sense_buffer),
						tgt_dev->dev->d_sense,
						key, asc, ascq);
				} else
					PRINT_ERROR("%s",
						"The first UA isn't RESET UA");
			} else
				PRINT_ERROR("%s", "There's no RESET UA to "
					"replace");
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}

	mutex_unlock(&scst_mutex);
}
EXPORT_SYMBOL(scst_set_initial_UA);
static struct scst_aen *scst_alloc_aen(struct scst_tgt_dev *tgt_dev)
{
	struct scst_aen *aen;

	aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
	if (aen == NULL) {
		PRINT_ERROR("AEN memory allocation failed. Corresponding "
			"event notification will not be performed (initiator "
			"%s)", tgt_dev->sess->initiator_name);
		goto out;
	}
	memset(aen, 0, sizeof(*aen));

	aen->sess = tgt_dev->sess;
	scst_sess_get(aen->sess);

	aen->lun = scst_pack_lun(tgt_dev->lun);

out:
	TRACE_EXIT_HRES((unsigned long)aen);
	return aen;
}

static void scst_free_aen(struct scst_aen *aen)
{
	scst_sess_put(aen->sess);
	mempool_free(aen, scst_aen_mempool);
}
void scst_capacity_data_changed(struct scst_device *dev)
{
	struct scst_tgt_dev *tgt_dev;
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

	if (dev->type != TYPE_DISK) {
		TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
			"CHANGED UA", dev->type);
		goto out;
	}

	TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);

	mutex_lock(&scst_mutex);

	list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;

		if (tgtt->report_aen != NULL) {
			struct scst_aen *aen;
			int rc;

			aen = scst_alloc_aen(tgt_dev);
			if (aen == NULL)
				goto queue_ua;

			aen->event_fn = SCST_AEN_SCSI;
			aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
			scst_set_sense(aen->aen_sense, aen->aen_sense_len,
				tgt_dev->dev->d_sense,
				SCST_LOAD_SENSE(scst_sense_capacity_data_changed));

			TRACE_DBG("Calling target's %s report_aen(%p)",
				tgtt->name, aen);
			rc = tgtt->report_aen(aen);
			TRACE_DBG("Target's %s report_aen(%p) returned %d",
				tgtt->name, aen, rc);
			if (rc == SCST_AEN_RES_SUCCESS)
				continue;

			scst_free_aen(aen);
		}

queue_ua:
		TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED UA (tgt_dev %p)",
			tgt_dev);
		scst_set_sense(sense_buffer, sizeof(sense_buffer),
			tgt_dev->dev->d_sense,
			SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
		scst_check_set_UA(tgt_dev, sense_buffer,
			sizeof(sense_buffer), 0);
	}

	mutex_unlock(&scst_mutex);

out:
	return;
}
EXPORT_SYMBOL(scst_capacity_data_changed);
static inline bool scst_is_report_luns_changed_type(int type)
{
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	case TYPE_PRINTER:
	case TYPE_PROCESSOR:
	case TYPE_WORM:
	case TYPE_ROM:
	case TYPE_SCANNER:
	case TYPE_MOD:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_RAID:
	case TYPE_ENCLOSURE:
		return true;
	default:
		return false;
	}
}
/* scst_mutex supposed to be held */
static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
	int flags)
{
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
	struct list_head *shead;
	struct scst_tgt_dev *tgt_dev;
	int i;

	TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
		"(sess %p)", sess);

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			spin_lock_bh(&tgt_dev->tgt_dev_lock);
		}
	}

	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			if (!scst_is_report_luns_changed_type(
					tgt_dev->dev->type))
				continue;

			scst_set_sense(sense_buffer, sizeof(sense_buffer),
				tgt_dev->dev->d_sense,
				SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

			__scst_check_set_UA(tgt_dev, sense_buffer,
				sizeof(sense_buffer),
				flags | SCST_SET_UA_FLAG_GLOBAL);
		}
	}

	for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
		shead = &sess->sess_tgt_dev_list_hash[i];

		list_for_each_entry_reverse(tgt_dev,
				shead, sess_tgt_dev_list_entry) {
			spin_unlock_bh(&tgt_dev->tgt_dev_lock);
		}
	}
}
/* The activity supposed to be suspended and scst_mutex held */
void scst_report_luns_changed(struct scst_acg *acg)
{
	struct scst_session *sess;

	TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		int i;
		struct list_head *shead;
		struct scst_tgt_dev *tgt_dev;
		struct scst_tgt_template *tgtt = sess->tgt->tgtt;

		for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
			shead = &sess->sess_tgt_dev_list_hash[i];

			list_for_each_entry(tgt_dev, shead,
					sess_tgt_dev_list_entry) {
				if (scst_is_report_luns_changed_type(
						tgt_dev->dev->type))
					goto found;
			}
		}
		TRACE_MGMT_DBG("Not found a device capable REPORTED "
			"LUNS DATA CHANGED UA (sess %p)", sess);
		continue;

found:
		if (tgtt->report_aen != NULL) {
			struct scst_aen *aen;
			int rc;

			aen = scst_alloc_aen(tgt_dev);
			if (aen == NULL)
				goto queue_ua;

			aen->event_fn = SCST_AEN_SCSI;
			aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
			scst_set_sense(aen->aen_sense, aen->aen_sense_len,
				tgt_dev->dev->d_sense,
				SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));

			TRACE_DBG("Calling target's %s report_aen(%p)",
				tgtt->name, aen);
			rc = tgtt->report_aen(aen);
			TRACE_DBG("Target's %s report_aen(%p) returned %d",
				tgtt->name, aen, rc);
			if (rc == SCST_AEN_RES_SUCCESS)
				continue;

			scst_free_aen(aen);
		}

queue_ua:
		scst_queue_report_luns_changed_UA(sess, 0);
	}
}
void scst_aen_done(struct scst_aen *aen)
{
	TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
		aen->event_fn, aen->sess->initiator_name);

	if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
		goto out;

	if (aen->event_fn != SCST_AEN_SCSI)
		goto out;

	TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
		aen->sess->initiator_name);

	if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
			SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
				scst_sense_reported_luns_data_changed))) {
		mutex_lock(&scst_mutex);
		scst_queue_report_luns_changed_UA(aen->sess,
			SCST_SET_UA_FLAG_AT_HEAD);
		mutex_unlock(&scst_mutex);
	} else if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
			SCST_SENSE_ALL_VALID,
			SCST_LOAD_SENSE(scst_sense_capacity_data_changed))) {
		/* tgt_dev might get dead, so we need to reseek it */
		struct list_head *shead;
		struct scst_tgt_dev *tgt_dev;
		uint64_t lun;

		lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));

		mutex_lock(&scst_mutex);

		shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
		list_for_each_entry(tgt_dev, shead,
				sess_tgt_dev_list_entry) {
			if (tgt_dev->lun == lun) {
				TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED "
					"UA (tgt_dev %p)", tgt_dev);
				scst_check_set_UA(tgt_dev, aen->aen_sense,
					aen->aen_sense_len,
					SCST_SET_UA_FLAG_AT_HEAD);
				break;
			}
		}

		mutex_unlock(&scst_mutex);
	} else
		PRINT_ERROR("%s", "Unknown SCSI AEN");

out:
	scst_free_aen(aen);
}
EXPORT_SYMBOL(scst_aen_done);
int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
{
	int res;

	switch (cmd->state) {
	case SCST_CMD_STATE_INIT_WAIT:
	case SCST_CMD_STATE_INIT:
	case SCST_CMD_STATE_PRE_PARSE:
	case SCST_CMD_STATE_DEV_PARSE:
	case SCST_CMD_STATE_DEV_DONE:
		if (cmd->internal)
			res = SCST_CMD_STATE_FINISHED_INTERNAL;
		else
			res = SCST_CMD_STATE_PRE_XMIT_RESP;
		break;

	case SCST_CMD_STATE_PRE_DEV_DONE:
	case SCST_CMD_STATE_MODE_SELECT_CHECKS:
		res = SCST_CMD_STATE_DEV_DONE;
		break;

	case SCST_CMD_STATE_PRE_XMIT_RESP:
		res = SCST_CMD_STATE_XMIT_RESP;
		break;

	case SCST_CMD_STATE_PREPROCESS_DONE:
	case SCST_CMD_STATE_PREPARE_SPACE:
	case SCST_CMD_STATE_RDY_TO_XFER:
	case SCST_CMD_STATE_DATA_WAIT:
	case SCST_CMD_STATE_TGT_PRE_EXEC:
	case SCST_CMD_STATE_SEND_FOR_EXEC:
	case SCST_CMD_STATE_LOCAL_EXEC:
	case SCST_CMD_STATE_REAL_EXEC:
	case SCST_CMD_STATE_REAL_EXECUTING:
		res = SCST_CMD_STATE_PRE_DEV_DONE;
		break;

	default:
		PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}

	return res;
}
EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
{
#ifdef CONFIG_SCST_EXTRACHECKS
	switch (cmd->state) {
	case SCST_CMD_STATE_XMIT_RESP:
	case SCST_CMD_STATE_FINISHED:
	case SCST_CMD_STATE_FINISHED_INTERNAL:
	case SCST_CMD_STATE_XMIT_WAIT:
		PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
			cmd->state, cmd, cmd->cdb[0]);
		sBUG();
	}
#endif

	cmd->state = scst_get_cmd_abnormal_done_state(cmd);

	EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
			   (cmd->tgt_dev == NULL));
}
EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
{
	int i, l;

	scst_check_restore_sg_buff(cmd);
	cmd->resp_data_len = resp_data_len;

	if (resp_data_len == cmd->bufflen)
		goto out;

	l = 0;
	for (i = 0; i < cmd->sg_cnt; i++) {
		l += cmd->sg[i].length;
		if (l >= resp_data_len) {
			int left = resp_data_len - (l - cmd->sg[i].length);
#ifdef CONFIG_SCST_DEBUG
			TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
				"resp_data_len %d, i %d, cmd->sg[i].length %d, "
				"left %d",
				cmd, (long long unsigned int)cmd->tag,
				resp_data_len, i,
				cmd->sg[i].length, left);
#endif
			cmd->orig_sg_cnt = cmd->sg_cnt;
			cmd->orig_sg_entry = i;
			cmd->orig_entry_len = cmd->sg[i].length;
			cmd->sg_cnt = (left > 0) ? i+1 : i;
			cmd->sg[i].length = left;
			cmd->sg_buff_modified = 1;
			break;
		}
	}

out:
	return;
}
EXPORT_SYMBOL(scst_set_resp_data_len);
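
/*
 * Worked example (added illustration, not part of the original SCST source):
 * with three 4 KB SG entries (bufflen 12288) and resp_data_len 6144, the
 * loop above stops at i = 1 (l = 8192), left = 6144 - 4096 = 2048, so sg_cnt
 * is truncated to 2 and sg[1].length shrinks from 4096 to 2048. The saved
 * orig_* fields let scst_check_restore_sg_buff() undo this later.
 */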
/* Called under scst_mutex and suspended activity */
int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
{
	struct scst_device *dev;
	int res = 0;
	static int dev_num; /* protected by scst_mutex */

	dev = kzalloc(sizeof(*dev), gfp_mask);
	if (dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_device failed");
		res = -ENOMEM;
		goto out;
	}

	dev->handler = &scst_null_devtype;
	dev->p_cmd_lists = &scst_main_cmd_lists;
	atomic_set(&dev->dev_cmd_count, 0);
	atomic_set(&dev->write_cmd_count, 0);
	scst_init_mem_lim(&dev->dev_mem_lim);
	spin_lock_init(&dev->dev_lock);
	atomic_set(&dev->on_dev_count, 0);
	INIT_LIST_HEAD(&dev->blocked_cmd_list);
	INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
	INIT_LIST_HEAD(&dev->dev_acg_dev_list);
	INIT_LIST_HEAD(&dev->threads_list);
	init_waitqueue_head(&dev->on_dev_waitQ);
	dev->dev_double_ua_possible = 1;
	dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
	dev->dev_num = dev_num++;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
	dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
	if (dev->dev_io_ctx == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
		res = -ENOMEM;
		kfree(dev);
		goto out;
	}
#endif
#endif

	*out_dev = dev;

out:
	TRACE_EXIT_RES(res);
	return res;
}

/* Called under scst_mutex and suspended activity */
void scst_free_device(struct scst_device *dev)
{
#ifdef CONFIG_SCST_EXTRACHECKS
	if (!list_empty(&dev->dev_tgt_dev_list) ||
	    !list_empty(&dev->dev_acg_dev_list)) {
		PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
			"is not empty!", __func__);
		sBUG();
	}
#endif

	__exit_io_context(dev->dev_io_ctx);

	kfree(dev);
}

void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
{
	atomic_set(&mem_lim->alloced_pages, 0);
	mem_lim->max_allowed_pages =
		((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
}
EXPORT_SYMBOL(scst_init_mem_lim);
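
/*
 * Worked example (added illustration, not part of the original SCST source):
 * scst_max_dev_cmd_mem is in MB, so "<< 10" yields KB and ">> (PAGE_SHIFT -
 * 10)" converts KB to pages. With scst_max_dev_cmd_mem = 64 and 4 KB pages
 * (PAGE_SHIFT = 12): (64 << 10) >> 2 = 16384 pages, i.e. 64 MB.
 */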
static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
	struct scst_device *dev, uint64_t lun)
{
	struct scst_acg_dev *res;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
#else
	res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
#endif
	if (res == NULL) {
		TRACE(TRACE_OUT_OF_MEM,
			"%s", "Allocation of scst_acg_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(res, 0, sizeof(*res));
#endif

	res->dev = dev;
	res->acg = acg;
	res->lun = lun;

out:
	TRACE_EXIT_HRES(res);
	return res;
}

/* The activity supposed to be suspended and scst_mutex held */
static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
{
	TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_del(&acg_dev->acg_dev_list_entry);
	list_del(&acg_dev->dev_acg_dev_list_entry);

	kmem_cache_free(scst_acgd_cachep, acg_dev);
}
/* The activity supposed to be suspended and scst_mutex held */
struct scst_acg *scst_alloc_add_acg(const char *acg_name)
{
	struct scst_acg *acg;

	acg = kzalloc(sizeof(*acg), GFP_KERNEL);
	if (acg == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
		goto out;
	}

	INIT_LIST_HEAD(&acg->acg_dev_list);
	INIT_LIST_HEAD(&acg->acg_sess_list);
	INIT_LIST_HEAD(&acg->acn_list);
	acg->acg_name = acg_name;

	TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
	list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);

out:
	TRACE_EXIT_HRES(acg);
	return acg;
}
/* The activity supposed to be suspended and scst_mutex held */
int scst_destroy_acg(struct scst_acg *acg)
{
	struct scst_acn *n, *nn;
	struct scst_acg_dev *acg_dev, *acg_dev_tmp;
	int res = 0;

	if (!list_empty(&acg->acg_sess_list)) {
		PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
		res = -EBUSY;
		goto out;
	}

	TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
	list_del(&acg->scst_acg_list_entry);

	/* Freeing acg_devs */
	list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
			acg_dev_list_entry) {
		struct scst_tgt_dev *tgt_dev, *tt;
		list_for_each_entry_safe(tgt_dev, tt,
				&acg_dev->dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			if (tgt_dev->acg_dev == acg_dev)
				scst_free_tgt_dev(tgt_dev);
		}
		scst_free_acg_dev(acg_dev);
	}

	/* Freeing names */
	list_for_each_entry_safe(n, nn, &acg->acn_list,
			acn_list_entry) {
		list_del(&n->acn_list_entry);
		kfree(n->name);
		kfree(n);
	}
	INIT_LIST_HEAD(&acg->acn_list);

	kfree(acg);

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
	struct scst_acg_dev *acg_dev)
{
	int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
	struct scst_tgt_dev *tgt_dev, *t;
	struct scst_device *dev = acg_dev->dev;
	struct list_head *sess_tgt_dev_list_head;
	struct scst_tgt_template *vtt = sess->tgt->tgtt;
	int rc, i;
	bool share_io_ctx = false;
	uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
#else
	tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
#endif
	if (tgt_dev == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_tgt_dev failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(tgt_dev, 0, sizeof(*tgt_dev));
#endif

	tgt_dev->dev = dev;
	tgt_dev->lun = acg_dev->lun;
	tgt_dev->acg_dev = acg_dev;
	tgt_dev->sess = sess;
	atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);

	scst_sgv_pool_use_norm(tgt_dev);

	if (dev->scsi_dev != NULL) {
		ini_sg = dev->scsi_dev->host->sg_tablesize;
		ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
		ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
				ENABLE_CLUSTERING);
	} else {
		ini_sg = (1 << 15) /* infinite */;
		ini_unchecked_isa_dma = 0;
		ini_use_clustering = 0;
	}
	tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);

	if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
	    !sess->tgt->tgtt->no_clustering)
		scst_sgv_pool_use_norm_clust(tgt_dev);

	if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
		scst_sgv_pool_use_dma(tgt_dev);

	if (dev->scsi_dev != NULL) {
		TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
			"SCST lun=%lld", dev->scsi_dev->host->host_no,
			dev->scsi_dev->channel, dev->scsi_dev->id,
			dev->scsi_dev->lun,
			(long long unsigned int)tgt_dev->lun);
	} else {
		TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
			dev->virt_name, (long long unsigned int)tgt_dev->lun);
	}

	spin_lock_init(&tgt_dev->tgt_dev_lock);
	INIT_LIST_HEAD(&tgt_dev->UA_list);
	spin_lock_init(&tgt_dev->thr_data_lock);
	INIT_LIST_HEAD(&tgt_dev->thr_data_list);
	spin_lock_init(&tgt_dev->sn_lock);
	INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
	INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
	tgt_dev->expected_sn = 1;
	tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
	tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
	for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
		atomic_set(&tgt_dev->sn_slots[i], 0);

	if (dev->handler->parse_atomic &&
	    (sess->tgt->tgtt->preprocessing_done == NULL)) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		if (dev->handler->exec_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
				&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->exec_atomic) {
		if (sess->tgt->tgtt->rdy_to_xfer_atomic)
			__set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
				&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
			&tgt_dev->tgt_dev_flags);
		__set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}
	if (dev->handler->dev_done_atomic &&
	    sess->tgt->tgtt->xmit_response_atomic) {
		__set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
			&tgt_dev->tgt_dev_flags);
	}

	scst_set_sense(sense_buffer, sizeof(sense_buffer),
		dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
	scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);

	tm_dbg_init_tgt_dev(tgt_dev, acg_dev);

	if (tgt_dev->sess->initiator_name != NULL) {
		spin_lock_bh(&dev->dev_lock);
		list_for_each_entry(t, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			TRACE_DBG("t name %s (tgt_dev name %s)",
				t->sess->initiator_name,
				tgt_dev->sess->initiator_name);
			if (t->sess->initiator_name == NULL)
				continue;
			if (strcmp(t->sess->initiator_name,
					tgt_dev->sess->initiator_name) == 0) {
				share_io_ctx = true;
				break;
			}
		}
		spin_unlock_bh(&dev->dev_lock);
	}

	if (share_io_ctx) {
		TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
			t->tgt_dev_io_ctx, tgt_dev,
			tgt_dev->sess->initiator_name);
		tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
	} else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
		tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
		if (tgt_dev->tgt_dev_io_ctx == NULL) {
			TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
				"context for dev %s (initiator %s)",
				dev->virt_name, sess->initiator_name);
			goto out_free;
		}
#endif
#endif
	}

	if (vtt->threads_num > 0) {
		rc = 0;
		if (dev->handler->threads_num > 0)
			rc = scst_add_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			rc = scst_add_global_threads(vtt->threads_num);
		if (rc != 0)
			goto out_free;
	}

	if (dev->handler && dev->handler->attach_tgt) {
		TRACE_DBG("Calling dev handler's attach_tgt(%p)",
			tgt_dev);
		rc = dev->handler->attach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
		if (rc != 0) {
			PRINT_ERROR("Device handler's %s attach_tgt() "
				"failed: %d", dev->handler->name, rc);
			goto out_thr_free;
		}
	}

	spin_lock_bh(&dev->dev_lock);
	list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
	if (dev->dev_reserved)
		__set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
	spin_unlock_bh(&dev->dev_lock);

	sess_tgt_dev_list_head =
		&sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
	list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
		sess_tgt_dev_list_head);

out:
	TRACE_EXIT_HRES((unsigned long)tgt_dev);
	return tgt_dev;

out_thr_free:
	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_global_threads(vtt->threads_num);
	}

out_free:
	scst_free_all_UA(tgt_dev);
	__exit_io_context(tgt_dev->tgt_dev_io_ctx);

	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
	tgt_dev = NULL;
	goto out;
}
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);

/* No locks supposed to be held, scst_mutex - held */
void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
{
	scst_clear_reservation(tgt_dev);

	/* With activity suspended the lock isn't needed, but let's be safe */
	spin_lock_bh(&tgt_dev->tgt_dev_lock);
	scst_free_all_UA(tgt_dev);
	spin_unlock_bh(&tgt_dev->tgt_dev_lock);

	if (queue_UA) {
		uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
		scst_set_sense(sense_buffer, sizeof(sense_buffer),
			tgt_dev->dev->d_sense,
			SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
		scst_check_set_UA(tgt_dev, sense_buffer,
			sizeof(sense_buffer), 0);
	}
}
/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;

	tm_dbg_deinit_tgt_dev(tgt_dev);

	spin_lock_bh(&dev->dev_lock);
	list_del(&tgt_dev->dev_tgt_dev_list_entry);
	spin_unlock_bh(&dev->dev_lock);

	list_del(&tgt_dev->sess_tgt_dev_list_entry);

	scst_clear_reservation(tgt_dev);
	scst_free_all_UA(tgt_dev);

	if (dev->handler && dev->handler->detach_tgt) {
		TRACE_DBG("Calling dev handler's detach_tgt(%p)",
			tgt_dev);
		dev->handler->detach_tgt(tgt_dev);
		TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
	}

	if (vtt->threads_num > 0) {
		if (dev->handler->threads_num > 0)
			scst_del_dev_threads(dev, vtt->threads_num);
		else if (dev->handler->threads_num == 0)
			scst_del_global_threads(vtt->threads_num);
	}

	__exit_io_context(tgt_dev->tgt_dev_io_ctx);

	kmem_cache_free(scst_tgtd_cachep, tgt_dev);
}
/* scst_mutex supposed to be held */
int scst_sess_alloc_tgt_devs(struct scst_session *sess)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;

	list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
			acg_dev_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	scst_sess_free_tgt_devs(sess);
	goto out;
}

/*
 * scst_mutex supposed to be held, there must not be parallel activity in this
 * session.
 */
static void scst_sess_free_tgt_devs(struct scst_session *sess)
{
	int i;
	struct scst_tgt_dev *tgt_dev, *t;

	/* The session is going down, no users, so no locks */
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
				sess_tgt_dev_list_entry) {
			scst_free_tgt_dev(tgt_dev);
		}
		INIT_LIST_HEAD(sess_tgt_dev_list_head);
	}
}
/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
	uint64_t lun, int read_only)
{
	int res = 0;
	struct scst_acg_dev *acg_dev;
	struct scst_tgt_dev *tgt_dev;
	struct scst_session *sess;
	LIST_HEAD(tmp_tgt_dev_list);

	INIT_LIST_HEAD(&tmp_tgt_dev_list);

#ifdef CONFIG_SCST_EXTRACHECKS
	list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
		if (acg_dev->dev == dev) {
			PRINT_ERROR("Device is already in group %s",
				acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}
#endif

	acg_dev = scst_alloc_acg_dev(acg, dev, lun);
	if (acg_dev == NULL) {
		res = -ENOMEM;
		goto out;
	}
	acg_dev->rd_only_flag = read_only;

	TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
		acg_dev);
	list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
	list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);

	list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
		tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
		if (tgt_dev == NULL) {
			res = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
			&tmp_tgt_dev_list);
	}

	scst_report_luns_changed(acg);

	if (dev->virt_name != NULL) {
		PRINT_INFO("Added device %s to group %s (LUN %lld, "
			"rd_only %d)", dev->virt_name, acg->acg_name,
			(long long unsigned int)lun,
			read_only);
	} else {
		PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
			"%lld, rd_only %d)",
			dev->scsi_dev->host->host_no,
			dev->scsi_dev->channel, dev->scsi_dev->id,
			dev->scsi_dev->lun, acg->acg_name,
			(long long unsigned int)lun,
			read_only);
	}

out:
	TRACE_EXIT_RES(res);
	return res;

out_free:
	list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
			extra_tgt_dev_list_entry) {
		scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);
	goto out;
}
/* The activity supposed to be suspended and scst_mutex held */
int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
{
	int res = 0;
	struct scst_acg_dev *acg_dev = NULL, *a;
	struct scst_tgt_dev *tgt_dev, *tt;

	list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
		if (a->dev == dev) {
			acg_dev = a;
			break;
		}
	}

	if (acg_dev == NULL) {
		PRINT_ERROR("Device is not found in group %s", acg->acg_name);
		res = -EINVAL;
		goto out;
	}

	list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
			dev_tgt_dev_list_entry) {
		if (tgt_dev->acg_dev == acg_dev)
			scst_free_tgt_dev(tgt_dev);
	}
	scst_free_acg_dev(acg_dev);

	scst_report_luns_changed(acg);

	if (dev->virt_name != NULL) {
		PRINT_INFO("Removed device %s from group %s",
			dev->virt_name, acg->acg_name);
	} else {
		PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
			dev->scsi_dev->host->host_no,
			dev->scsi_dev->channel, dev->scsi_dev->id,
			dev->scsi_dev->lun, acg->acg_name);
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
/* scst_mutex supposed to be held */
int scst_acg_add_name(struct scst_acg *acg, const char *name)
{
	int res = 0;
	struct scst_acn *n;
	int len;
	char *nm;

	list_for_each_entry(n, &acg->acn_list, acn_list_entry)
	{
		if (strcmp(n->name, name) == 0) {
			PRINT_ERROR("Name %s already exists in group %s",
				name, acg->acg_name);
			res = -EINVAL;
			goto out;
		}
	}

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (n == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn");
		res = -ENOMEM;
		goto out;
	}

	len = strlen(name);
	nm = kmalloc(len + 1, GFP_KERNEL);
	if (nm == NULL) {
		PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
		res = -ENOMEM;
		goto out_free;
	}

	strcpy(nm, name);
	n->name = nm;

	list_add_tail(&n->acn_list_entry, &acg->acn_list);

out:
	if (res == 0)
		PRINT_INFO("Added name %s to group %s", name, acg->acg_name);

	TRACE_EXIT_RES(res);
	return res;

out_free:
	kfree(n);
	goto out;
}
/* scst_mutex supposed to be held */
int scst_acg_remove_name(struct scst_acg *acg, const char *name)
{
	int res = -EINVAL;
	struct scst_acn *n;

	list_for_each_entry(n, &acg->acn_list, acn_list_entry)
	{
		if (strcmp(n->name, name) == 0) {
			list_del(&n->acn_list_entry);
			kfree(n->name);
			kfree(n);
			res = 0;
			break;
		}
	}

	if (res == 0) {
		PRINT_INFO("Removed name %s from group %s", name,
			acg->acg_name);
	} else {
		PRINT_ERROR("Unable to find name %s in group %s", name,
			acg->acg_name);
	}

	TRACE_EXIT_RES(res);
	return res;
}
static struct scst_cmd *scst_create_prepare_internal_cmd(
	struct scst_cmd *orig_cmd, int bufsize)
{
	struct scst_cmd *res;
	gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;

	res = scst_alloc_cmd(gfp_mask);
	if (res == NULL)
		goto out;

	res->cmd_lists = orig_cmd->cmd_lists;
	res->sess = orig_cmd->sess;
	res->atomic = scst_cmd_atomic(orig_cmd);
	res->internal = 1;
	res->tgtt = orig_cmd->tgtt;
	res->tgt = orig_cmd->tgt;
	res->dev = orig_cmd->dev;
	res->tgt_dev = orig_cmd->tgt_dev;
	res->lun = orig_cmd->lun;
	res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
	res->data_direction = SCST_DATA_UNKNOWN;
	res->orig_cmd = orig_cmd;
	res->bufflen = bufsize;

	scst_sess_get(res->sess);
	if (res->tgt_dev != NULL)
		__scst_get(0);

	res->state = SCST_CMD_STATE_PRE_PARSE;

out:
	TRACE_EXIT_HRES((unsigned long)res);
	return res;
}
int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
{
	int res = 0;
	static const uint8_t request_sense[6] =
	    { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
	struct scst_cmd *rs_cmd;

	if (orig_cmd->sense != NULL) {
		TRACE_MEM("Releasing sense %p (orig_cmd %p)",
			orig_cmd->sense, orig_cmd);
		mempool_free(orig_cmd->sense, scst_sense_mempool);
		orig_cmd->sense = NULL;
	}

	rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
			SCST_SENSE_BUFFERSIZE);
	if (rs_cmd == NULL)
		goto out_error;

	memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
	rs_cmd->cdb_len = sizeof(request_sense);
	rs_cmd->data_direction = SCST_DATA_READ;
	rs_cmd->expected_data_direction = rs_cmd->data_direction;
	rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
	rs_cmd->expected_values_set = 1;

	TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
		"cmd list", rs_cmd);
	spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
	list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
	wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
	spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);

out:
	TRACE_EXIT_RES(res);
	return res;

out_error:
	res = -1;
	goto out;
}
static void scst_complete_request_sense(struct scst_cmd *req_cmd)
{
	struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
	uint8_t *buf;
	int len;

	sBUG_ON(orig_cmd == NULL);

	len = scst_get_buf_first(req_cmd, &buf);

	if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
	    SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
		PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
			buf, len);
		scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
			len);
	} else {
		PRINT_ERROR("%s", "Unable to get the sense via "
			"REQUEST SENSE, returning HARDWARE ERROR");
		scst_set_cmd_error(orig_cmd,
			SCST_LOAD_SENSE(scst_sense_hardw_error));
	}

	if (len > 0)
		scst_put_buf(req_cmd, buf);

	TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
		"cmd list", orig_cmd);
	spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
	list_add(&orig_cmd->cmd_list_entry,
		&orig_cmd->cmd_lists->active_cmd_list);
	wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
	spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
}

int scst_finish_internal_cmd(struct scst_cmd *cmd)
{
	int res;

	sBUG_ON(!cmd->internal);

	if (cmd->cdb[0] == REQUEST_SENSE)
		scst_complete_request_sense(cmd);

	__scst_cmd_put(cmd);

	res = SCST_CMD_STATE_RES_CONT_NEXT;

	TRACE_EXIT_HRES(res);
	return res;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static void scst_req_done(struct scsi_cmnd *scsi_cmd)
{
	struct scsi_request *req;

	if (scsi_cmd && (req = scsi_cmd->sc_request)) {
		if (req->sr_bufflen)
			kfree(req->sr_buffer);
		scsi_release_request(req);
	}
}

static void scst_send_release(struct scst_device *dev)
{
	struct scsi_request *req;
	struct scsi_device *scsi_dev;
	uint8_t cdb[6];

	if (dev->scsi_dev == NULL)
		goto out;

	scsi_dev = dev->scsi_dev;

	req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
	if (req == NULL) {
		PRINT_ERROR("Allocation of scsi_request failed: unable "
			"to RELEASE device %d:%d:%d:%d",
			scsi_dev->host->host_no, scsi_dev->channel,
			scsi_dev->id, scsi_dev->lun);
		goto out;
	}

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = RELEASE;
	cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
	    ((scsi_dev->lun << 5) & 0xe0) : 0;
	memcpy(req->sr_cmnd, cdb, sizeof(cdb));
	req->sr_cmd_len = sizeof(cdb);
	req->sr_data_direction = SCST_DATA_NONE;
	req->sr_use_sg = 0;
	req->sr_bufflen = 0;
	req->sr_buffer = NULL;
	req->sr_request->rq_disk = dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
		"mid-level", req);
	scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
		scst_req_done, 15, 3);

out:
	return;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
static void scst_send_release(struct scst_device *dev)
{
	struct scsi_device *scsi_dev;
	unsigned char cdb[6];
	uint8_t sense[SCSI_SENSE_BUFFERSIZE];
	int rc, i;

	if (dev->scsi_dev == NULL)
		goto out;

	scsi_dev = dev->scsi_dev;

	for (i = 0; i < 5; i++) {
		memset(cdb, 0, sizeof(cdb));
		cdb[0] = RELEASE;
		cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
		    ((scsi_dev->lun << 5) & 0xe0) : 0;

		memset(sense, 0, sizeof(sense));

		TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
			"SCSI mid-level");
		rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
				sense, 15, 0, 0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
				, NULL
#endif
				);
		TRACE_DBG("RELEASE done: %x", rc);

		if (scsi_status_is_good(rc)) {
			break;
		} else {
			PRINT_ERROR("RELEASE failed: %d", rc);
			PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
			scst_check_internal_sense(dev, rc, sense,
				sizeof(sense));
		}
	}

out:
	return;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
/* scst_mutex supposed to be held */
static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
{
	struct scst_device *dev = tgt_dev->dev;
	int release = 0;

	spin_lock_bh(&dev->dev_lock);
	if (dev->dev_reserved &&
	    !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
		/* This is one who holds the reservation */
		struct scst_tgt_dev *tgt_dev_tmp;
		list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			clear_bit(SCST_TGT_DEV_RESERVED,
				&tgt_dev_tmp->tgt_dev_flags);
		}
		dev->dev_reserved = 0;
		release = 1;
	}
	spin_unlock_bh(&dev->dev_lock);

	if (release)
		scst_send_release(dev);
}
struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
	const char *initiator_name)
{
	struct scst_session *sess;
	int i;
	int len;
	char *nm;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
#else
	sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
#endif
	if (sess == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scst_session failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(sess, 0, sizeof(*sess));
#endif

	sess->init_phase = SCST_SESS_IPH_INITING;
	sess->shut_phase = SCST_SESS_SPH_READY;
	atomic_set(&sess->refcnt, 0);
	for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
		struct list_head *sess_tgt_dev_list_head =
			&sess->sess_tgt_dev_list_hash[i];
		INIT_LIST_HEAD(sess_tgt_dev_list_head);
	}
	spin_lock_init(&sess->sess_list_lock);
	INIT_LIST_HEAD(&sess->search_cmd_list);
	sess->tgt = tgt;
	INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
	INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);

#ifdef CONFIG_SCST_MEASURE_LATENCY
	spin_lock_init(&sess->meas_lock);
#endif

	len = strlen(initiator_name);
	nm = kmalloc(len + 1, gfp_mask);
	if (nm == NULL) {
		PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
		goto out_free;
	}

	strcpy(nm, initiator_name);
	sess->initiator_name = nm;

out:
	return sess;

out_free:
	kmem_cache_free(scst_sess_cachep, sess);
	sess = NULL;
	goto out;
}
void scst_free_session(struct scst_session *sess)
{
	mutex_lock(&scst_mutex);

	TRACE_DBG("Removing sess %p from the list", sess);
	list_del(&sess->sess_list_entry);
	TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
	list_del(&sess->acg_sess_list_entry);

	scst_sess_free_tgt_devs(sess);

	wake_up_all(&sess->tgt->unreg_waitQ);

	mutex_unlock(&scst_mutex);

	kfree(sess->initiator_name);
	kmem_cache_free(scst_sess_cachep, sess);
}
void scst_free_session_callback(struct scst_session *sess)
{
	struct completion *c;

	TRACE_DBG("Freeing session %p", sess);

	c = sess->shutdown_compl;

	if (sess->unreg_done_fn) {
		TRACE_DBG("Calling unreg_done_fn(%p)", sess);
		sess->unreg_done_fn(sess);
		TRACE_DBG("%s", "unreg_done_fn() returned");
	}
	scst_free_session(sess);

	if (c)
		complete_all(c);
}

void scst_sched_session_free(struct scst_session *sess)
{
	unsigned long flags;

	if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
		PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
			"shut phase %lx", sess, sess->shut_phase);
		sBUG();
	}

	spin_lock_irqsave(&scst_mgmt_lock, flags);
	TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
	list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
	spin_unlock_irqrestore(&scst_mgmt_lock, flags);

	wake_up(&scst_mgmt_waitQ);
}
void scst_cmd_get(struct scst_cmd *cmd)
{
	__scst_cmd_get(cmd);
}
EXPORT_SYMBOL(scst_cmd_get);

void scst_cmd_put(struct scst_cmd *cmd)
{
	__scst_cmd_put(cmd);
}
EXPORT_SYMBOL(scst_cmd_put);
struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
{
	struct scst_cmd *cmd;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
#else
	cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
#endif
	if (cmd == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
		goto out;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
	memset(cmd, 0, sizeof(*cmd));
#endif

	cmd->state = SCST_CMD_STATE_INIT_WAIT;
	cmd->start_time = jiffies;
	atomic_set(&cmd->cmd_ref, 1);
	cmd->cmd_lists = &scst_main_cmd_lists;
	INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
	cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
	cmd->timeout = SCST_DEFAULT_TIMEOUT;
	cmd->is_send_status = 1;
	cmd->resp_data_len = -1;

	cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
	cmd->dbl_ua_orig_resp_data_len = -1;

out:
	return cmd;
}

static void scst_destroy_put_cmd(struct scst_cmd *cmd)
{
	scst_sess_put(cmd->sess);

	/*
	 * At this point tgt_dev can be dead, but the pointer remains non-NULL
	 */
	if (likely(cmd->tgt_dev != NULL))
		__scst_put();

	scst_destroy_cmd(cmd);
	return;
}
/* No locks supposed to be held */
void scst_free_cmd(struct scst_cmd *cmd)
{
	int destroy = 1;

	TRACE_DBG("Freeing cmd %p (tag %llu)",
		cmd, (long long unsigned int)cmd->tag);

	if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
		TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
			cmd, atomic_read(&scst_cmd_count));
	}

	sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
		cmd->dec_on_dev_needed);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#if defined(CONFIG_SCST_EXTRACHECKS)
	if (cmd->scsi_req) {
		PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
			"scsi_req!");
		scst_release_request(cmd);
	}
#endif
#endif

	/*
	 * Target driver can already free sg buffer before calling
	 * scst_tgt_cmd_done(). E.g., scst_local has to do that.
	 */
	if (!cmd->tgt_data_buf_alloced)
		scst_check_restore_sg_buff(cmd);

	if (cmd->tgtt->on_free_cmd != NULL) {
		TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
		cmd->tgtt->on_free_cmd(cmd);
		TRACE_DBG("%s", "Target's on_free_cmd() returned");
	}

	if (likely(cmd->dev != NULL)) {
		struct scst_dev_type *handler = cmd->dev->handler;
		if (handler->on_free_cmd != NULL) {
			TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
				handler->name, cmd);
			handler->on_free_cmd(cmd);
			TRACE_DBG("Dev handler %s on_free_cmd() returned",
				handler->name);
		}
	}

	scst_release_space(cmd);

	if (unlikely(cmd->sense != NULL)) {
		TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
		mempool_free(cmd->sense, scst_sense_mempool);
		cmd->sense = NULL;
	}

	if (likely(cmd->tgt_dev != NULL)) {
#ifdef CONFIG_SCST_EXTRACHECKS
		if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
			PRINT_ERROR("Finishing not executed cmd %p (opcode "
				"%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
				cmd, cmd->cdb[0], cmd->tgtt->name,
				(long long unsigned int)cmd->lun,
				cmd->sn, cmd->tgt_dev->expected_sn);
			scst_unblock_deferred(cmd->tgt_dev, cmd);
		}
#endif

		if (unlikely(cmd->out_of_sn)) {
			TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
				"destroy=%d", cmd,
				(long long unsigned int)cmd->tag,
				cmd->sn, destroy);
			destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
					&cmd->cmd_flags);
		}
	}

	if (likely(destroy))
		scst_destroy_put_cmd(cmd);
}
/* No locks supposed to be held. */
void scst_check_retries(struct scst_tgt *tgt)
{
	int need_wake_up = 0;

	/*
	 * We don't worry about overflow of finished_cmds, because we check
	 * only for its change.
	 */
	atomic_inc(&tgt->finished_cmds);
	/* See comment in scst_queue_retry_cmd() */
	smp_mb__after_atomic_inc();
	if (unlikely(tgt->retry_cmds > 0)) {
		struct scst_cmd *c, *tc;
		unsigned long flags;

		TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
			tgt->retry_cmds);

		spin_lock_irqsave(&tgt->tgt_lock, flags);
		list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
				cmd_list_entry) {
			tgt->retry_cmds--;

			TRACE_RETRY("Moving retry cmd %p to head of active "
				"cmd list (retry_cmds left %d)",
				c, tgt->retry_cmds);
			spin_lock(&c->cmd_lists->cmd_list_lock);
			list_move(&c->cmd_list_entry,
				&c->cmd_lists->active_cmd_list);
			wake_up(&c->cmd_lists->cmd_list_waitQ);
			spin_unlock(&c->cmd_lists->cmd_list_lock);

			need_wake_up++;
			if (need_wake_up >= 2) /* "slow start" */
				break;
		}
		spin_unlock_irqrestore(&tgt->tgt_lock, flags);
	}
}
void scst_tgt_retry_timer_fn(unsigned long arg)
{
	struct scst_tgt *tgt = (struct scst_tgt *)arg;
	unsigned long flags;

	TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	tgt->retry_timer_active = 0;
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);

	scst_check_retries(tgt);
}
struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
{
	struct scst_mgmt_cmd *mcmd;

	mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
	if (mcmd == NULL) {
		PRINT_CRIT_ERROR("%s", "Allocation of management command "
			"failed, some commands and their data could leak");
		goto out;
	}
	memset(mcmd, 0, sizeof(*mcmd));

out:
	return mcmd;
}

void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
{
	unsigned long flags;

	spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
	atomic_dec(&mcmd->sess->sess_cmd_count);
	spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);

	scst_sess_put(mcmd->sess);

	if (mcmd->mcmd_tgt_dev != NULL)
		__scst_put();

	mempool_free(mcmd, scst_mgmt_mempool);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
int scst_alloc_request(struct scst_cmd *cmd)
{
	int res = 0;
	struct scsi_request *req;
	int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;

	/* cmd->dev->scsi_dev must be non-NULL here */
	req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
	if (req == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s",
			"Allocation of scsi_request failed");
		res = -ENOMEM;
		goto out;
	}

	cmd->scsi_req = req;

	memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
	req->sr_cmd_len = cmd->cdb_len;
	req->sr_data_direction = cmd->data_direction;
	req->sr_use_sg = cmd->sg_cnt;
	req->sr_bufflen = cmd->bufflen;
	req->sr_buffer = cmd->sg;
	req->sr_request->rq_disk = cmd->dev->rq_disk;
	req->sr_sense_buffer[0] = 0;

	cmd->scsi_req->upper_private_data = cmd;

out:
	return res;
}

void scst_release_request(struct scst_cmd *cmd)
{
	scsi_release_request(cmd->scsi_req);
	cmd->scsi_req = NULL;
}
#endif
int scst_alloc_space(struct scst_cmd *cmd)
{
	gfp_t gfp_mask;
	int res = -ENOMEM;
	int atomic = scst_cmd_atomic(cmd);
	int flags;
	struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;

	gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);

	flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
	if (cmd->no_sgv)
		flags |= SCST_POOL_ALLOC_NO_CACHED;

	cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
			&cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
	if (cmd->sg == NULL)
		goto out;

	if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
		static int ll;
		if (ll < 10) {
			PRINT_INFO("Unable to complete command due to "
				"SG IO count limitation (requested %d, "
				"available %d, tgt lim %d)", cmd->sg_cnt,
				tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
			ll++;
		}
		goto out_sg_free;
	}

	if (cmd->data_direction != SCST_DATA_BIDI)
		goto success;

	cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
			flags, &cmd->in_sg_cnt, &cmd->in_sgv,
			&cmd->dev->dev_mem_lim, NULL);
	if (cmd->in_sg == NULL)
		goto out_sg_free;

	if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
		static int ll;
		if (ll < 10) {
			PRINT_INFO("Unable to complete command due to "
				"SG IO count limitation (IN buffer, requested "
				"%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
				tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
			ll++;
		}
		goto out_in_sg_free;
	}

success:
	res = 0;

out:
	return res;

out_in_sg_free:
	sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
	cmd->in_sg = NULL;
	cmd->in_sg_cnt = 0;
	cmd->in_sgv = NULL;

out_sg_free:
	sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->sgv = NULL;
	goto out;
}
static void scst_release_space(struct scst_cmd *cmd)
{
	if (cmd->sgv == NULL)
		goto out;

	if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
		TRACE_MEM("%s", "*data_buf_alloced set, returning");
		goto out;
	}

	sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
	cmd->sgv = NULL;
	cmd->sg_cnt = 0;
	cmd->sg = NULL;
	cmd->bufflen = 0;

	if (cmd->in_sgv != NULL) {
		sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
		cmd->in_sgv = NULL;
		cmd->in_sg_cnt = 0;
		cmd->in_sg = NULL;
		cmd->in_bufflen = 0;
	}

out:
	return;
}
void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
{
	struct scatterlist *src_sg, *dst_sg;
	unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
	struct page *src, *dst;
	unsigned int s, d, to_copy;

	if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
		if (cmd->data_direction != SCST_DATA_BIDI) {
			src_sg = cmd->tgt_sg;
			src_sg_cnt = cmd->tgt_sg_cnt;
			dst_sg = cmd->sg;
			to_copy = cmd->bufflen;
		} else {
			TRACE_MEM("BIDI cmd %p", cmd);
			src_sg = cmd->tgt_in_sg;
			src_sg_cnt = cmd->tgt_in_sg_cnt;
			dst_sg = cmd->in_sg;
			to_copy = cmd->in_bufflen;
		}
	} else {
		src_sg = cmd->sg;
		src_sg_cnt = cmd->sg_cnt;
		dst_sg = cmd->tgt_sg;
		to_copy = cmd->resp_data_len;
	}

	TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
		"to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
		to_copy);

	dst = sg_page(dst_sg);
	dst_len = dst_sg->length;
	dst_offs = dst_sg->offset;

	s = 0;
	d = 0;
	src_offs = 0;
	while (s < src_sg_cnt) {
		src = sg_page(&src_sg[s]);
		src_len = src_sg[s].length;
		src_offs += src_sg[s].offset;

		do {
			unsigned int n;

			/*
			 * Himem pages are not allowed here, see the
			 * corresponding #warning in scst_main.c. Correct
			 * your target driver or dev handler to not alloc
			 * such pages!
			 */
			EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
					   PageHighMem(src));

			TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
				"src_offs %d, dst %p, dst_len %d, dst_offs %d",
				cmd, to_copy, src, src_len, src_offs, dst,
				dst_len, dst_offs);

			if ((src_offs == 0) && (dst_offs == 0) &&
			    (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
				copy_page(page_address(dst), page_address(src));
				n = PAGE_SIZE;
			} else {
				n = min(PAGE_SIZE - dst_offs,
					PAGE_SIZE - src_offs);
				n = min(n, src_len);
				n = min(n, dst_len);
				memcpy(page_address(dst) + dst_offs,
					page_address(src) + src_offs, n);
				dst_offs -= min(n, dst_offs);
				src_offs -= min(n, src_offs);
			}

			TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);

			to_copy -= n;
			if (to_copy <= 0)
				goto out;

			src_len -= n;
			dst_len -= n;
			if (dst_len == 0) {
				d++;
				dst = sg_page(&dst_sg[d]);
				dst_len = dst_sg[d].length;
				dst_offs += dst_sg[d].offset;
			}
		} while (src_len > 0);

		s++;
	}

out:
	return;
}
static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };

#define SCST_CDB_GROUP(opcode)   ((opcode >> 5) & 0x7)
#define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
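
/*
 * Worked example (added illustration, not part of the original SCST source):
 * READ(10) has opcode 0x28, so SCST_CDB_GROUP(0x28) = (0x28 >> 5) & 0x7 = 1
 * and SCST_GET_CDB_LEN(0x28) = SCST_CDB_LENGTH[1] = 10 bytes. Groups 3, 6
 * and 7 are reserved/vendor-specific, hence the -1 entries.
 */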
int scst_get_cdb_len(const uint8_t *cdb)
{
	return SCST_GET_CDB_LEN(cdb[0]);
}
/* get_trans_len_x extracts x bytes from the cdb as length, starting at off */

/* for special commands */
static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = 6;
	return 0;
}

static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = READ_CAP_LEN;
	return 0;
}

static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = 1;
	return 0;
}

static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
{
	uint8_t *p = (uint8_t *)cmd->cdb + off;
	int res = 0;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 8;
	cmd->bufflen |= ((u32)p[1]);

	switch (cmd->cdb[1] & 0x1f) {
	case 0:
	case 1:
	case 6:
		if (cmd->bufflen != 0) {
			PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
				"allocation length for service action %x",
				cmd->bufflen, cmd->cdb[1] & 0x1f);
			goto out_inval;
		}
		break;
	}

	switch (cmd->cdb[1] & 0x1f) {
	case 0:
	case 1:
		cmd->bufflen = 20;
		break;
	case 6:
		cmd->bufflen = 32;
		break;
	case 8:
		cmd->bufflen = max(28, cmd->bufflen);
		break;
	default:
		PRINT_ERROR("READ POSITION: Invalid service action %x",
			cmd->cdb[1] & 0x1f);
		goto out_inval;
	}

out:
	return res;

out_inval:
	scst_set_cmd_error(cmd,
		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
	res = 1;
	goto out;
}

static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = (u32)cmd->cdb[off];
	return 0;
}

static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = (u32)cmd->cdb[off];
	if (cmd->bufflen == 0)
		cmd->bufflen = 256;
	return 0;
}

static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
{
	const uint8_t *p = cmd->cdb + off;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 8;
	cmd->bufflen |= ((u32)p[1]);

	return 0;
}

static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
{
	const uint8_t *p = cmd->cdb + off;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 16;
	cmd->bufflen |= ((u32)p[1]) << 8;
	cmd->bufflen |= ((u32)p[2]);

	return 0;
}

static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
{
	const uint8_t *p = cmd->cdb + off;

	cmd->bufflen = 0;
	cmd->bufflen |= ((u32)p[0]) << 24;
	cmd->bufflen |= ((u32)p[1]) << 16;
	cmd->bufflen |= ((u32)p[2]) << 8;
	cmd->bufflen |= ((u32)p[3]);

	return 0;
}

static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
{
	cmd->bufflen = 0;
	return 0;
}
int scst_get_cdb_info(struct scst_cmd *cmd)
{
	int dev_type = cmd->dev->handler->type;
	int i, res = 0;
	uint8_t op;
	const struct scst_sdbops *ptr = NULL;

	op = cmd->cdb[0];	/* get clear opcode */

	TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
		"dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
		dev_type);

	i = scst_scsi_op_list[op];
	while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
		if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
			ptr = &scst_scsi_op_table[i];
			TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
				ptr->ops, ptr->devkey[0],	/* disk    */
				ptr->devkey[1],	/* tape    */
				ptr->devkey[2],	/* printer */
				ptr->devkey[3],	/* cpu     */
				ptr->devkey[4],	/* cdr     */
				ptr->devkey[5],	/* cdrom   */
				ptr->devkey[6],	/* scanner */
				ptr->devkey[7],	/* worm    */
				ptr->devkey[8],	/* changer */
				ptr->devkey[9],	/* commdev */
				ptr->op_name);
			TRACE_DBG("direction=%d flags=%d off=%d",
				ptr->direction,
				ptr->flags,
				ptr->off);
			break;
		}
		i++;
	}

	if (ptr == NULL) {
		/* opcode not found or now not used !!! */
		TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
			dev_type);
		cmd->op_flags = SCST_INFO_INVALID;
		res = -1;
		goto out;
	}

	cmd->cdb_len = SCST_GET_CDB_LEN(op);
	cmd->op_name = ptr->op_name;
	cmd->data_direction = ptr->direction;
	cmd->op_flags = ptr->flags;
	res = (*ptr->get_trans_len)(cmd, ptr->off);

out:
	return res;
}
EXPORT_SYMBOL(scst_get_cdb_info);
/* Packs SCST LUN back to SCSI form using peripheral device addressing method */
uint64_t scst_pack_lun(const uint64_t lun)
{
	uint64_t res;
	uint16_t *p = (uint16_t *)&res;

	res = lun;
	*p = cpu_to_be16(*p);

	TRACE_EXIT_HRES((unsigned long)res);
	return res;
}
2617 * Routine to extract a lun number from an 8-byte LUN structure
2618 * in network byte order (BE).
2619 * (see SAM-2, Section 4.12.3 page 40)
2620 * Supports 2 types of lun unpacking: peripheral and logical unit.
2622 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2624 uint64_t res = NO_SUCH_LUN;
2629 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2631 if (unlikely(len < 2)) {
2632 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2640 if ((*((uint64_t *)lun) &
2641 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2645 if (*((uint16_t *)&lun[2]) != 0)
2649 if (*((uint32_t *)&lun[2]) != 0)
2657 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
2658 switch (address_method) {
2659 case 0: /* peripheral device addressing method */
2662 PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
2663 "peripheral device addressing method 0x%02x, "
2664 "expected 0", *lun);
2671 * Looks like it's legal to use it as flat space addressing
2678 case 1: /* flat space addressing method */
2679 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2682 case 2: /* logical unit addressing method */
2684 PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2685 "addressing method 0x%02x, expected 0",
2689 if (*(lun + 1) & 0xe0) {
2690 PRINT_ERROR("Illegal TARGET in LUN logical unit "
2691 "addressing method 0x%02x, expected 0",
2692 (*(lun + 1) & 0xf8) >> 5);
2695 res = *(lun + 1) & 0x1f;
2698 case 3: /* extended logical unit addressing method */
2700 PRINT_ERROR("Unimplemented LUN addressing method %u",
2706 TRACE_EXIT_RES((int)res);
2710 PRINT_ERROR("%s", "Multi-level LUN unimplemented");
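/*
 * Editor's sketch of the two common single-level decodings handled
 * above. Per the comment in case 0, a non-zero bus identifier under the
 * peripheral method is treated like flat space addressing, so both
 * methods reduce to the same 14-bit extraction (hypothetical code):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int unpack_lun_simple(const uint8_t *lun, uint16_t *out)
{
	switch (lun[0] >> 6) {		/* address method, bits 7..6 */
	case 0:				/* peripheral device addressing */
	case 1:				/* flat space addressing */
		*out = ((lun[0] & 0x3f) << 8) | lun[1];
		return 0;
	default:			/* logical unit/extended: omitted */
		return -1;
	}
}

int main(void)
{
	const uint8_t raw[8] = { 0x40, 0x05 };	/* flat space, LUN 5 */
	uint16_t lun;

	if (unpack_lun_simple(raw, &lun) == 0)
		printf("LUN %u\n", lun);
	return 0;
}
#endif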
2714 int scst_calc_block_shift(int sector_size)
2716 int block_shift = 0;
2719 if (sector_size == 0)
2729 if (block_shift < 9) {
2730 PRINT_ERROR("Wrong sector size %d", sector_size);
2734 TRACE_EXIT_RES(block_shift);
2737 EXPORT_SYMBOL(scst_calc_block_shift);
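/*
 * Editor's sketch: block_shift is simply log2(sector_size), e.g.
 * 512 -> 9 and 4096 -> 12, with anything below 512 (or, in this
 * hypothetical variant, not a power of two) rejected:
 */
#if 0
#include <stdio.h>

static int calc_block_shift(int sector_size)
{
	int shift = 0;

	while (sector_size > 1) {
		if (sector_size & 1)
			return -1;	/* not a power of two */
		sector_size >>= 1;
		shift++;
	}
	return (shift < 9) ? -1 : shift;	/* 512 is the minimum */
}

int main(void)
{
	printf("%d %d %d\n", calc_block_shift(512), calc_block_shift(4096),
	       calc_block_shift(520));	/* 9 12 -1 */
	return 0;
}
#endif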
2739 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2740 int (*get_block_shift)(struct scst_cmd *cmd))
2747 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2748 * therefore change them only if necessary
2751 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2752 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2754 switch (cmd->cdb[0]) {
2755 case SERVICE_ACTION_IN:
2756 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2757 cmd->bufflen = READ_CAP16_LEN;
2758 cmd->data_direction = SCST_DATA_READ;
2765 if ((cmd->cdb[1] & BYTCHK) == 0) {
2766 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2777 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2779 * No need for locks here, since *_detach() cannot be
2780 * called while there are outstanding commands.
2782 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2786 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2787 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2788 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2789 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2790 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2791 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2793 TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2794 res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2796 TRACE_EXIT_RES(res);
2799 EXPORT_SYMBOL(scst_sbc_generic_parse);
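/*
 * Editor's note: for opcodes flagged SCST_TRANSFER_LEN_TYPE_FIXED the
 * CDB carries a count of logical blocks, so the parse routines convert
 * it to bytes with the device's block shift. A tiny sketch with
 * hypothetical values:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blocks = 8;	/* transfer length taken from the CDB */
	int block_shift = 9;	/* 512-byte sectors */

	printf("%u blocks = %u bytes\n", (unsigned)blocks,
	       (unsigned)(blocks << block_shift));	/* 4096 */
	return 0;
}
#endif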
2801 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2802 int (*get_block_shift)(struct scst_cmd *cmd))
2809 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2810 * therefore change them only if necessary
2813 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2814 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2816 cmd->cdb[1] &= 0x1f; /* strip the obsolete LUN field */
2818 switch (cmd->cdb[0]) {
2823 if ((cmd->cdb[1] & BYTCHK) == 0) {
2824 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2834 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2835 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2838 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2839 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2840 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2841 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2842 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2843 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2845 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2846 cmd->data_direction);
2851 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2853 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2854 int (*get_block_shift)(struct scst_cmd *cmd))
2861 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2862 * therefore change them only if necessary
2865 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2866 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2868 cmd->cdb[1] &= 0x1f; /* strip the obsolete LUN field */
2870 switch (cmd->cdb[0]) {
2875 if ((cmd->cdb[1] & BYTCHK) == 0) {
2876 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2886 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2887 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2890 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2891 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2892 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2893 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2894 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2895 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2897 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2898 cmd->data_direction);
2900 TRACE_EXIT_RES(res);
2903 EXPORT_SYMBOL(scst_modisk_generic_parse);
2905 int scst_tape_generic_parse(struct scst_cmd *cmd,
2906 int (*get_block_size)(struct scst_cmd *cmd))
2913 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2914 * therefore change them only if necessary
2917 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2918 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2920 if (cmd->cdb[0] == READ_POSITION) {
2921 int tclp = cmd->cdb[1] & 4;
2922 int long_bit = cmd->cdb[1] & 2;
2923 int bt = cmd->cdb[1] & 1;
2925 if ((tclp == long_bit) && (!bt || !long_bit)) {
2926 cmd->bufflen =
2927 tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2928 cmd->data_direction = SCST_DATA_READ;
2931 cmd->data_direction = SCST_DATA_NONE;
2934 /* the FIXED bit in cdb[1] overlaps SCST_TRANSFER_LEN_TYPE_FIXED */
2935 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2936 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2938 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2939 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2940 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2941 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2942 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2943 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2945 TRACE_EXIT_RES(res);
2948 EXPORT_SYMBOL(scst_tape_generic_parse);
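/*
 * Editor's note: on tape the FIXED bit (cdb[1] bit 0) selects whether
 * the transfer length counts fixed-size blocks or bytes, which is what
 * the overlapping-bit test above relies on. Hypothetical sketch:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t transfer_len = 16;	/* from the CDB */
	int fixed = 1;			/* cdb[1] bit 0 */
	int block_size = 512;		/* current tape block size */
	uint32_t bytes = fixed ? transfer_len * block_size : transfer_len;

	printf("%u bytes\n", (unsigned)bytes);	/* 8192 */
	return 0;
}
#endif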
2950 static int scst_null_parse(struct scst_cmd *cmd)
2957 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2958 * therefore change them only if necessary
2961 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2962 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2964 switch (cmd->cdb[0]) {
2970 TRACE_DBG("res %d bufflen %d direct %d",
2971 res, cmd->bufflen, cmd->data_direction);
2977 int scst_changer_generic_parse(struct scst_cmd *cmd,
2978 int (*nothing)(struct scst_cmd *cmd))
2980 int res = scst_null_parse(cmd);
2982 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2983 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2985 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2989 EXPORT_SYMBOL(scst_changer_generic_parse);
2991 int scst_processor_generic_parse(struct scst_cmd *cmd,
2992 int (*nothing)(struct scst_cmd *cmd))
2994 int res = scst_null_parse(cmd);
2996 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2997 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2999 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
3003 EXPORT_SYMBOL(scst_processor_generic_parse);
3005 int scst_raid_generic_parse(struct scst_cmd *cmd,
3006 int (*nothing)(struct scst_cmd *cmd))
3008 int res = scst_null_parse(cmd);
3010 if (cmd->op_flags & SCST_LONG_TIMEOUT)
3011 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
3013 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
3017 EXPORT_SYMBOL(scst_raid_generic_parse);
3019 int scst_block_generic_dev_done(struct scst_cmd *cmd,
3020 void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
3022 int opcode = cmd->cdb[0];
3023 int status = cmd->status;
3024 int res = SCST_CMD_STATE_DEFAULT;
3029 * SCST sets good defaults for cmd->is_send_status and
3030 * cmd->resp_data_len based on cmd->status and cmd->data_direction,
3031 * therefore change them only if necessary
3034 if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
3038 /* Always keep track of disk capacity */
3039 int buffer_size, sector_size, sh;
3040 uint8_t *buffer;
3042 buffer_size = scst_get_buf_first(cmd, &buffer);
3043 if (unlikely(buffer_size <= 0)) {
3044 if (buffer_size < 0) {
3045 PRINT_ERROR("%s: Unable to get the"
3046 " buffer (%d)", __func__, buffer_size);
3051 sector_size =
3052 ((buffer[4] << 24) | (buffer[5] << 16) |
3053 (buffer[6] << 8) | (buffer[7] << 0));
3054 scst_put_buf(cmd, buffer);
3055 if (sector_size != 0)
3056 sh = scst_calc_block_shift(sector_size);
3057 else
3058 sh = 0;
3059 set_block_shift(cmd, sh);
3060 TRACE_DBG("block_shift %d", sh);
3069 TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
3070 "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
3073 TRACE_EXIT_RES(res);
3076 EXPORT_SYMBOL(scst_block_generic_dev_done);
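/*
 * Editor's sketch: a READ CAPACITY(10) response carries the last LBA in
 * bytes 0..3 and the block length in bytes 4..7, both big-endian; the
 * handler above extracts the latter to refresh the block shift.
 * Hypothetical response data:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* last LBA 0x000fffff, 512-byte blocks */
	const uint8_t buf[8] = { 0x00, 0x0f, 0xff, 0xff,
				 0x00, 0x00, 0x02, 0x00 };
	uint32_t sector_size = ((uint32_t)buf[4] << 24) |
			       ((uint32_t)buf[5] << 16) |
			       ((uint32_t)buf[6] << 8) | buf[7];

	printf("sector size %u\n", (unsigned)sector_size);	/* 512 */
	return 0;
}
#endif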
3078 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
3079 void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
3081 int opcode = cmd->cdb[0];
3082 int res = SCST_CMD_STATE_DEFAULT;
3083 int buffer_size, bs;
3084 uint8_t *buffer = NULL;
3089 * SCST sets good defaults for cmd->is_send_status and
3090 * cmd->resp_data_len based on cmd->status and cmd->data_direction,
3091 * therefore change them only if necessary
3097 buffer_size = scst_get_buf_first(cmd, &buffer);
3098 if (unlikely(buffer_size <= 0)) {
3099 if (buffer_size < 0) {
3100 PRINT_ERROR("%s: Unable to get the buffer (%d)",
3101 __func__, buffer_size);
3110 TRACE_DBG("%s", "MODE_SENSE");
3111 if ((cmd->cdb[2] & 0xC0) == 0) {
3112 if (buffer[3] == 8) {
3113 bs = (buffer[9] << 16) |
3114 (buffer[10] << 8) | buffer[11];
3115 set_block_size(cmd, bs);
3120 TRACE_DBG("%s", "MODE_SELECT");
3121 if (buffer[3] == 8) {
3122 bs = (buffer[9] << 16) | (buffer[10] << 8) |
3123 buffer[11];
3124 set_block_size(cmd, bs);
3135 scst_put_buf(cmd, buffer);
3140 TRACE_EXIT_RES(res);
3143 EXPORT_SYMBOL(scst_tape_generic_dev_done);
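/*
 * Editor's sketch: in a MODE SENSE(6) response, header byte 3 holds the
 * block descriptor length (8 when one descriptor follows) and the
 * descriptor's last three bytes, buffer offsets 9..11, hold the block
 * length, which is exactly what the handler above reads back.
 * Hypothetical data for 1024-byte tape blocks:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t buf[12] = { 11, 0, 0, 8,		/* mode header */
				  0, 0, 0, 0, 0,	/* density, blocks */
				  0x00, 0x04, 0x00 };	/* block length */

	if (buf[3] == 8)
		printf("block size %u\n",
		       (unsigned)(((uint32_t)buf[9] << 16) |
				  ((uint32_t)buf[10] << 8) | buf[11]));
	return 0;
}
#endif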
3145 static void scst_check_internal_sense(struct scst_device *dev, int result,
3146 uint8_t *sense, int sense_len)
3150 if (host_byte(result) == DID_RESET) {
3151 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
3152 "reset UA");
3153 scst_set_sense(sense, sense_len, dev->d_sense,
3154 SCST_LOAD_SENSE(scst_sense_reset_UA));
3155 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
3156 } else if ((status_byte(result) == CHECK_CONDITION) &&
3157 SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
3158 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
3164 enum dma_data_direction scst_to_dma_dir(int scst_dir)
3166 static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
3167 DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };
3169 return tr_tbl[scst_dir];
3171 EXPORT_SYMBOL(scst_to_dma_dir);
3173 enum dma_data_direction scst_to_tgt_dma_dir(int scst_dir)
3175 static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
3176 DMA_FROM_DEVICE, DMA_TO_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };
3178 return tr_tbl[scst_dir];
3180 EXPORT_SYMBOL(scst_to_tgt_dma_dir);
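/*
 * Editor's note: the two tables above are mirror images -- a WRITE moves
 * data toward the device but away from the target driver's memory, so
 * TO_DEVICE and FROM_DEVICE swap. Sketch assuming the SCST direction
 * values SCST_DATA_{UNKNOWN,WRITE,READ,BIDI,NONE} are 0..4:
 */
#if 0
#include <stdio.h>

int main(void)
{
	const char *dev_side[] = { "NONE", "TO_DEVICE", "FROM_DEVICE",
				   "BIDIRECTIONAL", "NONE" };
	const char *tgt_side[] = { "NONE", "FROM_DEVICE", "TO_DEVICE",
				   "BIDIRECTIONAL", "NONE" };
	int i;

	for (i = 0; i < 5; i++)
		printf("scst dir %d: dev DMA_%s, tgt DMA_%s\n", i,
		       dev_side[i], tgt_side[i]);
	return 0;
}
#endif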
3182 int scst_obtain_device_parameters(struct scst_device *dev)
3186 uint8_t buffer[4+0x0A]; /* MODE SENSE(6) header + control mode page */
3187 uint8_t sense_buffer[SCSI_SENSE_BUFFERSIZE];
3191 EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
3193 for (i = 0; i < 5; i++) {
3194 /* Get control mode page */
3195 memset(cmd, 0, sizeof(cmd));
3196 cmd[0] = MODE_SENSE;
3197 cmd[1] = 8; /* DBD */
3198 cmd[2] = 0x0A; /* control mode page */
3199 cmd[4] = sizeof(buffer);
3201 memset(buffer, 0, sizeof(buffer));
3202 memset(sense_buffer, 0, sizeof(sense_buffer));
3204 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
3205 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
3206 sizeof(buffer), sense_buffer, 15, 0, 0
3207 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
3212 TRACE_DBG("MODE_SENSE done: %x", res);
3214 if (scsi_status_is_good(res)) {
3215 int q;
3217 PRINT_BUFF_FLAG(TRACE_SCSI,
3218 "Returned control mode page data",
3219 buffer, sizeof(buffer));
3221 dev->tst = buffer[4+2] >> 5;
3222 q = buffer[4+3] >> 4;
3223 if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
3224 PRINT_ERROR("Too big QUEUE ALG %x, dev "
3225 "%d:%d:%d:%d", dev->queue_alg,
3226 dev->scsi_dev->host->host_no,
3227 dev->scsi_dev->channel,
3228 dev->scsi_dev->id, dev->scsi_dev->lun);
3229 }
3230 dev->queue_alg = q;
3231 dev->swp = (buffer[4+4] & 0x8) >> 3;
3232 dev->tas = (buffer[4+5] & 0x40) >> 6;
3233 dev->d_sense = (buffer[4+2] & 0x4) >> 2;
3236 * Unfortunately, the SCSI midlayer doesn't provide a way to
3237 * specify a command's task attribute, so we can only rely on
3238 * the device's restricted reordering.
3240 dev->has_own_order_mgmt = !dev->queue_alg;
3242 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
3243 "Device %d:%d:%d:%d: TST %x, "
3244 "QUEUE ALG %x, SWP %x, TAS %x, D_SENSE %d"
3245 "has_own_order_mgmt %d",
3246 dev->scsi_dev->host->host_no,
3247 dev->scsi_dev->channel, dev->scsi_dev->id,
3248 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
3249 dev->swp, dev->tas, dev->d_sense,
3250 dev->has_own_order_mgmt);
3255 /*
3256 * Check the sense data even when the status byte is not
3257 * CHECK_CONDITION: the 3ware controller is buggy and returns
3258 * CONDITION_GOOD instead of CHECK_CONDITION.
3259 */
3262 if (SCST_SENSE_VALID(sense_buffer)) {
3264 if (scst_analyze_sense(sense_buffer,
3265 sizeof(sense_buffer),
3266 SCST_SENSE_KEY_VALID,
3267 ILLEGAL_REQUEST, 0, 0)) {
3268 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
3269 "Device %d:%d:%d:%d doesn't "
3270 "support control mode page, "
3271 "using defaults: TST %x, "
3272 "QUEUE ALG %x, SWP %x, "
3273 "TAS %x, D_SENSE %d, "
3274 "has_own_order_mgmt %d ",
3275 dev->scsi_dev->host->host_no,
3276 dev->scsi_dev->channel,
3279 dev->tst, dev->queue_alg,
3282 dev->has_own_order_mgmt);
3285 } else if (scst_analyze_sense(sense_buffer,
3286 sizeof(sense_buffer),
3287 SCST_SENSE_KEY_VALID,
3288 NOT_READY, 0, 0)) {
3289 TRACE(TRACE_SCSI,
3290 "Device %d:%d:%d:%d not ready",
3291 dev->scsi_dev->host->host_no,
3292 dev->scsi_dev->channel,
3293 dev->scsi_dev->id,
3294 dev->scsi_dev->lun);
3299 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
3300 "Internal MODE SENSE to "
3301 "device %d:%d:%d:%d failed: %x",
3302 dev->scsi_dev->host->host_no,
3303 dev->scsi_dev->channel,
3304 dev->scsi_dev->id,
3305 dev->scsi_dev->lun, res);
3306 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
3308 sense_buffer, sizeof(sense_buffer));
3310 scst_check_internal_sense(dev, res, sense_buffer,
3311 sizeof(sense_buffer));
3317 TRACE_EXIT_RES(res);
3320 EXPORT_SYMBOL(scst_obtain_device_parameters);
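/*
 * Editor's sketch of the control mode page (0x0A) fields decoded above;
 * offsets skip the 4-byte MODE SENSE(6) header. Hypothetical raw data:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t buf[4 + 0x0A] = { [4] = 0x0a, [5] = 0x0a,
					[6] = 0x20, [7] = 0x10 };

	printf("TST %d\n", buf[4 + 2] >> 5);		/* bits 7..5 */
	printf("D_SENSE %d\n", (buf[4 + 2] & 0x4) >> 2);
	printf("QUEUE ALG %d\n", buf[4 + 3] >> 4);	/* bits 7..4 */
	printf("SWP %d\n", (buf[4 + 4] & 0x8) >> 3);
	printf("TAS %d\n", (buf[4 + 5] & 0x40) >> 6);
	return 0;
}
#endif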
3322 /* Called under dev_lock and BH off */
3323 void scst_process_reset(struct scst_device *dev,
3324 struct scst_session *originator, struct scst_cmd *exclude_cmd,
3325 struct scst_mgmt_cmd *mcmd, bool setUA)
3327 struct scst_tgt_dev *tgt_dev;
3328 struct scst_cmd *cmd, *tcmd;
3332 /* Clear RESERVE'ation, if necessary */
3333 if (dev->dev_reserved) {
3334 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3335 dev_tgt_dev_list_entry) {
3336 TRACE(TRACE_MGMT_MINOR, "Clearing RESERVE'ation for "
3337 "tgt_dev lun %lld",
3338 (long long unsigned int)tgt_dev->lun);
3339 clear_bit(SCST_TGT_DEV_RESERVED,
3340 &tgt_dev->tgt_dev_flags);
3342 dev->dev_reserved = 0;
3344 * There is no need to send RELEASE, since the device is going
3345 * to be reset. Actually, since we could be running in a RESET TM
3346 * function, sending it might even be dangerous.
3350 dev->dev_double_ua_possible = 1;
3352 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3353 dev_tgt_dev_list_entry) {
3354 struct scst_session *sess = tgt_dev->sess;
3356 spin_lock_bh(&tgt_dev->tgt_dev_lock);
3357 scst_free_all_UA(tgt_dev);
3358 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
3360 spin_lock_irq(&sess->sess_list_lock);
3362 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
3363 list_for_each_entry(cmd, &sess->search_cmd_list,
3364 search_cmd_list_entry) {
3365 if (cmd == exclude_cmd)
3367 if ((cmd->tgt_dev == tgt_dev) ||
3368 ((cmd->tgt_dev == NULL) &&
3369 (cmd->lun == tgt_dev->lun))) {
3370 scst_abort_cmd(cmd, mcmd,
3371 (tgt_dev->sess != originator), 0);
3374 spin_unlock_irq(&sess->sess_list_lock);
3377 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3378 blocked_cmd_list_entry) {
3379 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
3380 list_del(&cmd->blocked_cmd_list_entry);
3381 TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
3382 "to active cmd list", cmd);
3383 spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
3384 list_add_tail(&cmd->cmd_list_entry,
3385 &cmd->cmd_lists->active_cmd_list);
3386 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3387 spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
3392 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
3393 scst_set_sense(sense_buffer, sizeof(sense_buffer),
3394 dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
3395 scst_dev_check_set_local_UA(dev, exclude_cmd, sense_buffer,
3396 sizeof(sense_buffer));
3403 int scst_set_pending_UA(struct scst_cmd *cmd)
3405 int res = 0, i;
3406 struct scst_tgt_dev_UA *UA_entry;
3407 bool first = true, global_unlock = false;
3408 struct scst_session *sess = cmd->sess;
3412 TRACE(TRACE_MGMT_MINOR, "Setting pending UA cmd %p", cmd);
3414 spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
3417 /* UA list could be cleared behind us, so retest */
3418 if (list_empty(&cmd->tgt_dev->UA_list)) {
3420 "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
3425 UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
3428 TRACE_DBG("next %p UA_entry %p",
3429 cmd->tgt_dev->UA_list.next, UA_entry);
3431 if (UA_entry->global_UA && first) {
3432 TRACE_MGMT_DBG("Global UA %p detected", UA_entry);
3434 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
3436 mutex_lock(&scst_mutex);
3438 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
3439 struct list_head *sess_tgt_dev_list_head =
3440 &sess->sess_tgt_dev_list_hash[i];
3441 struct scst_tgt_dev *tgt_dev;
3442 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3443 sess_tgt_dev_list_entry) {
3444 spin_lock_bh(&tgt_dev->tgt_dev_lock);
3449 global_unlock = true;
3453 scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
3454 sizeof(UA_entry->UA_sense_buffer));
3458 list_del(&UA_entry->UA_list_entry);
3460 if (UA_entry->global_UA) {
3461 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
3462 struct list_head *sess_tgt_dev_list_head =
3463 &sess->sess_tgt_dev_list_hash[i];
3464 struct scst_tgt_dev *tgt_dev;
3466 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
3467 sess_tgt_dev_list_entry) {
3468 struct scst_tgt_dev_UA *ua;
3469 list_for_each_entry(ua, &tgt_dev->UA_list,
3471 if (ua->global_UA &&
3472 memcmp(ua->UA_sense_buffer,
3473 UA_entry->UA_sense_buffer,
3474 sizeof(ua->UA_sense_buffer)) == 0) {
3475 TRACE_MGMT_DBG("Freeing not "
3476 "needed global UA %p",
3478 list_del(&ua->UA_list_entry);
3479 mempool_free(ua, scst_ua_mempool);
3487 mempool_free(UA_entry, scst_ua_mempool);
3489 if (list_empty(&cmd->tgt_dev->UA_list)) {
3490 clear_bit(SCST_TGT_DEV_UA_PENDING,
3491 &cmd->tgt_dev->tgt_dev_flags);
3495 if (global_unlock) {
3496 for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
3497 struct list_head *sess_tgt_dev_list_head =
3498 &sess->sess_tgt_dev_list_hash[i];
3499 struct scst_tgt_dev *tgt_dev;
3500 list_for_each_entry_reverse(tgt_dev, sess_tgt_dev_list_head,
3501 sess_tgt_dev_list_entry) {
3502 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
3506 mutex_unlock(&scst_mutex);
3508 spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
3511 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
3513 TRACE_EXIT_RES(res);
3517 /* Called under tgt_dev_lock and BH off */
3518 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
3519 const uint8_t *sense, int sense_len, int flags)
3521 struct scst_tgt_dev_UA *UA_entry = NULL;
3525 UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
3526 if (UA_entry == NULL) {
3527 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
3528 "allocation failed. The UNIT ATTENTION "
3529 "on some sessions will be missed");
3530 PRINT_BUFFER("Lost UA", sense, sense_len);
3533 memset(UA_entry, 0, sizeof(*UA_entry));
3535 UA_entry->global_UA = (flags & SCST_SET_UA_FLAG_GLOBAL) != 0;
3536 if (UA_entry->global_UA)
3537 TRACE_MGMT_DBG("Queuing global UA %p", UA_entry);
3539 if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
3540 sense_len = sizeof(UA_entry->UA_sense_buffer);
3541 memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
3543 set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
3545 TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
3547 if (flags & SCST_SET_UA_FLAG_AT_HEAD)
3548 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
3550 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
3557 /* tgt_dev_lock supposed to be held and BH off */
3558 static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
3559 const uint8_t *sense, int sense_len, int flags)
3562 struct scst_tgt_dev_UA *UA_entry_tmp;
3563 int len = min((int)sizeof(UA_entry_tmp->UA_sense_buffer), sense_len);
3567 list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
3569 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, len) == 0) {
3570 TRACE_MGMT_DBG("%s", "UA already exists");
3577 scst_alloc_set_UA(tgt_dev, sense, len, flags);
3583 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
3584 const uint8_t *sense, int sense_len, int flags)
3588 spin_lock_bh(&tgt_dev->tgt_dev_lock);
3589 __scst_check_set_UA(tgt_dev, sense, sense_len, flags);
3590 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
3596 /* Called under dev_lock and BH off */
3597 void scst_dev_check_set_local_UA(struct scst_device *dev,
3598 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
3600 struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
3604 if (exclude != NULL)
3605 exclude_tgt_dev = exclude->tgt_dev;
3607 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3608 dev_tgt_dev_list_entry) {
3609 if (tgt_dev != exclude_tgt_dev)
3610 scst_check_set_UA(tgt_dev, sense, sense_len, 0);
3617 /* Called under dev_lock and BH off */
3618 void __scst_dev_check_set_UA(struct scst_device *dev,
3619 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
3623 TRACE(TRACE_MGMT_MINOR, "Processing UA dev %p", dev);
3625 /* Check for reset UA */
3626 if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ASC_VALID,
3627 0, SCST_SENSE_ASC_UA_RESET, 0))
3628 scst_process_reset(dev,
3629 (exclude != NULL) ? exclude->sess : NULL,
3630 exclude, NULL, false);
3632 scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
3638 /* Called under tgt_dev_lock or when tgt_dev is unused */
3639 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
3641 struct scst_tgt_dev_UA *UA_entry, *t;
3645 list_for_each_entry_safe(UA_entry, t,
3646 &tgt_dev->UA_list, UA_list_entry) {
3647 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
3648 (long long unsigned int)tgt_dev->lun);
3649 list_del(&UA_entry->UA_list_entry);
3650 mempool_free(UA_entry, scst_ua_mempool);
3652 INIT_LIST_HEAD(&tgt_dev->UA_list);
3653 clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
3660 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
3662 struct scst_cmd *res = NULL, *cmd, *t;
3663 typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
3665 spin_lock_irq(&tgt_dev->sn_lock);
3667 if (unlikely(tgt_dev->hq_cmd_count != 0))
3671 list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
3672 sn_cmd_list_entry) {
3673 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3674 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3675 if (cmd->sn == expected_sn) {
3676 TRACE_SN("Deferred command %p (sn %ld, set %d) found",
3677 cmd, cmd->sn, cmd->sn_set);
3678 tgt_dev->def_cmd_count--;
3679 list_del(&cmd->sn_cmd_list_entry);
3683 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3684 TRACE_SN("Adding cmd %p to active cmd list",
3686 list_add_tail(&cmd->cmd_list_entry,
3687 &cmd->cmd_lists->active_cmd_list);
3688 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3689 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3696 list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
3697 sn_cmd_list_entry) {
3698 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3699 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3700 if (cmd->sn == expected_sn) {
3701 atomic_t *slot = cmd->sn_slot;
3703 * !! At this point any pointer in cmd, except !!
3704 * !! sn_slot and sn_cmd_list_entry, could be !!
3705 * !! already destroyed !!
3707 TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3709 (long long unsigned int)cmd->tag,
3711 tgt_dev->def_cmd_count--;
3712 list_del(&cmd->sn_cmd_list_entry);
3713 spin_unlock_irq(&tgt_dev->sn_lock);
3714 if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3715 &cmd->cmd_flags))
3716 scst_destroy_put_cmd(cmd);
3717 scst_inc_expected_sn(tgt_dev, slot);
3718 expected_sn = tgt_dev->expected_sn;
3719 spin_lock_irq(&tgt_dev->sn_lock);
3725 spin_unlock_irq(&tgt_dev->sn_lock);
3729 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3730 struct scst_thr_data_hdr *data,
3731 void (*free_fn) (struct scst_thr_data_hdr *data))