4 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2008 CMS Distribution Limited
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/kthread.h>
27 #include <linux/cdrom.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
32 #include <linux/highmem.h>
36 #include "scst_priv.h"
39 #include "scst_cdbprobe.h"
41 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
42 static void scst_check_internal_sense(struct scst_device *dev, int result,
43 uint8_t *sense, int sense_len);
45 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
48 unsigned long gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
52 sBUG_ON(cmd->sense != NULL);
54 cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
55 if (cmd->sense == NULL) {
56 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
57 "The sense data will be lost!!", cmd->cdb[0]);
62 memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);
68 EXPORT_SYMBOL(scst_alloc_sense);
70 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
71 const uint8_t *sense, unsigned int len)
77 res = scst_alloc_sense(cmd, atomic);
79 PRINT_BUFFER("Lost sense", sense, len);
83 memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
84 TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
90 EXPORT_SYMBOL(scst_alloc_set_sense);
92 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
97 cmd->host_status = DID_OK;
99 cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
100 cmd->dbl_ua_orig_data_direction = cmd->data_direction;
102 cmd->data_direction = SCST_DATA_NONE;
103 cmd->resp_data_len = 0;
104 cmd->is_send_status = 1;
111 EXPORT_SYMBOL(scst_set_cmd_error_status);
113 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
119 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
121 rc = scst_alloc_sense(cmd, 1);
123 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
128 scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
129 TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
135 EXPORT_SYMBOL(scst_set_cmd_error);
137 void scst_set_sense(uint8_t *buffer, int len, int key,
140 memset(buffer, 0, len);
141 buffer[0] = 0x70; /* Error Code */
142 buffer[2] = key; /* Sense Key */
143 buffer[7] = 0x0a; /* Additional Sense Length */
144 buffer[12] = asc; /* ASC */
145 buffer[13] = ascq; /* ASCQ */
146 TRACE_BUFFER("Sense set", buffer, len);
149 EXPORT_SYMBOL(scst_set_sense);
151 void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
156 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
157 scst_alloc_set_sense(cmd, 1, sense, len);
162 EXPORT_SYMBOL(scst_set_cmd_error_sense);
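/*
 * Reports a "queue full" condition to the initiator: BUSY is used when this
 * is the only outstanding command on the session or the session isn't fully
 * initialized yet, otherwise TASK SET FULL is returned, since the latter
 * presumes other commands are already queued.
 */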
164 void scst_set_busy(struct scst_cmd *cmd)
166 int c = atomic_read(&cmd->sess->sess_cmd_count);
170 if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
171 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
172 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
173 "(cmds count %d, queue_type %x, sess->init_phase %d)",
174 cmd->sess->initiator_name, c,
175 cmd->queue_type, cmd->sess->init_phase);
177 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
178 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
179 "initiator %s (cmds count %d, queue_type %x, "
180 "sess->init_phase %d)", cmd->sess->initiator_name, c,
181 cmd->queue_type, cmd->sess->init_phase);
187 EXPORT_SYMBOL(scst_set_busy);
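/*
 * Returns the state a command should be switched to when it must be
 * completed abnormally: commands that haven't finished parsing go straight
 * to PRE_XMIT_RESP, while the remaining states go through PRE_DEV_DONE.
 */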
189 int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
196 case SCST_CMD_STATE_INIT_WAIT:
197 case SCST_CMD_STATE_INIT:
198 case SCST_CMD_STATE_PRE_PARSE:
199 case SCST_CMD_STATE_DEV_PARSE:
200 res = SCST_CMD_STATE_PRE_XMIT_RESP;
204 res = SCST_CMD_STATE_PRE_DEV_DONE;
211 EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
213 void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
219 case SCST_CMD_STATE_PRE_XMIT_RESP:
220 case SCST_CMD_STATE_XMIT_RESP:
221 case SCST_CMD_STATE_FINISHED:
222 case SCST_CMD_STATE_XMIT_WAIT:
223 PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
224 cmd->state, cmd, cmd->cdb[0]);
229 cmd->state = scst_get_cmd_abnormal_done_state(cmd);
231 EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
232 (cmd->tgt_dev == NULL));
237 EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
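/*
 * Truncates the response to resp_data_len bytes by cutting the SG vector at
 * the matching entry. The original sg_cnt, entry index and entry length are
 * saved so that scst_check_restore_sg_buff() can restore them later.
 */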
239 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
245 scst_check_restore_sg_buff(cmd);
246 cmd->resp_data_len = resp_data_len;
248 if (resp_data_len == cmd->bufflen)
252 for (i = 0; i < cmd->sg_cnt; i++) {
253 l += cmd->sg[i].length;
254 if (l >= resp_data_len) {
255 int left = resp_data_len - (l - cmd->sg[i].length);
257 TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
258 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
260 cmd, (long long unsigned int)cmd->tag,
262 cmd->sg[i].length, left);
264 cmd->orig_sg_cnt = cmd->sg_cnt;
265 cmd->orig_sg_entry = i;
266 cmd->orig_entry_len = cmd->sg[i].length;
267 cmd->sg_cnt = (left > 0) ? i+1 : i;
268 cmd->sg[i].length = left;
269 cmd->sg_buff_modified = 1;
278 EXPORT_SYMBOL(scst_set_resp_data_len);
280 /* Called under scst_mutex and suspended activity */
281 int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
283 struct scst_device *dev;
285 static int dev_num; /* protected by scst_mutex */
289 dev = kzalloc(sizeof(*dev), gfp_mask);
291 TRACE(TRACE_OUT_OF_MEM, "%s",
292 "Allocation of scst_device failed");
297 dev->handler = &scst_null_devtype;
298 dev->p_cmd_lists = &scst_main_cmd_lists;
299 atomic_set(&dev->dev_cmd_count, 0);
300 atomic_set(&dev->write_cmd_count, 0);
301 scst_init_mem_lim(&dev->dev_mem_lim);
302 spin_lock_init(&dev->dev_lock);
303 atomic_set(&dev->on_dev_count, 0);
304 INIT_LIST_HEAD(&dev->blocked_cmd_list);
305 INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
306 INIT_LIST_HEAD(&dev->dev_acg_dev_list);
307 INIT_LIST_HEAD(&dev->threads_list);
308 init_waitqueue_head(&dev->on_dev_waitQ);
309 dev->dev_double_ua_possible = 1;
310 dev->dev_serialized = 1;
311 dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
312 dev->dev_num = dev_num++;
321 /* Called under scst_mutex and suspended activity */
322 void scst_free_device(struct scst_device *dev)
327 if (!list_empty(&dev->dev_tgt_dev_list) ||
328 !list_empty(&dev->dev_acg_dev_list)) {
329 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
330 "is not empty!", __func__);
341 void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
343 atomic_set(&mem_lim->alloced_pages, 0);
344 mem_lim->max_allowed_pages =
345 ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
347 EXPORT_SYMBOL(scst_init_mem_lim);
349 struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
350 struct scst_device *dev, lun_t lun)
352 struct scst_acg_dev *res;
356 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
357 res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
359 res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
362 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
365 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
366 memset(res, 0, sizeof(*res));
374 TRACE_EXIT_HRES(res);
378 /* The activity is supposed to be suspended and scst_mutex held */
379 void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
383 TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
385 list_del(&acg_dev->acg_dev_list_entry);
386 list_del(&acg_dev->dev_acg_dev_list_entry);
388 kmem_cache_free(scst_acgd_cachep, acg_dev);
394 /* The activity is supposed to be suspended and scst_mutex held */
395 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
397 struct scst_acg *acg;
401 acg = kzalloc(sizeof(*acg), GFP_KERNEL);
403 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
407 INIT_LIST_HEAD(&acg->acg_dev_list);
408 INIT_LIST_HEAD(&acg->acg_sess_list);
409 INIT_LIST_HEAD(&acg->acn_list);
410 acg->acg_name = acg_name;
412 TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
413 list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
416 TRACE_EXIT_HRES(acg);
420 /* The activity is supposed to be suspended and scst_mutex held */
421 int scst_destroy_acg(struct scst_acg *acg)
423 struct scst_acn *n, *nn;
424 struct scst_acg_dev *acg_dev, *acg_dev_tmp;
429 if (!list_empty(&acg->acg_sess_list)) {
430 PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
435 TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
436 list_del(&acg->scst_acg_list_entry);
438 /* Freeing acg_devs */
439 list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
440 acg_dev_list_entry) {
441 struct scst_tgt_dev *tgt_dev, *tt;
442 list_for_each_entry_safe(tgt_dev, tt,
443 &acg_dev->dev->dev_tgt_dev_list,
444 dev_tgt_dev_list_entry) {
445 if (tgt_dev->acg_dev == acg_dev)
446 scst_free_tgt_dev(tgt_dev);
448 scst_free_acg_dev(acg_dev);
452 list_for_each_entry_safe(n, nn, &acg->acn_list,
454 list_del(&n->acn_list_entry);
458 INIT_LIST_HEAD(&acg->acn_list);
466 /* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
467 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
468 struct scst_acg_dev *acg_dev)
470 int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
471 struct scst_tgt_dev *tgt_dev;
472 struct scst_device *dev = acg_dev->dev;
473 struct list_head *sess_tgt_dev_list_head;
474 struct scst_tgt_template *vtt = sess->tgt->tgtt;
479 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
480 tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
482 tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
484 if (tgt_dev == NULL) {
485 TRACE(TRACE_OUT_OF_MEM, "%s",
486 "Allocation of scst_tgt_dev failed");
489 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
490 memset(tgt_dev, 0, sizeof(*tgt_dev));
494 tgt_dev->lun = acg_dev->lun;
495 tgt_dev->acg_dev = acg_dev;
496 tgt_dev->sess = sess;
497 atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
499 scst_sgv_pool_use_norm(tgt_dev);
501 if (dev->scsi_dev != NULL) {
502 ini_sg = dev->scsi_dev->host->sg_tablesize;
503 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
504 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
507 ini_sg = (1 << 15) /* infinite */;
508 ini_unchecked_isa_dma = 0;
509 ini_use_clustering = 0;
511 tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
513 if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
514 !sess->tgt->tgtt->no_clustering)
515 scst_sgv_pool_use_norm_clust(tgt_dev);
517 if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
518 scst_sgv_pool_use_dma(tgt_dev);
521 scst_sgv_pool_use_highmem(tgt_dev);
525 if (dev->scsi_dev != NULL) {
526 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
527 "SCST lun=%Ld", dev->scsi_dev->host->host_no,
528 dev->scsi_dev->channel, dev->scsi_dev->id,
530 (long long unsigned int)tgt_dev->lun);
532 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%Ld",
534 (long long unsigned int)tgt_dev->lun);
537 spin_lock_init(&tgt_dev->tgt_dev_lock);
538 INIT_LIST_HEAD(&tgt_dev->UA_list);
539 spin_lock_init(&tgt_dev->thr_data_lock);
540 INIT_LIST_HEAD(&tgt_dev->thr_data_list);
541 spin_lock_init(&tgt_dev->sn_lock);
542 INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
543 INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
544 tgt_dev->expected_sn = 1;
545 tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
546 tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
547 for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
548 atomic_set(&tgt_dev->sn_slots[i], 0);
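/*
 * Precompute which processing stages may safely run in atomic context for
 * this tgt_dev, based on whether the dev handler and target driver provide
 * atomic-capable callbacks (or omit the callback entirely).
 */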
550 if (dev->handler->parse_atomic &&
551 (sess->tgt->tgtt->preprocessing_done == NULL)) {
552 if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
553 (sess->tgt->tgtt->rdy_to_xfer == NULL))
554 __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
555 &tgt_dev->tgt_dev_flags);
556 if (dev->handler->exec_atomic || (dev->handler->exec == NULL))
557 __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
558 &tgt_dev->tgt_dev_flags);
560 if (dev->handler->exec_atomic || (dev->handler->exec == NULL)) {
561 if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
562 (sess->tgt->tgtt->rdy_to_xfer == NULL))
563 __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
564 &tgt_dev->tgt_dev_flags);
565 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
566 &tgt_dev->tgt_dev_flags);
567 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
568 &tgt_dev->tgt_dev_flags);
570 if ((dev->handler->dev_done_atomic ||
571 (dev->handler->dev_done == NULL)) &&
572 sess->tgt->tgtt->xmit_response_atomic) {
573 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
574 &tgt_dev->tgt_dev_flags);
577 spin_lock_bh(&scst_temp_UA_lock);
578 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
579 SCST_LOAD_SENSE(scst_sense_reset_UA));
580 scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
581 spin_unlock_bh(&scst_temp_UA_lock);
583 tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
585 if (vtt->threads_num > 0) {
587 if (dev->handler->threads_num > 0)
588 rc = scst_add_dev_threads(dev, vtt->threads_num);
589 else if (dev->handler->threads_num == 0)
590 rc = scst_add_cmd_threads(vtt->threads_num);
595 if (dev->handler && dev->handler->attach_tgt) {
596 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
598 rc = dev->handler->attach_tgt(tgt_dev);
599 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
601 PRINT_ERROR("Device handler's %s attach_tgt() "
602 "failed: %d", dev->handler->name, rc);
607 spin_lock_bh(&dev->dev_lock);
608 list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
609 if (dev->dev_reserved)
610 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
611 spin_unlock_bh(&dev->dev_lock);
613 sess_tgt_dev_list_head =
614 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
615 list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, sess_tgt_dev_list_head);
622 if (vtt->threads_num > 0) {
623 if (dev->handler->threads_num > 0)
624 scst_del_dev_threads(dev, vtt->threads_num);
625 else if (dev->handler->threads_num == 0)
626 scst_del_cmd_threads(vtt->threads_num);
630 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
635 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
637 /* No other locks are supposed to be held; scst_mutex must be held */
638 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
642 scst_clear_reservation(tgt_dev);
644 /* With activity suspended the lock isn't needed, but let's be safe */
645 spin_lock_bh(&tgt_dev->tgt_dev_lock);
646 scst_free_all_UA(tgt_dev);
647 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
649 spin_lock_bh(&scst_temp_UA_lock);
650 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
651 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
652 scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
653 spin_unlock_bh(&scst_temp_UA_lock);
659 /* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
660 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
662 struct scst_device *dev = tgt_dev->dev;
663 struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
667 tm_dbg_deinit_tgt_dev(tgt_dev);
669 spin_lock_bh(&dev->dev_lock);
670 list_del(&tgt_dev->dev_tgt_dev_list_entry);
671 spin_unlock_bh(&dev->dev_lock);
673 list_del(&tgt_dev->sess_tgt_dev_list_entry);
675 scst_clear_reservation(tgt_dev);
676 scst_free_all_UA(tgt_dev);
678 if (dev->handler && dev->handler->detach_tgt) {
679 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
681 dev->handler->detach_tgt(tgt_dev);
682 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
685 if (vtt->threads_num > 0) {
686 if (dev->handler->threads_num > 0)
687 scst_del_dev_threads(dev, vtt->threads_num);
688 else if (dev->handler->threads_num == 0)
689 scst_del_cmd_threads(vtt->threads_num);
692 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
698 /* scst_mutex supposed to be held */
699 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
702 struct scst_acg_dev *acg_dev;
703 struct scst_tgt_dev *tgt_dev;
707 list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
708 acg_dev_list_entry) {
709 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
710 if (tgt_dev == NULL) {
721 scst_sess_free_tgt_devs(sess);
725 /* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
726 void scst_sess_free_tgt_devs(struct scst_session *sess)
729 struct scst_tgt_dev *tgt_dev, *t;
733 /* The session is going down, no users, so no locks */
734 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
735 struct list_head *sess_tgt_dev_list_head =
736 &sess->sess_tgt_dev_list_hash[i];
737 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
738 sess_tgt_dev_list_entry) {
739 scst_free_tgt_dev(tgt_dev);
741 INIT_LIST_HEAD(sess_tgt_dev_list_head);
748 /* The activity is supposed to be suspended and scst_mutex held */
749 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
753 struct scst_acg_dev *acg_dev;
754 struct scst_tgt_dev *tgt_dev;
755 struct scst_session *sess;
756 LIST_HEAD(tmp_tgt_dev_list);
760 INIT_LIST_HEAD(&tmp_tgt_dev_list);
763 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
764 if (acg_dev->dev == dev) {
765 PRINT_ERROR("Device is already in group %s",
773 acg_dev = scst_alloc_acg_dev(acg, dev, lun);
774 if (acg_dev == NULL) {
778 acg_dev->rd_only_flag = read_only;
780 TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
782 list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
783 list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
785 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry)
787 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
788 if (tgt_dev == NULL) {
792 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
798 if (dev->virt_name != NULL) {
799 PRINT_INFO("Added device %s to group %s (LUN %Ld, "
800 "rd_only %d)", dev->virt_name, acg->acg_name,
801 (long long unsigned int)lun,
804 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
805 "%Ld, rd_only %d)", dev->scsi_dev->host->host_no,
806 dev->scsi_dev->channel, dev->scsi_dev->id,
807 dev->scsi_dev->lun, acg->acg_name,
808 (long long unsigned int)lun,
817 list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
818 extra_tgt_dev_list_entry) {
819 scst_free_tgt_dev(tgt_dev);
821 scst_free_acg_dev(acg_dev);
825 /* The activity is supposed to be suspended and scst_mutex held */
826 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
829 struct scst_acg_dev *acg_dev = NULL, *a;
830 struct scst_tgt_dev *tgt_dev, *tt;
834 list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
841 if (acg_dev == NULL) {
842 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
847 list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
848 dev_tgt_dev_list_entry) {
849 if (tgt_dev->acg_dev == acg_dev)
850 scst_free_tgt_dev(tgt_dev);
852 scst_free_acg_dev(acg_dev);
856 if (dev->virt_name != NULL) {
857 PRINT_INFO("Removed device %s from group %s",
858 dev->virt_name, acg->acg_name);
860 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
861 dev->scsi_dev->host->host_no,
862 dev->scsi_dev->channel, dev->scsi_dev->id,
863 dev->scsi_dev->lun, acg->acg_name);
871 /* scst_mutex supposed to be held */
872 int scst_acg_add_name(struct scst_acg *acg, const char *name)
881 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
883 if (strcmp(n->name, name) == 0) {
884 PRINT_ERROR("Name %s already exists in group %s",
885 name, acg->acg_name);
891 n = kmalloc(sizeof(*n), GFP_KERNEL);
893 PRINT_ERROR("%s", "Unable to allocate scst_acn");
899 nm = kmalloc(len + 1, GFP_KERNEL);
901 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
909 list_add_tail(&n->acn_list_entry, &acg->acn_list);
913 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
923 /* scst_mutex supposed to be held */
924 int scst_acg_remove_name(struct scst_acg *acg, const char *name)
931 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
933 if (strcmp(n->name, name) == 0) {
934 list_del(&n->acn_list_entry);
943 PRINT_INFO("Removed name %s from group %s", name,
946 PRINT_ERROR("Unable to find name %s in group %s", name,
954 struct scst_cmd *scst_create_prepare_internal_cmd(
955 struct scst_cmd *orig_cmd, int bufsize)
957 struct scst_cmd *res;
958 int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
962 res = scst_alloc_cmd(gfp_mask);
966 res->cmd_lists = orig_cmd->cmd_lists;
967 res->sess = orig_cmd->sess;
968 res->state = SCST_CMD_STATE_PRE_PARSE;
969 res->atomic = scst_cmd_atomic(orig_cmd);
971 res->tgtt = orig_cmd->tgtt;
972 res->tgt = orig_cmd->tgt;
973 res->dev = orig_cmd->dev;
974 res->tgt_dev = orig_cmd->tgt_dev;
975 res->lun = orig_cmd->lun;
976 res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
977 res->data_direction = SCST_DATA_UNKNOWN;
978 res->orig_cmd = orig_cmd;
980 res->bufflen = bufsize;
983 TRACE_EXIT_HRES((unsigned long)res);
987 void scst_free_internal_cmd(struct scst_cmd *cmd)
997 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
999 int res = SCST_CMD_STATE_RES_CONT_NEXT;
1000 #define sbuf_size 252
1001 static const uint8_t request_sense[6] =
1002 { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1003 struct scst_cmd *rs_cmd;
1007 rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1011 memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1012 rs_cmd->cdb_len = sizeof(request_sense);
1013 rs_cmd->data_direction = SCST_DATA_READ;
1015 TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
1016 "cmd list ", rs_cmd);
1017 spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1018 list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
1019 spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1022 TRACE_EXIT_RES(res);
1031 struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
1033 struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
1039 if (req_cmd->dev->handler->dev_done != NULL) {
1041 TRACE_DBG("Calling dev handler %s dev_done(%p)",
1042 req_cmd->dev->handler->name, req_cmd);
1043 rc = req_cmd->dev->handler->dev_done(req_cmd);
1044 TRACE_DBG("Dev handler %s dev_done() returned %d",
1045 req_cmd->dev->handler->name, rc);
1050 len = scst_get_buf_first(req_cmd, &buf);
1052 if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
1053 SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
1054 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
1056 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
1059 PRINT_ERROR("%s", "Unable to get the sense via "
1060 "REQUEST SENSE, returning HARDWARE ERROR");
1061 scst_set_cmd_error(orig_cmd,
1062 SCST_LOAD_SENSE(scst_sense_hardw_error));
1066 scst_put_buf(req_cmd, buf);
1068 scst_free_internal_cmd(req_cmd);
1070 TRACE_EXIT_HRES((unsigned long)orig_cmd);
1074 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1075 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
1077 struct scsi_request *req;
1081 if (scsi_cmd && (req = scsi_cmd->sc_request)) {
1083 if (req->sr_bufflen)
1084 kfree(req->sr_buffer);
1085 scsi_release_request(req);
1093 static void scst_send_release(struct scst_device *dev)
1095 struct scsi_request *req;
1096 struct scsi_device *scsi_dev;
1101 if (dev->scsi_dev == NULL)
1104 scsi_dev = dev->scsi_dev;
1106 req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
1108 PRINT_ERROR("Allocation of scsi_request failed: unable "
1109 "to RELEASE device %d:%d:%d:%d",
1110 scsi_dev->host->host_no, scsi_dev->channel,
1111 scsi_dev->id, scsi_dev->lun);
1115 memset(cdb, 0, sizeof(cdb));
1117 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1118 ((scsi_dev->lun << 5) & 0xe0) : 0;
1119 memcpy(req->sr_cmnd, cdb, sizeof(cdb));
1120 req->sr_cmd_len = sizeof(cdb);
1121 req->sr_data_direction = SCST_DATA_NONE;
1123 req->sr_bufflen = 0;
1124 req->sr_buffer = NULL;
1125 req->sr_request->rq_disk = dev->rq_disk;
1126 req->sr_sense_buffer[0] = 0;
1128 TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
1130 scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
1131 scst_req_done, 15, 3);
1137 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1138 static void scst_send_release(struct scst_device *dev)
1140 struct scsi_device *scsi_dev;
1141 unsigned char cdb[6];
1142 unsigned char *sense;
1147 if (dev->scsi_dev == NULL)
1150 /* We can't afford to miss RELEASE due to memory shortage */
1151 sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);
1153 scsi_dev = dev->scsi_dev;
1155 for (i = 0; i < 5; i++) {
1156 memset(cdb, 0, sizeof(cdb));
1158 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1159 ((scsi_dev->lun << 5) & 0xe0) : 0;
1161 memset(sense, 0, SCST_SENSE_BUFFERSIZE);
1163 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
1165 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
1167 TRACE_DBG("RELEASE done: %x", rc);
1169 if (scsi_status_is_good(rc)) {
1172 PRINT_ERROR("RELEASE failed: %d", rc);
1173 PRINT_BUFFER("RELEASE sense", sense,
1174 SCST_SENSE_BUFFERSIZE);
1175 scst_check_internal_sense(dev, rc,
1176 sense, SCST_SENSE_BUFFERSIZE);
1186 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1188 /* scst_mutex supposed to be held */
1189 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
1191 struct scst_device *dev = tgt_dev->dev;
1196 spin_lock_bh(&dev->dev_lock);
1197 if (dev->dev_reserved &&
1198 !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
1199 /* This is the one who holds the reservation */
1200 struct scst_tgt_dev *tgt_dev_tmp;
1201 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1202 dev_tgt_dev_list_entry) {
1203 clear_bit(SCST_TGT_DEV_RESERVED,
1204 &tgt_dev_tmp->tgt_dev_flags);
1206 dev->dev_reserved = 0;
1209 spin_unlock_bh(&dev->dev_lock);
1212 scst_send_release(dev);
1218 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
1219 const char *initiator_name)
1221 struct scst_session *sess;
1228 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1229 sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
1231 sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
1234 TRACE(TRACE_OUT_OF_MEM, "%s",
1235 "Allocation of scst_session failed");
1238 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1239 memset(sess, 0, sizeof(*sess));
1242 sess->init_phase = SCST_SESS_IPH_INITING;
1243 sess->shut_phase = SCST_SESS_SPH_READY;
1244 atomic_set(&sess->refcnt, 0);
1245 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1246 struct list_head *sess_tgt_dev_list_head =
1247 &sess->sess_tgt_dev_list_hash[i];
1248 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1250 spin_lock_init(&sess->sess_list_lock);
1251 INIT_LIST_HEAD(&sess->search_cmd_list);
1253 INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
1254 INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
1256 #ifdef MEASURE_LATENCY
1257 spin_lock_init(&sess->meas_lock);
1260 len = strlen(initiator_name);
1261 nm = kmalloc(len + 1, gfp_mask);
1263 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
1267 strcpy(nm, initiator_name);
1268 sess->initiator_name = nm;
1275 kmem_cache_free(scst_sess_cachep, sess);
1280 void scst_free_session(struct scst_session *sess)
1284 mutex_lock(&scst_mutex);
1286 TRACE_DBG("Removing sess %p from the list", sess);
1287 list_del(&sess->sess_list_entry);
1288 TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
1289 list_del(&sess->acg_sess_list_entry);
1291 scst_sess_free_tgt_devs(sess);
1293 wake_up_all(&sess->tgt->unreg_waitQ);
1295 mutex_unlock(&scst_mutex);
1297 kfree(sess->initiator_name);
1298 kmem_cache_free(scst_sess_cachep, sess);
1304 void scst_free_session_callback(struct scst_session *sess)
1306 struct completion *c;
1310 TRACE_DBG("Freeing session %p", sess);
1312 c = sess->shutdown_compl;
1314 if (sess->unreg_done_fn) {
1315 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
1316 sess->unreg_done_fn(sess);
1317 TRACE_DBG("%s", "unreg_done_fn() returned");
1319 scst_free_session(sess);
1328 void scst_sched_session_free(struct scst_session *sess)
1330 unsigned long flags;
1334 spin_lock_irqsave(&scst_mgmt_lock, flags);
1335 TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
1336 list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
1337 spin_unlock_irqrestore(&scst_mgmt_lock, flags);
1339 wake_up(&scst_mgmt_waitQ);
1345 void scst_cmd_get(struct scst_cmd *cmd)
1347 __scst_cmd_get(cmd);
1349 EXPORT_SYMBOL(scst_cmd_get);
1351 void scst_cmd_put(struct scst_cmd *cmd)
1353 __scst_cmd_put(cmd);
1355 EXPORT_SYMBOL(scst_cmd_put);
1357 struct scst_cmd *scst_alloc_cmd(int gfp_mask)
1359 struct scst_cmd *cmd;
1363 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1364 cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
1366 cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
1369 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
1372 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1373 memset(cmd, 0, sizeof(*cmd));
1376 cmd->state = SCST_CMD_STATE_INIT_WAIT;
1377 atomic_set(&cmd->cmd_ref, 1);
1378 cmd->cmd_lists = &scst_main_cmd_lists;
1379 INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
1380 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1381 cmd->timeout = SCST_DEFAULT_TIMEOUT;
1384 cmd->is_send_status = 1;
1385 cmd->resp_data_len = -1;
1386 cmd->dbl_ua_orig_resp_data_len = -1;
1387 cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
1394 void scst_destroy_put_cmd(struct scst_cmd *cmd)
1396 scst_sess_put(cmd->sess);
1398 /* At this point tgt_dev can be dead, but the pointer remains non-NULL */
1399 if (likely(cmd->tgt_dev != NULL))
1402 scst_destroy_cmd(cmd);
1406 /* No locks supposed to be held */
1407 void scst_free_cmd(struct scst_cmd *cmd)
1413 TRACE_DBG("Freeing cmd %p (tag %Lu)",
1414 cmd, (long long unsigned int)cmd->tag);
1416 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1417 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
1418 cmd, atomic_read(&scst_cmd_count));
1421 sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
1422 cmd->dec_on_dev_needed);
1424 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1425 #if defined(EXTRACHECKS)
1426 if (cmd->scsi_req) {
1427 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
1429 scst_release_request(cmd);
1434 scst_check_restore_sg_buff(cmd);
1436 if (unlikely(cmd->internal)) {
1437 if (cmd->bufflen > 0)
1438 scst_release_space(cmd);
1439 scst_destroy_cmd(cmd);
1443 if (cmd->tgtt->on_free_cmd != NULL) {
1444 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
1445 cmd->tgtt->on_free_cmd(cmd);
1446 TRACE_DBG("%s", "Target's on_free_cmd() returned");
1449 if (likely(cmd->dev != NULL)) {
1450 struct scst_dev_type *handler = cmd->dev->handler;
1451 if (handler->on_free_cmd != NULL) {
1452 TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
1453 handler->name, cmd);
1454 handler->on_free_cmd(cmd);
1455 TRACE_DBG("Dev handler %s on_free_cmd() returned",
1460 scst_release_space(cmd);
1462 if (unlikely(cmd->sense != NULL)) {
1463 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
1464 mempool_free(cmd->sense, scst_sense_mempool);
1468 if (likely(cmd->tgt_dev != NULL)) {
1470 if (unlikely(!cmd->sent_to_midlev)) {
1471 PRINT_ERROR("Finishing not executed cmd %p (opcode "
1472 "%d, target %s, lun %Ld, sn %ld, expected_sn %ld)",
1473 cmd, cmd->cdb[0], cmd->tgtt->name,
1474 (long long unsigned int)cmd->lun,
1475 cmd->sn, cmd->tgt_dev->expected_sn);
1476 scst_unblock_deferred(cmd->tgt_dev, cmd);
1480 if (unlikely(cmd->out_of_sn)) {
1481 TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
1483 (long long unsigned int)cmd->tag,
1485 destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
1490 if (likely(destroy))
1491 scst_destroy_put_cmd(cmd);
1498 /* No locks supposed to be held. */
1499 void scst_check_retries(struct scst_tgt *tgt)
1501 int need_wake_up = 0;
1506 * We don't worry about overflow of finished_cmds, because we check
1507 * only for its change
1509 atomic_inc(&tgt->finished_cmds);
1510 smp_mb__after_atomic_inc();
1511 if (unlikely(tgt->retry_cmds > 0)) {
1512 struct scst_cmd *c, *tc;
1513 unsigned long flags;
1515 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
1518 spin_lock_irqsave(&tgt->tgt_lock, flags);
1519 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
1524 TRACE_RETRY("Moving retry cmd %p to head of active "
1525 "cmd list (retry_cmds left %d)", c, tgt->retry_cmds);
1526 spin_lock(&c->cmd_lists->cmd_list_lock);
1527 list_move(&c->cmd_list_entry, &c->cmd_lists->active_cmd_list);
1528 wake_up(&c->cmd_lists->cmd_list_waitQ);
1529 spin_unlock(&c->cmd_lists->cmd_list_lock);
1532 if (need_wake_up >= 2) /* "slow start" */
1535 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1542 void scst_tgt_retry_timer_fn(unsigned long arg)
1544 struct scst_tgt *tgt = (struct scst_tgt *)arg;
1545 unsigned long flags;
1547 TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
1549 spin_lock_irqsave(&tgt->tgt_lock, flags);
1550 tgt->retry_timer_active = 0;
1551 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1553 scst_check_retries(tgt);
1559 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
1561 struct scst_mgmt_cmd *mcmd;
1565 mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1567 PRINT_CRIT_ERROR("%s", "Allocation of management command "
1568 "failed, some commands and their data could leak");
1571 memset(mcmd, 0, sizeof(*mcmd));
1578 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
1580 unsigned long flags;
1584 spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
1585 atomic_dec(&mcmd->sess->sess_cmd_count);
1586 spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
1588 scst_sess_put(mcmd->sess);
1590 if (mcmd->mcmd_tgt_dev != NULL)
1593 mempool_free(mcmd, scst_mgmt_mempool);
1599 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1600 int scst_alloc_request(struct scst_cmd *cmd)
1603 struct scsi_request *req;
1604 int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1608 /* cmd->dev->scsi_dev must be non-NULL here */
1609 req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1611 TRACE(TRACE_OUT_OF_MEM, "%s",
1612 "Allocation of scsi_request failed");
1617 cmd->scsi_req = req;
1619 memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1620 req->sr_cmd_len = cmd->cdb_len;
1621 req->sr_data_direction = cmd->data_direction;
1622 req->sr_use_sg = cmd->sg_cnt;
1623 req->sr_bufflen = cmd->bufflen;
1624 req->sr_buffer = cmd->sg;
1625 req->sr_request->rq_disk = cmd->dev->rq_disk;
1626 req->sr_sense_buffer[0] = 0;
1628 cmd->scsi_req->upper_private_data = cmd;
1635 void scst_release_request(struct scst_cmd *cmd)
1637 scsi_release_request(cmd->scsi_req);
1638 cmd->scsi_req = NULL;
1642 int scst_alloc_space(struct scst_cmd *cmd)
1646 int atomic = scst_cmd_atomic(cmd);
1648 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1649 int bufflen = cmd->bufflen;
1653 gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1655 flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1657 flags |= SCST_POOL_ALLOC_NO_CACHED;
1659 if (unlikely(cmd->bufflen == 0)) {
1660 /* ToDo: remove when 1.0.1 is started */
1661 TRACE(TRACE_MGMT_MINOR, "Warning: data direction %d or/and "
1662 "zero buffer length. Opcode 0x%x, handler %s, target "
1663 "%s", cmd->data_direction, cmd->cdb[0],
1664 cmd->dev->handler->name, cmd->tgtt->name);
1666 * Be on the safe side and alloc stub buffer. Neither target
1667 * drivers, nor user space will touch it, since bufflen
1670 bufflen = PAGE_SIZE;
1673 cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
1674 &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1675 if (cmd->sg == NULL)
1678 if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1681 PRINT_INFO("Unable to complete command due to "
1682 "SG IO count limitation (requested %d, "
1683 "available %d, tgt lim %d)", cmd->sg_cnt,
1684 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1697 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1704 void scst_release_space(struct scst_cmd *cmd)
1708 if (cmd->sgv == NULL)
1711 if (cmd->data_buf_alloced) {
1712 TRACE_MEM("%s", "data_buf_alloced set, returning");
1716 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
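/*
 * CDB length per command group: the top three bits of the opcode select the
 * group; groups 0/1/2/4/5 use 6/10/10/16/12 byte CDBs, while -1 marks groups
 * with no fixed length (reserved or vendor-specific).
 */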
1729 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1731 #define SCST_CDB_GROUP(opcode) (((opcode) >> 5) & 0x7)
1732 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1734 int scst_get_cdb_len(const uint8_t *cdb)
1736 return SCST_GET_CDB_LEN(cdb[0]);
1739 /* get_trans_len_x extracts x bytes from the CDB as the transfer length, starting at offset off */
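/*
 * For example, a READ(10) CDB carries its transfer length in the two bytes
 * at offset 7, so such an opcode would presumably be wired to
 * get_trans_len_2 with off == 7 in the op table.
 */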
1741 /* for special commands */
1742 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1748 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1750 cmd->bufflen = READ_CAP_LEN;
1754 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1760 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1762 uint8_t *p = (uint8_t *)cmd->cdb + off;
1766 cmd->bufflen |= ((u32)p[0]) << 8;
1767 cmd->bufflen |= ((u32)p[1]);
1769 switch (cmd->cdb[1] & 0x1f) {
1773 if (cmd->bufflen != 0) {
1774 PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1775 "allocation length for service action %x",
1776 cmd->bufflen, cmd->cdb[1] & 0x1f);
1782 switch (cmd->cdb[1] & 0x1f) {
1791 cmd->bufflen = max(28, cmd->bufflen);
1794 PRINT_ERROR("READ POSITION: Invalid service action %x",
1795 cmd->cdb[1] & 0x1f);
1803 scst_set_cmd_error(cmd,
1804 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1809 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1811 cmd->bufflen = (u32)cmd->cdb[off];
1815 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1817 cmd->bufflen = (u32)cmd->cdb[off];
1818 if (cmd->bufflen == 0)
1823 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1825 const uint8_t *p = cmd->cdb + off;
1828 cmd->bufflen |= ((u32)p[0]) << 8;
1829 cmd->bufflen |= ((u32)p[1]);
1834 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1836 const uint8_t *p = cmd->cdb + off;
1839 cmd->bufflen |= ((u32)p[0]) << 16;
1840 cmd->bufflen |= ((u32)p[1]) << 8;
1841 cmd->bufflen |= ((u32)p[2]);
1846 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1848 const uint8_t *p = cmd->cdb + off;
1851 cmd->bufflen |= ((u32)p[0]) << 24;
1852 cmd->bufflen |= ((u32)p[1]) << 16;
1853 cmd->bufflen |= ((u32)p[2]) << 8;
1854 cmd->bufflen |= ((u32)p[3]);
1859 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1865 int scst_get_cdb_info(struct scst_cmd *cmd)
1867 int dev_type = cmd->dev->handler->type;
1870 const struct scst_sdbops *ptr = NULL;
1874 op = cmd->cdb[0]; /* get clear opcode */
1876 TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1877 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1880 i = scst_scsi_op_list[op];
1881 while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1882 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1883 ptr = &scst_scsi_op_table[i];
1884 TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1885 ptr->ops, ptr->devkey[0], /* disk */
1886 ptr->devkey[1], /* tape */
1887 ptr->devkey[2], /* printer */
1888 ptr->devkey[3], /* cpu */
1889 ptr->devkey[4], /* cdr */
1890 ptr->devkey[5], /* cdrom */
1891 ptr->devkey[6], /* scanner */
1892 ptr->devkey[7], /* worm */
1893 ptr->devkey[8], /* changer */
1894 ptr->devkey[9], /* commdev */
1896 TRACE_DBG("direction=%d flags=%d off=%d",
1906 /* opcode not found or not currently used !!! */
1907 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1910 cmd->op_flags = SCST_INFO_INVALID;
1914 cmd->cdb_len = SCST_GET_CDB_LEN(op);
1915 cmd->op_name = ptr->op_name;
1916 cmd->data_direction = ptr->direction;
1917 cmd->op_flags = ptr->flags;
1918 res = (*ptr->get_trans_len)(cmd, ptr->off);
1919 #if 0 /* ToDo: enable when 1.0.1 is started and fix all cases where scst_get_buf_first() returns 0 */
1920 if (unlikely(cmd->bufflen == 0)) {
1922 * According to SPC bufflen 0 for data transfer commands isn't
1923 * an error, so we need to fix the transfer direction.
1925 cmd->data_direction = SCST_DATA_NONE;
1933 EXPORT_SYMBOL(scst_get_cdb_info);
1936 * Routine to extract a lun number from an 8-byte LUN structure
1937 * in network byte order (BE).
1938 * (see SAM-2, Section 4.12.3 page 40)
1939 * Supports 2 types of lun unpacking: peripheral and logical unit.
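 *
 * Example: with the flat space addressing method (top two bits of byte 0
 * equal to 01b) the bytes { 0x40, 0x05, 0, 0, 0, 0, 0, 0 } unpack to LUN 5.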
1941 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1943 lun_t res = (lun_t)-1;
1948 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1950 if (unlikely(len < 2)) {
1951 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1959 if ((*((uint64_t *)lun) &
1960 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1964 if (*((uint16_t *)&lun[2]) != 0)
1968 if (*((uint32_t *)&lun[2]) != 0)
1976 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
1977 switch (address_method) {
1978 case 0: /* peripheral device addressing method */
1979 #if 0 /* Looks like it's legal to use it as flat space addressing method as well */
1981 PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
1982 "peripheral device addressing method 0x%02x, "
1983 "expected 0", *lun);
1992 case 1: /* flat space addressing method */
1993 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1996 case 2: /* logical unit addressing method */
1998 PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1999 "addressing method 0x%02x, expected 0",
2003 if (*(lun + 1) & 0xe0) {
2004 PRINT_ERROR("Illegal TARGET in LUN logical unit "
2005 "addressing method 0x%02x, expected 0",
2006 (*(lun + 1) & 0xf8) >> 5);
2009 res = *(lun + 1) & 0x1f;
2012 case 3: /* extended logical unit addressing method */
2014 PRINT_ERROR("Unimplemented LUN addressing method %u",
2020 TRACE_EXIT_RES((int)res);
2024 PRINT_ERROR("%s", "Multi-level LUN unimplemented");
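/*
 * Converts a sector size into the corresponding power-of-two shift
 * (e.g. 512 -> 9, 4096 -> 12); sizes yielding a shift below 9 are rejected.
 */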
2028 int scst_calc_block_shift(int sector_size)
2030 int block_shift = 0;
2033 if (sector_size == 0)
2043 if (block_shift < 9) {
2044 PRINT_ERROR("Wrong sector size %d", sector_size);
2048 TRACE_EXIT_RES(block_shift);
2051 EXPORT_SYMBOL(scst_calc_block_shift);
2053 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2054 int (*get_block_shift)(struct scst_cmd *cmd))
2061 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2062 * therefore change them only if necessary
2065 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2066 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2068 switch (cmd->cdb[0]) {
2069 case SERVICE_ACTION_IN:
2070 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2071 cmd->bufflen = READ_CAP16_LEN;
2072 cmd->data_direction = SCST_DATA_READ;
2079 if ((cmd->cdb[1] & BYTCHK) == 0) {
2080 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2091 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2093 * No need for locks here, since *_detach() cannot be
2094 * called while there are existing commands.
2096 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2100 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2101 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2102 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2103 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2104 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2105 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2107 TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2108 res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2110 TRACE_EXIT_RES(res);
2113 EXPORT_SYMBOL(scst_sbc_generic_parse);
2115 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2116 int (*get_block_shift)(struct scst_cmd *cmd))
2123 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2124 * therefore change them only if necessary
2127 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2128 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2130 cmd->cdb[1] &= 0x1f;
2132 switch (cmd->cdb[0]) {
2137 if ((cmd->cdb[1] & BYTCHK) == 0) {
2138 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2148 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2149 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2152 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2153 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2154 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2155 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2156 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2157 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2159 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2160 cmd->data_direction);
2165 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2167 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2168 int (*get_block_shift)(struct scst_cmd *cmd))
2175 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2176 * therefore change them only if necessary
2179 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2180 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2182 cmd->cdb[1] &= 0x1f;
2184 switch (cmd->cdb[0]) {
2189 if ((cmd->cdb[1] & BYTCHK) == 0) {
2190 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2200 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2201 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2204 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2205 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2206 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2207 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2208 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2209 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2211 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2212 cmd->data_direction);
2214 TRACE_EXIT_RES(res);
2217 EXPORT_SYMBOL(scst_modisk_generic_parse);
2219 int scst_tape_generic_parse(struct scst_cmd *cmd,
2220 int (*get_block_size)(struct scst_cmd *cmd))
2227 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2228 * therefore change them only if necessary
2231 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2232 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2234 if (cmd->cdb[0] == READ_POSITION) {
2235 int tclp = cmd->cdb[1] & TCLP_BIT;
2236 int long_bit = cmd->cdb[1] & LONG_BIT;
2237 int bt = cmd->cdb[1] & BT_BIT;
2239 if ((tclp == long_bit) && (!bt || !long_bit)) {
2241 tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2242 cmd->data_direction = SCST_DATA_READ;
2245 cmd->data_direction = SCST_DATA_NONE;
2249 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2250 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2252 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2253 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2254 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2255 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2256 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2257 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2259 TRACE_EXIT_RES(res);
2262 EXPORT_SYMBOL(scst_tape_generic_parse);
2264 static int scst_null_parse(struct scst_cmd *cmd)
2271 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2272 * therefore change them only if necessary
2275 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2276 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2278 switch (cmd->cdb[0]) {
2284 TRACE_DBG("res %d bufflen %d direct %d",
2285 res, cmd->bufflen, cmd->data_direction);
2291 int scst_changer_generic_parse(struct scst_cmd *cmd,
2292 int (*nothing)(struct scst_cmd *cmd))
2294 int res = scst_null_parse(cmd);
2296 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2297 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2299 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2303 EXPORT_SYMBOL(scst_changer_generic_parse);
2305 int scst_processor_generic_parse(struct scst_cmd *cmd,
2306 int (*nothing)(struct scst_cmd *cmd))
2308 int res = scst_null_parse(cmd);
2310 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2311 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2313 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2317 EXPORT_SYMBOL(scst_processor_generic_parse);
2319 int scst_raid_generic_parse(struct scst_cmd *cmd,
2320 int (*nothing)(struct scst_cmd *cmd))
2322 int res = scst_null_parse(cmd);
2324 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2325 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2327 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2331 EXPORT_SYMBOL(scst_raid_generic_parse);
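/*
 * Generic dev_done() for block devices. On a successful command that returns
 * capacity data (the READ CAPACITY path below), the sector size is parsed
 * out of bytes 4-7 of the response and converted into a block shift that is
 * handed to the handler via set_block_shift().
 */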
2333 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2334 void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2336 int opcode = cmd->cdb[0];
2337 int status = cmd->status;
2338 int res = SCST_CMD_STATE_DEFAULT;
2343 * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2344 * based on cmd->status and cmd->data_direction, therefore change
2345 * them only if necessary
2348 if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2352 /* Always keep track of disk capacity */
2353 int buffer_size, sector_size, sh;
2356 buffer_size = scst_get_buf_first(cmd, &buffer);
2357 if (unlikely(buffer_size <= 0)) {
2358 PRINT_ERROR("%s: Unable to get the buffer "
2359 "(%d)", __func__, buffer_size);
2364 ((buffer[4] << 24) | (buffer[5] << 16) |
2365 (buffer[6] << 8) | (buffer[7] << 0));
2366 scst_put_buf(cmd, buffer);
2367 if (sector_size != 0)
2368 sh = scst_calc_block_shift(sector_size);
2371 set_block_shift(cmd, sh);
2372 TRACE_DBG("block_shift %d", sh);
2381 TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2382 "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2385 TRACE_EXIT_RES(res);
2388 EXPORT_SYMBOL(scst_block_generic_dev_done);
2390 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2391 void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2393 int opcode = cmd->cdb[0];
2394 int res = SCST_CMD_STATE_DEFAULT;
2395 int buffer_size, bs;
2396 uint8_t *buffer = NULL;
2401 * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2402 * based on cmd->status and cmd->data_direction, therefore change
2403 * them only if necessary
2409 buffer_size = scst_get_buf_first(cmd, &buffer);
2410 if (unlikely(buffer_size <= 0)) {
2411 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2412 __func__, buffer_size);
2420 TRACE_DBG("%s", "MODE_SENSE");
2421 if ((cmd->cdb[2] & 0xC0) == 0) {
2422 if (buffer[3] == 8) {
2423 bs = (buffer[9] << 16) |
2424 (buffer[10] << 8) | buffer[11];
2425 set_block_size(cmd, bs);
2430 TRACE_DBG("%s", "MODE_SELECT");
2431 if (buffer[3] == 8) {
2432 bs = (buffer[9] << 16) | (buffer[10] << 8) |
2434 set_block_size(cmd, bs);
2445 scst_put_buf(cmd, buffer);
2450 TRACE_EXIT_RES(res);
2453 EXPORT_SYMBOL(scst_tape_generic_dev_done);
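/*
 * Examines the result of an internally issued command: DID_RESET triggers a
 * reset UA on the device, and a CHECK CONDITION carrying valid UA sense is
 * propagated to the device's tgt_devs as well.
 */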
2455 static void scst_check_internal_sense(struct scst_device *dev, int result,
2456 uint8_t *sense, int sense_len)
2460 if (host_byte(result) == DID_RESET) {
2461 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2463 scst_set_sense(sense, sense_len,
2464 SCST_LOAD_SENSE(scst_sense_reset_UA));
2465 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2466 } else if ((status_byte(result) == CHECK_CONDITION) &&
2467 SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2468 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
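/*
 * Issues an internal MODE SENSE for the control mode page (retried up to
 * five times) and caches the TST, QUEUE ALGORITHM MODIFIER, SWP and TAS
 * fields in the scst_device for later use.
 */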
2474 int scst_obtain_device_parameters(struct scst_device *dev)
2478 uint8_t buffer[4+0x0A];
2479 uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2483 sBUG_ON(in_interrupt() || in_atomic());
2484 EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2486 for (i = 0; i < 5; i++) {
2487 /* Get control mode page */
2488 memset(cmd, 0, sizeof(cmd));
2489 cmd[0] = MODE_SENSE;
2490 cmd[1] = 8; /* DBD */
2492 cmd[4] = sizeof(buffer);
2494 memset(buffer, 0, sizeof(buffer));
2495 memset(sense_buffer, 0, sizeof(sense_buffer));
2497 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2498 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2499 sizeof(buffer), sense_buffer, 15, 0, 0);
2501 TRACE_DBG("MODE_SENSE done: %x", res);
2503 if (scsi_status_is_good(res)) {
2506 PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2507 buffer, sizeof(buffer));
2509 dev->tst = buffer[4+2] >> 5;
2510 q = buffer[4+3] >> 4;
2511 if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2512 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2513 "%d:%d:%d:%d", dev->queue_alg,
2514 dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2515 dev->scsi_dev->id, dev->scsi_dev->lun);
2518 dev->swp = (buffer[4+4] & 0x8) >> 3;
2519 dev->tas = (buffer[4+5] & 0x40) >> 6;
2522 * Unfortunately, SCSI ML doesn't provide a way to
2523 * specify a command's task attribute, so we can rely on
2524 * the device's restricted reordering only.
2526 dev->has_own_order_mgmt = !dev->queue_alg;
2528 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2529 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2530 "%d", dev->scsi_dev->host->host_no,
2531 dev->scsi_dev->channel, dev->scsi_dev->id,
2532 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2533 dev->swp, dev->tas, dev->has_own_order_mgmt);
2537 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2538 if ((status_byte(res) == CHECK_CONDITION) &&
2542 SCST_SENSE_VALID(sense_buffer)) {
2543 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2544 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device "
2545 "%d:%d:%d:%d doesn't support control "
2546 "mode page, using defaults: TST "
2547 "%x, QUEUE ALG %x, SWP %x, TAS %x, "
2548 "has_own_order_mgmt %d",
2549 dev->scsi_dev->host->host_no,
2550 dev->scsi_dev->channel, dev->scsi_dev->id,
2551 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2552 dev->swp, dev->tas, dev->has_own_order_mgmt);
2555 } else if (sense_buffer[2] == NOT_READY) {
2556 TRACE(TRACE_SCSI, "Device %d:%d:%d:%d not ready",
2557 dev->scsi_dev->host->host_no,
2558 dev->scsi_dev->channel, dev->scsi_dev->id,
2559 dev->scsi_dev->lun);
2564 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2565 "device %d:%d:%d:%d failed: %x",
2566 dev->scsi_dev->host->host_no,
2567 dev->scsi_dev->channel, dev->scsi_dev->id,
2568 dev->scsi_dev->lun, res);
2569 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2570 "sense", sense_buffer, sizeof(sense_buffer));
2572 scst_check_internal_sense(dev, res, sense_buffer,
2573 sizeof(sense_buffer));
2579 TRACE_EXIT_RES(res);
2582 EXPORT_SYMBOL(scst_obtain_device_parameters);
2584 /* Called under dev_lock and BH off */
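/*
 * Device-level reset processing: drops any RESERVE held on the device,
 * clears queued UAs, aborts matching commands on every session (except
 * exclude_cmd), moves aborted blocked commands back to the active list and
 * finally queues a reset UA for the other initiators.
 */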
2585 void scst_process_reset(struct scst_device *dev,
2586 struct scst_session *originator, struct scst_cmd *exclude_cmd,
2587 struct scst_mgmt_cmd *mcmd)
2589 struct scst_tgt_dev *tgt_dev;
2590 struct scst_cmd *cmd, *tcmd;
2594 /* Clear RESERVE'ation, if necessary */
2595 if (dev->dev_reserved) {
2596 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2597 dev_tgt_dev_list_entry) {
2598 TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2600 (long long unsigned int)tgt_dev->lun);
2601 clear_bit(SCST_TGT_DEV_RESERVED,
2602 &tgt_dev->tgt_dev_flags);
2604 dev->dev_reserved = 0;
2606 * There is no need to send RELEASE, since the device is going
2607 * to be reset. Actually, since we can be in the RESET TM
2608 * function, it might be dangerous.
2612 dev->dev_double_ua_possible = 1;
2613 dev->dev_serialized = 1;
2615 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2616 dev_tgt_dev_list_entry) {
2617 struct scst_session *sess = tgt_dev->sess;
2619 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2620 scst_free_all_UA(tgt_dev);
2621 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2623 spin_lock_irq(&sess->sess_list_lock);
2625 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2626 list_for_each_entry(cmd, &sess->search_cmd_list,
2627 search_cmd_list_entry) {
2628 if (cmd == exclude_cmd)
2630 if ((cmd->tgt_dev == tgt_dev) ||
2631 ((cmd->tgt_dev == NULL) &&
2632 (cmd->lun == tgt_dev->lun))) {
2633 scst_abort_cmd(cmd, mcmd,
2634 (tgt_dev->sess != originator), 0);
2637 spin_unlock_irq(&sess->sess_list_lock);
2640 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2641 blocked_cmd_list_entry) {
2642 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2643 list_del(&cmd->blocked_cmd_list_entry);
2644 TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2645 "to active cmd list", cmd);
2646 spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2647 list_add_tail(&cmd->cmd_list_entry,
2648 &cmd->cmd_lists->active_cmd_list);
2649 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2650 spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2654 /* BH already off */
2655 spin_lock(&scst_temp_UA_lock);
2656 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2657 SCST_LOAD_SENSE(scst_sense_reset_UA));
2658 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2659 sizeof(scst_temp_UA));
2660 spin_unlock(&scst_temp_UA_lock);
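/*
 * Takes the first pending UNIT ATTENTION queued on the command's tgt_dev,
 * reports it as the command's sense data and removes it from the UA list,
 * clearing the pending flag once the list becomes empty.
 */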
2666 int scst_set_pending_UA(struct scst_cmd *cmd)
2669 struct scst_tgt_dev_UA *UA_entry;
2673 TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2675 spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2677 /* UA list could be cleared behind us, so retest */
2678 if (list_empty(&cmd->tgt_dev->UA_list)) {
2680 "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2685 UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2688 TRACE_DBG("next %p UA_entry %p",
2689 cmd->tgt_dev->UA_list.next, UA_entry);
2691 scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2692 sizeof(UA_entry->UA_sense_buffer));
2696 list_del(&UA_entry->UA_list_entry);
2698 mempool_free(UA_entry, scst_ua_mempool);
2700 if (list_empty(&cmd->tgt_dev->UA_list)) {
2701 clear_bit(SCST_TGT_DEV_UA_PENDING,
2702 &cmd->tgt_dev->tgt_dev_flags);
2705 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2708 TRACE_EXIT_RES(res);
2712 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
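/*
 * Example (hypothetical sketch): a typical consumer tests the pending
 * flag without the lock first (scst_set_pending_UA() retests under
 * tgt_dev_lock); the "0 on success" return convention is assumed from
 * the error path above:
 *
 *	if (test_bit(SCST_TGT_DEV_UA_PENDING, &cmd->tgt_dev->tgt_dev_flags)) {
 *		if (scst_set_pending_UA(cmd) == 0) {
 *			// cmd now carries CHECK CONDITION with the UA sense,
 *			// so deliver the response instead of executing the CDB
 *		}
 *	}
 */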
2716 /* Called under tgt_dev_lock and BH off */
2717 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2718 const uint8_t *sense, int sense_len, int head)
2720 struct scst_tgt_dev_UA *UA_entry = NULL;
2724 UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2725 if (UA_entry == NULL) {
2726 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2727 "allocation failed. The UNIT ATTENTION "
2728 "on some sessions will be missed");
2729 PRINT_BUFFER("Lost UA", sense, sense_len);
2732 memset(UA_entry, 0, sizeof(*UA_entry));
2734 if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2735 sense_len = sizeof(UA_entry->UA_sense_buffer);
2736 memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2738 set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2740 TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2743 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2745 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2752 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2753 const uint8_t *sense, int sense_len, int head)
2756 struct scst_tgt_dev_UA *UA_entry_tmp;
2760 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2762 list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2764 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, sense_len) == 0) {
2765 TRACE_MGMT_DBG("%s", "UA already exists");
2772 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2774 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
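/*
 * Example (hypothetical sketch): queueing a UNIT ATTENTION for a single
 * tgt_dev; the chosen sense (reset UA) is illustrative only:
 *
 *	uint8_t sense[SCST_SENSE_BUFFERSIZE];
 *
 *	scst_set_sense(sense, sizeof(sense),
 *		SCST_LOAD_SENSE(scst_sense_reset_UA));
 *	scst_check_set_UA(tgt_dev, sense, sizeof(sense), 0);
 */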
2780 /* Called under dev_lock and BH off */
2781 void scst_dev_check_set_local_UA(struct scst_device *dev,
2782 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2784 struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2788 if (exclude != NULL)
2789 exclude_tgt_dev = exclude->tgt_dev;
2791 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2792 dev_tgt_dev_list_entry) {
2793 if (tgt_dev != exclude_tgt_dev)
2794 scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2801 /* Called under dev_lock and BH off */
2802 void __scst_dev_check_set_UA(struct scst_device *dev,
2803 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2807 TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2809 /* Check for reset UA */
2810 if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2811 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2814 scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2820 /* Called under tgt_dev_lock or when tgt_dev is unused */
2821 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2823 struct scst_tgt_dev_UA *UA_entry, *t;
2827 list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2828 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %llu",
2829 (long long unsigned int)tgt_dev->lun);
2830 list_del(&UA_entry->UA_list_entry);
2833 INIT_LIST_HEAD(&tgt_dev->UA_list);
2834 clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2841 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2843 struct scst_cmd *res = NULL, *cmd, *t;
2844 typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2846 spin_lock_irq(&tgt_dev->sn_lock);
2848 if (unlikely(tgt_dev->hq_cmd_count != 0))
2852 list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2853 sn_cmd_list_entry) {
2854 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2855 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2856 if (cmd->sn == expected_sn) {
2857 TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2858 cmd, cmd->sn, cmd->sn_set);
2859 tgt_dev->def_cmd_count--;
2860 list_del(&cmd->sn_cmd_list_entry);
2864 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2865 TRACE_SN("Adding cmd %p to active cmd list",
2867 list_add_tail(&cmd->cmd_list_entry,
2868 &cmd->cmd_lists->active_cmd_list);
2869 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2870 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2877 list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2878 sn_cmd_list_entry) {
2879 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2880 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2881 if (cmd->sn == expected_sn) {
2882 atomic_t *slot = cmd->sn_slot;
2884 * !! At this point any pointer in cmd, except !!
2885 * !! sn_slot and sn_cmd_list_entry, could be !!
2886 * !! already destroyed !!
2888 TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2890 (long long unsigned int)cmd->tag,
2892 tgt_dev->def_cmd_count--;
2893 list_del(&cmd->sn_cmd_list_entry);
2894 spin_unlock_irq(&tgt_dev->sn_lock);
2895 if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2897 scst_destroy_put_cmd(cmd);
2898 scst_inc_expected_sn(tgt_dev, slot);
2899 expected_sn = tgt_dev->expected_sn;
2900 spin_lock_irq(&tgt_dev->sn_lock);
2906 spin_unlock_irq(&tgt_dev->sn_lock);
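/*
 * Example (hypothetical sketch): callers usually avoid taking sn_lock
 * when nothing is deferred; a wrapper along these lines (the name
 * check_deferred_cmds() is illustrative, not the real SCST helper) is
 * all that is needed:
 *
 *	static inline struct scst_cmd *check_deferred_cmds(
 *		struct scst_tgt_dev *tgt_dev)
 *	{
 *		if (tgt_dev->def_cmd_count == 0)
 *			return NULL;
 *		return __scst_check_deferred_commands(tgt_dev);
 *	}
 */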
2910 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2911 struct scst_thr_data_hdr *data,
2912 void (*free_fn) (struct scst_thr_data_hdr *data))
2914 data->pid = current->pid;
2915 atomic_set(&data->ref, 1);
2916 EXTRACHECKS_BUG_ON(free_fn == NULL);
2917 data->free_fn = free_fn;
2918 spin_lock(&tgt_dev->thr_data_lock);
2919 list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2920 spin_unlock(&tgt_dev->thr_data_lock);
2922 EXPORT_SYMBOL(scst_add_thr_data);
2924 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2926 spin_lock(&tgt_dev->thr_data_lock);
2927 while (!list_empty(&tgt_dev->thr_data_list)) {
2928 struct scst_thr_data_hdr *d = list_entry(
2929 tgt_dev->thr_data_list.next, typeof(*d),
2930 thr_data_list_entry);
2931 list_del(&d->thr_data_list_entry);
2932 spin_unlock(&tgt_dev->thr_data_lock);
2933 scst_thr_data_put(d);
2934 spin_lock(&tgt_dev->thr_data_lock);
2936 spin_unlock(&tgt_dev->thr_data_lock);
2939 EXPORT_SYMBOL(scst_del_all_thr_data);
2941 void scst_dev_del_all_thr_data(struct scst_device *dev)
2943 struct scst_tgt_dev *tgt_dev;
2947 mutex_lock(&scst_mutex);
2949 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2950 dev_tgt_dev_list_entry) {
2951 scst_del_all_thr_data(tgt_dev);
2954 mutex_unlock(&scst_mutex);
2959 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
2961 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2963 struct scst_thr_data_hdr *res = NULL, *d;
2965 spin_lock(&tgt_dev->thr_data_lock);
2966 list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2967 if (d->pid == current->pid) {
2969 scst_thr_data_get(res);
2973 spin_unlock(&tgt_dev->thr_data_lock);
2976 EXPORT_SYMBOL(scst_find_thr_data);
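/*
 * Example (hypothetical sketch): per-thread private data in a dev
 * handler.  The my_thr_data/my_free_fn names are illustrative only:
 *
 *	struct my_thr_data {
 *		struct scst_thr_data_hdr hdr;
 *		int io_count;
 *	};
 *
 *	static void my_free_fn(struct scst_thr_data_hdr *d)
 *	{
 *		kfree(container_of(d, struct my_thr_data, hdr));
 *	}
 *
 *	// In the handler's exec path:
 *	struct scst_thr_data_hdr *d = scst_find_thr_data(tgt_dev);
 *	if (d == NULL) {
 *		struct my_thr_data *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *		if (p != NULL)
 *			scst_add_thr_data(tgt_dev, &p->hdr, my_free_fn);
 *	} else {
 *		container_of(d, struct my_thr_data, hdr)->io_count++;
 *		scst_thr_data_put(d);  // drop the ref scst_find_thr_data() took
 *	}
 */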
2978 /* dev_lock is supposed to be held and BH disabled */
2979 void __scst_block_dev(struct scst_device *dev)
2982 TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
2986 void scst_block_dev(struct scst_device *dev, int outstanding)
2988 spin_lock_bh(&dev->dev_lock);
2989 __scst_block_dev(dev);
2990 spin_unlock_bh(&dev->dev_lock);
2992 /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2995 TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2996 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2997 wait_event(dev->on_dev_waitQ,
2998 atomic_read(&dev->on_dev_count) <= outstanding);
2999 TRACE_MGMT_DBG("%s", "wait_event() returned");
3003 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3005 sBUG_ON(cmd->needs_unblocking);
3007 cmd->needs_unblocking = 1;
3008 TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3009 cmd, (long long unsigned int)cmd->tag);
3011 scst_block_dev(cmd->dev, outstanding);
3015 void scst_unblock_dev(struct scst_device *dev)
3017 spin_lock_bh(&dev->dev_lock);
3018 TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3019 dev->block_count-1, dev);
3020 if (--dev->block_count == 0)
3021 scst_unblock_cmds(dev);
3022 spin_unlock_bh(&dev->dev_lock);
3023 sBUG_ON(dev->block_count < 0);
3027 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3029 scst_unblock_dev(cmd->dev);
3030 cmd->needs_unblocking = 0;
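/*
 * Example (hypothetical sketch): quiescing a device around a
 * configuration change (the caller must be able to sleep, since
 * scst_block_dev() waits on on_dev_waitQ):
 *
 *	scst_block_dev(dev, 0);		// wait until no cmds are on the device
 *	// ... change the device state ...
 *	scst_unblock_dev(dev);
 *
 * The per-command variants pair the same way: scst_block_dev_cmd(cmd, 1)
 * before handing cmd to the device, scst_unblock_dev_cmd(cmd) once the
 * command has finished.
 */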
3034 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3037 struct scst_device *dev = cmd->dev;
3041 sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
3043 atomic_inc(&dev->on_dev_count);
3044 cmd->dec_on_dev_needed = 1;
3045 TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3047 #ifdef STRICT_SERIALIZING
3048 spin_lock_bh(&dev->dev_lock);
3049 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3051 if (dev->block_count > 0) {
3052 scst_dec_on_dev_cmd(cmd);
3053 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3054 "serializing (tag %llu, dev %p)", cmd, cmd->tag, dev);
3055 list_add_tail(&cmd->blocked_cmd_list_entry,
3056 &dev->blocked_cmd_list);
3059 __scst_block_dev(dev);
3060 cmd->inc_blocking = 1;
3062 spin_unlock_bh(&dev->dev_lock);
3066 if (unlikely(dev->block_count > 0)) {
3067 spin_lock_bh(&dev->dev_lock);
3068 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3070 barrier(); /* to reread block_count */
3071 if (dev->block_count > 0) {
3072 scst_dec_on_dev_cmd(cmd);
3073 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
3074 "serializing (tag %llu, dev %p)", cmd,
3075 (long long unsigned int)cmd->tag, dev);
3076 list_add_tail(&cmd->blocked_cmd_list_entry,
3077 &dev->blocked_cmd_list);
3079 spin_unlock_bh(&dev->dev_lock);
3082 TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3085 spin_unlock_bh(&dev->dev_lock);
3087 if (unlikely(dev->dev_serialized)) {
3088 spin_lock_bh(&dev->dev_lock);
3089 barrier(); /* to reread block_count */
3090 if (dev->block_count == 0) {
3091 TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3092 "cmds due to serializing (dev %p)", cmd,
3093 (long long unsigned int)cmd->tag, dev);
3094 __scst_block_dev(dev);
3095 cmd->inc_blocking = 1;
3097 spin_unlock_bh(&dev->dev_lock);
3098 TRACE_MGMT_DBG("Somebody blocked the device, "
3099 "repeating (count %d)", dev->block_count);
3102 spin_unlock_bh(&dev->dev_lock);
3107 TRACE_EXIT_RES(res);
3111 spin_unlock_bh(&dev->dev_lock);
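/*
 * Example (hypothetical sketch): expected pairing around dispatching a
 * command to the device.  The "non-zero means the cmd was queued on
 * blocked_cmd_list and will be retried later" return convention is an
 * assumption based on the blocking paths above:
 *
 *	if (scst_inc_on_dev_cmd(cmd) == 0) {
 *		// safe to pass cmd to the dev handler now;
 *		// scst_dec_on_dev_cmd() undoes the accounting afterwards
 *	}
 */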
3115 /* Called under dev_lock */
3116 void scst_unblock_cmds(struct scst_device *dev)
3118 #ifdef STRICT_SERIALIZING
3119 struct scst_cmd *cmd, *t;
3120 unsigned long flags;
3124 local_irq_save(flags);
3125 list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3126 blocked_cmd_list_entry) {
3129 * Since only one cmd at a time is being executed, expected_sn
3130 * can't change behind us while the corresponding cmd is on
3131 * blocked_cmd_list, but we could be called before
3132 * scst_inc_expected_sn().
3134 if (likely(!cmd->internal && !cmd->retry)) {
3135 typeof(cmd->tgt_dev->expected_sn) expected_sn;
3136 if (cmd->tgt_dev == NULL)
3138 expected_sn = cmd->tgt_dev->expected_sn;
3139 if (cmd->sn == expected_sn)
3141 else if (cmd->sn != (expected_sn+1))
3145 list_del(&cmd->blocked_cmd_list_entry);
3146 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3147 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3148 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
3149 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3150 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3154 local_irq_restore(flags);
3155 #else /* STRICT_SERIALIZING */
3156 struct scst_cmd *cmd, *tcmd;
3157 unsigned long flags;
3161 local_irq_save(flags);
3162 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3163 blocked_cmd_list_entry) {
3164 list_del(&cmd->blocked_cmd_list_entry);
3165 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3166 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3167 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3168 list_add(&cmd->cmd_list_entry,
3169 &cmd->cmd_lists->active_cmd_list);
3171 list_add_tail(&cmd->cmd_list_entry,
3172 &cmd->cmd_lists->active_cmd_list);
3173 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3174 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3176 local_irq_restore(flags);
3177 #endif /* STRICT_SERIALIZING */
3183 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3184 struct scst_cmd *out_of_sn_cmd)
3186 EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3188 if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3189 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3190 scst_make_deferred_commands_active(tgt_dev, out_of_sn_cmd);
3192 out_of_sn_cmd->out_of_sn = 1;
3193 spin_lock_irq(&tgt_dev->sn_lock);
3194 tgt_dev->def_cmd_count++;
3195 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3196 &tgt_dev->skipped_sn_list);
3197 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
3198 "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3199 tgt_dev->expected_sn);
3200 spin_unlock_irq(&tgt_dev->sn_lock);
3206 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3207 struct scst_cmd *out_of_sn_cmd)
3211 if (!out_of_sn_cmd->sn_set) {
3212 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3216 __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3223 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3225 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3229 if (!cmd->hq_cmd_inced)
3232 spin_lock_irq(&tgt_dev->sn_lock);
3233 tgt_dev->hq_cmd_count--;
3234 spin_unlock_irq(&tgt_dev->sn_lock);
3236 EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3239 * There is no problem with checking hq_cmd_count without the
3240 * lock held. In the worst case we will only trigger an
3241 * unneeded run of the deferred commands.
3243 if (tgt_dev->hq_cmd_count == 0)
3244 scst_make_deferred_commands_active(tgt_dev, cmd);
3251 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3255 TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3256 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3257 atomic_read(&scst_cmd_count));
3259 scst_done_cmd_mgmt(cmd);
3262 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3263 if (cmd->completed) {
3264 /* It's completed and it's OK to return its result */
3268 if (cmd->dev->tas) {
3269 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3270 "(tag %llu), returning TASK ABORTED ", cmd,
3271 (long long unsigned int)cmd->tag);
3272 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3274 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3275 "(tag %llu), aborting without delivery or "
3277 cmd, (long long unsigned int)cmd->tag);
3279 * There is no need to check/requeue a possible UA,
3280 * because, if it exists, it will be delivered
3281 * by the "completed" branch above.
3283 clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3292 void __init scst_scsi_op_list_init(void)
3299 for (i = 0; i < 256; i++)
3300 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3302 for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3303 if (scst_scsi_op_table[i].ops != op) {
3304 op = scst_scsi_op_table[i].ops;
3305 scst_scsi_op_list[op] = i;
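/*
 * Example (hypothetical sketch): with scst_scsi_op_list[] built as above
 * (opcode -> first matching index in scst_scsi_op_table[], or
 * SCST_CDB_TBL_SIZE when the opcode is absent), a CDB lookup reduces to:
 *
 *	int i = scst_scsi_op_list[cdb[0]];
 *
 *	while ((i < SCST_CDB_TBL_SIZE) && (scst_scsi_op_table[i].ops == cdb[0])) {
 *		// match scst_scsi_op_table[i] against the device type, etc.
 *		i++;
 *	}
 */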
3314 /* Originally taken from the XFS code */
3315 unsigned long scst_random(void)
3318 static unsigned long RandomValue;
3319 static DEFINE_SPINLOCK(lock);
3320 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3324 unsigned long flags;
3326 spin_lock_irqsave(&lock, flags);
3328 RandomValue = jiffies;
3334 rv = 16807 * lo - 2836 * hi;
3338 spin_unlock_irqrestore(&lock, flags);
3341 EXPORT_SYMBOL(scst_random);
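/*
 * Example: scst_random() is only intended for debug-style decisions such
 * as in the TM/SN debugging code below, e.g. acting on roughly 1 command
 * in 100:
 *
 *	if ((scst_random() % 100) == 0) {
 *		// inject the debug behavior
 *	}
 */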
3346 #define TM_DBG_STATE_ABORT 0
3347 #define TM_DBG_STATE_RESET 1
3348 #define TM_DBG_STATE_OFFLINE 2
3350 #define INIT_TM_DBG_STATE TM_DBG_STATE_ABORT
3352 static void tm_dbg_timer_fn(unsigned long arg);
3354 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3355 /* All serialized by scst_tm_dbg_lock */
3357 unsigned int tm_dbg_release:1;
3358 unsigned int tm_dbg_blocked:1;
3360 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3361 static int tm_dbg_delayed_cmds_count;
3362 static int tm_dbg_passed_cmds_count;
3363 static int tm_dbg_state;
3364 static int tm_dbg_on_state_passes;
3365 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3366 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3368 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3370 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3371 struct scst_acg_dev *acg_dev)
3373 if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3374 unsigned long flags;
3375 /* Do TM debugging only for LUN 0 */
3376 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3377 tm_dbg_p_cmd_list_waitQ =
3378 &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3379 tm_dbg_state = INIT_TM_DBG_STATE;
3380 tm_dbg_on_state_passes =
3381 tm_dbg_on_state_num_passes[tm_dbg_state];
3382 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3383 PRINT_INFO("LUN 0 of target driver %s is under "
3384 "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3385 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3389 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3391 if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3392 unsigned long flags;
3393 del_timer_sync(&tm_dbg_timer);
3394 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3395 tm_dbg_p_cmd_list_waitQ = NULL;
3396 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3400 static void tm_dbg_timer_fn(unsigned long arg)
3402 TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3403 tm_dbg_flags.tm_dbg_release = 1;
3405 wake_up_all(tm_dbg_p_cmd_list_waitQ);
3408 /* Called under scst_tm_dbg_lock and IRQs off */
3409 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3411 switch (tm_dbg_state) {
3412 case TM_DBG_STATE_ABORT:
3413 if (tm_dbg_delayed_cmds_count == 0) {
3414 unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3415 TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
3416 "for %ld.%ld seconds (%ld HZ), "
3417 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3418 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3419 mod_timer(&tm_dbg_timer, jiffies + d);
3421 tm_dbg_flags.tm_dbg_blocked = 1;
3424 TRACE_MGMT_DBG("Delaying another timed cmd %p "
3425 "(tag %llu), delayed_cmds_count=%d, "
3426 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3427 tm_dbg_delayed_cmds_count,
3428 tm_dbg_on_state_passes);
3429 if (tm_dbg_delayed_cmds_count == 2)
3430 tm_dbg_flags.tm_dbg_blocked = 0;
3434 case TM_DBG_STATE_RESET:
3435 case TM_DBG_STATE_OFFLINE:
3436 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3437 "(tag %llu), delayed_cmds_count=%d, "
3438 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3439 tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3440 tm_dbg_flags.tm_dbg_blocked = 1;
3446 /* IRQs already off */
3447 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3448 list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3449 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3450 cmd->tm_dbg_delayed = 1;
3451 tm_dbg_delayed_cmds_count++;
3456 void tm_dbg_check_released_cmds(void)
3458 if (tm_dbg_flags.tm_dbg_release) {
3459 struct scst_cmd *cmd, *tc;
3460 spin_lock_irq(&scst_tm_dbg_lock);
3461 list_for_each_entry_safe_reverse(cmd, tc,
3462 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3463 TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3464 "delayed_cmds_count=%d", cmd, cmd->tag,
3465 tm_dbg_delayed_cmds_count);
3466 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3467 list_move(&cmd->cmd_list_entry,
3468 &cmd->cmd_lists->active_cmd_list);
3469 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3471 tm_dbg_flags.tm_dbg_release = 0;
3472 spin_unlock_irq(&scst_tm_dbg_lock);
3476 /* Called under scst_tm_dbg_lock */
3477 static void tm_dbg_change_state(void)
3479 tm_dbg_flags.tm_dbg_blocked = 0;
3480 if (--tm_dbg_on_state_passes == 0) {
3481 switch (tm_dbg_state) {
3482 case TM_DBG_STATE_ABORT:
3483 TRACE_MGMT_DBG("%s", "Changing "
3484 "tm_dbg_state to RESET");
3487 tm_dbg_flags.tm_dbg_blocked = 0;
3489 case TM_DBG_STATE_RESET:
3490 case TM_DBG_STATE_OFFLINE:
3491 if (TM_DBG_GO_OFFLINE) {
3492 TRACE_MGMT_DBG("%s", "Changing "
3493 "tm_dbg_state to OFFLINE");
3495 TM_DBG_STATE_OFFLINE;
3497 TRACE_MGMT_DBG("%s", "Changing "
3498 "tm_dbg_state to ABORT");
3506 tm_dbg_on_state_passes =
3507 tm_dbg_on_state_num_passes[tm_dbg_state];
3510 TRACE_MGMT_DBG("%s", "Deleting timer");
3511 del_timer(&tm_dbg_timer);
3515 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3518 unsigned long flags;
3520 if (cmd->tm_dbg_immut)
3523 if (cmd->tm_dbg_delayed) {
3524 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3525 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3526 "delayed_cmds_count=%d", cmd, cmd->tag,
3527 tm_dbg_delayed_cmds_count);
3529 cmd->tm_dbg_immut = 1;
3530 tm_dbg_delayed_cmds_count--;
3531 if ((tm_dbg_delayed_cmds_count == 0) &&
3532 (tm_dbg_state == TM_DBG_STATE_ABORT))
3533 tm_dbg_change_state();
3534 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3535 } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3536 &cmd->tgt_dev->tgt_dev_flags)) {
3537 /* Delay every 50th command */
3538 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3539 if (tm_dbg_flags.tm_dbg_blocked ||
3540 (++tm_dbg_passed_cmds_count % 50) == 0) {
3541 tm_dbg_delay_cmd(cmd);
3544 cmd->tm_dbg_immut = 1;
3545 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3553 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3556 unsigned long flags;
3558 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3559 list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3562 TRACE_MGMT_DBG("Abort request for "
3563 "delayed cmd %p (tag=%llu), moving it to "
3564 "active cmd list (delayed_cmds_count=%d)",
3565 c, c->tag, tm_dbg_delayed_cmds_count);
3567 if (!test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3568 /* Test how completed commands are handled */
3569 if (((scst_random() % 10) == 5)) {
3570 scst_set_cmd_error(cmd,
3571 SCST_LOAD_SENSE(scst_sense_hardw_error));
3572 /* It's completed now */
3576 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3577 list_move(&c->cmd_list_entry,
3578 &c->cmd_lists->active_cmd_list);
3579 wake_up(&c->cmd_lists->cmd_list_waitQ);
3580 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3584 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3587 /* Might be called under scst_mutex */
3588 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3590 unsigned long flags;
3593 struct scst_tgt_dev *tgt_dev;
3596 spin_lock_bh(&dev->dev_lock);
3597 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3598 dev_tgt_dev_list_entry) {
3599 if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3600 &tgt_dev->tgt_dev_flags)) {
3605 spin_unlock_bh(&dev->dev_lock);
3611 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3612 if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3613 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3614 tm_dbg_delayed_cmds_count);
3615 tm_dbg_change_state();
3616 tm_dbg_flags.tm_dbg_release = 1;
3618 if (tm_dbg_p_cmd_list_waitQ != NULL)
3619 wake_up_all(tm_dbg_p_cmd_list_waitQ);
3621 TRACE_MGMT_DBG("%s: in OFFLINE state, doing nothing", fn);
3623 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3629 int tm_dbg_is_release(void)
3631 return tm_dbg_flags.tm_dbg_release;
3633 #endif /* DEBUG_TM */
3636 void scst_check_debug_sn(struct scst_cmd *cmd)
3638 static DEFINE_SPINLOCK(lock);
3641 unsigned long flags;
3642 int old = cmd->queue_type;
3644 spin_lock_irqsave(&lock, flags);
3647 if ((scst_random() % 1000) == 500) {
3648 if ((scst_random() % 3) == 1)
3649 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3651 type = SCST_CMD_QUEUE_ORDERED;
3653 cnt = scst_random() % 10;
3659 cmd->queue_type = type;
3662 if (((scst_random() % 1000) == 750))
3663 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3664 else if (((scst_random() % 1000) == 751))
3665 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3666 else if (((scst_random() % 1000) == 752))
3667 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3669 TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3670 cmd->queue_type, cnt);
3673 spin_unlock_irqrestore(&lock, flags);
3676 #endif /* DEBUG_SN */