4 * Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, version 2
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <linux/kthread.h>
26 #include <linux/cdrom.h>
27 #include <asm/unistd.h>
28 #include <asm/string.h>
31 #include <linux/highmem.h>
35 #include "scst_priv.h"
38 #include "scst_cdbprobe.h"
40 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
41 static void scst_check_internal_sense(struct scst_device *dev, int result,
42 uint8_t *sense, int sense_len);
44 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
47 unsigned long gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
51 sBUG_ON(cmd->sense != NULL);
53 cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
54 if (cmd->sense == NULL) {
55 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
56 "The sense data will be lost!!", cmd->cdb[0]);
61 memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);
68 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
69 const uint8_t *sense, unsigned int len)
75 res = scst_alloc_sense(cmd, atomic);
77 PRINT_BUFFER("Lost sense", sense, len);
81 memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
82 TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
89 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
94 cmd->host_status = DID_OK;
96 cmd->data_direction = SCST_DATA_NONE;
97 cmd->is_send_status = 1;
98 cmd->resp_data_len = 0;
106 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
112 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
114 rc = scst_alloc_sense(cmd, 1);
116 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
121 scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
122 TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
129 void scst_set_sense(uint8_t *buffer, int len, int key,
132 memset(buffer, 0, len);
133 buffer[0] = 0x70; /* Error Code */
134 buffer[2] = key; /* Sense Key */
135 buffer[7] = 0x0a; /* Additional Sense Length */
136 buffer[12] = asc; /* ASC */
137 buffer[13] = ascq; /* ASCQ */
138 TRACE_BUFFER("Sense set", buffer, len);
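/*
 * Worked example (illustrative values): for a CHECK CONDITION such as
 * ILLEGAL REQUEST / INVALID FIELD IN CDB (key 0x5, ASC 0x24, ASCQ 0x00)
 * the fixed-format buffer built above ends up as:
 *
 *   buffer[0]  = 0x70   current, fixed-format sense
 *   buffer[2]  = 0x05   sense key
 *   buffer[7]  = 0x0a   additional sense length
 *   buffer[12] = 0x24   ASC
 *   buffer[13] = 0x00   ASCQ
 *
 * with all other bytes left zero by the preceding memset().
 */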
142 void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
147 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
148 scst_alloc_set_sense(cmd, 1, sense, len);
154 void scst_set_busy(struct scst_cmd *cmd)
156 int c = atomic_read(&cmd->sess->sess_cmd_count);
160 if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
161 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
162 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
163 "(cmds count %d, queue_type %x, sess->init_phase %d)",
164 cmd->sess->initiator_name, c,
165 cmd->queue_type, cmd->sess->init_phase);
167 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
168 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
169 "initiator %s (cmds count %d, queue_type %x, "
170 "sess->init_phase %d)", cmd->sess->initiator_name, c,
171 cmd->queue_type, cmd->sess->init_phase);
178 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
184 scst_check_restore_sg_buff(cmd);
185 cmd->resp_data_len = resp_data_len;
187 if (resp_data_len == cmd->bufflen)
191 for(i = 0; i < cmd->sg_cnt; i++) {
192 l += cmd->sg[i].length;
193 if (l >= resp_data_len) {
194 int left = resp_data_len - (l - cmd->sg[i].length);
196 TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
197 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
198 "left %d", cmd, cmd->tag, resp_data_len, i,
199 cmd->sg[i].length, left);
201 cmd->orig_sg_cnt = cmd->sg_cnt;
202 cmd->orig_sg_entry = i;
203 cmd->orig_entry_len = cmd->sg[i].length;
204 cmd->sg_cnt = (left > 0) ? i+1 : i;
205 cmd->sg[i].length = left;
206 cmd->sg_buff_modified = 1;
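/*
 * Worked example (illustrative values): with two 4096-byte SG entries
 * (bufflen 8192) and resp_data_len 6144, the loop above stops at i == 1
 * with left == 2048, so sg[1].length is shrunk to 2048 while sg_cnt stays
 * 2; orig_sg_cnt, orig_sg_entry and orig_entry_len keep the original
 * layout so that scst_check_restore_sg_buff() can later undo the change.
 */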
216 /* Called under scst_mutex and with activity suspended */
217 int scst_alloc_device(int gfp_mask, struct scst_device **out_dev)
219 struct scst_device *dev;
221 static int dev_num; /* protected by scst_mutex */
225 dev = kzalloc(sizeof(*dev), gfp_mask);
227 TRACE(TRACE_OUT_OF_MEM, "%s",
228 "Allocation of scst_device failed");
233 dev->handler = &scst_null_devtype;
234 dev->p_cmd_lists = &scst_main_cmd_lists;
235 atomic_set(&dev->dev_cmd_count, 0);
236 atomic_set(&dev->write_cmd_count, 0);
237 spin_lock_init(&dev->dev_lock);
238 atomic_set(&dev->on_dev_count, 0);
239 INIT_LIST_HEAD(&dev->blocked_cmd_list);
240 INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
241 INIT_LIST_HEAD(&dev->dev_acg_dev_list);
242 INIT_LIST_HEAD(&dev->threads_list);
243 init_waitqueue_head(&dev->on_dev_waitQ);
244 dev->dev_double_ua_possible = 1;
245 dev->dev_serialized = 1;
246 dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
247 dev->dev_num = dev_num++;
256 /* Called under scst_mutex and with activity suspended */
257 void scst_free_device(struct scst_device *dev)
262 if (!list_empty(&dev->dev_tgt_dev_list) ||
263 !list_empty(&dev->dev_acg_dev_list)) {
264 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
265 "is not empty!", __FUNCTION__);
276 struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
277 struct scst_device *dev, lun_t lun)
279 struct scst_acg_dev *res;
283 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
284 res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
286 res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
289 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
292 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
293 memset(res, 0, sizeof(*res));
301 TRACE_EXIT_HRES(res);
305 /* The activity is supposed to be suspended and scst_mutex held */
306 void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
310 TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
312 list_del(&acg_dev->acg_dev_list_entry);
313 list_del(&acg_dev->dev_acg_dev_list_entry);
315 kmem_cache_free(scst_acgd_cachep, acg_dev);
321 /* The activity is supposed to be suspended and scst_mutex held */
322 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
324 struct scst_acg *acg;
328 acg = kzalloc(sizeof(*acg), GFP_KERNEL);
330 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
334 INIT_LIST_HEAD(&acg->acg_dev_list);
335 INIT_LIST_HEAD(&acg->acg_sess_list);
336 INIT_LIST_HEAD(&acg->acn_list);
337 acg->acg_name = acg_name;
339 TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
340 list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
343 TRACE_EXIT_HRES(acg);
347 /* The activity is supposed to be suspended and scst_mutex held */
348 int scst_destroy_acg(struct scst_acg *acg)
350 struct scst_acn *n, *nn;
351 struct scst_acg_dev *acg_dev, *acg_dev_tmp;
356 if (!list_empty(&acg->acg_sess_list)) {
357 PRINT_ERROR("%s: acg_sess_list is not empty!", __FUNCTION__);
362 TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
363 list_del(&acg->scst_acg_list_entry);
365 /* Freeing acg_devs */
366 list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
367 acg_dev_list_entry) {
368 struct scst_tgt_dev *tgt_dev, *tt;
369 list_for_each_entry_safe(tgt_dev, tt,
370 &acg_dev->dev->dev_tgt_dev_list,
371 dev_tgt_dev_list_entry) {
372 if (tgt_dev->acg_dev == acg_dev)
373 scst_free_tgt_dev(tgt_dev);
375 scst_free_acg_dev(acg_dev);
379 list_for_each_entry_safe(n, nn, &acg->acn_list,
381 list_del(&n->acn_list_entry);
385 INIT_LIST_HEAD(&acg->acn_list);
393 /* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
394 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
395 struct scst_acg_dev *acg_dev)
397 int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
398 struct scst_tgt_dev *tgt_dev;
399 struct scst_device *dev = acg_dev->dev;
400 struct list_head *sess_tgt_dev_list_head;
401 struct scst_tgt_template *vtt = sess->tgt->tgtt;
406 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
407 tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
409 tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
411 if (tgt_dev == NULL) {
412 TRACE(TRACE_OUT_OF_MEM, "%s",
413 "Allocation of scst_tgt_dev failed");
416 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
417 memset(tgt_dev, 0, sizeof(*tgt_dev));
421 tgt_dev->lun = acg_dev->lun;
422 tgt_dev->acg_dev = acg_dev;
423 tgt_dev->sess = sess;
424 atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
426 scst_sgv_pool_use_norm(tgt_dev);
428 if (dev->scsi_dev != NULL) {
429 ini_sg = dev->scsi_dev->host->sg_tablesize;
430 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
431 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
434 ini_sg = (1 << 15) /* infinite */;
435 ini_unchecked_isa_dma = 0;
436 ini_use_clustering = 0;
438 tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
440 if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
441 !sess->tgt->tgtt->no_clustering) {
442 scst_sgv_pool_use_norm_clust(tgt_dev);
445 if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
446 scst_sgv_pool_use_dma(tgt_dev);
449 scst_sgv_pool_use_highmem(tgt_dev);
453 if (dev->scsi_dev != NULL) {
454 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
455 "SCST lun=%Ld", dev->scsi_dev->host->host_no,
456 dev->scsi_dev->channel, dev->scsi_dev->id,
457 dev->scsi_dev->lun, (uint64_t)tgt_dev->lun);
460 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%Ld",
461 dev->virt_name, (uint64_t)tgt_dev->lun);
464 spin_lock_init(&tgt_dev->tgt_dev_lock);
465 INIT_LIST_HEAD(&tgt_dev->UA_list);
466 spin_lock_init(&tgt_dev->thr_data_lock);
467 INIT_LIST_HEAD(&tgt_dev->thr_data_list);
468 spin_lock_init(&tgt_dev->sn_lock);
469 INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
470 INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
471 tgt_dev->expected_sn = 1;
472 tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
473 tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
474 for(i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
475 atomic_set(&tgt_dev->sn_slots[i], 0);
477 if (dev->handler->parse_atomic &&
478 (sess->tgt->tgtt->preprocessing_done == NULL)) {
479 if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
480 (sess->tgt->tgtt->rdy_to_xfer == NULL))
481 __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
482 &tgt_dev->tgt_dev_flags);
483 if (dev->handler->exec_atomic || (dev->handler->exec == NULL))
484 __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
485 &tgt_dev->tgt_dev_flags);
487 if (dev->handler->exec_atomic || (dev->handler->exec == NULL)) {
488 if (sess->tgt->tgtt->rdy_to_xfer_atomic ||
489 (sess->tgt->tgtt->rdy_to_xfer == NULL))
490 __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
491 &tgt_dev->tgt_dev_flags);
492 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
493 &tgt_dev->tgt_dev_flags);
494 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
495 &tgt_dev->tgt_dev_flags);
497 if ((dev->handler->dev_done_atomic ||
498 (dev->handler->dev_done == NULL)) &&
499 sess->tgt->tgtt->xmit_response_atomic) {
500 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
501 &tgt_dev->tgt_dev_flags);
504 spin_lock_bh(&scst_temp_UA_lock);
505 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
506 SCST_LOAD_SENSE(scst_sense_reset_UA));
507 scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
508 spin_unlock_bh(&scst_temp_UA_lock);
510 tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
512 if (vtt->threads_num > 0) {
514 if (dev->handler->threads_num > 0)
515 rc = scst_add_dev_threads(dev, vtt->threads_num);
516 else if (dev->handler->threads_num == 0)
517 rc = scst_add_cmd_threads(vtt->threads_num);
522 if (dev->handler && dev->handler->attach_tgt) {
523 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
525 rc = dev->handler->attach_tgt(tgt_dev);
526 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
528 PRINT_ERROR("Device handler's %s attach_tgt() "
529 "failed: %d", dev->handler->name, rc);
534 spin_lock_bh(&dev->dev_lock);
535 list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
536 if (dev->dev_reserved)
537 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
538 spin_unlock_bh(&dev->dev_lock);
540 sess_tgt_dev_list_head =
541 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
542 list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, sess_tgt_dev_list_head);
549 if (vtt->threads_num > 0) {
550 if (dev->handler->threads_num > 0)
551 scst_del_dev_threads(dev, vtt->threads_num);
552 else if (dev->handler->threads_num == 0)
553 scst_del_cmd_threads(vtt->threads_num);
557 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
562 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
564 /* No other locks are supposed to be held; scst_mutex must be held */
565 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
569 scst_clear_reservation(tgt_dev);
571 /* With activity suspended the lock isn't needed, but let's be safe */
572 spin_lock_bh(&tgt_dev->tgt_dev_lock);
573 scst_free_all_UA(tgt_dev);
574 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
576 spin_lock_bh(&scst_temp_UA_lock);
577 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
578 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
579 scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
580 spin_unlock_bh(&scst_temp_UA_lock);
586 /* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
587 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
589 struct scst_device *dev = tgt_dev->dev;
590 struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
594 tm_dbg_deinit_tgt_dev(tgt_dev);
596 spin_lock_bh(&dev->dev_lock);
597 list_del(&tgt_dev->dev_tgt_dev_list_entry);
598 spin_unlock_bh(&dev->dev_lock);
600 list_del(&tgt_dev->sess_tgt_dev_list_entry);
602 scst_clear_reservation(tgt_dev);
603 scst_free_all_UA(tgt_dev);
605 if (dev->handler && dev->handler->detach_tgt) {
606 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
608 dev->handler->detach_tgt(tgt_dev);
609 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
612 if (vtt->threads_num > 0) {
613 if (dev->handler->threads_num > 0)
614 scst_del_dev_threads(dev, vtt->threads_num);
615 else if (dev->handler->threads_num == 0)
616 scst_del_cmd_threads(vtt->threads_num);
619 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
625 /* scst_mutex supposed to be held */
626 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
629 struct scst_acg_dev *acg_dev;
630 struct scst_tgt_dev *tgt_dev;
634 list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
635 acg_dev_list_entry) {
636 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
637 if (tgt_dev == NULL) {
648 scst_sess_free_tgt_devs(sess);
652 /* scst_mutex is supposed to be held; there must be no parallel activity in this sess */
653 void scst_sess_free_tgt_devs(struct scst_session *sess)
656 struct scst_tgt_dev *tgt_dev, *t;
660 /* The session is going down, no users, so no locks */
661 for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
662 struct list_head *sess_tgt_dev_list_head =
663 &sess->sess_tgt_dev_list_hash[i];
664 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
665 sess_tgt_dev_list_entry) {
666 scst_free_tgt_dev(tgt_dev);
668 INIT_LIST_HEAD(sess_tgt_dev_list_head);
675 /* The activity is supposed to be suspended and scst_mutex held */
676 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
680 struct scst_acg_dev *acg_dev;
681 struct scst_tgt_dev *tgt_dev;
682 struct scst_session *sess;
683 LIST_HEAD(tmp_tgt_dev_list);
687 INIT_LIST_HEAD(&tmp_tgt_dev_list);
690 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
691 if (acg_dev->dev == dev) {
692 PRINT_ERROR("Device is already in group %s",
700 acg_dev = scst_alloc_acg_dev(acg, dev, lun);
701 if (acg_dev == NULL) {
705 acg_dev->rd_only_flag = read_only;
707 TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
709 list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
710 list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
712 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry)
714 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
715 if (tgt_dev == NULL) {
719 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
725 if (dev->virt_name != NULL) {
726 PRINT_INFO("Added device %s to group %s (LUN %Ld, "
727 "rd_only %d)", dev->virt_name, acg->acg_name,
730 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
731 "%Ld, rd_only %d)", dev->scsi_dev->host->host_no,
732 dev->scsi_dev->channel, dev->scsi_dev->id,
733 dev->scsi_dev->lun, acg->acg_name, lun,
742 list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
743 extra_tgt_dev_list_entry) {
744 scst_free_tgt_dev(tgt_dev);
746 scst_free_acg_dev(acg_dev);
750 /* The activity is supposed to be suspended and scst_mutex held */
751 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
754 struct scst_acg_dev *acg_dev = NULL, *a;
755 struct scst_tgt_dev *tgt_dev, *tt;
759 list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
766 if (acg_dev == NULL) {
767 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
772 list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
773 dev_tgt_dev_list_entry) {
774 if (tgt_dev->acg_dev == acg_dev)
775 scst_free_tgt_dev(tgt_dev);
777 scst_free_acg_dev(acg_dev);
781 if (dev->virt_name != NULL) {
782 PRINT_INFO("Removed device %s from group %s",
783 dev->virt_name, acg->acg_name);
785 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
786 dev->scsi_dev->host->host_no,
787 dev->scsi_dev->channel, dev->scsi_dev->id,
788 dev->scsi_dev->lun, acg->acg_name);
796 /* scst_mutex supposed to be held */
797 int scst_acg_add_name(struct scst_acg *acg, const char *name)
806 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
808 if (strcmp(n->name, name) == 0) {
809 PRINT_ERROR("Name %s already exists in group %s",
810 name, acg->acg_name);
816 n = kmalloc(sizeof(*n), GFP_KERNEL);
818 PRINT_ERROR("%s", "Unable to allocate scst_acn");
824 nm = kmalloc(len + 1, GFP_KERNEL);
826 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
834 list_add_tail(&n->acn_list_entry, &acg->acn_list);
838 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
849 /* scst_mutex supposed to be held */
850 int scst_acg_remove_name(struct scst_acg *acg, const char *name)
857 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
859 if (strcmp(n->name, name) == 0) {
860 list_del(&n->acn_list_entry);
869 PRINT_INFO("Removed name %s from group %s", name,
872 PRINT_ERROR("Unable to find name %s in group %s", name,
880 struct scst_cmd *scst_create_prepare_internal_cmd(
881 struct scst_cmd *orig_cmd, int bufsize)
883 struct scst_cmd *res;
884 int gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
888 res = scst_alloc_cmd(gfp_mask);
892 res->cmd_lists = orig_cmd->cmd_lists;
893 res->sess = orig_cmd->sess;
894 res->state = SCST_CMD_STATE_DEV_PARSE;
895 res->atomic = scst_cmd_atomic(orig_cmd);
897 res->tgtt = orig_cmd->tgtt;
898 res->tgt = orig_cmd->tgt;
899 res->dev = orig_cmd->dev;
900 res->tgt_dev = orig_cmd->tgt_dev;
901 res->lun = orig_cmd->lun;
902 res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
903 res->data_direction = SCST_DATA_UNKNOWN;
904 res->orig_cmd = orig_cmd;
906 res->bufflen = bufsize;
909 TRACE_EXIT_HRES((unsigned long)res);
913 void scst_free_internal_cmd(struct scst_cmd *cmd)
923 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
925 int res = SCST_CMD_STATE_RES_CONT_NEXT;
926 #define sbuf_size 252
927 static const uint8_t request_sense[6] =
928 { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
929 struct scst_cmd *rs_cmd;
933 rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
937 memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
938 rs_cmd->cdb_len = sizeof(request_sense);
939 rs_cmd->data_direction = SCST_DATA_READ;
941 TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
942 "cmd list ", rs_cmd);
943 spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
944 list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
945 spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
957 struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
959 struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
965 if (req_cmd->dev->handler->dev_done != NULL) {
967 TRACE_DBG("Calling dev handler %s dev_done(%p)",
968 req_cmd->dev->handler->name, req_cmd);
969 rc = req_cmd->dev->handler->dev_done(req_cmd);
970 TRACE_DBG("Dev handler %s dev_done() returned %d",
971 req_cmd->dev->handler->name, rc);
976 len = scst_get_buf_first(req_cmd, &buf);
978 if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
979 SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
980 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
982 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
985 PRINT_ERROR("%s", "Unable to get the sense via "
986 "REQUEST SENSE, returning HARDWARE ERROR");
987 scst_set_cmd_error(orig_cmd,
988 SCST_LOAD_SENSE(scst_sense_hardw_error));
992 scst_put_buf(req_cmd, buf);
994 scst_free_internal_cmd(req_cmd);
996 TRACE_EXIT_HRES((unsigned long)orig_cmd);
1000 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1001 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
1003 struct scsi_request *req;
1007 if (scsi_cmd && (req = scsi_cmd->sc_request)) {
1009 if (req->sr_bufflen)
1010 kfree(req->sr_buffer);
1011 scsi_release_request(req);
1019 static void scst_send_release(struct scst_device *dev)
1021 struct scsi_request *req;
1022 struct scsi_device *scsi_dev;
1027 if (dev->scsi_dev == NULL)
1030 scsi_dev = dev->scsi_dev;
1032 req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
1034 PRINT_ERROR("Allocation of scsi_request failed: unable "
1035 "to RELEASE device %d:%d:%d:%d",
1036 scsi_dev->host->host_no, scsi_dev->channel,
1037 scsi_dev->id, scsi_dev->lun);
1041 memset(cdb, 0, sizeof(cdb));
1043 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1044 ((scsi_dev->lun << 5) & 0xe0) : 0;
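/*
 * Note (illustrative): for SCSI-2 and older devices the LUN is also
 * carried in bits 7-5 of CDB byte 1, so e.g. LUN 3 gives cdb[1] = 0x60;
 * for later SCSI versions this field is reserved and left zero.
 */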
1045 memcpy(req->sr_cmnd, cdb, sizeof(cdb));
1046 req->sr_cmd_len = sizeof(cdb);
1047 req->sr_data_direction = SCST_DATA_NONE;
1049 req->sr_bufflen = 0;
1050 req->sr_buffer = NULL;
1051 req->sr_request->rq_disk = dev->rq_disk;
1052 req->sr_sense_buffer[0] = 0;
1054 TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
1056 scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
1057 scst_req_done, SCST_DEFAULT_TIMEOUT, 3);
1063 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1064 static void scst_send_release(struct scst_device *dev)
1066 struct scsi_device *scsi_dev;
1067 unsigned char cdb[6];
1068 unsigned char *sense;
1073 if (dev->scsi_dev == NULL)
1076 /* We can't afford to miss a RELEASE due to memory shortage */
1077 sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);
1079 scsi_dev = dev->scsi_dev;
1081 for(i = 0; i < 5; i++) {
1082 memset(cdb, 0, sizeof(cdb));
1084 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1085 ((scsi_dev->lun << 5) & 0xe0) : 0;
1087 memset(sense, 0, SCST_SENSE_BUFFERSIZE);
1089 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
1091 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
1092 sense, SCST_DEFAULT_TIMEOUT, 0, GFP_KERNEL);
1093 TRACE_DBG("MODE_SENSE done: %x", rc);
1095 if (scsi_status_is_good(rc)) {
1098 PRINT_ERROR("RELEASE failed: %d", rc);
1099 PRINT_BUFFER("RELEASE sense", sense,
1100 SCST_SENSE_BUFFERSIZE);
1101 scst_check_internal_sense(dev, rc,
1102 sense, SCST_SENSE_BUFFERSIZE);
1112 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
1114 /* scst_mutex supposed to be held */
1115 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
1117 struct scst_device *dev = tgt_dev->dev;
1122 spin_lock_bh(&dev->dev_lock);
1123 if (dev->dev_reserved &&
1124 !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
1125 /* This is the one who holds the reservation */
1126 struct scst_tgt_dev *tgt_dev_tmp;
1127 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1128 dev_tgt_dev_list_entry) {
1129 clear_bit(SCST_TGT_DEV_RESERVED,
1130 &tgt_dev_tmp->tgt_dev_flags);
1132 dev->dev_reserved = 0;
1135 spin_unlock_bh(&dev->dev_lock);
1138 scst_send_release(dev);
1144 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
1145 const char *initiator_name)
1147 struct scst_session *sess;
1154 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1155 sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
1157 sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
1160 TRACE(TRACE_OUT_OF_MEM, "%s",
1161 "Allocation of scst_session failed");
1164 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1165 memset(sess, 0, sizeof(*sess));
1168 sess->init_phase = SCST_SESS_IPH_INITING;
1169 sess->shut_phase = SCST_SESS_SPH_READY;
1170 atomic_set(&sess->refcnt, 0);
1171 for(i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1172 struct list_head *sess_tgt_dev_list_head =
1173 &sess->sess_tgt_dev_list_hash[i];
1174 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1176 spin_lock_init(&sess->sess_list_lock);
1177 INIT_LIST_HEAD(&sess->search_cmd_list);
1179 INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
1180 INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
1182 #ifdef MEASURE_LATENCY
1183 spin_lock_init(&sess->meas_lock);
1186 len = strlen(initiator_name);
1187 nm = kmalloc(len + 1, gfp_mask);
1189 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
1193 strcpy(nm, initiator_name);
1194 sess->initiator_name = nm;
1201 kmem_cache_free(scst_sess_cachep, sess);
1206 void scst_free_session(struct scst_session *sess)
1210 mutex_lock(&scst_mutex);
1212 TRACE_DBG("Removing sess %p from the list", sess);
1213 list_del(&sess->sess_list_entry);
1214 TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
1215 list_del(&sess->acg_sess_list_entry);
1217 scst_sess_free_tgt_devs(sess);
1219 wake_up_all(&sess->tgt->unreg_waitQ);
1221 mutex_unlock(&scst_mutex);
1223 kfree(sess->initiator_name);
1224 kmem_cache_free(scst_sess_cachep, sess);
1230 void scst_free_session_callback(struct scst_session *sess)
1232 struct completion *c;
1236 TRACE_DBG("Freeing session %p", sess);
1238 c = sess->shutdown_compl;
1240 if (sess->unreg_done_fn) {
1241 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
1242 sess->unreg_done_fn(sess);
1243 TRACE_DBG("%s", "unreg_done_fn() returned");
1245 scst_free_session(sess);
1254 void scst_sched_session_free(struct scst_session *sess)
1256 unsigned long flags;
1260 spin_lock_irqsave(&scst_mgmt_lock, flags);
1261 TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
1262 list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
1263 spin_unlock_irqrestore(&scst_mgmt_lock, flags);
1265 wake_up(&scst_mgmt_waitQ);
1271 void scst_cmd_get(struct scst_cmd *cmd)
1273 __scst_cmd_get(cmd);
1276 void scst_cmd_put(struct scst_cmd *cmd)
1278 __scst_cmd_put(cmd);
1281 struct scst_cmd *scst_alloc_cmd(int gfp_mask)
1283 struct scst_cmd *cmd;
1287 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1288 cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
1290 cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
1293 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
1296 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
1297 memset(cmd, 0, sizeof(*cmd));
1300 cmd->state = SCST_CMD_STATE_INIT_WAIT;
1301 atomic_set(&cmd->cmd_ref, 1);
1302 cmd->cmd_lists = &scst_main_cmd_lists;
1303 INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
1304 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1305 cmd->timeout = SCST_DEFAULT_TIMEOUT;
1308 cmd->is_send_status = 1;
1309 cmd->resp_data_len = -1;
1316 void scst_destroy_put_cmd(struct scst_cmd *cmd)
1318 scst_sess_put(cmd->sess);
1320 /* At this point tgt_dev can be dead, but the pointer remains non-NULL */
1321 if (likely(cmd->tgt_dev != NULL))
1324 scst_destroy_cmd(cmd);
1328 /* No locks supposed to be held */
1329 void scst_free_cmd(struct scst_cmd *cmd)
1335 TRACE_DBG("Freeing cmd %p (tag %Lu)", cmd, cmd->tag);
1337 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1338 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
1339 cmd, atomic_read(&scst_cmd_count));
1342 sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
1343 cmd->dec_on_dev_needed);
1345 #if defined(EXTRACHECKS) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
1346 if (cmd->scsi_req) {
1347 PRINT_ERROR("%s: %s", __FUNCTION__, "Cmd with unfreed "
1349 scst_release_request(cmd);
1353 scst_check_restore_sg_buff(cmd);
1355 if (unlikely(cmd->internal)) {
1356 if (cmd->bufflen > 0)
1357 scst_release_space(cmd);
1358 scst_destroy_cmd(cmd);
1362 if (cmd->tgtt->on_free_cmd != NULL) {
1363 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
1364 cmd->tgtt->on_free_cmd(cmd);
1365 TRACE_DBG("%s", "Target's on_free_cmd() returned");
1368 if (likely(cmd->dev != NULL)) {
1369 struct scst_dev_type *handler = cmd->dev->handler;
1370 if (handler->on_free_cmd != NULL) {
1371 TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
1372 handler->name, cmd);
1373 handler->on_free_cmd(cmd);
1374 TRACE_DBG("Dev handler %s on_free_cmd() returned",
1379 scst_release_space(cmd);
1381 if (unlikely(cmd->sense != NULL)) {
1382 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
1383 mempool_free(cmd->sense, scst_sense_mempool);
1387 if (likely(cmd->tgt_dev != NULL)) {
1389 if (unlikely(!cmd->sent_to_midlev)) {
1390 PRINT_ERROR("Finishing not executed cmd %p (opcode "
1391 "%d, target %s, lun %Ld, sn %ld, expected_sn %ld)",
1392 cmd, cmd->cdb[0], cmd->tgtt->name, (uint64_t)cmd->lun,
1393 cmd->sn, cmd->tgt_dev->expected_sn);
1394 scst_unblock_deferred(cmd->tgt_dev, cmd);
1398 if (unlikely(cmd->out_of_sn)) {
1399 TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
1400 "destroy=%d", cmd, cmd->tag, cmd->sn, destroy);
1401 destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
1406 if (likely(destroy))
1407 scst_destroy_put_cmd(cmd);
1414 /* No locks supposed to be held. */
1415 void scst_check_retries(struct scst_tgt *tgt)
1417 int need_wake_up = 0;
1422 * We don't worry about overflow of finished_cmds, because we only
1423 * check whether it has changed
1425 atomic_inc(&tgt->finished_cmds);
1426 smp_mb__after_atomic_inc();
1427 if (unlikely(tgt->retry_cmds > 0))
1429 struct scst_cmd *c, *tc;
1430 unsigned long flags;
1432 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
1435 spin_lock_irqsave(&tgt->tgt_lock, flags);
1436 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
1441 TRACE_RETRY("Moving retry cmd %p to head of active "
1442 "cmd list (retry_cmds left %d)", c, tgt->retry_cmds);
1443 spin_lock(&c->cmd_lists->cmd_list_lock);
1444 list_move(&c->cmd_list_entry, &c->cmd_lists->active_cmd_list);
1445 wake_up(&c->cmd_lists->cmd_list_waitQ);
1446 spin_unlock(&c->cmd_lists->cmd_list_lock);
1449 if (need_wake_up >= 2) /* "slow start" */
1452 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1459 void scst_tgt_retry_timer_fn(unsigned long arg)
1461 struct scst_tgt *tgt = (struct scst_tgt*)arg;
1462 unsigned long flags;
1464 TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
1466 spin_lock_irqsave(&tgt->tgt_lock, flags);
1467 tgt->retry_timer_active = 0;
1468 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1470 scst_check_retries(tgt);
1476 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
1478 struct scst_mgmt_cmd *mcmd;
1482 mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1484 PRINT_CRIT_ERROR("%s", "Allocation of management command "
1485 "failed, some commands and their data could leak");
1488 memset(mcmd, 0, sizeof(*mcmd));
1495 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
1497 unsigned long flags;
1501 spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
1502 atomic_dec(&mcmd->sess->sess_cmd_count);
1503 spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
1505 scst_sess_put(mcmd->sess);
1507 if (mcmd->mcmd_tgt_dev != NULL)
1510 mempool_free(mcmd, scst_mgmt_mempool);
1516 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1517 int scst_alloc_request(struct scst_cmd *cmd)
1520 struct scsi_request *req;
1521 int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1525 /* cmd->dev->scsi_dev must be non-NULL here */
1526 req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1528 TRACE(TRACE_OUT_OF_MEM, "%s",
1529 "Allocation of scsi_request failed");
1534 cmd->scsi_req = req;
1536 memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1537 req->sr_cmd_len = cmd->cdb_len;
1538 req->sr_data_direction = cmd->data_direction;
1539 req->sr_use_sg = cmd->sg_cnt;
1540 req->sr_bufflen = cmd->bufflen;
1541 req->sr_buffer = cmd->sg;
1542 req->sr_request->rq_disk = cmd->dev->rq_disk;
1543 req->sr_sense_buffer[0] = 0;
1545 cmd->scsi_req->upper_private_data = cmd;
1552 void scst_release_request(struct scst_cmd *cmd)
1554 scsi_release_request(cmd->scsi_req);
1555 cmd->scsi_req = NULL;
1559 int scst_alloc_space(struct scst_cmd *cmd)
1563 int atomic = scst_cmd_atomic(cmd);
1565 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1566 int bufflen = cmd->bufflen;
1570 gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1572 flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1574 flags |= SCST_POOL_ALLOC_NO_CACHED;
1576 if (unlikely(cmd->bufflen == 0)) {
1577 TRACE(TRACE_MGMT_MINOR, "Warning: data direction %d and/or "
1578 "zero buffer length. Opcode 0x%x, handler %s, target "
1579 "%s", cmd->data_direction, cmd->cdb[0],
1580 cmd->dev->handler->name, cmd->tgtt->name);
1582 * Be on the safe side and allocate a stub buffer. Neither target
1583 * drivers nor user space will touch it, since bufflen
1586 bufflen = PAGE_SIZE;
1589 cmd->sg = sgv_pool_alloc(tgt_dev->pool, bufflen, gfp_mask, flags,
1590 &cmd->sg_cnt, &cmd->sgv, NULL);
1591 if (cmd->sg == NULL)
1594 if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1597 PRINT_INFO("Unable to complete command due to "
1598 "SG IO count limitation (requested %d, "
1599 "available %d, tgt lim %d)", cmd->sg_cnt,
1600 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1613 sgv_pool_free(cmd->sgv);
1620 void scst_release_space(struct scst_cmd *cmd)
1624 if (cmd->sgv == NULL)
1627 if (cmd->data_buf_alloced) {
1628 TRACE_MEM("%s", "data_buf_alloced set, returning");
1632 sgv_pool_free(cmd->sgv);
1645 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1647 #define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
1648 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1650 int scst_get_cdb_len(const uint8_t *cdb)
1652 return SCST_GET_CDB_LEN(cdb[0]);
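/*
 * Worked example (illustrative values): the CDB length follows from the
 * opcode's group code in its top 3 bits, e.g. READ(6) (0x08) is group 0
 * -> 6 bytes, READ(10) (0x28) group 1 -> 10 bytes, READ(16) (0x88)
 * group 4 -> 16 bytes and READ(12) (0xa8) group 5 -> 12 bytes.
 */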
1655 /* get_trans_len_x extracts x bytes from the CDB as the transfer length, starting at offset off */
1657 /* for special commands */
1658 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1664 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1666 cmd->bufflen = READ_CAP_LEN;
1670 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1676 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1678 uint8_t *p = (uint8_t *)cmd->cdb + off;
1682 cmd->bufflen |= ((u32)p[0]) << 8;
1683 cmd->bufflen |= ((u32)p[1]);
1685 switch (cmd->cdb[1] & 0x1f) {
1689 if (cmd->bufflen != 0) {
1690 PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1691 "allocation length for service action %x",
1692 cmd->bufflen, cmd->cdb[1] & 0x1f);
1698 switch (cmd->cdb[1] & 0x1f) {
1707 cmd->bufflen = max(28, cmd->bufflen);
1710 PRINT_ERROR("READ POSITION: Invalid service action %x",
1711 cmd->cdb[1] & 0x1f);
1719 scst_set_cmd_error(cmd,
1720 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1725 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1727 cmd->bufflen = (u32)cmd->cdb[off];
1731 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1733 const uint8_t *p = cmd->cdb + off;
1736 cmd->bufflen |= ((u32)p[0]) << 8;
1737 cmd->bufflen |= ((u32)p[1]);
1742 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1744 const uint8_t *p = cmd->cdb + off;
1747 cmd->bufflen |= ((u32)p[0]) << 16;
1748 cmd->bufflen |= ((u32)p[1]) << 8;
1749 cmd->bufflen |= ((u32)p[2]);
1754 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1756 const uint8_t *p = cmd->cdb + off;
1759 cmd->bufflen |= ((u32)p[0]) << 24;
1760 cmd->bufflen |= ((u32)p[1]) << 16;
1761 cmd->bufflen |= ((u32)p[2]) << 8;
1762 cmd->bufflen |= ((u32)p[3]);
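/*
 * Worked example (illustrative values): get_trans_len_4() assembles a
 * 4-byte big-endian length starting at offset 'off', so CDB bytes
 * 0x00 0x00 0x10 0x00 there yield cmd->bufflen = 0x1000 = 4096.
 */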
1767 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1773 int scst_get_cdb_info(struct scst_cmd *cmd)
1775 int dev_type = cmd->dev->handler->type;
1778 const struct scst_sdbops *ptr = NULL;
1782 op = cmd->cdb[0]; /* get the opcode */
1784 TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1785 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1788 i = scst_scsi_op_list[op];
1789 while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1790 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1791 ptr = &scst_scsi_op_table[i];
1792 TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1793 ptr->ops, ptr->devkey[0], /* disk */
1794 ptr->devkey[1], /* tape */
1795 ptr->devkey[2], /* printer */
1796 ptr->devkey[3], /* cpu */
1797 ptr->devkey[4], /* cdr */
1798 ptr->devkey[5], /* cdrom */
1799 ptr->devkey[6], /* scanner */
1800 ptr->devkey[7], /* worm */
1801 ptr->devkey[8], /* changer */
1802 ptr->devkey[9], /* commdev */
1804 TRACE_DBG("direction=%d flags=%d off=%d",
1814 /* opcode not found or not currently used */
1815 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1818 cmd->op_flags = SCST_INFO_INVALID;
1822 cmd->cdb_len = SCST_GET_CDB_LEN(op);
1823 cmd->op_name = ptr->op_name;
1824 cmd->data_direction = ptr->direction;
1825 cmd->op_flags = ptr->flags;
1826 res = (*ptr->get_trans_len)(cmd, ptr->off);
1834 * Routine to extract a lun number from an 8-byte LUN structure
1835 * in network byte order (BE).
1836 * (see SAM-2, Section 4.12.3 page 40)
1837 * Supports peripheral, flat space and logical unit addressing methods.
1839 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1841 lun_t res = (lun_t)-1;
1846 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1848 if (unlikely(len < 2)) {
1849 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
1857 if ((*((uint64_t*)lun) &
1858 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1862 if (*((uint16_t*)&lun[2]) != 0)
1866 if (*((uint32_t*)&lun[2]) != 0)
1874 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
1875 switch (address_method) {
1876 case 0: /* peripheral device addressing method */
1877 #if 0 /* Looks like it's legal to use it as flat space addressing method as well */
1879 PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
1880 "peripheral device addressing method 0x%02x, "
1881 "expected 0", *lun);
1890 case 1: /* flat space addressing method */
1891 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1894 case 2: /* logical unit addressing method */
1896 PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
1897 "addressing method 0x%02x, expected 0",
1901 if (*(lun + 1) & 0xe0) {
1902 PRINT_ERROR("Illegal TARGET in LUN logical unit "
1903 "addressing method 0x%02x, expected 0",
1904 (*(lun + 1) & 0xf8) >> 5);
1907 res = *(lun + 1) & 0x1f;
1910 case 3: /* extended logical unit addressing method */
1912 PRINT_ERROR("Unimplemented LUN addressing method %u",
1918 TRACE_EXIT_RES((int)res);
1922 PRINT_ERROR("%s", "Multi-level LUN unimplemented");
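/*
 * Worked example (illustrative values) for the SAM-2 formats handled
 * above: with peripheral addressing (method 0) the bytes 0x00 0x05
 * unpack to LUN 5; with flat space addressing (method 1) 0x40 0x05 also
 * unpacks to LUN 5, the low 6 bits of byte 0 supplying bits 13..8.
 */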
1926 int scst_calc_block_shift(int sector_size)
1928 int block_shift = 0;
1931 if (sector_size == 0)
1941 if (block_shift < 9) {
1942 PRINT_ERROR("Wrong sector size %d", sector_size);
1946 TRACE_EXIT_RES(block_shift);
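/*
 * Worked example (illustrative values): a 512-byte sector size yields
 * block_shift 9 and 4096 bytes yields 12; any size that would produce a
 * shift below 9 (i.e. smaller than 512 bytes) is rejected above.
 */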
1950 int scst_sbc_generic_parse(struct scst_cmd *cmd,
1951 int (*get_block_shift)(struct scst_cmd *cmd))
1958 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
1959 * therefore change them only if necessary
1962 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
1963 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
1965 switch (cmd->cdb[0]) {
1966 case SERVICE_ACTION_IN:
1967 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
1968 cmd->bufflen = READ_CAP16_LEN;
1969 cmd->data_direction = SCST_DATA_READ;
1976 if ((cmd->cdb[1] & BYTCHK) == 0) {
1977 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
1988 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
1990 * No need for locks here, since *_detach() cannot be
1991 * called while there are existing commands.
1993 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
1997 TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
1998 res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2000 TRACE_EXIT_RES(res);
2004 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2005 int (*get_block_shift)(struct scst_cmd *cmd))
2012 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2013 * therefore change them only if necessary
2016 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2017 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2019 cmd->cdb[1] &= 0x1f;
2021 switch (cmd->cdb[0]) {
2026 if ((cmd->cdb[1] & BYTCHK) == 0) {
2027 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2037 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2038 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2041 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2042 cmd->data_direction);
2048 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2049 int (*get_block_shift)(struct scst_cmd *cmd))
2056 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2057 * therefore change them only if necessary
2060 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2061 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2063 cmd->cdb[1] &= 0x1f;
2065 switch (cmd->cdb[0]) {
2070 if ((cmd->cdb[1] & BYTCHK) == 0) {
2071 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2081 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2082 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2085 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2086 cmd->data_direction);
2088 TRACE_EXIT_RES(res);
2092 int scst_tape_generic_parse(struct scst_cmd *cmd,
2093 int (*get_block_size)(struct scst_cmd *cmd))
2100 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2101 * therefore change them only if necessary
2104 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2105 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2107 if (cmd->cdb[0] == READ_POSITION) {
2108 int tclp = cmd->cdb[1] & TCLP_BIT;
2109 int long_bit = cmd->cdb[1] & LONG_BIT;
2110 int bt = cmd->cdb[1] & BT_BIT;
2112 if ((tclp == long_bit) && (!bt || !long_bit)) {
2114 tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2115 cmd->data_direction = SCST_DATA_READ;
2118 cmd->data_direction = SCST_DATA_NONE;
2122 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2123 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2125 TRACE_EXIT_RES(res);
2129 static int scst_null_parse(struct scst_cmd *cmd)
2136 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2137 * therefore change them only if necessary
2140 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2141 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2143 switch (cmd->cdb[0]) {
2149 TRACE_DBG("res %d bufflen %d direct %d",
2150 res, cmd->bufflen, cmd->data_direction);
2156 int scst_changer_generic_parse(struct scst_cmd *cmd,
2157 int (*nothing)(struct scst_cmd *cmd))
2159 return scst_null_parse(cmd);
2162 int scst_processor_generic_parse(struct scst_cmd *cmd,
2163 int (*nothing)(struct scst_cmd *cmd))
2165 return scst_null_parse(cmd);
2168 int scst_raid_generic_parse(struct scst_cmd *cmd,
2169 int (*nothing)(struct scst_cmd *cmd))
2171 return scst_null_parse(cmd);
2174 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2175 void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2177 int opcode = cmd->cdb[0];
2178 int status = cmd->status;
2179 int res = SCST_CMD_STATE_DEFAULT;
2184 * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2185 * based on cmd->status and cmd->data_direction, therefore change
2186 * them only if necessary
2189 if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2193 /* Always keep track of disk capacity */
2194 int buffer_size, sector_size, sh;
2197 buffer_size = scst_get_buf_first(cmd, &buffer);
2198 if (unlikely(buffer_size <= 0)) {
2199 PRINT_ERROR("%s: Unable to get the buffer "
2200 "(%d)", __FUNCTION__, buffer_size);
2205 ((buffer[4] << 24) | (buffer[5] << 16) |
2206 (buffer[6] << 8) | (buffer[7] << 0));
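/*
 * Note (illustrative): bytes 4-7 of the READ CAPACITY(10) response hold
 * the block length in bytes, big-endian, so e.g. 00 00 02 00 gives
 * sector_size 512 and hence block_shift 9 below.
 */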
2207 scst_put_buf(cmd, buffer);
2208 if (sector_size != 0)
2209 sh = scst_calc_block_shift(sector_size);
2212 set_block_shift(cmd, sh);
2213 TRACE_DBG("block_shift %d", sh);
2222 TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2223 "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2226 TRACE_EXIT_RES(res);
2230 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2231 void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2233 int opcode = cmd->cdb[0];
2234 int res = SCST_CMD_STATE_DEFAULT;
2235 int buffer_size, bs;
2236 uint8_t *buffer = NULL;
2241 * SCST sets good defaults for cmd->is_send_status and cmd->resp_data_len
2242 * based on cmd->status and cmd->data_direction, therefore change
2243 * them only if necessary
2249 buffer_size = scst_get_buf_first(cmd, &buffer);
2250 if (unlikely(buffer_size <= 0)) {
2251 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2252 __FUNCTION__, buffer_size);
2260 TRACE_DBG("%s", "MODE_SENSE");
2261 if ((cmd->cdb[2] & 0xC0) == 0) {
2262 if (buffer[3] == 8) {
2263 bs = (buffer[9] << 16) |
2264 (buffer[10] << 8) | buffer[11];
2265 set_block_size(cmd, bs);
2270 TRACE_DBG("%s", "MODE_SELECT");
2271 if (buffer[3] == 8) {
2272 bs = (buffer[9] << 16) | (buffer[10] << 8) |
2274 set_block_size(cmd, bs);
2285 scst_put_buf(cmd, buffer);
2290 TRACE_EXIT_RES(res);
2294 static void scst_check_internal_sense(struct scst_device *dev, int result,
2295 uint8_t *sense, int sense_len)
2299 if (host_byte(result) == DID_RESET) {
2300 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2302 scst_set_sense(sense, sense_len,
2303 SCST_LOAD_SENSE(scst_sense_reset_UA));
2304 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2305 } else if ((status_byte(result) == CHECK_CONDITION) &&
2306 SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2307 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2313 int scst_obtain_device_parameters(struct scst_device *dev)
2317 uint8_t buffer[4+0x0A];
2318 uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2322 sBUG_ON(in_interrupt() || in_atomic());
2323 EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2325 for(i = 0; i < 5; i++) {
2326 /* Get control mode page */
2327 memset(cmd, 0, sizeof(cmd));
2328 cmd[0] = MODE_SENSE;
2329 cmd[1] = 8; /* DBD */
2331 cmd[4] = sizeof(buffer);
2333 memset(buffer, 0, sizeof(buffer));
2334 memset(sense_buffer, 0, sizeof(sense_buffer));
2336 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2337 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2338 sizeof(buffer), sense_buffer, SCST_DEFAULT_TIMEOUT,
2341 TRACE_DBG("MODE_SENSE done: %x", res);
2343 if (scsi_status_is_good(res)) {
2346 PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode page data",
2347 buffer, sizeof(buffer));
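/*
 * Note (illustrative): with DBD set there are no block descriptors, so
 * the 4-byte MODE SENSE(6) header is followed directly by the control
 * mode page; buffer[4+2..4+5] are page bytes 2-5, where TST sits in bits
 * 7-5 of byte 2, QUEUE ALGORITHM MODIFIER in bits 7-4 of byte 3, SWP in
 * bit 3 of byte 4 and TAS in bit 6 of byte 5, as decoded below.
 */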
2349 dev->tst = buffer[4+2] >> 5;
2350 q = buffer[4+3] >> 4;
2351 if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2352 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2353 "%d:%d:%d:%d", dev->queue_alg,
2354 dev->scsi_dev->host->host_no, dev->scsi_dev->channel,
2355 dev->scsi_dev->id, dev->scsi_dev->lun);
2358 dev->swp = (buffer[4+4] & 0x8) >> 3;
2359 dev->tas = (buffer[4+5] & 0x40) >> 6;
2362 * Unfortunately, SCSI ML doesn't provide a way to
2363 * specify a command's task attribute, so we can only rely on
2364 * the device's restricted reordering.
2366 dev->has_own_order_mgmt = !dev->queue_alg;
2368 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device %d:%d:%d:%d: TST %x, "
2369 "QUEUE ALG %x, SWP %x, TAS %x, has_own_order_mgmt "
2370 "%d", dev->scsi_dev->host->host_no,
2371 dev->scsi_dev->channel, dev->scsi_dev->id,
2372 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2373 dev->swp, dev->tas, dev->has_own_order_mgmt);
2377 #if 0 /* 3ware controller is buggy and returns CONDITION_GOOD instead of CHECK_CONDITION */
2378 if ((status_byte(res) == CHECK_CONDITION) &&
2382 SCST_SENSE_VALID(sense_buffer)) {
2383 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2384 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Device "
2385 "%d:%d:%d:%d doesn't support control "
2386 "mode page, using defaults: TST "
2387 "%x, QUEUE ALG %x, SWP %x, TAS %x, "
2388 "has_own_order_mgmt %d",
2389 dev->scsi_dev->host->host_no,
2390 dev->scsi_dev->channel, dev->scsi_dev->id,
2391 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2392 dev->swp, dev->tas, dev->has_own_order_mgmt);
2395 } else if (sense_buffer[2] == NOT_READY) {
2396 TRACE(TRACE_SCSI, "Device %d:%d:%d:%d not ready",
2397 dev->scsi_dev->host->host_no,
2398 dev->scsi_dev->channel, dev->scsi_dev->id,
2399 dev->scsi_dev->lun);
2404 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR, "Internal MODE SENSE to "
2405 "device %d:%d:%d:%d failed: %x",
2406 dev->scsi_dev->host->host_no,
2407 dev->scsi_dev->channel, dev->scsi_dev->id,
2408 dev->scsi_dev->lun, res);
2409 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR, "MODE SENSE "
2410 "sense", sense_buffer, sizeof(sense_buffer));
2412 scst_check_internal_sense(dev, res, sense_buffer,
2413 sizeof(sense_buffer));
2419 TRACE_EXIT_RES(res);
2423 /* Called under dev_lock and BH off */
2424 void scst_process_reset(struct scst_device *dev,
2425 struct scst_session *originator, struct scst_cmd *exclude_cmd,
2426 struct scst_mgmt_cmd *mcmd)
2428 struct scst_tgt_dev *tgt_dev;
2429 struct scst_cmd *cmd, *tcmd;
2433 /* Clear RESERVE'ation, if necessary */
2434 if (dev->dev_reserved) {
2435 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2436 dev_tgt_dev_list_entry) {
2437 TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
2438 "lun %Ld", tgt_dev->lun);
2439 clear_bit(SCST_TGT_DEV_RESERVED,
2440 &tgt_dev->tgt_dev_flags);
2442 dev->dev_reserved = 0;
2444 * There is no need to send RELEASE, since the device is going
2445 * to be reset. Actually, since we can be inside the RESET TM
2446 * function, it might be dangerous.
2450 dev->dev_double_ua_possible = 1;
2451 dev->dev_serialized = 1;
2453 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2454 dev_tgt_dev_list_entry) {
2455 struct scst_session *sess = tgt_dev->sess;
2457 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2458 scst_free_all_UA(tgt_dev);
2459 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2461 spin_lock_irq(&sess->sess_list_lock);
2463 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2464 list_for_each_entry(cmd, &sess->search_cmd_list,
2465 search_cmd_list_entry) {
2466 if (cmd == exclude_cmd)
2468 if ((cmd->tgt_dev == tgt_dev) ||
2469 ((cmd->tgt_dev == NULL) &&
2470 (cmd->lun == tgt_dev->lun))) {
2471 scst_abort_cmd(cmd, mcmd,
2472 (tgt_dev->sess != originator), 0);
2475 spin_unlock_irq(&sess->sess_list_lock);
2478 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2479 blocked_cmd_list_entry) {
2480 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2481 list_del(&cmd->blocked_cmd_list_entry);
2482 TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2483 "to active cmd list", cmd);
2484 spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2485 list_add_tail(&cmd->cmd_list_entry,
2486 &cmd->cmd_lists->active_cmd_list);
2487 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2488 spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2492 /* BH already off */
2493 spin_lock(&scst_temp_UA_lock);
2494 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2495 SCST_LOAD_SENSE(scst_sense_reset_UA));
2496 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2497 sizeof(scst_temp_UA));
2498 spin_unlock(&scst_temp_UA_lock);
2504 int scst_set_pending_UA(struct scst_cmd *cmd)
2507 struct scst_tgt_dev_UA *UA_entry;
2511 TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
2513 spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2515 /* UA list could be cleared behind us, so retest */
2516 if (list_empty(&cmd->tgt_dev->UA_list)) {
2518 "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2523 UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2526 TRACE_DBG("next %p UA_entry %p",
2527 cmd->tgt_dev->UA_list.next, UA_entry);
2529 scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2530 sizeof(UA_entry->UA_sense_buffer));
2534 list_del(&UA_entry->UA_list_entry);
2536 mempool_free(UA_entry, scst_ua_mempool);
2538 if (list_empty(&cmd->tgt_dev->UA_list)) {
2539 clear_bit(SCST_TGT_DEV_UA_PENDING,
2540 &cmd->tgt_dev->tgt_dev_flags);
2543 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2546 TRACE_EXIT_RES(res);
2550 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2554 /* Called under tgt_dev_lock and BH off */
2555 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2556 const uint8_t *sense, int sense_len, int head)
2558 struct scst_tgt_dev_UA *UA_entry = NULL;
2562 UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2563 if (UA_entry == NULL) {
2564 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2565 "allocation failed. The UNIT ATTENTION "
2566 "on some sessions will be missed");
2567 PRINT_BUFFER("Lost UA", sense, sense_len);
2570 memset(UA_entry, 0, sizeof(*UA_entry));
2572 if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2573 sense_len = sizeof(UA_entry->UA_sense_buffer);
2574 memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2576 set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2578 TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2581 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2583 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2590 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2591 const uint8_t *sense, int sense_len, int head)
2594 struct scst_tgt_dev_UA *UA_entry_tmp;
2598 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2600 list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2602 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, sense_len) == 0) {
2603 TRACE_MGMT_DBG("%s", "UA already exists");
2610 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2612 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2618 /* Called under dev_lock and BH off */
2619 void scst_dev_check_set_local_UA(struct scst_device *dev,
2620 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2622 struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2626 if (exclude != NULL)
2627 exclude_tgt_dev = exclude->tgt_dev;
2629 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2630 dev_tgt_dev_list_entry) {
2631 if (tgt_dev != exclude_tgt_dev)
2632 scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2639 /* Called under dev_lock and BH off */
2640 void __scst_dev_check_set_UA(struct scst_device *dev,
2641 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2645 TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
2647 /* Check for reset UA */
2648 if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2649 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
2652 scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2658 /* Called under tgt_dev_lock or when tgt_dev is unused */
2659 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2661 struct scst_tgt_dev_UA *UA_entry, *t;
2665 list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
2666 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %Ld",
2668 list_del(&UA_entry->UA_list_entry);
2671 INIT_LIST_HEAD(&tgt_dev->UA_list);
2672 clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2679 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2681 struct scst_cmd *res = NULL, *cmd, *t;
2682 typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2684 spin_lock_irq(&tgt_dev->sn_lock);
2686 if (unlikely(tgt_dev->hq_cmd_count != 0))
2690 list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2691 sn_cmd_list_entry) {
2692 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2693 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2694 if (cmd->sn == expected_sn) {
2695 TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2696 cmd, cmd->sn, cmd->sn_set);
2697 tgt_dev->def_cmd_count--;
2698 list_del(&cmd->sn_cmd_list_entry);
2702 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2703 TRACE_SN("Adding cmd %p to active cmd list",
2705 list_add_tail(&cmd->cmd_list_entry,
2706 &cmd->cmd_lists->active_cmd_list);
2707 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2708 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2715 list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
2716 sn_cmd_list_entry) {
2717 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2718 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2719 if (cmd->sn == expected_sn) {
2720 atomic_t *slot = cmd->sn_slot;
2722 * !! At this point any pointer in cmd, except !!
2723 * !! sn_slot and sn_cmd_list_entry, could be !!
2724 * !! already destroyed !!
2726 TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
2727 cmd, cmd->tag, cmd->sn);
2728 tgt_dev->def_cmd_count--;
2729 list_del(&cmd->sn_cmd_list_entry);
2730 spin_unlock_irq(&tgt_dev->sn_lock);
2731 if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2733 scst_destroy_put_cmd(cmd);
2735 scst_inc_expected_sn(tgt_dev, slot);
2736 expected_sn = tgt_dev->expected_sn;
2737 spin_lock_irq(&tgt_dev->sn_lock);
2743 spin_unlock_irq(&tgt_dev->sn_lock);
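/*
 * Register per-thread private data for @tgt_dev. The entry is keyed by the
 * current thread's PID, starts with a single reference and is released via
 * @free_fn once the last reference is put.
 */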
2747 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
2748 struct scst_thr_data_hdr *data,
2749 void (*free_fn) (struct scst_thr_data_hdr *data))
2751 data->pid = current->pid;
2752 atomic_set(&data->ref, 1);
2753 EXTRACHECKS_BUG_ON(free_fn == NULL);
2754 data->free_fn = free_fn;
2755 spin_lock(&tgt_dev->thr_data_lock);
2756 list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
2757 spin_unlock(&tgt_dev->thr_data_lock);
2760 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
2762 spin_lock(&tgt_dev->thr_data_lock);
2763 while (!list_empty(&tgt_dev->thr_data_list)) {
2764 struct scst_thr_data_hdr *d = list_entry(
2765 tgt_dev->thr_data_list.next, typeof(*d),
2766 thr_data_list_entry);
2767 list_del(&d->thr_data_list_entry);
2768 spin_unlock(&tgt_dev->thr_data_lock);
2769 scst_thr_data_put(d);
2770 spin_lock(&tgt_dev->thr_data_lock);
2772 spin_unlock(&tgt_dev->thr_data_lock);
2776 void scst_dev_del_all_thr_data(struct scst_device *dev)
2778 struct scst_tgt_dev *tgt_dev;
2782 mutex_lock(&scst_mutex);
2784 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2785 dev_tgt_dev_list_entry) {
2786 scst_del_all_thr_data(tgt_dev);
2789 mutex_unlock(&scst_mutex);
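/*
 * Find the calling thread's private data on @tgt_dev (matched by PID).
 * Returns it with an extra reference taken, or NULL if none is registered.
 */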
2795 struct scst_thr_data_hdr *scst_find_thr_data(struct scst_tgt_dev *tgt_dev)
2797 struct scst_thr_data_hdr *res = NULL, *d;
2799 spin_lock(&tgt_dev->thr_data_lock);
2800 list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
2801 if (d->pid == current->pid) {
2803 scst_thr_data_get(res);
2807 spin_unlock(&tgt_dev->thr_data_lock);
2811 /* dev_lock supposed to be held and BH disabled */
2812 void __scst_block_dev(struct scst_device *dev)
2815 TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
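/*
 * Block @dev and wait until at most @outstanding commands remain being
 * executed by it (tracked in dev->on_dev_count).
 */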
2819 void scst_block_dev(struct scst_device *dev, int outstanding)
2821 spin_lock_bh(&dev->dev_lock);
2822 __scst_block_dev(dev);
2823 spin_unlock_bh(&dev->dev_lock);
2825 /* spin_unlock_bh() doesn't provide the necessary memory barrier */
2828 TRACE_MGMT_DBG("Waiting during blocking outstanding %d (on_dev_count "
2829 "%d)", outstanding, atomic_read(&dev->on_dev_count));
2830 wait_event(dev->on_dev_waitQ,
2831 atomic_read(&dev->on_dev_count) <= outstanding);
2832 TRACE_MGMT_DBG("%s", "wait_event() returned");
2836 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
2838 sBUG_ON(cmd->needs_unblocking);
2840 cmd->needs_unblocking = 1;
2841 TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)", cmd, cmd->tag);
2843 scst_block_dev(cmd->dev, outstanding);
2847 void scst_unblock_dev(struct scst_device *dev)
2849 spin_lock_bh(&dev->dev_lock);
2850 TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
2851 dev->block_count-1, dev);
2852 if (--dev->block_count == 0)
2853 scst_unblock_cmds(dev);
2854 spin_unlock_bh(&dev->dev_lock);
2855 sBUG_ON(dev->block_count < 0);
2859 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
2861 scst_unblock_dev(cmd->dev);
2862 cmd->needs_unblocking = 0;
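/*
 * Account @cmd as being executed on its device (dev->on_dev_count). If the
 * device is blocked, or strict/per-device serializing requires it, the
 * command is instead put on dev->blocked_cmd_list to be re-activated later
 * by scst_unblock_cmds(); in the serializing modes the device is also
 * blocked for the duration of the command (cmd->inc_blocking).
 */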
2866 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
2869 struct scst_device *dev = cmd->dev;
2873 sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
2875 atomic_inc(&dev->on_dev_count);
2876 cmd->dec_on_dev_needed = 1;
2877 TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
2879 #ifdef STRICT_SERIALIZING
2880 spin_lock_bh(&dev->dev_lock);
2881 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2883 if (dev->block_count > 0) {
2884 scst_dec_on_dev_cmd(cmd);
2885 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
2886 "serializing (tag %llu, dev %p)", cmd, cmd->tag, dev);
2887 list_add_tail(&cmd->blocked_cmd_list_entry,
2888 &dev->blocked_cmd_list);
2891 __scst_block_dev(dev);
2892 cmd->inc_blocking = 1;
2894 spin_unlock_bh(&dev->dev_lock);
2898 if (unlikely(dev->block_count > 0)) {
2899 spin_lock_bh(&dev->dev_lock);
2900 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
2902 barrier(); /* to reread block_count */
2903 if (dev->block_count > 0) {
2904 scst_dec_on_dev_cmd(cmd);
2905 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
2906 "serializing (tag %llu, dev %p)", cmd,
2908 list_add_tail(&cmd->blocked_cmd_list_entry,
2909 &dev->blocked_cmd_list);
2911 spin_unlock_bh(&dev->dev_lock);
2914 TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
2917 spin_unlock_bh(&dev->dev_lock);
2919 if (unlikely(dev->dev_serialized)) {
2920 spin_lock_bh(&dev->dev_lock);
2921 barrier(); /* to reread block_count */
2922 if (dev->block_count == 0) {
2923 TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
2924 "cmds due to serializing (dev %p)", cmd,
2926 __scst_block_dev(dev);
2927 cmd->inc_blocking = 1;
2929 spin_unlock_bh(&dev->dev_lock);
2930 TRACE_MGMT_DBG("Somebody blocked the device, "
2931 "repeating (count %d)", dev->block_count);
2934 spin_unlock_bh(&dev->dev_lock);
2939 TRACE_EXIT_RES(res);
2943 spin_unlock_bh(&dev->dev_lock);
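/*
 * Move commands delayed on dev->blocked_cmd_list back to their active cmd
 * lists. Under STRICT_SERIALIZING only commands at (or immediately after)
 * the expected SN are released, and to the head of the list.
 */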
2947 /* Called under dev_lock */
2948 void scst_unblock_cmds(struct scst_device *dev)
2950 #ifdef STRICT_SERIALIZING
2951 struct scst_cmd *cmd, *t;
2952 unsigned long flags;
2956 local_irq_save(flags);
2957 list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
2958 blocked_cmd_list_entry) {
2961 * Since only one cmd at a time is being executed, expected_sn
2962 * can't change behind us if the corresponding cmd is in
2963 * blocked_cmd_list, but we could be called before
2964 * scst_inc_expected_sn().
2966 if (likely(!cmd->internal && !cmd->retry)) {
2967 typeof(cmd->tgt_dev->expected_sn) expected_sn;
2968 if (cmd->tgt_dev == NULL)
2970 expected_sn = cmd->tgt_dev->expected_sn;
2971 if (cmd->sn == expected_sn)
2973 else if (cmd->sn != (expected_sn+1))
2977 list_del(&cmd->blocked_cmd_list_entry);
2978 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
2979 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2980 list_add(&cmd->cmd_list_entry, &cmd->cmd_lists->active_cmd_list);
2981 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2982 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
2986 local_irq_restore(flags);
2987 #else /* STRICT_SERIALIZING */
2988 struct scst_cmd *cmd, *tcmd;
2989 unsigned long flags;
2993 local_irq_save(flags);
2994 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2995 blocked_cmd_list_entry) {
2996 list_del(&cmd->blocked_cmd_list_entry);
2997 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
2998 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2999 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3000 list_add(&cmd->cmd_list_entry,
3001 &cmd->cmd_lists->active_cmd_list);
3003 list_add_tail(&cmd->cmd_list_entry,
3004 &cmd->cmd_lists->active_cmd_list);
3005 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3006 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3008 local_irq_restore(flags);
3009 #endif /* STRICT_SERIALIZING */
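/*
 * If @out_of_sn_cmd carries the currently expected SN, advance it and
 * activate any deferred commands; otherwise remember the command on
 * skipped_sn_list so its SN can be accounted for later.
 */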
3015 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3016 struct scst_cmd *out_of_sn_cmd)
3018 EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3020 if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3021 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3022 scst_make_deferred_commands_active(tgt_dev, out_of_sn_cmd);
3024 out_of_sn_cmd->out_of_sn = 1;
3025 spin_lock_irq(&tgt_dev->sn_lock);
3026 tgt_dev->def_cmd_count++;
3027 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3028 &tgt_dev->skipped_sn_list);
3029 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list "
3030 "(expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3031 tgt_dev->expected_sn);
3032 spin_unlock_irq(&tgt_dev->sn_lock);
3038 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3039 struct scst_cmd *out_of_sn_cmd)
3043 if (!out_of_sn_cmd->sn_set) {
3044 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3048 __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
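/*
 * Called when a HEAD OF QUEUE command is completed: drops hq_cmd_count and,
 * once no HQ commands remain, lets the deferred commands run again.
 */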
3055 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3057 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3061 if (!cmd->hq_cmd_inced)
3064 spin_lock_irq(&tgt_dev->sn_lock);
3065 tgt_dev->hq_cmd_count--;
3066 spin_unlock_irq(&tgt_dev->sn_lock);
3068 EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3071 * There is no problem in checking hq_cmd_count in the
3072 * non-locked state. In the worst case we will only have
3073 * an unneeded run of the deferred commands.
3075 if (tgt_dev->hq_cmd_count == 0)
3076 scst_make_deferred_commands_active(tgt_dev, cmd);
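/*
 * Handle a command carrying the ABORTED_OTHER flag at transmit time: a
 * command that already completed returns its normal result; otherwise
 * TASK ABORTED status is returned only if the device has TAS set, else the
 * command is finished without delivery or notification.
 */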
3083 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3087 TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3088 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3089 atomic_read(&scst_cmd_count));
3091 scst_done_cmd_mgmt(cmd);
3094 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3095 if (cmd->completed) {
3096 /* It's completed and it's OK to return its result */
3100 if (cmd->dev->tas) {
3101 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3102 "(tag %llu), returning TASK ABORTED ", cmd,
3104 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3106 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3107 "(tag %llu), aborting without delivery or "
3108 "notification", cmd, cmd->tag);
3110 * There is no need to check/requeue a possible UA,
3111 * because, if it exists, it will be delivered
3112 * by the "completed" branch above.
3114 clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
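/*
 * Build scst_scsi_op_list: a 256-entry index mapping a CDB opcode to the
 * first matching entry in scst_scsi_op_table, with unknown opcodes pointing
 * past the table (SCST_CDB_TBL_SIZE).
 */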
3123 void __init scst_scsi_op_list_init(void)
3130 for (i = 0; i < 256; i++)
3131 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3133 for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3134 if (scst_scsi_op_table[i].ops != op) {
3135 op = scst_scsi_op_table[i].ops;
3136 scst_scsi_op_list[op] = i;
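/*
 * Park-Miller "minimal standard" PRNG: next = 16807 * prev mod (2^31 - 1),
 * evaluated with Schrage's decomposition (the 16807*lo - 2836*hi step) so
 * the multiplication never overflows a 32-bit long.
 */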
3145 /* Original taken from the XFS code */
3146 unsigned long scst_random(void)
3149 static unsigned long RandomValue;
3150 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
3151 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
3155 unsigned long flags;
3157 spin_lock_irqsave(&lock, flags);
3159 RandomValue = jiffies;
3165 rv = 16807 * lo - 2836 * hi;
3166 if (rv <= 0) rv += 2147483647;
3168 spin_unlock_irqrestore(&lock, flags);
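/*
 * Task management debugging: commands on LUN 0 of the default ACG are
 * artificially delayed, long enough to provoke task management requests
 * (aborts, resets) from the initiator. The machinery cycles through the
 * ABORT -> RESET -> OFFLINE states below after a preset number of passes
 * (tm_dbg_on_state_num_passes).
 */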
3175 #define TM_DBG_STATE_ABORT 0
3176 #define TM_DBG_STATE_RESET 1
3177 #define TM_DBG_STATE_OFFLINE 2
3179 #define INIT_TM_DBG_STATE TM_DBG_STATE_ABORT
3181 static void tm_dbg_timer_fn(unsigned long arg);
3183 static spinlock_t scst_tm_dbg_lock = SPIN_LOCK_UNLOCKED;
3184 /* All serialized by scst_tm_dbg_lock */
3187 unsigned int tm_dbg_release:1;
3188 unsigned int tm_dbg_blocked:1;
3190 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3191 static int tm_dbg_delayed_cmds_count;
3192 static int tm_dbg_passed_cmds_count;
3193 static int tm_dbg_state;
3194 static int tm_dbg_on_state_passes;
3195 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3196 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
3198 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3200 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3201 struct scst_acg_dev *acg_dev)
3203 if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3204 unsigned long flags;
3205 /* Do TM debugging only for LUN 0 */
3206 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3207 tm_dbg_p_cmd_list_waitQ =
3208 &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3209 tm_dbg_state = INIT_TM_DBG_STATE;
3210 tm_dbg_on_state_passes =
3211 tm_dbg_on_state_num_passes[tm_dbg_state];
3212 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3213 PRINT_INFO("LUN 0 connected from initiator %s is under "
3214 "TM debugging", tgt_dev->sess->initiator_name);
3215 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3219 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3221 if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3222 unsigned long flags;
3223 del_timer_sync(&tm_dbg_timer);
3224 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3225 tm_dbg_p_cmd_list_waitQ = NULL;
3226 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3230 static void tm_dbg_timer_fn(unsigned long arg)
3232 TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3233 tm_dbg_flags.tm_dbg_release = 1;
3235 wake_up_all(tm_dbg_p_cmd_list_waitQ);
3238 /* Called under scst_tm_dbg_lock and IRQs off */
3239 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3241 switch (tm_dbg_state) {
3242 case TM_DBG_STATE_ABORT:
3243 if (tm_dbg_delayed_cmds_count == 0) {
3244 unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3245 TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu) "
3246 "for %ld.%ld seconds (%ld HZ), "
3247 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3248 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3249 mod_timer(&tm_dbg_timer, jiffies + d);
3251 tm_dbg_flags.tm_dbg_blocked = 1;
3254 TRACE_MGMT_DBG("Delaying another timed cmd %p "
3255 "(tag %llu), delayed_cmds_count=%d, "
3256 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3257 tm_dbg_delayed_cmds_count,
3258 tm_dbg_on_state_passes);
3259 if (tm_dbg_delayed_cmds_count == 2)
3260 tm_dbg_flags.tm_dbg_blocked = 0;
3264 case TM_DBG_STATE_RESET:
3265 case TM_DBG_STATE_OFFLINE:
3266 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3267 "(tag %llu), delayed_cmds_count=%d, "
3268 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3269 tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3270 tm_dbg_flags.tm_dbg_blocked = 1;
3276 /* IRQs already off */
3277 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3278 list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3279 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3280 cmd->tm_dbg_delayed = 1;
3281 tm_dbg_delayed_cmds_count++;
3286 void tm_dbg_check_released_cmds(void)
3288 if (tm_dbg_flags.tm_dbg_release) {
3289 struct scst_cmd *cmd, *tc;
3290 spin_lock_irq(&scst_tm_dbg_lock);
3291 list_for_each_entry_safe_reverse(cmd, tc,
3292 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3293 TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3294 "delayed_cmds_count=%d", cmd, cmd->tag,
3295 tm_dbg_delayed_cmds_count);
3296 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3297 list_move(&cmd->cmd_list_entry,
3298 &cmd->cmd_lists->active_cmd_list);
3299 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3301 tm_dbg_flags.tm_dbg_release = 0;
3302 spin_unlock_irq(&scst_tm_dbg_lock);
3306 /* Called under scst_tm_dbg_lock */
3307 static void tm_dbg_change_state(void)
3309 tm_dbg_flags.tm_dbg_blocked = 0;
3310 if (--tm_dbg_on_state_passes == 0) {
3311 switch (tm_dbg_state) {
3312 case TM_DBG_STATE_ABORT:
3313 TRACE_MGMT_DBG("%s", "Changing "
3314 "tm_dbg_state to RESET");
3317 tm_dbg_flags.tm_dbg_blocked = 0;
3319 case TM_DBG_STATE_RESET:
3320 case TM_DBG_STATE_OFFLINE:
3321 if (TM_DBG_GO_OFFLINE) {
3322 TRACE_MGMT_DBG("%s", "Changing "
3323 "tm_dbg_state to OFFLINE");
3325 TM_DBG_STATE_OFFLINE;
3327 TRACE_MGMT_DBG("%s", "Changing "
3328 "tm_dbg_state to ABORT");
3336 tm_dbg_on_state_passes =
3337 tm_dbg_on_state_num_passes[tm_dbg_state];
3340 TRACE_MGMT_DBG("%s", "Deleting timer");
3341 del_timer(&tm_dbg_timer);
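/*
 * Called for commands on a tgt_dev under TM debugging: every 50th command
 * (and every command while tm_dbg_blocked is set) is delayed; each command
 * is inspected only once (tm_dbg_immut), and already delayed commands are
 * released and counted here on their next pass.
 */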
3345 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3348 unsigned long flags;
3350 if (cmd->tm_dbg_immut)
3353 if (cmd->tm_dbg_delayed) {
3354 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3355 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3356 "delayed_cmds_count=%d", cmd, cmd->tag,
3357 tm_dbg_delayed_cmds_count);
3359 cmd->tm_dbg_immut = 1;
3360 tm_dbg_delayed_cmds_count--;
3361 if ((tm_dbg_delayed_cmds_count == 0) &&
3362 (tm_dbg_state == TM_DBG_STATE_ABORT))
3363 tm_dbg_change_state();
3364 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3365 } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3366 &cmd->tgt_dev->tgt_dev_flags)) {
3367 /* Delay every 50th command */
3368 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3369 if (tm_dbg_flags.tm_dbg_blocked ||
3370 (++tm_dbg_passed_cmds_count % 50) == 0) {
3371 tm_dbg_delay_cmd(cmd);
3374 cmd->tm_dbg_immut = 1;
3375 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3383 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3386 unsigned long flags;
3388 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3389 list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3392 TRACE_MGMT_DBG("Abort request for "
3393 "delayed cmd %p (tag=%llu), moving it to "
3394 "active cmd list (delayed_cmds_count=%d)",
3395 c, c->tag, tm_dbg_delayed_cmds_count);
3397 if (!test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3398 /* Test how completed commands are handled */
3399 if (((scst_random() % 10) == 5)) {
3400 scst_set_cmd_error(cmd,
3401 SCST_LOAD_SENSE(scst_sense_hardw_error));
3402 /* It's completed now */
3406 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3407 list_move(&c->cmd_list_entry,
3408 &c->cmd_lists->active_cmd_list);
3409 wake_up(&c->cmd_lists->cmd_list_waitQ);
3410 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3414 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
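/*
 * Invoked on a task management event (@fn) for @dev: unless the debug state
 * is OFFLINE (or @force is set), switch to the next debug state and release
 * all currently delayed commands.
 */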
3417 /* Might be called under scst_mutex */
3418 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3420 unsigned long flags;
3423 struct scst_tgt_dev *tgt_dev;
3426 spin_lock_bh(&dev->dev_lock);
3427 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3428 dev_tgt_dev_list_entry) {
3429 if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3430 &tgt_dev->tgt_dev_flags)) {
3435 spin_unlock_bh(&dev->dev_lock);
3441 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3442 if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3443 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3444 tm_dbg_delayed_cmds_count);
3445 tm_dbg_change_state();
3446 tm_dbg_flags.tm_dbg_release = 1;
3448 if (tm_dbg_p_cmd_list_waitQ != NULL)
3449 wake_up_all(tm_dbg_p_cmd_list_waitQ);
3451 TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
3453 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3459 int tm_dbg_is_release(void)
3461 return tm_dbg_flags.tm_dbg_release;
3463 #endif /* DEBUG_TM */
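/*
 * SN debugging: randomly forces ORDERED, HEAD OF QUEUE or SIMPLE queue types
 * on some commands (occasionally on a short run of them) to exercise the
 * SN/ordering handling.
 */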
3466 void scst_check_debug_sn(struct scst_cmd *cmd)
3468 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
3471 unsigned long flags;
3472 int old = cmd->queue_type;
3474 spin_lock_irqsave(&lock, flags);
3477 if ((scst_random() % 1000) == 500) {
3478 if ((scst_random() % 3) == 1)
3479 type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3481 type = SCST_CMD_QUEUE_ORDERED;
3483 cnt = scst_random() % 10;
3489 cmd->queue_type = type;
3492 if (((scst_random() % 1000) == 750))
3493 cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
3494 else if (((scst_random() % 1000) == 751))
3495 cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
3496 else if (((scst_random() % 1000) == 752))
3497 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
3499 TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
3500 cmd->queue_type, cnt);
3503 spin_unlock_irqrestore(&lock, flags);
3506 #endif /* DEBUG_SN */