4 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2008 CMS Distribution Limited
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/kthread.h>
27 #include <linux/cdrom.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
32 #include "scst_priv.h"
35 #include "scst_cdbprobe.h"
37 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
38 static void scst_check_internal_sense(struct scst_device *dev, int result,
39 uint8_t *sense, int sense_len);
40 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
41 const uint8_t *sense, int sense_len, int head);
42 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
43 static void scst_release_space(struct scst_cmd *cmd);
44 static void scst_sess_free_tgt_devs(struct scst_session *sess);
45 static void scst_unblock_cmds(struct scst_device *dev);
47 #ifdef CONFIG_SCST_DEBUG_TM
48 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
49 struct scst_acg_dev *acg_dev);
50 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
52 static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
53 struct scst_acg_dev *acg_dev) {}
54 static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
55 #endif /* CONFIG_SCST_DEBUG_TM */
57 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
60 gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
64 sBUG_ON(cmd->sense != NULL);
66 cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
67 if (cmd->sense == NULL) {
68 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
69 "The sense data will be lost!!", cmd->cdb[0]);
74 memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);
80 EXPORT_SYMBOL(scst_alloc_sense);
82 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
83 const uint8_t *sense, unsigned int len)
89 res = scst_alloc_sense(cmd, atomic);
91 PRINT_BUFFER("Lost sense", sense, len);
95 memcpy(cmd->sense, sense, min((int)len, (int)SCST_SENSE_BUFFERSIZE));
96 TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
102 EXPORT_SYMBOL(scst_alloc_set_sense);
104 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
108 cmd->status = status;
109 cmd->host_status = DID_OK;
111 cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
112 cmd->dbl_ua_orig_data_direction = cmd->data_direction;
114 cmd->data_direction = SCST_DATA_NONE;
115 cmd->resp_data_len = 0;
116 cmd->is_send_status = 1;
123 EXPORT_SYMBOL(scst_set_cmd_error_status);
125 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
131 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
133 rc = scst_alloc_sense(cmd, 1);
135 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
140 scst_set_sense(cmd->sense, SCST_SENSE_BUFFERSIZE, key, asc, ascq);
141 TRACE_BUFFER("Sense set", cmd->sense, SCST_SENSE_BUFFERSIZE);
147 EXPORT_SYMBOL(scst_set_cmd_error);
149 void scst_set_sense(uint8_t *buffer, int len, int key,
152 memset(buffer, 0, len);
153 buffer[0] = 0x70; /* Error Code */
154 buffer[2] = key; /* Sense Key */
155 buffer[7] = 0x0a; /* Additional Sense Length */
156 buffer[12] = asc; /* ASC */
157 buffer[13] = ascq; /* ASCQ */
158 TRACE_BUFFER("Sense set", buffer, len);
161 EXPORT_SYMBOL(scst_set_sense);
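/*
 * For example, a dev handler rejecting a bad CDB field typically calls
 *
 *	scst_set_cmd_error(cmd,
 *		SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
 *
 * which ends up here and fills the buffer in fixed sense format:
 * buffer[0] = 0x70 (current error), buffer[2] = 0x05 (ILLEGAL REQUEST),
 * buffer[7] = 0x0a (additional sense length), buffer[12] = 0x24 and
 * buffer[13] = 0x00 (INVALID FIELD IN CDB), i.e. 18 significant bytes.
 */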
163 static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
168 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
169 scst_alloc_set_sense(cmd, 1, sense, len);
175 void scst_set_busy(struct scst_cmd *cmd)
177 int c = atomic_read(&cmd->sess->sess_cmd_count);
181 if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
182 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
183 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
184 "(cmds count %d, queue_type %x, sess->init_phase %d)",
185 cmd->sess->initiator_name, c,
186 cmd->queue_type, cmd->sess->init_phase);
188 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
189 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
190 "initiator %s (cmds count %d, queue_type %x, "
191 "sess->init_phase %d)", cmd->sess->initiator_name, c,
192 cmd->queue_type, cmd->sess->init_phase);
198 EXPORT_SYMBOL(scst_set_busy);
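/*
 * Rationale: SAM only allows TASK SET FULL when the initiator already
 * has other commands outstanding on this I_T nexus; with a single
 * outstanding command, or while the session is still initializing,
 * BUSY is reported instead, which is what the check above implements.
 */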
200 int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
206 switch (cmd->state) {
207 case SCST_CMD_STATE_INIT_WAIT:
208 case SCST_CMD_STATE_INIT:
209 case SCST_CMD_STATE_PRE_PARSE:
210 case SCST_CMD_STATE_DEV_PARSE:
211 res = SCST_CMD_STATE_PRE_XMIT_RESP;
215 res = SCST_CMD_STATE_PRE_DEV_DONE;
222 EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
224 void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
228 #ifdef CONFIG_SCST_EXTRACHECKS
229 switch (cmd->state) {
230 case SCST_CMD_STATE_PRE_XMIT_RESP:
231 case SCST_CMD_STATE_XMIT_RESP:
232 case SCST_CMD_STATE_FINISHED:
233 case SCST_CMD_STATE_XMIT_WAIT:
234 PRINT_CRIT_ERROR("Wrong cmd state %x (cmd %p, op %x)",
235 cmd->state, cmd, cmd->cdb[0]);
240 cmd->state = scst_get_cmd_abnormal_done_state(cmd);
242 EXTRACHECKS_BUG_ON((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
243 (cmd->tgt_dev == NULL));
248 EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
250 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
256 scst_check_restore_sg_buff(cmd);
257 cmd->resp_data_len = resp_data_len;
259 if (resp_data_len == cmd->bufflen)
263 for (i = 0; i < cmd->sg_cnt; i++) {
264 l += cmd->sg[i].length;
265 if (l >= resp_data_len) {
266 int left = resp_data_len - (l - cmd->sg[i].length);
267 #ifdef CONFIG_SCST_DEBUG
268 TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
269 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
271 cmd, (long long unsigned int)cmd->tag,
273 cmd->sg[i].length, left);
275 cmd->orig_sg_cnt = cmd->sg_cnt;
276 cmd->orig_sg_entry = i;
277 cmd->orig_entry_len = cmd->sg[i].length;
278 cmd->sg_cnt = (left > 0) ? i+1 : i;
279 cmd->sg[i].length = left;
280 cmd->sg_buff_modified = 1;
289 EXPORT_SYMBOL(scst_set_resp_data_len);
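/*
 * For example, with a 3-element SG list of 4096-byte entries (bufflen
 * 12288) and resp_data_len 6144, the loop above stops at i = 1
 * (l = 8192 >= 6144), left = 6144 - 4096 = 2048, so sg_cnt becomes 2
 * and sg[1].length is shrunk to 2048. The original values are saved in
 * orig_sg_cnt/orig_sg_entry/orig_entry_len and put back later by
 * scst_check_restore_sg_buff().
 */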
291 /* Called under scst_mutex and with activity suspended */
292 int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
294 struct scst_device *dev;
296 static int dev_num; /* protected by scst_mutex */
300 dev = kzalloc(sizeof(*dev), gfp_mask);
302 TRACE(TRACE_OUT_OF_MEM, "%s",
303 "Allocation of scst_device failed");
308 dev->handler = &scst_null_devtype;
309 dev->p_cmd_lists = &scst_main_cmd_lists;
310 atomic_set(&dev->dev_cmd_count, 0);
311 atomic_set(&dev->write_cmd_count, 0);
312 scst_init_mem_lim(&dev->dev_mem_lim);
313 spin_lock_init(&dev->dev_lock);
314 atomic_set(&dev->on_dev_count, 0);
315 INIT_LIST_HEAD(&dev->blocked_cmd_list);
316 INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
317 INIT_LIST_HEAD(&dev->dev_acg_dev_list);
318 INIT_LIST_HEAD(&dev->threads_list);
319 init_waitqueue_head(&dev->on_dev_waitQ);
320 dev->dev_double_ua_possible = 1;
321 dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
322 dev->dev_num = dev_num++;
331 /* Called under scst_mutex and with activity suspended */
332 void scst_free_device(struct scst_device *dev)
336 #ifdef CONFIG_SCST_EXTRACHECKS
337 if (!list_empty(&dev->dev_tgt_dev_list) ||
338 !list_empty(&dev->dev_acg_dev_list)) {
339 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
340 "is not empty!", __func__);
351 void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
353 atomic_set(&mem_lim->alloced_pages, 0);
354 mem_lim->max_allowed_pages =
355 ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
357 EXPORT_SYMBOL(scst_init_mem_lim);
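/*
 * For example (assuming scst_max_dev_cmd_mem is specified in MB): with
 * scst_max_dev_cmd_mem = 20 and 4 KB pages (PAGE_SHIFT = 12) the
 * expression above gives (20 << 10) >> 2 = 5120 pages, i.e. 20 MB.
 */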
359 static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
360 struct scst_device *dev, uint64_t lun)
362 struct scst_acg_dev *res;
366 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
367 res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
369 res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
372 TRACE(TRACE_OUT_OF_MEM,
373 "%s", "Allocation of scst_acg_dev failed");
376 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
377 memset(res, 0, sizeof(*res));
385 TRACE_EXIT_HRES(res);
389 /* The activity is supposed to be suspended and scst_mutex held */
390 static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
394 TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
396 list_del(&acg_dev->acg_dev_list_entry);
397 list_del(&acg_dev->dev_acg_dev_list_entry);
399 kmem_cache_free(scst_acgd_cachep, acg_dev);
405 /* The activity is supposed to be suspended and scst_mutex held */
406 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
408 struct scst_acg *acg;
412 acg = kzalloc(sizeof(*acg), GFP_KERNEL);
414 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
418 INIT_LIST_HEAD(&acg->acg_dev_list);
419 INIT_LIST_HEAD(&acg->acg_sess_list);
420 INIT_LIST_HEAD(&acg->acn_list);
421 acg->acg_name = acg_name;
423 TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
424 list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
427 TRACE_EXIT_HRES(acg);
431 /* The activity is supposed to be suspended and scst_mutex held */
432 int scst_destroy_acg(struct scst_acg *acg)
434 struct scst_acn *n, *nn;
435 struct scst_acg_dev *acg_dev, *acg_dev_tmp;
440 if (!list_empty(&acg->acg_sess_list)) {
441 PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
446 TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
447 list_del(&acg->scst_acg_list_entry);
449 /* Freeing acg_devs */
450 list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
451 acg_dev_list_entry) {
452 struct scst_tgt_dev *tgt_dev, *tt;
453 list_for_each_entry_safe(tgt_dev, tt,
454 &acg_dev->dev->dev_tgt_dev_list,
455 dev_tgt_dev_list_entry) {
456 if (tgt_dev->acg_dev == acg_dev)
457 scst_free_tgt_dev(tgt_dev);
459 scst_free_acg_dev(acg_dev);
463 list_for_each_entry_safe(n, nn, &acg->acn_list,
465 list_del(&n->acn_list_entry);
469 INIT_LIST_HEAD(&acg->acn_list);
478 * scst_mutex supposed to be held, there must not be parallel activity in this
481 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
482 struct scst_acg_dev *acg_dev)
484 int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
485 struct scst_tgt_dev *tgt_dev;
486 struct scst_device *dev = acg_dev->dev;
487 struct list_head *sess_tgt_dev_list_head;
488 struct scst_tgt_template *vtt = sess->tgt->tgtt;
493 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
494 tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
496 tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
498 if (tgt_dev == NULL) {
499 TRACE(TRACE_OUT_OF_MEM, "%s",
500 "Allocation of scst_tgt_dev failed");
503 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
504 memset(tgt_dev, 0, sizeof(*tgt_dev));
508 tgt_dev->lun = acg_dev->lun;
509 tgt_dev->acg_dev = acg_dev;
510 tgt_dev->sess = sess;
511 atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
513 scst_sgv_pool_use_norm(tgt_dev);
515 if (dev->scsi_dev != NULL) {
516 ini_sg = dev->scsi_dev->host->sg_tablesize;
517 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
518 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
521 ini_sg = (1 << 15) /* infinite */;
522 ini_unchecked_isa_dma = 0;
523 ini_use_clustering = 0;
525 tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
527 if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
528 !sess->tgt->tgtt->no_clustering)
529 scst_sgv_pool_use_norm_clust(tgt_dev);
531 if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
532 scst_sgv_pool_use_dma(tgt_dev);
534 if (dev->scsi_dev != NULL) {
535 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
536 "SCST lun=%lld", dev->scsi_dev->host->host_no,
537 dev->scsi_dev->channel, dev->scsi_dev->id,
539 (long long unsigned int)tgt_dev->lun);
541 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
543 (long long unsigned int)tgt_dev->lun);
546 spin_lock_init(&tgt_dev->tgt_dev_lock);
547 INIT_LIST_HEAD(&tgt_dev->UA_list);
548 spin_lock_init(&tgt_dev->thr_data_lock);
549 INIT_LIST_HEAD(&tgt_dev->thr_data_list);
550 spin_lock_init(&tgt_dev->sn_lock);
551 INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
552 INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
553 tgt_dev->expected_sn = 1;
554 tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
555 tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
556 for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
557 atomic_set(&tgt_dev->sn_slots[i], 0);
559 if (dev->handler->parse_atomic &&
560 (sess->tgt->tgtt->preprocessing_done == NULL)) {
561 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
562 __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
563 &tgt_dev->tgt_dev_flags);
564 if (dev->handler->exec_atomic)
565 __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
566 &tgt_dev->tgt_dev_flags);
568 if (dev->handler->exec_atomic) {
569 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
570 __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
571 &tgt_dev->tgt_dev_flags);
572 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
573 &tgt_dev->tgt_dev_flags);
574 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
575 &tgt_dev->tgt_dev_flags);
577 if (dev->handler->dev_done_atomic &&
578 sess->tgt->tgtt->xmit_response_atomic) {
579 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
580 &tgt_dev->tgt_dev_flags);
583 spin_lock_bh(&scst_temp_UA_lock);
584 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
585 SCST_LOAD_SENSE(scst_sense_reset_UA));
586 scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
587 spin_unlock_bh(&scst_temp_UA_lock);
589 tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
591 if (vtt->threads_num > 0) {
593 if (dev->handler->threads_num > 0)
594 rc = scst_add_dev_threads(dev, vtt->threads_num);
595 else if (dev->handler->threads_num == 0)
596 rc = scst_add_cmd_threads(vtt->threads_num);
601 if (dev->handler && dev->handler->attach_tgt) {
602 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
604 rc = dev->handler->attach_tgt(tgt_dev);
605 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
607 PRINT_ERROR("Device handler's %s attach_tgt() "
608 "failed: %d", dev->handler->name, rc);
613 spin_lock_bh(&dev->dev_lock);
614 list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
615 if (dev->dev_reserved)
616 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
617 spin_unlock_bh(&dev->dev_lock);
619 sess_tgt_dev_list_head =
620 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
621 list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
622 sess_tgt_dev_list_head);
629 if (vtt->threads_num > 0) {
630 if (dev->handler->threads_num > 0)
631 scst_del_dev_threads(dev, vtt->threads_num);
632 else if (dev->handler->threads_num == 0)
633 scst_del_cmd_threads(vtt->threads_num);
637 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
642 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
644 /* No other locks supposed to be held; scst_mutex must be held */
645 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev)
649 scst_clear_reservation(tgt_dev);
651 /* With activity suspended the lock isn't needed, but let's be safe */
652 spin_lock_bh(&tgt_dev->tgt_dev_lock);
653 scst_free_all_UA(tgt_dev);
654 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
656 spin_lock_bh(&scst_temp_UA_lock);
657 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
658 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
659 scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA), 0);
660 spin_unlock_bh(&scst_temp_UA_lock);
667 * scst_mutex supposed to be held, there must not be parallel activity in this
670 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
672 struct scst_device *dev = tgt_dev->dev;
673 struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
677 tm_dbg_deinit_tgt_dev(tgt_dev);
679 spin_lock_bh(&dev->dev_lock);
680 list_del(&tgt_dev->dev_tgt_dev_list_entry);
681 spin_unlock_bh(&dev->dev_lock);
683 list_del(&tgt_dev->sess_tgt_dev_list_entry);
685 scst_clear_reservation(tgt_dev);
686 scst_free_all_UA(tgt_dev);
688 if (dev->handler && dev->handler->detach_tgt) {
689 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
691 dev->handler->detach_tgt(tgt_dev);
692 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
695 if (vtt->threads_num > 0) {
696 if (dev->handler->threads_num > 0)
697 scst_del_dev_threads(dev, vtt->threads_num);
698 else if (dev->handler->threads_num == 0)
699 scst_del_cmd_threads(vtt->threads_num);
702 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
708 /* scst_mutex supposed to be held */
709 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
712 struct scst_acg_dev *acg_dev;
713 struct scst_tgt_dev *tgt_dev;
717 list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
718 acg_dev_list_entry) {
719 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
720 if (tgt_dev == NULL) {
731 scst_sess_free_tgt_devs(sess);
736 * scst_mutex supposed to be held, there must not be parallel activity in this
739 static void scst_sess_free_tgt_devs(struct scst_session *sess)
742 struct scst_tgt_dev *tgt_dev, *t;
746 /* The session is going down, no users, so no locks */
747 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
748 struct list_head *sess_tgt_dev_list_head =
749 &sess->sess_tgt_dev_list_hash[i];
750 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
751 sess_tgt_dev_list_entry) {
752 scst_free_tgt_dev(tgt_dev);
754 INIT_LIST_HEAD(sess_tgt_dev_list_head);
761 /* The activity is supposed to be suspended and scst_mutex held */
762 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
763 uint64_t lun, int read_only)
766 struct scst_acg_dev *acg_dev;
767 struct scst_tgt_dev *tgt_dev;
768 struct scst_session *sess;
769 LIST_HEAD(tmp_tgt_dev_list);
773 INIT_LIST_HEAD(&tmp_tgt_dev_list);
775 #ifdef CONFIG_SCST_EXTRACHECKS
776 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
777 if (acg_dev->dev == dev) {
778 PRINT_ERROR("Device is already in group %s",
786 acg_dev = scst_alloc_acg_dev(acg, dev, lun);
787 if (acg_dev == NULL) {
791 acg_dev->rd_only_flag = read_only;
793 TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
795 list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
796 list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
798 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
799 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
800 if (tgt_dev == NULL) {
804 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
810 if (dev->virt_name != NULL) {
811 PRINT_INFO("Added device %s to group %s (LUN %lld, "
812 "rd_only %d)", dev->virt_name, acg->acg_name,
813 (long long unsigned int)lun,
816 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
818 dev->scsi_dev->host->host_no,
819 dev->scsi_dev->channel, dev->scsi_dev->id,
820 dev->scsi_dev->lun, acg->acg_name,
821 (long long unsigned int)lun,
830 list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
831 extra_tgt_dev_list_entry) {
832 scst_free_tgt_dev(tgt_dev);
834 scst_free_acg_dev(acg_dev);
838 /* The activity is supposed to be suspended and scst_mutex held */
839 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
842 struct scst_acg_dev *acg_dev = NULL, *a;
843 struct scst_tgt_dev *tgt_dev, *tt;
847 list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
854 if (acg_dev == NULL) {
855 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
860 list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
861 dev_tgt_dev_list_entry) {
862 if (tgt_dev->acg_dev == acg_dev)
863 scst_free_tgt_dev(tgt_dev);
865 scst_free_acg_dev(acg_dev);
869 if (dev->virt_name != NULL) {
870 PRINT_INFO("Removed device %s from group %s",
871 dev->virt_name, acg->acg_name);
873 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
874 dev->scsi_dev->host->host_no,
875 dev->scsi_dev->channel, dev->scsi_dev->id,
876 dev->scsi_dev->lun, acg->acg_name);
884 /* scst_mutex supposed to be held */
885 int scst_acg_add_name(struct scst_acg *acg, const char *name)
894 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
896 if (strcmp(n->name, name) == 0) {
897 PRINT_ERROR("Name %s already exists in group %s",
898 name, acg->acg_name);
904 n = kmalloc(sizeof(*n), GFP_KERNEL);
906 PRINT_ERROR("%s", "Unable to allocate scst_acn");
912 nm = kmalloc(len + 1, GFP_KERNEL);
914 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
922 list_add_tail(&n->acn_list_entry, &acg->acn_list);
926 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
936 /* scst_mutex supposed to be held */
937 int scst_acg_remove_name(struct scst_acg *acg, const char *name)
944 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
946 if (strcmp(n->name, name) == 0) {
947 list_del(&n->acn_list_entry);
956 PRINT_INFO("Removed name %s from group %s", name,
959 PRINT_ERROR("Unable to find name %s in group %s", name,
967 static struct scst_cmd *scst_create_prepare_internal_cmd(
968 struct scst_cmd *orig_cmd, int bufsize)
970 struct scst_cmd *res;
971 gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
975 res = scst_alloc_cmd(gfp_mask);
979 res->cmd_lists = orig_cmd->cmd_lists;
980 res->sess = orig_cmd->sess;
981 res->atomic = scst_cmd_atomic(orig_cmd);
983 res->tgtt = orig_cmd->tgtt;
984 res->tgt = orig_cmd->tgt;
985 res->dev = orig_cmd->dev;
986 res->tgt_dev = orig_cmd->tgt_dev;
987 res->lun = orig_cmd->lun;
988 res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
989 res->data_direction = SCST_DATA_UNKNOWN;
990 res->orig_cmd = orig_cmd;
991 res->bufflen = bufsize;
993 res->state = SCST_CMD_STATE_PRE_PARSE;
996 TRACE_EXIT_HRES((unsigned long)res);
1000 static void scst_free_internal_cmd(struct scst_cmd *cmd)
1004 __scst_cmd_put(cmd);
1010 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1013 #define sbuf_size 252
1014 static const uint8_t request_sense[6] =
1015 { REQUEST_SENSE, 0, 0, 0, sbuf_size, 0 };
1016 struct scst_cmd *rs_cmd;
1020 rs_cmd = scst_create_prepare_internal_cmd(orig_cmd, sbuf_size);
1024 memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
1025 rs_cmd->cdb_len = sizeof(request_sense);
1026 rs_cmd->data_direction = SCST_DATA_READ;
1027 rs_cmd->expected_data_direction = rs_cmd->data_direction;
1028 rs_cmd->expected_transfer_len = sbuf_size;
1029 rs_cmd->expected_values_set = 1;
1031 TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
1032 "cmd list ", rs_cmd);
1033 spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1034 list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
1035 spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
1036 wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
1039 TRACE_EXIT_RES(res);
1048 struct scst_cmd *scst_complete_request_sense(struct scst_cmd *req_cmd)
1050 struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
1056 sBUG_ON(orig_cmd == NULL);
1058 len = scst_get_buf_first(req_cmd, &buf);
1060 if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
1061 SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
1062 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
1064 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
1067 PRINT_ERROR("%s", "Unable to get the sense via "
1068 "REQUEST SENSE, returning HARDWARE ERROR");
1069 scst_set_cmd_error(orig_cmd,
1070 SCST_LOAD_SENSE(scst_sense_hardw_error));
1074 scst_put_buf(req_cmd, buf);
1076 scst_free_internal_cmd(req_cmd);
1078 TRACE_EXIT_HRES((unsigned long)orig_cmd);
1082 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1083 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
1085 struct scsi_request *req;
1089 if (scsi_cmd && (req = scsi_cmd->sc_request)) {
1091 if (req->sr_bufflen)
1092 kfree(req->sr_buffer);
1093 scsi_release_request(req);
1101 static void scst_send_release(struct scst_device *dev)
1103 struct scsi_request *req;
1104 struct scsi_device *scsi_dev;
1109 if (dev->scsi_dev == NULL)
1112 scsi_dev = dev->scsi_dev;
1114 req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
1116 PRINT_ERROR("Allocation of scsi_request failed: unable "
1117 "to RELEASE device %d:%d:%d:%d",
1118 scsi_dev->host->host_no, scsi_dev->channel,
1119 scsi_dev->id, scsi_dev->lun);
1123 memset(cdb, 0, sizeof(cdb));
1125 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1126 ((scsi_dev->lun << 5) & 0xe0) : 0;
1127 memcpy(req->sr_cmnd, cdb, sizeof(cdb));
1128 req->sr_cmd_len = sizeof(cdb);
1129 req->sr_data_direction = SCST_DATA_NONE;
1131 req->sr_bufflen = 0;
1132 req->sr_buffer = NULL;
1133 req->sr_request->rq_disk = dev->rq_disk;
1134 req->sr_sense_buffer[0] = 0;
1136 TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
1138 scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
1139 scst_req_done, 15, 3);
1145 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1146 static void scst_send_release(struct scst_device *dev)
1148 struct scsi_device *scsi_dev;
1149 unsigned char cdb[6];
1150 unsigned char *sense;
1155 if (dev->scsi_dev == NULL)
1158 /* We can't afford to miss a RELEASE due to memory shortage */
1159 sense = kmalloc(SCST_SENSE_BUFFERSIZE, GFP_KERNEL|__GFP_NOFAIL);
1161 scsi_dev = dev->scsi_dev;
1163 for (i = 0; i < 5; i++) {
1164 memset(cdb, 0, sizeof(cdb));
1166 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
1167 ((scsi_dev->lun << 5) & 0xe0) : 0;
1169 memset(sense, 0, SCST_SENSE_BUFFERSIZE);
1171 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
1173 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
1175 TRACE_DBG("MODE_SENSE done: %x", rc);
1177 if (scsi_status_is_good(rc)) {
1180 PRINT_ERROR("RELEASE failed: %d", rc);
1181 PRINT_BUFFER("RELEASE sense", sense,
1182 SCST_SENSE_BUFFERSIZE);
1183 scst_check_internal_sense(dev, rc,
1184 sense, SCST_SENSE_BUFFERSIZE);
1194 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
1196 /* scst_mutex supposed to be held */
1197 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
1199 struct scst_device *dev = tgt_dev->dev;
1204 spin_lock_bh(&dev->dev_lock);
1205 if (dev->dev_reserved &&
1206 !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
1207 /* This is the one who holds the reservation */
1208 struct scst_tgt_dev *tgt_dev_tmp;
1209 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
1210 dev_tgt_dev_list_entry) {
1211 clear_bit(SCST_TGT_DEV_RESERVED,
1212 &tgt_dev_tmp->tgt_dev_flags);
1214 dev->dev_reserved = 0;
1217 spin_unlock_bh(&dev->dev_lock);
1220 scst_send_release(dev);
1226 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
1227 const char *initiator_name)
1229 struct scst_session *sess;
1236 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1237 sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
1239 sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
1242 TRACE(TRACE_OUT_OF_MEM, "%s",
1243 "Allocation of scst_session failed");
1246 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1247 memset(sess, 0, sizeof(*sess));
1250 sess->init_phase = SCST_SESS_IPH_INITING;
1251 sess->shut_phase = SCST_SESS_SPH_READY;
1252 atomic_set(&sess->refcnt, 0);
1253 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1254 struct list_head *sess_tgt_dev_list_head =
1255 &sess->sess_tgt_dev_list_hash[i];
1256 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1258 spin_lock_init(&sess->sess_list_lock);
1259 INIT_LIST_HEAD(&sess->search_cmd_list);
1261 INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
1262 INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
1264 #ifdef CONFIG_SCST_MEASURE_LATENCY
1265 spin_lock_init(&sess->meas_lock);
1268 len = strlen(initiator_name);
1269 nm = kmalloc(len + 1, gfp_mask);
1271 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
1275 strcpy(nm, initiator_name);
1276 sess->initiator_name = nm;
1283 kmem_cache_free(scst_sess_cachep, sess);
1288 void scst_free_session(struct scst_session *sess)
1292 mutex_lock(&scst_mutex);
1294 TRACE_DBG("Removing sess %p from the list", sess);
1295 list_del(&sess->sess_list_entry);
1296 TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
1297 list_del(&sess->acg_sess_list_entry);
1299 scst_sess_free_tgt_devs(sess);
1301 wake_up_all(&sess->tgt->unreg_waitQ);
1303 mutex_unlock(&scst_mutex);
1305 kfree(sess->initiator_name);
1306 kmem_cache_free(scst_sess_cachep, sess);
1312 void scst_free_session_callback(struct scst_session *sess)
1314 struct completion *c;
1318 TRACE_DBG("Freeing session %p", sess);
1320 c = sess->shutdown_compl;
1322 if (sess->unreg_done_fn) {
1323 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
1324 sess->unreg_done_fn(sess);
1325 TRACE_DBG("%s", "unreg_done_fn() returned");
1327 scst_free_session(sess);
1336 void scst_sched_session_free(struct scst_session *sess)
1338 unsigned long flags;
1342 if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
1343 PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
1344 "shut phase %lx", sess, sess->shut_phase);
1348 spin_lock_irqsave(&scst_mgmt_lock, flags);
1349 TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
1350 list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
1351 spin_unlock_irqrestore(&scst_mgmt_lock, flags);
1353 wake_up(&scst_mgmt_waitQ);
1359 void scst_cmd_get(struct scst_cmd *cmd)
1361 __scst_cmd_get(cmd);
1363 EXPORT_SYMBOL(scst_cmd_get);
1365 void scst_cmd_put(struct scst_cmd *cmd)
1367 __scst_cmd_put(cmd);
1369 EXPORT_SYMBOL(scst_cmd_put);
1371 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
1373 struct scst_cmd *cmd;
1377 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1378 cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
1380 cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
1383 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
1386 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1387 memset(cmd, 0, sizeof(*cmd));
1390 cmd->state = SCST_CMD_STATE_INIT_WAIT;
1391 cmd->start_time = jiffies;
1392 atomic_set(&cmd->cmd_ref, 1);
1393 cmd->cmd_lists = &scst_main_cmd_lists;
1394 INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
1395 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
1396 cmd->timeout = SCST_DEFAULT_TIMEOUT;
1399 cmd->is_send_status = 1;
1400 cmd->resp_data_len = -1;
1402 cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
1403 cmd->dbl_ua_orig_resp_data_len = -1;
1410 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
1412 scst_sess_put(cmd->sess);
1415 * At this point tgt_dev can be dead, but the pointer remains non-NULL
1417 if (likely(cmd->tgt_dev != NULL))
1420 scst_destroy_cmd(cmd);
1424 /* No locks supposed to be held */
1425 void scst_free_cmd(struct scst_cmd *cmd)
1431 TRACE_DBG("Freeing cmd %p (tag %llu)",
1432 cmd, (long long unsigned int)cmd->tag);
1434 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
1435 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
1436 cmd, atomic_read(&scst_cmd_count));
1439 sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
1440 cmd->dec_on_dev_needed);
1442 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1443 #if defined(CONFIG_SCST_EXTRACHECKS)
1444 if (cmd->scsi_req) {
1445 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
1447 scst_release_request(cmd);
1453 * The target driver may have already freed the sg buffer before calling
1454 * scst_tgt_cmd_done(). E.g., scst_local has to do that.
1456 if (!cmd->tgt_data_buf_alloced)
1457 scst_check_restore_sg_buff(cmd);
1459 if (unlikely(cmd->internal)) {
1460 if (cmd->bufflen > 0)
1461 scst_release_space(cmd);
1462 scst_destroy_cmd(cmd);
1466 if (cmd->tgtt->on_free_cmd != NULL) {
1467 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
1468 cmd->tgtt->on_free_cmd(cmd);
1469 TRACE_DBG("%s", "Target's on_free_cmd() returned");
1472 if (likely(cmd->dev != NULL)) {
1473 struct scst_dev_type *handler = cmd->dev->handler;
1474 if (handler->on_free_cmd != NULL) {
1475 TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
1476 handler->name, cmd);
1477 handler->on_free_cmd(cmd);
1478 TRACE_DBG("Dev handler %s on_free_cmd() returned",
1483 scst_release_space(cmd);
1485 if (unlikely(cmd->sense != NULL)) {
1486 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
1487 mempool_free(cmd->sense, scst_sense_mempool);
1491 if (likely(cmd->tgt_dev != NULL)) {
1492 #ifdef CONFIG_SCST_EXTRACHECKS
1493 if (unlikely(!cmd->sent_for_exec)) {
1494 PRINT_ERROR("Finishing not executed cmd %p (opcode "
1495 "%d, target %s, lun %lld, sn %ld, expected_sn %ld)",
1496 cmd, cmd->cdb[0], cmd->tgtt->name,
1497 (long long unsigned int)cmd->lun,
1498 cmd->sn, cmd->tgt_dev->expected_sn);
1499 scst_unblock_deferred(cmd->tgt_dev, cmd);
1503 if (unlikely(cmd->out_of_sn)) {
1504 TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
1506 (long long unsigned int)cmd->tag,
1508 destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
1513 if (likely(destroy))
1514 scst_destroy_put_cmd(cmd);
1521 /* No locks supposed to be held. */
1522 void scst_check_retries(struct scst_tgt *tgt)
1524 int need_wake_up = 0;
1529 * We don't worry about overflow of finished_cmds, because we check
1530 * only for its change.
1532 atomic_inc(&tgt->finished_cmds);
1533 /* See comment in scst_queue_retry_cmd() */
1534 smp_mb__after_atomic_inc();
1535 if (unlikely(tgt->retry_cmds > 0)) {
1536 struct scst_cmd *c, *tc;
1537 unsigned long flags;
1539 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
1542 spin_lock_irqsave(&tgt->tgt_lock, flags);
1543 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
1547 TRACE_RETRY("Moving retry cmd %p to head of active "
1548 "cmd list (retry_cmds left %d)",
1549 c, tgt->retry_cmds);
1550 spin_lock(&c->cmd_lists->cmd_list_lock);
1551 list_move(&c->cmd_list_entry,
1552 &c->cmd_lists->active_cmd_list);
1553 wake_up(&c->cmd_lists->cmd_list_waitQ);
1554 spin_unlock(&c->cmd_lists->cmd_list_lock);
1557 if (need_wake_up >= 2) /* "slow start" */
1560 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1567 void scst_tgt_retry_timer_fn(unsigned long arg)
1569 struct scst_tgt *tgt = (struct scst_tgt *)arg;
1570 unsigned long flags;
1572 TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
1574 spin_lock_irqsave(&tgt->tgt_lock, flags);
1575 tgt->retry_timer_active = 0;
1576 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1578 scst_check_retries(tgt);
1584 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
1586 struct scst_mgmt_cmd *mcmd;
1590 mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1592 PRINT_CRIT_ERROR("%s", "Allocation of management command "
1593 "failed, some commands and their data could leak");
1596 memset(mcmd, 0, sizeof(*mcmd));
1603 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
1605 unsigned long flags;
1609 spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
1610 atomic_dec(&mcmd->sess->sess_cmd_count);
1611 spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
1613 scst_sess_put(mcmd->sess);
1615 if (mcmd->mcmd_tgt_dev != NULL)
1618 mempool_free(mcmd, scst_mgmt_mempool);
1624 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1625 int scst_alloc_request(struct scst_cmd *cmd)
1628 struct scsi_request *req;
1629 int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1633 /* cmd->dev->scsi_dev must be non-NULL here */
1634 req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1636 TRACE(TRACE_OUT_OF_MEM, "%s",
1637 "Allocation of scsi_request failed");
1642 cmd->scsi_req = req;
1644 memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1645 req->sr_cmd_len = cmd->cdb_len;
1646 req->sr_data_direction = cmd->data_direction;
1647 req->sr_use_sg = cmd->sg_cnt;
1648 req->sr_bufflen = cmd->bufflen;
1649 req->sr_buffer = cmd->sg;
1650 req->sr_request->rq_disk = cmd->dev->rq_disk;
1651 req->sr_sense_buffer[0] = 0;
1653 cmd->scsi_req->upper_private_data = cmd;
1660 void scst_release_request(struct scst_cmd *cmd)
1662 scsi_release_request(cmd->scsi_req);
1663 cmd->scsi_req = NULL;
1667 int scst_alloc_space(struct scst_cmd *cmd)
1671 int atomic = scst_cmd_atomic(cmd);
1673 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
1677 gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
1679 flags = atomic ? SCST_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
1681 flags |= SCST_POOL_ALLOC_NO_CACHED;
1683 cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
1684 &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
1685 if (cmd->sg == NULL)
1688 if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
1691 PRINT_INFO("Unable to complete command due to "
1692 "SG IO count limitation (requested %d, "
1693 "available %d, tgt lim %d)", cmd->sg_cnt,
1694 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
1707 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1714 static void scst_release_space(struct scst_cmd *cmd)
1718 if (cmd->sgv == NULL)
1721 if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
1722 TRACE_MEM("%s", "*data_buf_alloced set, returning");
1726 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
1739 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
1741 struct scatterlist *src_sg, *dst_sg;
1742 unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
1743 struct page *src, *dst;
1744 unsigned int s, d, to_copy;
1748 if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
1749 src_sg = cmd->tgt_sg;
1750 src_sg_cnt = cmd->tgt_sg_cnt;
1752 to_copy = cmd->bufflen;
1755 src_sg_cnt = cmd->sg_cnt;
1756 dst_sg = cmd->tgt_sg;
1757 to_copy = cmd->resp_data_len;
1760 TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
1761 "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
1764 dst = sg_page(dst_sg);
1765 dst_len = dst_sg->length;
1766 dst_offs = dst_sg->offset;
1771 while (s < src_sg_cnt) {
1772 src = sg_page(&src_sg[s]);
1773 src_len = src_sg[s].length;
1774 src_offs += src_sg[s].offset;
1780 * Himem pages are not allowed here, see the
1781 * corresponding #warning in scst_main.c. Correct
1782 * your target driver or dev handler to not alloc
1785 EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
1788 TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
1789 "src_offs %d, dst %p, dst_len %d, dst_offs %d",
1790 cmd, to_copy, src, src_len, src_offs, dst,
1793 if ((src_offs == 0) && (dst_offs == 0) &&
1794 (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
1795 copy_page(page_address(dst), page_address(src));
1798 n = min(PAGE_SIZE - dst_offs,
1799 PAGE_SIZE - src_offs);
1800 n = min(n, src_len);
1801 n = min(n, dst_len);
1802 memcpy(page_address(dst) + dst_offs,
1803 page_address(src) + src_offs, n);
1804 dst_offs -= min(n, dst_offs);
1805 src_offs -= min(n, src_offs);
1808 TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
1818 dst = sg_page(&dst_sg[d]);
1819 dst_len = dst_sg[d].length;
1820 dst_offs += dst_sg[d].offset;
1822 } while (src_len > 0);
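/*
 * To summarize the copy above: for SCST_SG_COPY_FROM_TARGET cmd->bufflen
 * bytes are copied from cmd->tgt_sg into cmd->sg, while for
 * SCST_SG_COPY_TO_TARGET cmd->resp_data_len bytes go the other way.
 * Fully page-aligned chunks are copied with copy_page(), everything
 * else with memcpy() over the overlap of the current SG entries.
 */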
1832 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1834 #define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
1835 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
1837 int scst_get_cdb_len(const uint8_t *cdb)
1839 return SCST_GET_CDB_LEN(cdb[0]);
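/*
 * For example, READ(10) has opcode 0x28, so its group is
 * (0x28 >> 5) & 0x7 = 1 and SCST_GET_CDB_LEN() returns 10. Groups 3, 6
 * and 7 map to -1 in the table above.
 */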
1842 /* get_trans_len_x extracts x bytes from the CDB as the length, starting at off */
1844 /* for special commands */
1845 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
1851 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
1853 cmd->bufflen = READ_CAP_LEN;
1857 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
1863 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
1865 uint8_t *p = (uint8_t *)cmd->cdb + off;
1869 cmd->bufflen |= ((u32)p[0]) << 8;
1870 cmd->bufflen |= ((u32)p[1]);
1872 switch (cmd->cdb[1] & 0x1f) {
1876 if (cmd->bufflen != 0) {
1877 PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
1878 "allocation length for service action %x",
1879 cmd->bufflen, cmd->cdb[1] & 0x1f);
1885 switch (cmd->cdb[1] & 0x1f) {
1894 cmd->bufflen = max(28, cmd->bufflen);
1897 PRINT_ERROR("READ POSITION: Invalid service action %x",
1898 cmd->cdb[1] & 0x1f);
1906 scst_set_cmd_error(cmd,
1907 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
1912 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
1914 cmd->bufflen = (u32)cmd->cdb[off];
1918 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
1920 cmd->bufflen = (u32)cmd->cdb[off];
1921 if (cmd->bufflen == 0)
1926 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
1928 const uint8_t *p = cmd->cdb + off;
1931 cmd->bufflen |= ((u32)p[0]) << 8;
1932 cmd->bufflen |= ((u32)p[1]);
1937 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
1939 const uint8_t *p = cmd->cdb + off;
1942 cmd->bufflen |= ((u32)p[0]) << 16;
1943 cmd->bufflen |= ((u32)p[1]) << 8;
1944 cmd->bufflen |= ((u32)p[2]);
1949 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
1951 const uint8_t *p = cmd->cdb + off;
1954 cmd->bufflen |= ((u32)p[0]) << 24;
1955 cmd->bufflen |= ((u32)p[1]) << 16;
1956 cmd->bufflen |= ((u32)p[2]) << 8;
1957 cmd->bufflen |= ((u32)p[3]);
1962 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
1968 int scst_get_cdb_info(struct scst_cmd *cmd)
1970 int dev_type = cmd->dev->handler->type;
1973 const struct scst_sdbops *ptr = NULL;
1977 op = cmd->cdb[0]; /* get clear opcode */
1979 TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
1980 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1983 i = scst_scsi_op_list[op];
1984 while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1985 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1986 ptr = &scst_scsi_op_table[i];
1987 TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1988 ptr->ops, ptr->devkey[0], /* disk */
1989 ptr->devkey[1], /* tape */
1990 ptr->devkey[2], /* printer */
1991 ptr->devkey[3], /* cpu */
1992 ptr->devkey[4], /* cdr */
1993 ptr->devkey[5], /* cdrom */
1994 ptr->devkey[6], /* scanner */
1995 ptr->devkey[7], /* worm */
1996 ptr->devkey[8], /* changer */
1997 ptr->devkey[9], /* commdev */
1999 TRACE_DBG("direction=%d flags=%d off=%d",
2009 /* opcode not found or not currently used */
2010 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
2013 cmd->op_flags = SCST_INFO_INVALID;
2017 cmd->cdb_len = SCST_GET_CDB_LEN(op);
2018 cmd->op_name = ptr->op_name;
2019 cmd->data_direction = ptr->direction;
2020 cmd->op_flags = ptr->flags;
2021 res = (*ptr->get_trans_len)(cmd, ptr->off);
2027 EXPORT_SYMBOL(scst_get_cdb_info);
2030 * Routine to extract a lun number from an 8-byte LUN structure
2031 * in network byte order (BE).
2032 * (see SAM-2, Section 4.12.3 page 40)
2033 * Supports 3 types of lun unpacking: peripheral, flat space and logical unit.
2035 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
2037 uint64_t res = NO_SUCH_LUN;
2042 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
2044 if (unlikely(len < 2)) {
2045 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
2053 if ((*((uint64_t *)lun) &
2054 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
2058 if (*((uint16_t *)&lun[2]) != 0)
2062 if (*((uint32_t *)&lun[2]) != 0)
2070 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
2071 switch (address_method) {
2072 case 0: /* peripheral device addressing method */
2075 PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
2076 "peripheral device addressing method 0x%02x, "
2077 "expected 0", *lun);
2084 * Looks like it's legal to use it as flat space addressing
2091 case 1: /* flat space addressing method */
2092 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
2095 case 2: /* logical unit addressing method */
2097 PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
2098 "addressing method 0x%02x, expected 0",
2102 if (*(lun + 1) & 0xe0) {
2103 PRINT_ERROR("Illegal TARGET in LUN logical unit "
2104 "addressing method 0x%02x, expected 0",
2105 (*(lun + 1) & 0xf8) >> 5);
2108 res = *(lun + 1) & 0x1f;
2111 case 3: /* extended logical unit addressing method */
2113 PRINT_ERROR("Unimplemented LUN addressing method %u",
2119 TRACE_EXIT_RES((int)res);
2123 PRINT_ERROR("%s", "Multi-level LUN unimplemented");
2127 int scst_calc_block_shift(int sector_size)
2129 int block_shift = 0;
2132 if (sector_size == 0)
2142 if (block_shift < 9) {
2143 PRINT_ERROR("Wrong sector size %d", sector_size);
2147 TRACE_EXIT_RES(block_shift);
2150 EXPORT_SYMBOL(scst_calc_block_shift);
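/*
 * For example, a 512-byte sector size gives block_shift 9 and 4096
 * bytes gives 12; any size that would produce a shift below 9 is
 * rejected as a wrong sector size.
 */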
2152 int scst_sbc_generic_parse(struct scst_cmd *cmd,
2153 int (*get_block_shift)(struct scst_cmd *cmd))
2160 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2161 * therefore change them only if necessary
2164 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2165 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2167 switch (cmd->cdb[0]) {
2168 case SERVICE_ACTION_IN:
2169 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
2170 cmd->bufflen = READ_CAP16_LEN;
2171 cmd->data_direction = SCST_DATA_READ;
2178 if ((cmd->cdb[1] & BYTCHK) == 0) {
2179 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2190 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
2192 * No need for locks here, since *_detach() cannot be
2193 * called while there are outstanding commands.
2195 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2199 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2200 cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
2201 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2202 cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
2203 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2204 cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
2206 TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
2207 res, cmd->bufflen, cmd->data_len, cmd->data_direction);
2209 TRACE_EXIT_RES(res);
2212 EXPORT_SYMBOL(scst_sbc_generic_parse);
2214 int scst_cdrom_generic_parse(struct scst_cmd *cmd,
2215 int (*get_block_shift)(struct scst_cmd *cmd))
2222 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2223 * therefore change them only if necessary
2226 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2227 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2229 cmd->cdb[1] &= 0x1f;
2231 switch (cmd->cdb[0]) {
2236 if ((cmd->cdb[1] & BYTCHK) == 0) {
2237 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2247 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2248 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2251 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2252 cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
2253 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2254 cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
2255 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2256 cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
2258 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2259 cmd->data_direction);
2264 EXPORT_SYMBOL(scst_cdrom_generic_parse);
2266 int scst_modisk_generic_parse(struct scst_cmd *cmd,
2267 int (*get_block_shift)(struct scst_cmd *cmd))
2274 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2275 * therefore change them only if necessary
2278 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2279 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2281 cmd->cdb[1] &= 0x1f;
2283 switch (cmd->cdb[0]) {
2288 if ((cmd->cdb[1] & BYTCHK) == 0) {
2289 cmd->data_len = cmd->bufflen << get_block_shift(cmd);
2299 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED)
2300 cmd->bufflen = cmd->bufflen << get_block_shift(cmd);
2303 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2304 cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
2305 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2306 cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
2307 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2308 cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
2310 TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
2311 cmd->data_direction);
2313 TRACE_EXIT_RES(res);
2316 EXPORT_SYMBOL(scst_modisk_generic_parse);
2318 int scst_tape_generic_parse(struct scst_cmd *cmd,
2319 int (*get_block_size)(struct scst_cmd *cmd))
2326 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2327 * therefore change them only if necessary
2330 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2331 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2333 if (cmd->cdb[0] == READ_POSITION) {
2334 int tclp = cmd->cdb[1] & 4;
2335 int long_bit = cmd->cdb[1] & 2;
2336 int bt = cmd->cdb[1] & 1;
2338 if ((tclp == long_bit) && (!bt || !long_bit)) {
2340 tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
2341 cmd->data_direction = SCST_DATA_READ;
2344 cmd->data_direction = SCST_DATA_NONE;
2348 if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1])
2349 cmd->bufflen = cmd->bufflen * get_block_size(cmd);
2351 if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
2352 cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
2353 else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
2354 cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
2355 else if (cmd->op_flags & SCST_LONG_TIMEOUT)
2356 cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
2358 TRACE_EXIT_RES(res);
2361 EXPORT_SYMBOL(scst_tape_generic_parse);
2363 static int scst_null_parse(struct scst_cmd *cmd)
2370 * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
2371 * therefore change them only if necessary
2374 TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
2375 cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
2377 switch (cmd->cdb[0]) {
2383 TRACE_DBG("res %d bufflen %d direct %d",
2384 res, cmd->bufflen, cmd->data_direction);
2390 int scst_changer_generic_parse(struct scst_cmd *cmd,
2391 int (*nothing)(struct scst_cmd *cmd))
2393 int res = scst_null_parse(cmd);
2395 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2396 cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
2398 cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
2402 EXPORT_SYMBOL(scst_changer_generic_parse);
2404 int scst_processor_generic_parse(struct scst_cmd *cmd,
2405 int (*nothing)(struct scst_cmd *cmd))
2407 int res = scst_null_parse(cmd);
2409 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2410 cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
2412 cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
2416 EXPORT_SYMBOL(scst_processor_generic_parse);
2418 int scst_raid_generic_parse(struct scst_cmd *cmd,
2419 int (*nothing)(struct scst_cmd *cmd))
2421 int res = scst_null_parse(cmd);
2423 if (cmd->op_flags & SCST_LONG_TIMEOUT)
2424 cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
2426 cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
2430 EXPORT_SYMBOL(scst_raid_generic_parse);
2432 int scst_block_generic_dev_done(struct scst_cmd *cmd,
2433 void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
2435 int opcode = cmd->cdb[0];
2436 int status = cmd->status;
2437 int res = SCST_CMD_STATE_DEFAULT;
2442 * SCST sets good defaults for cmd->is_send_status and
2443 * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2444 * therefore change them only if necessary
2447 if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
2451 /* Always keep track of disk capacity */
2452 int buffer_size, sector_size, sh;
2455 buffer_size = scst_get_buf_first(cmd, &buffer);
2456 if (unlikely(buffer_size <= 0)) {
2457 if (buffer_size < 0) {
2458 PRINT_ERROR("%s: Unable to get the"
2459 " buffer (%d)", __func__, buffer_size);
2465 ((buffer[4] << 24) | (buffer[5] << 16) |
2466 (buffer[6] << 8) | (buffer[7] << 0));
2467 scst_put_buf(cmd, buffer);
2468 if (sector_size != 0)
2469 sh = scst_calc_block_shift(sector_size);
2472 set_block_shift(cmd, sh);
2473 TRACE_DBG("block_shift %d", sh);
2482 TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
2483 "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
2486 TRACE_EXIT_RES(res);
2489 EXPORT_SYMBOL(scst_block_generic_dev_done);
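/*
 * Note: the READ CAPACITY(10) payload parsed above keeps the block
 * length in bytes 4-7, big endian, so a payload ending in 00 00 02 00
 * means 512-byte sectors and results in block_shift 9.
 */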
2491 int scst_tape_generic_dev_done(struct scst_cmd *cmd,
2492 void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
2494 int opcode = cmd->cdb[0];
2495 int res = SCST_CMD_STATE_DEFAULT;
2496 int buffer_size, bs;
2497 uint8_t *buffer = NULL;
2502 * SCST sets good defaults for cmd->is_send_status and
2503 * cmd->resp_data_len based on cmd->status and cmd->data_direction,
2504 * therefore change them only if necessary
2510 buffer_size = scst_get_buf_first(cmd, &buffer);
2511 if (unlikely(buffer_size <= 0)) {
2512 if (buffer_size < 0) {
2513 PRINT_ERROR("%s: Unable to get the buffer (%d)",
2514 __func__, buffer_size);
2523 TRACE_DBG("%s", "MODE_SENSE");
2524 if ((cmd->cdb[2] & 0xC0) == 0) {
2525 if (buffer[3] == 8) {
2526 bs = (buffer[9] << 16) |
2527 (buffer[10] << 8) | buffer[11];
2528 set_block_size(cmd, bs);
2533 TRACE_DBG("%s", "MODE_SELECT");
2534 if (buffer[3] == 8) {
2535 bs = (buffer[9] << 16) | (buffer[10] << 8) |
2537 set_block_size(cmd, bs);
2548 scst_put_buf(cmd, buffer);
2553 TRACE_EXIT_RES(res);
2556 EXPORT_SYMBOL(scst_tape_generic_dev_done);
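/*
 * Note: the MODE SENSE/MODE SELECT parsing above assumes the 6-byte CDB
 * variant layout: a 4-byte header whose byte 3 is the block descriptor
 * length (8 when one descriptor is present), followed by the descriptor
 * whose last three bytes (buffer[9..11]) hold the block length, big
 * endian.
 */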
2558 static void scst_check_internal_sense(struct scst_device *dev, int result,
2559 uint8_t *sense, int sense_len)
2563 if (host_byte(result) == DID_RESET) {
2564 TRACE(TRACE_MGMT_MINOR, "%s", "DID_RESET received, triggering "
2566 scst_set_sense(sense, sense_len,
2567 SCST_LOAD_SENSE(scst_sense_reset_UA));
2568 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2569 } else if ((status_byte(result) == CHECK_CONDITION) &&
2570 SCST_SENSE_VALID(sense) && scst_is_ua_sense(sense))
2571 scst_dev_check_set_UA(dev, NULL, sense, sense_len);
2577 int scst_obtain_device_parameters(struct scst_device *dev)
2581 uint8_t buffer[4+0x0A];
2582 uint8_t sense_buffer[SCST_SENSE_BUFFERSIZE];
2586 EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
2588 for (i = 0; i < 5; i++) {
2589 /* Get control mode page */
2590 memset(cmd, 0, sizeof(cmd));
2591 cmd[0] = MODE_SENSE;
2592 cmd[1] = 8; /* DBD */
2594 cmd[4] = sizeof(buffer);
2596 memset(buffer, 0, sizeof(buffer));
2597 memset(sense_buffer, 0, sizeof(sense_buffer));
2599 TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
2600 res = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
2601 sizeof(buffer), sense_buffer, 15, 0, 0);
2603 TRACE_DBG("MODE_SENSE done: %x", res);
2605 if (scsi_status_is_good(res)) {
2608 PRINT_BUFF_FLAG(TRACE_SCSI,
2609 "Returned control mode page data",
2610 buffer, sizeof(buffer));
2612 dev->tst = buffer[4+2] >> 5;
2613 q = buffer[4+3] >> 4;
2614 if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
2615 PRINT_ERROR("Too big QUEUE ALG %x, dev "
2616 "%d:%d:%d:%d", dev->queue_alg,
2617 dev->scsi_dev->host->host_no,
2618 dev->scsi_dev->channel,
2619 dev->scsi_dev->id, dev->scsi_dev->lun);
2622 dev->swp = (buffer[4+4] & 0x8) >> 3;
2623 dev->tas = (buffer[4+5] & 0x40) >> 6;
2626 * Unfortunately, SCSI ML doesn't provide a way to
2627 * specify a command's task attribute, so we can only rely on
2628 * the device's restricted reordering.
2630 dev->has_own_order_mgmt = !dev->queue_alg;
2632 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2633 "Device %d:%d:%d:%d: TST %x, "
2634 "QUEUE ALG %x, SWP %x, TAS %x, "
2635 "has_own_order_mgmt %d",
2636 dev->scsi_dev->host->host_no,
2637 dev->scsi_dev->channel, dev->scsi_dev->id,
2638 dev->scsi_dev->lun, dev->tst, dev->queue_alg,
2639 dev->swp, dev->tas, dev->has_own_order_mgmt);
2644 if ((status_byte(res) == CHECK_CONDITION) &&
2647 * 3ware controller is buggy and returns CONDITION_GOOD
2648 * instead of CHECK_CONDITION
2652 SCST_SENSE_VALID(sense_buffer)) {
2653 if (sense_buffer[2] == ILLEGAL_REQUEST) {
2654 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2655 "Device %d:%d:%d:%d doesn't"
2656 " support control mode page,"
2657 " using defaults: TST %x,"
2658 " QUEUE ALG %x, SWP %x, TAS %x,"
2659 " has_own_order_mgmt %d",
2660 dev->scsi_dev->host->host_no,
2661 dev->scsi_dev->channel,
2668 dev->has_own_order_mgmt);
2671 } else if (sense_buffer[2] == NOT_READY) {
2673 "Device %d:%d:%d:%d not ready",
2674 dev->scsi_dev->host->host_no,
2675 dev->scsi_dev->channel,
2677 dev->scsi_dev->lun);
2682 TRACE(TRACE_SCSI|TRACE_MGMT_MINOR,
2683 "Internal MODE SENSE to "
2684 "device %d:%d:%d:%d failed: %x",
2685 dev->scsi_dev->host->host_no,
2686 dev->scsi_dev->channel,
2688 dev->scsi_dev->lun, res);
2689 PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_MGMT_MINOR,
2691 sense_buffer, sizeof(sense_buffer));
2693 scst_check_internal_sense(dev, res, sense_buffer,
2694 sizeof(sense_buffer));
2700 TRACE_EXIT_RES(res);
2703 EXPORT_SYMBOL(scst_obtain_device_parameters);
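/*
 * Note on the buffer[4+n] indexing above: the MODE SENSE(6) response
 * starts with a 4-byte header (no block descriptors, since DBD is set
 * in the CDB), immediately followed by the control mode page 0x0A,
 * where TST is bits 7-5 of page byte 2, QUEUE ALGORITHM MODIFIER bits
 * 7-4 of byte 3, SWP bit 3 of byte 4 and TAS bit 6 of byte 5.
 */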
2705 /* Called under dev_lock and BH off */
2706 void scst_process_reset(struct scst_device *dev,
2707 struct scst_session *originator, struct scst_cmd *exclude_cmd,
2708 struct scst_mgmt_cmd *mcmd, bool setUA)
2710 struct scst_tgt_dev *tgt_dev;
2711 struct scst_cmd *cmd, *tcmd;
2715 /* Clear RESERVE'ation, if necessary */
2716 if (dev->dev_reserved) {
2717 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2718 dev_tgt_dev_list_entry) {
2719 TRACE(TRACE_MGMT_MINOR, "Clearing RESERVE'ation for "
2721 (long long unsigned int)tgt_dev->lun);
2722 clear_bit(SCST_TGT_DEV_RESERVED,
2723 &tgt_dev->tgt_dev_flags);
2725 dev->dev_reserved = 0;
2727 * There is no need to send RELEASE, since the device is going
2728 * to be reset. Moreover, since we may be running in a RESET TM
2729 * function, sending it could even be dangerous.
2733 dev->dev_double_ua_possible = 1;
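/*
 * Besides the reset UA set locally below, the device itself may
 * report its own UA for the same reset, so remember that a double
 * Unit Attention is possible from now on.
 */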
2735 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2736 dev_tgt_dev_list_entry) {
2737 struct scst_session *sess = tgt_dev->sess;
2739 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2740 scst_free_all_UA(tgt_dev);
2741 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2743 spin_lock_irq(&sess->sess_list_lock);
2745 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
2746 list_for_each_entry(cmd, &sess->search_cmd_list,
2747 search_cmd_list_entry) {
2748 if (cmd == exclude_cmd)
2750 if ((cmd->tgt_dev == tgt_dev) ||
2751 ((cmd->tgt_dev == NULL) &&
2752 (cmd->lun == tgt_dev->lun))) {
2753 scst_abort_cmd(cmd, mcmd,
2754 (tgt_dev->sess != originator), 0);
2757 spin_unlock_irq(&sess->sess_list_lock);
2760 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2761 blocked_cmd_list_entry) {
2762 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
2763 list_del(&cmd->blocked_cmd_list_entry);
2764 TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
2765 "to active cmd list", cmd);
2766 spin_lock_irq(&cmd->cmd_lists->cmd_list_lock);
2767 list_add_tail(&cmd->cmd_list_entry,
2768 &cmd->cmd_lists->active_cmd_list);
2769 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2770 spin_unlock_irq(&cmd->cmd_lists->cmd_list_lock);
2775 /* BH already off */
2776 spin_lock(&scst_temp_UA_lock);
2777 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
2778 SCST_LOAD_SENSE(scst_sense_reset_UA));
2779 scst_dev_check_set_local_UA(dev, exclude_cmd, scst_temp_UA,
2780 sizeof(scst_temp_UA));
2781 spin_unlock(&scst_temp_UA_lock);
2788 int scst_set_pending_UA(struct scst_cmd *cmd)
2791 struct scst_tgt_dev_UA *UA_entry;
2795 TRACE(TRACE_MGMT_MINOR, "Setting pending UA cmd %p", cmd);
2797 spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
2799 /* UA list could be cleared behind us, so retest */
2800 if (list_empty(&cmd->tgt_dev->UA_list)) {
2802 "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
2807 UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
2810 TRACE_DBG("next %p UA_entry %p",
2811 cmd->tgt_dev->UA_list.next, UA_entry);
2813 scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
2814 sizeof(UA_entry->UA_sense_buffer));
2818 list_del(&UA_entry->UA_list_entry);
2820 mempool_free(UA_entry, scst_ua_mempool);
2822 if (list_empty(&cmd->tgt_dev->UA_list)) {
2823 clear_bit(SCST_TGT_DEV_UA_PENDING,
2824 &cmd->tgt_dev->tgt_dev_flags);
2827 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2830 TRACE_EXIT_RES(res);
2834 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
2838 /* Called under tgt_dev_lock and BH off */
2839 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
2840 const uint8_t *sense, int sense_len, int head)
2842 struct scst_tgt_dev_UA *UA_entry = NULL;
2846 UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
2847 if (UA_entry == NULL) {
2848 PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
2849 "allocation failed. The UNIT ATTENTION "
2850 "on some sessions will be missed");
2851 PRINT_BUFFER("Lost UA", sense, sense_len);
2854 memset(UA_entry, 0, sizeof(*UA_entry));
2856 if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer))
2857 sense_len = sizeof(UA_entry->UA_sense_buffer);
2858 memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
2860 set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2862 TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
2865 list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2867 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
2874 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
2875 const uint8_t *sense, int sense_len, int head)
2878 struct scst_tgt_dev_UA *UA_entry_tmp;
2882 spin_lock_bh(&tgt_dev->tgt_dev_lock);
2884 list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
2886 if (memcmp(sense, UA_entry_tmp->UA_sense_buffer,
2888 TRACE_MGMT_DBG("%s", "UA already exists");
2895 scst_alloc_set_UA(tgt_dev, sense, sense_len, head);
2897 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
2903 /* Called under dev_lock and BH off */
2904 void scst_dev_check_set_local_UA(struct scst_device *dev,
2905 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2907 struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
2911 if (exclude != NULL)
2912 exclude_tgt_dev = exclude->tgt_dev;
2914 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
2915 dev_tgt_dev_list_entry) {
2916 if (tgt_dev != exclude_tgt_dev)
2917 scst_check_set_UA(tgt_dev, sense, sense_len, 0);
2924 /* Called under dev_lock and BH off */
2925 void __scst_dev_check_set_UA(struct scst_device *dev,
2926 struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
2930 TRACE(TRACE_MGMT_MINOR, "Processing UA dev %p", dev);
2932 /* Check for reset UA */
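/* (in fixed-format sense data byte 12 holds the ASC) */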
2933 if (sense[12] == SCST_SENSE_ASC_UA_RESET)
2934 scst_process_reset(dev,
2935 (exclude != NULL) ? exclude->sess : NULL,
2936 exclude, NULL, false);
2938 scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
2944 /* Called under tgt_dev_lock or when tgt_dev is unused */
2945 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
2947 struct scst_tgt_dev_UA *UA_entry, *t;
2951 list_for_each_entry_safe(UA_entry, t,
2952 &tgt_dev->UA_list, UA_list_entry) {
2953 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %lld",
2954 (long long unsigned int)tgt_dev->lun);
2955 list_del(&UA_entry->UA_list_entry);
2958 INIT_LIST_HEAD(&tgt_dev->UA_list);
2959 clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
2966 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
2968 struct scst_cmd *res = NULL, *cmd, *t;
2969 typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
2971 spin_lock_irq(&tgt_dev->sn_lock);
2973 if (unlikely(tgt_dev->hq_cmd_count != 0))
2977 list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
2978 sn_cmd_list_entry) {
2979 EXTRACHECKS_BUG_ON(cmd->queue_type ==
2980 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
2981 if (cmd->sn == expected_sn) {
2982 TRACE_SN("Deferred command %p (sn %ld, set %d) found",
2983 cmd, cmd->sn, cmd->sn_set);
2984 tgt_dev->def_cmd_count--;
2985 list_del(&cmd->sn_cmd_list_entry);
2989 spin_lock(&cmd->cmd_lists->cmd_list_lock);
2990 TRACE_SN("Adding cmd %p to active cmd list",
2992 list_add_tail(&cmd->cmd_list_entry,
2993 &cmd->cmd_lists->active_cmd_list);
2994 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
2995 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3002 list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
3003 sn_cmd_list_entry) {
3004 EXTRACHECKS_BUG_ON(cmd->queue_type ==
3005 SCST_CMD_QUEUE_HEAD_OF_QUEUE);
3006 if (cmd->sn == expected_sn) {
3007 atomic_t *slot = cmd->sn_slot;
3009 * !! At this point any pointer in cmd, except !!
3010 * !! sn_slot and sn_cmd_list_entry, may already !!
3011 * !! have been destroyed !!
3013 TRACE_SN("cmd %p (tag %llu) with skipped sn %ld found",
3015 (long long unsigned int)cmd->tag,
3017 tgt_dev->def_cmd_count--;
3018 list_del(&cmd->sn_cmd_list_entry);
3019 spin_unlock_irq(&tgt_dev->sn_lock);
3020 if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
3022 scst_destroy_put_cmd(cmd);
3023 scst_inc_expected_sn(tgt_dev, slot);
3024 expected_sn = tgt_dev->expected_sn;
3025 spin_lock_irq(&tgt_dev->sn_lock);
3031 spin_unlock_irq(&tgt_dev->sn_lock);
3035 void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
3036 struct scst_thr_data_hdr *data,
3037 void (*free_fn) (struct scst_thr_data_hdr *data))
3039 data->owner_thr = current;
3040 atomic_set(&data->ref, 1);
3041 EXTRACHECKS_BUG_ON(free_fn == NULL);
3042 data->free_fn = free_fn;
3043 spin_lock(&tgt_dev->thr_data_lock);
3044 list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
3045 spin_unlock(&tgt_dev->thr_data_lock);
3047 EXPORT_SYMBOL(scst_add_thr_data);
3049 void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
3051 spin_lock(&tgt_dev->thr_data_lock);
3052 while (!list_empty(&tgt_dev->thr_data_list)) {
3053 struct scst_thr_data_hdr *d = list_entry(
3054 tgt_dev->thr_data_list.next, typeof(*d),
3055 thr_data_list_entry);
3056 list_del(&d->thr_data_list_entry);
3057 spin_unlock(&tgt_dev->thr_data_lock);
3058 scst_thr_data_put(d);
3059 spin_lock(&tgt_dev->thr_data_lock);
3061 spin_unlock(&tgt_dev->thr_data_lock);
3064 EXPORT_SYMBOL(scst_del_all_thr_data);
3066 void scst_dev_del_all_thr_data(struct scst_device *dev)
3068 struct scst_tgt_dev *tgt_dev;
3072 mutex_lock(&scst_mutex);
3074 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3075 dev_tgt_dev_list_entry) {
3076 scst_del_all_thr_data(tgt_dev);
3079 mutex_unlock(&scst_mutex);
3084 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
3086 struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
3087 struct task_struct *tsk)
3089 struct scst_thr_data_hdr *res = NULL, *d;
3091 spin_lock(&tgt_dev->thr_data_lock);
3092 list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
3093 if (d->owner_thr == tsk) {
3095 scst_thr_data_get(res);
3099 spin_unlock(&tgt_dev->thr_data_lock);
3102 EXPORT_SYMBOL(__scst_find_thr_data);
3104 /* dev_lock is supposed to be held and BH disabled */
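/*
 * Increments block_count; while it is non-zero, new commands for the
 * device are parked on blocked_cmd_list by scst_inc_on_dev_cmd()
 * until scst_unblock_dev() drops the count back to zero.
 */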
3105 void __scst_block_dev(struct scst_device *dev)
3108 TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
3112 static void scst_block_dev(struct scst_device *dev, int outstanding)
3114 spin_lock_bh(&dev->dev_lock);
3115 __scst_block_dev(dev);
3116 spin_unlock_bh(&dev->dev_lock);
3119 * A memory barrier is necessary here, because on_dev_count must be
3120 * read in wait_event() below only after block_count has been increased.
3121 * Otherwise, we could miss a wake-up from scst_dec_on_dev_cmd().
3122 * We use an explicit barrier, because spin_unlock_bh() doesn't
3123 * provide the necessary memory barrier semantics.
3127 TRACE_MGMT_DBG("Waiting during blocking, outstanding %d (on_dev_count "
3128 "%d)", outstanding, atomic_read(&dev->on_dev_count));
3129 wait_event(dev->on_dev_waitQ,
3130 atomic_read(&dev->on_dev_count) <= outstanding);
3131 TRACE_MGMT_DBG("%s", "wait_event() returned");
3135 void scst_block_dev_cmd(struct scst_cmd *cmd, int outstanding)
3137 sBUG_ON(cmd->needs_unblocking);
3139 cmd->needs_unblocking = 1;
3140 TRACE_MGMT_DBG("Needs unblocking cmd %p (tag %llu)",
3141 cmd, (long long unsigned int)cmd->tag);
3143 scst_block_dev(cmd->dev, outstanding);
3147 void scst_unblock_dev(struct scst_device *dev)
3149 spin_lock_bh(&dev->dev_lock);
3150 TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
3151 dev->block_count-1, dev);
3152 if (--dev->block_count == 0)
3153 scst_unblock_cmds(dev);
3154 spin_unlock_bh(&dev->dev_lock);
3155 sBUG_ON(dev->block_count < 0);
3159 void scst_unblock_dev_cmd(struct scst_cmd *cmd)
3161 scst_unblock_dev(cmd->dev);
3162 cmd->needs_unblocking = 0;
3166 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
3169 struct scst_device *dev = cmd->dev;
3173 sBUG_ON(cmd->inc_blocking || cmd->dec_on_dev_needed);
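/*
 * Account this command as outstanding on the device; if the device is
 * currently blocked, the command is instead queued on blocked_cmd_list
 * and re-activated later by scst_unblock_cmds().
 */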
3175 atomic_inc(&dev->on_dev_count);
3176 cmd->dec_on_dev_needed = 1;
3177 TRACE_DBG("New on_dev_count %d", atomic_read(&dev->on_dev_count));
3179 if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
3181 * The original command may already be blocking the device, so
3182 * the REQUEST SENSE command must always be allowed to pass.
3187 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3188 spin_lock_bh(&dev->dev_lock);
3189 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3191 if (dev->block_count > 0) {
3192 scst_dec_on_dev_cmd(cmd);
3193 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or strict "
3194 "serializing (tag %llu, dev %p)", cmd, cmd->tag, dev);
3195 list_add_tail(&cmd->blocked_cmd_list_entry,
3196 &dev->blocked_cmd_list);
3199 __scst_block_dev(dev);
3200 cmd->inc_blocking = 1;
3202 spin_unlock_bh(&dev->dev_lock);
3206 if (unlikely(dev->block_count > 0)) {
3207 spin_lock_bh(&dev->dev_lock);
3208 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
3210 if (dev->block_count > 0) {
3211 scst_dec_on_dev_cmd(cmd);
3212 TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
3213 "(tag %llu, dev %p)", cmd,
3214 (long long unsigned int)cmd->tag, dev);
3215 list_add_tail(&cmd->blocked_cmd_list_entry,
3216 &dev->blocked_cmd_list);
3218 spin_unlock_bh(&dev->dev_lock);
3221 TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
3224 spin_unlock_bh(&dev->dev_lock);
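/*
 * While a double reset UA is possible, block further commands for the
 * duration of this one, so commands are effectively serialized until
 * the UA situation is cleared.
 */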
3226 if (unlikely(dev->dev_double_ua_possible)) {
3227 spin_lock_bh(&dev->dev_lock);
3228 if (dev->block_count == 0) {
3229 TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
3230 "cmds due to possible double reset UA (dev %p)",
3231 cmd, (long long unsigned int)cmd->tag, dev);
3232 __scst_block_dev(dev);
3233 cmd->inc_blocking = 1;
3235 spin_unlock_bh(&dev->dev_lock);
3236 TRACE_MGMT_DBG("Somebody blocked the device, "
3237 "repeating (count %d)", dev->block_count);
3240 spin_unlock_bh(&dev->dev_lock);
3245 TRACE_EXIT_RES(res);
3249 spin_unlock_bh(&dev->dev_lock);
3253 /* Called under dev_lock */
3254 static void scst_unblock_cmds(struct scst_device *dev)
3256 #ifdef CONFIG_SCST_STRICT_SERIALIZING
3257 struct scst_cmd *cmd, *t;
3258 unsigned long flags;
3262 local_irq_save(flags);
3263 list_for_each_entry_safe(cmd, t, &dev->blocked_cmd_list,
3264 blocked_cmd_list_entry) {
3267 * Since only one cmd at a time is being executed, expected_sn
3268 * can't change behind us while the corresponding cmd is on
3269 * blocked_cmd_list, but we could be called before
3270 * scst_inc_expected_sn().
3272 * For HQ commands the SN is not set.
3274 if (likely(!cmd->internal && cmd->sn_set)) {
3275 typeof(cmd->tgt_dev->expected_sn) expected_sn;
3276 if (cmd->tgt_dev == NULL)
3278 expected_sn = cmd->tgt_dev->expected_sn;
3279 if (cmd->sn == expected_sn)
3281 else if (cmd->sn != (expected_sn+1))
3285 list_del(&cmd->blocked_cmd_list_entry);
3286 TRACE_MGMT_DBG("Adding cmd %p to head of active cmd list", cmd);
3287 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3288 list_add(&cmd->cmd_list_entry,
3289 &cmd->cmd_lists->active_cmd_list);
3290 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3291 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3295 local_irq_restore(flags);
3296 #else /* CONFIG_SCST_STRICT_SERIALIZING */
3297 struct scst_cmd *cmd, *tcmd;
3298 unsigned long flags;
3302 local_irq_save(flags);
3303 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
3304 blocked_cmd_list_entry) {
3305 list_del(&cmd->blocked_cmd_list_entry);
3306 TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
3307 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3308 if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
3309 list_add(&cmd->cmd_list_entry,
3310 &cmd->cmd_lists->active_cmd_list);
3312 list_add_tail(&cmd->cmd_list_entry,
3313 &cmd->cmd_lists->active_cmd_list);
3314 wake_up(&cmd->cmd_lists->cmd_list_waitQ);
3315 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3317 local_irq_restore(flags);
3318 #endif /* CONFIG_SCST_STRICT_SERIALIZING */
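/*
 * If the out-of-SN command carries the currently expected SN, advance
 * expected_sn and activate any deferred commands; otherwise remember
 * it on skipped_sn_list so its SN can be skipped over later.
 */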
3324 static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3325 struct scst_cmd *out_of_sn_cmd)
3327 EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
3329 if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
3330 scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
3331 scst_make_deferred_commands_active(tgt_dev);
3333 out_of_sn_cmd->out_of_sn = 1;
3334 spin_lock_irq(&tgt_dev->sn_lock);
3335 tgt_dev->def_cmd_count++;
3336 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
3337 &tgt_dev->skipped_sn_list);
3338 TRACE_SN("out_of_sn_cmd %p with sn %ld added to skipped_sn_list"
3339 " (expected_sn %ld)", out_of_sn_cmd, out_of_sn_cmd->sn,
3340 tgt_dev->expected_sn);
3341 spin_unlock_irq(&tgt_dev->sn_lock);
3347 void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
3348 struct scst_cmd *out_of_sn_cmd)
3352 if (!out_of_sn_cmd->sn_set) {
3353 TRACE_SN("cmd %p without sn", out_of_sn_cmd);
3357 __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
3364 void scst_on_hq_cmd_response(struct scst_cmd *cmd)
3366 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
3370 if (!cmd->hq_cmd_inced)
3373 spin_lock_irq(&tgt_dev->sn_lock);
3374 tgt_dev->hq_cmd_count--;
3375 spin_unlock_irq(&tgt_dev->sn_lock);
3377 EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
3380 * It is safe to check hq_cmd_count without holding the lock.
3381 * In the worst case we will only trigger an unneeded run of
3382 * the deferred commands.
3384 if (tgt_dev->hq_cmd_count == 0)
3385 scst_make_deferred_commands_active(tgt_dev);
3392 void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
3396 TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
3397 "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
3398 atomic_read(&scst_cmd_count));
3400 scst_done_cmd_mgmt(cmd);
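/*
 * For a command aborted on behalf of another initiator, the device's
 * TAS setting (see scst_obtain_device_parameters()) decides whether
 * TASK ABORTED status is returned or the command is dropped without
 * any response.
 */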
3402 if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
3403 if (cmd->completed) {
3404 /* It's completed and it's OK to return its result */
3408 if (cmd->dev->tas) {
3409 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3410 "(tag %llu), returning TASK ABORTED ", cmd,
3411 (long long unsigned int)cmd->tag);
3412 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
3414 TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
3415 "(tag %llu), aborting without delivery or "
3417 cmd, (long long unsigned int)cmd->tag);
3419 * There is no need to check/requeue a possible UA,
3420 * because if one exists it will be delivered by the
3421 * "completed" branch above.
3423 clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
3432 void __init scst_scsi_op_list_init(void)
3439 for (i = 0; i < 256; i++)
3440 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
3442 for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
3443 if (scst_scsi_op_table[i].ops != op) {
3444 op = scst_scsi_op_table[i].ops;
3445 scst_scsi_op_list[op] = i;
3453 #ifdef CONFIG_SCST_DEBUG
3454 /* Original taken from the XFS code */
3455 unsigned long scst_random(void)
3458 static unsigned long RandomValue;
3459 static DEFINE_SPINLOCK(lock);
3460 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
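/*
 * Park-Miller "minimal standard" generator: x = 16807 * x mod (2^31 - 1),
 * computed with Schrage's method (q = 127773, r = 2836) to avoid
 * 32-bit overflow.
 */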
3464 unsigned long flags;
3466 spin_lock_irqsave(&lock, flags);
3468 RandomValue = jiffies;
3474 rv = 16807 * lo - 2836 * hi;
3478 spin_unlock_irqrestore(&lock, flags);
3481 EXPORT_SYMBOL(scst_random);
3484 #ifdef CONFIG_SCST_DEBUG_TM
3486 #define TM_DBG_STATE_ABORT 0
3487 #define TM_DBG_STATE_RESET 1
3488 #define TM_DBG_STATE_OFFLINE 2
3490 #define INIT_TM_DBG_STATE TM_DBG_STATE_ABORT
3492 static void tm_dbg_timer_fn(unsigned long arg);
3494 static DEFINE_SPINLOCK(scst_tm_dbg_lock);
3495 /* All serialized by scst_tm_dbg_lock */
3497 unsigned int tm_dbg_release:1;
3498 unsigned int tm_dbg_blocked:1;
3500 static LIST_HEAD(tm_dbg_delayed_cmd_list);
3501 static int tm_dbg_delayed_cmds_count;
3502 static int tm_dbg_passed_cmds_count;
3503 static int tm_dbg_state;
3504 static int tm_dbg_on_state_passes;
3505 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
3506 static wait_queue_head_t *tm_dbg_p_cmd_list_waitQ;
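/*
 * How many TM-debug passes to run in the current state before
 * tm_dbg_change_state() switches to the next one, indexed by
 * tm_dbg_state.
 */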
3508 static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
3510 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
3511 struct scst_acg_dev *acg_dev)
3513 if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
3514 unsigned long flags;
3515 /* Do TM debugging only for LUN 0 */
3516 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3517 tm_dbg_p_cmd_list_waitQ =
3518 &tgt_dev->dev->p_cmd_lists->cmd_list_waitQ;
3519 tm_dbg_state = INIT_TM_DBG_STATE;
3520 tm_dbg_on_state_passes =
3521 tm_dbg_on_state_num_passes[tm_dbg_state];
3522 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
3523 PRINT_INFO("LUN 0 connected from initiator %s is under "
3524 "TM debugging", tgt_dev->sess->tgt->tgtt->name);
3525 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3529 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
3531 if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags)) {
3532 unsigned long flags;
3533 del_timer_sync(&tm_dbg_timer);
3534 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3535 tm_dbg_p_cmd_list_waitQ = NULL;
3536 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3540 static void tm_dbg_timer_fn(unsigned long arg)
3542 TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
3543 tm_dbg_flags.tm_dbg_release = 1;
3544 /* Used to make sure that all woken up threads see the new value */
3546 wake_up_all(tm_dbg_p_cmd_list_waitQ);
3549 /* Called under scst_tm_dbg_lock and IRQs off */
3550 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
3552 switch (tm_dbg_state) {
3553 case TM_DBG_STATE_ABORT:
3554 if (tm_dbg_delayed_cmds_count == 0) {
3555 unsigned long d = 58*HZ + (scst_random() % (4*HZ));
3556 TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
3557 " for %ld.%ld seconds (%ld HZ), "
3558 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3559 d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
3560 mod_timer(&tm_dbg_timer, jiffies + d);
3562 tm_dbg_flags.tm_dbg_blocked = 1;
3565 TRACE_MGMT_DBG("Delaying another timed cmd %p "
3566 "(tag %llu), delayed_cmds_count=%d, "
3567 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3568 tm_dbg_delayed_cmds_count,
3569 tm_dbg_on_state_passes);
3570 if (tm_dbg_delayed_cmds_count == 2)
3571 tm_dbg_flags.tm_dbg_blocked = 0;
3575 case TM_DBG_STATE_RESET:
3576 case TM_DBG_STATE_OFFLINE:
3577 TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
3578 "(tag %llu), delayed_cmds_count=%d, "
3579 "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
3580 tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
3581 tm_dbg_flags.tm_dbg_blocked = 1;
3587 /* IRQs already off */
3588 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3589 list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
3590 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3591 cmd->tm_dbg_delayed = 1;
3592 tm_dbg_delayed_cmds_count++;
3597 void tm_dbg_check_released_cmds(void)
3599 if (tm_dbg_flags.tm_dbg_release) {
3600 struct scst_cmd *cmd, *tc;
3601 spin_lock_irq(&scst_tm_dbg_lock);
3602 list_for_each_entry_safe_reverse(cmd, tc,
3603 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
3604 TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
3605 "delayed_cmds_count=%d", cmd, cmd->tag,
3606 tm_dbg_delayed_cmds_count);
3607 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3608 list_move(&cmd->cmd_list_entry,
3609 &cmd->cmd_lists->active_cmd_list);
3610 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3612 tm_dbg_flags.tm_dbg_release = 0;
3613 spin_unlock_irq(&scst_tm_dbg_lock);
3617 /* Called under scst_tm_dbg_lock */
3618 static void tm_dbg_change_state(void)
3620 tm_dbg_flags.tm_dbg_blocked = 0;
3621 if (--tm_dbg_on_state_passes == 0) {
3622 switch (tm_dbg_state) {
3623 case TM_DBG_STATE_ABORT:
3624 TRACE_MGMT_DBG("%s", "Changing "
3625 "tm_dbg_state to RESET");
3628 tm_dbg_flags.tm_dbg_blocked = 0;
3630 case TM_DBG_STATE_RESET:
3631 case TM_DBG_STATE_OFFLINE:
3632 #ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
3633 TRACE_MGMT_DBG("%s", "Changing "
3634 "tm_dbg_state to OFFLINE");
3636 TM_DBG_STATE_OFFLINE;
3638 TRACE_MGMT_DBG("%s", "Changing "
3639 "tm_dbg_state to ABORT");
3647 tm_dbg_on_state_passes =
3648 tm_dbg_on_state_num_passes[tm_dbg_state];
3651 TRACE_MGMT_DBG("%s", "Deleting timer");
3652 del_timer(&tm_dbg_timer);
3656 int tm_dbg_check_cmd(struct scst_cmd *cmd)
3659 unsigned long flags;
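/*
 * tm_dbg_immut marks commands for which the delay decision has
 * already been made, so they pass through untouched on later calls.
 */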
3661 if (cmd->tm_dbg_immut)
3664 if (cmd->tm_dbg_delayed) {
3665 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3666 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
3667 "delayed_cmds_count=%d", cmd, cmd->tag,
3668 tm_dbg_delayed_cmds_count);
3670 cmd->tm_dbg_immut = 1;
3671 tm_dbg_delayed_cmds_count--;
3672 if ((tm_dbg_delayed_cmds_count == 0) &&
3673 (tm_dbg_state == TM_DBG_STATE_ABORT))
3674 tm_dbg_change_state();
3675 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3676 } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3677 &cmd->tgt_dev->tgt_dev_flags)) {
3678 /* Delay every 50th command */
3679 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3680 if (tm_dbg_flags.tm_dbg_blocked ||
3681 (++tm_dbg_passed_cmds_count % 50) == 0) {
3682 tm_dbg_delay_cmd(cmd);
3685 cmd->tm_dbg_immut = 1;
3686 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3694 void tm_dbg_release_cmd(struct scst_cmd *cmd)
3697 unsigned long flags;
3699 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3700 list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
3703 TRACE_MGMT_DBG("Abort request for "
3704 "delayed cmd %p (tag=%llu), moving it to "
3705 "active cmd list (delayed_cmds_count=%d)",
3706 c, c->tag, tm_dbg_delayed_cmds_count);
3708 if (!test_bit(SCST_CMD_ABORTED_OTHER,
3710 /* Test how completed commands are handled */
3711 if (((scst_random() % 10) == 5)) {
3712 scst_set_cmd_error(cmd,
3714 scst_sense_hardw_error));
3715 /* It's completed now */
3719 spin_lock(&cmd->cmd_lists->cmd_list_lock);
3720 list_move(&c->cmd_list_entry,
3721 &c->cmd_lists->active_cmd_list);
3722 wake_up(&c->cmd_lists->cmd_list_waitQ);
3723 spin_unlock(&cmd->cmd_lists->cmd_list_lock);
3727 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
3730 /* Might be called under scst_mutex */
3731 void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
3733 unsigned long flags;
3736 struct scst_tgt_dev *tgt_dev;
3739 spin_lock_bh(&dev->dev_lock);
3740 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
3741 dev_tgt_dev_list_entry) {
3742 if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
3743 &tgt_dev->tgt_dev_flags)) {
3748 spin_unlock_bh(&dev->dev_lock);
3754 spin_lock_irqsave(&scst_tm_dbg_lock, flags);
3755 if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
3756 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
3757 tm_dbg_delayed_cmds_count);
3758 tm_dbg_change_state();
3759 tm_dbg_flags.tm_dbg_release = 1;
3761 * Used to make sure that all woken up threads see the new
3765 if (tm_dbg_p_cmd_list_waitQ != NULL)
3766 wake_up_all(tm_dbg_p_cmd_list_waitQ);
3768 TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
3770 spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);