4 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2009 ID7 Ltd.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/kthread.h>
27 #include <linux/cdrom.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <asm/kmap_types.h>
33 #include "scst_priv.h"
36 #include "scst_cdbprobe.h"
38 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
39 struct scsi_io_context {
40 unsigned int full_cdb_used:1;
42 void (*done)(void *data, char *sense, int result, int resid);
43 char sense[SCST_SENSE_BUFFERSIZE];
44 unsigned char full_cdb[0];
46 static struct kmem_cache *scsi_io_context_cache;
49 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
50 static void scst_check_internal_sense(struct scst_device *dev, int result,
51 uint8_t *sense, int sense_len);
52 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
54 static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
55 const uint8_t *sense, int sense_len, int flags);
56 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
57 const uint8_t *sense, int sense_len, int flags);
58 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
59 static void scst_release_space(struct scst_cmd *cmd);
60 static void scst_sess_free_tgt_devs(struct scst_session *sess);
61 static void scst_unblock_cmds(struct scst_device *dev);
62 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
63 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
64 struct scst_acg_dev *acg_dev);
66 #ifdef CONFIG_SCST_DEBUG_TM
67 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
68 struct scst_acg_dev *acg_dev);
69 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
71 static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
72 struct scst_acg_dev *acg_dev) {}
73 static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
74 #endif /* CONFIG_SCST_DEBUG_TM */
76 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
79 gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
83 if (cmd->sense != NULL)
86 cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
87 if (cmd->sense == NULL) {
88 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
89 "The sense data will be lost!!", cmd->cdb[0]);
95 cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
96 memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);
102 EXPORT_SYMBOL(scst_alloc_sense);
104 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
105 const uint8_t *sense, unsigned int len)
111 res = scst_alloc_sense(cmd, atomic);
113 PRINT_BUFFER("Lost sense", sense, len);
117 memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
118 TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);
124 EXPORT_SYMBOL(scst_alloc_set_sense);
126 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
130 cmd->status = status;
131 cmd->host_status = DID_OK;
133 cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
134 cmd->dbl_ua_orig_data_direction = cmd->data_direction;
136 cmd->data_direction = SCST_DATA_NONE;
137 cmd->resp_data_len = 0;
138 cmd->is_send_status = 1;
145 EXPORT_SYMBOL(scst_set_cmd_error_status);
147 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
153 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
155 rc = scst_alloc_sense(cmd, 1);
157 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
162 scst_set_sense(cmd->sense, cmd->sense_bufflen,
163 scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
164 TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);
170 EXPORT_SYMBOL(scst_set_cmd_error);
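/*
 * Illustrative sketch (not part of the original source): callers typically
 * report a CHECK CONDITION via a predefined sense triple, with
 * SCST_LOAD_SENSE() expanding into the key, ASC and ASCQ arguments:
 *
 *	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
 */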
172 void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
173 int key, int asc, int ascq)
177 memset(buffer, 0, len);
180 /* Descriptor format */
182 PRINT_ERROR("Length %d of sense buffer too small to "
183 "fit sense %x:%x:%x", len, key, asc, ascq);
186 buffer[0] = 0x72; /* Response Code */
188 buffer[1] = key; /* Sense Key */
190 buffer[2] = asc; /* ASC */
192 buffer[3] = ascq; /* ASCQ */
196 PRINT_ERROR("Length %d of sense buffer too small to "
197 "fit sense %x:%x:%x", len, key, asc, ascq);
200 buffer[0] = 0x70; /* Response Code */
202 buffer[2] = key; /* Sense Key */
204 buffer[7] = 0x0a; /* Additional Sense Length */
206 buffer[12] = asc; /* ASC */
208 buffer[13] = ascq; /* ASCQ */
211 TRACE_BUFFER("Sense set", buffer, len);
214 EXPORT_SYMBOL(scst_set_sense);
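/*
 * For reference (derived from the code above): in the fixed format
 * (response code 0x70) the sense key goes to byte 2, the additional sense
 * length (0x0a) to byte 7, ASC to byte 12 and ASCQ to byte 13. In the
 * descriptor format (response code 0x72) the key, ASC and ASCQ go to
 * bytes 1, 2 and 3 respectively.
 */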
216 bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
217 int key, int asc, int ascq)
222 if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
226 PRINT_ERROR("Sense too small to analyze (%d, "
232 if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
236 if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
240 if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
242 } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
243 /* Descriptor format */
246 PRINT_ERROR("Sense too small to analyze (%d, "
247 "type descriptor)", len);
252 if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
256 if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
260 if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
268 TRACE_EXIT_RES((int)res);
271 EXPORT_SYMBOL(scst_analyze_sense);
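/*
 * Illustrative usage (mirroring the callers further below): to check whether
 * a sense buffer carries a particular UA, all three fields are usually
 * matched at once:
 *
 *	if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
 *		SCST_LOAD_SENSE(scst_sense_reset_UA)))
 *		...
 */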
273 bool scst_is_ua_sense(const uint8_t *sense, int len)
275 if (SCST_SENSE_VALID(sense))
276 return scst_analyze_sense(sense, len,
277 SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
281 EXPORT_SYMBOL(scst_is_ua_sense);
283 bool scst_is_ua_global(const uint8_t *sense, int len)
287 /* When changing this, don't forget to change scst_requeue_ua() as well!! */
289 if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
290 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed)))
298 void scst_check_convert_sense(struct scst_cmd *cmd)
304 if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
307 d_sense = scst_get_cmd_dev_d_sense(cmd);
308 if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
309 TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
311 if (cmd->sense_bufflen < 14) {
312 PRINT_ERROR("Sense too small to convert (%d, "
313 "type fixed)", cmd->sense_bufflen);
316 scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
317 cmd->sense[2], cmd->sense[12], cmd->sense[13]);
318 } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
319 (cmd->sense[0] == 0x73))) {
320 TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
322 if (cmd->sense_bufflen < 4) {
323 PRINT_ERROR("Sense too small to convert (%d, "
324 "type descryptor)", cmd->sense_bufflen);
327 scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
328 cmd->sense[1], cmd->sense[2], cmd->sense[3]);
335 EXPORT_SYMBOL(scst_check_convert_sense);
337 static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
342 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
343 scst_alloc_set_sense(cmd, 1, sense, len);
349 void scst_set_busy(struct scst_cmd *cmd)
351 int c = atomic_read(&cmd->sess->sess_cmd_count);
355 if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
356 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
357 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
358 "(cmds count %d, queue_type %x, sess->init_phase %d)",
359 cmd->sess->initiator_name, c,
360 cmd->queue_type, cmd->sess->init_phase);
362 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
363 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
364 "initiator %s (cmds count %d, queue_type %x, "
365 "sess->init_phase %d)", cmd->sess->initiator_name, c,
366 cmd->queue_type, cmd->sess->init_phase);
372 EXPORT_SYMBOL(scst_set_busy);
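/*
 * Design note (derived from the code above): with at most one outstanding
 * command, or while the session is still initializing, plain BUSY is
 * returned, because TASK SET FULL would not be meaningful without a filled
 * task set; otherwise TASK SET FULL (QUEUE FULL) is reported.
 */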
374 void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
380 TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
383 /* Protect sess_tgt_dev_list_hash */
384 mutex_lock(&scst_mutex);
386 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
387 struct list_head *sess_tgt_dev_list_head =
388 &sess->sess_tgt_dev_list_hash[i];
389 struct scst_tgt_dev *tgt_dev;
391 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
392 sess_tgt_dev_list_entry) {
393 spin_lock_bh(&tgt_dev->tgt_dev_lock);
394 if (!list_empty(&tgt_dev->UA_list)) {
395 struct scst_tgt_dev_UA *ua;
397 ua = list_entry(tgt_dev->UA_list.next,
398 typeof(*ua), UA_list_entry);
399 if (scst_analyze_sense(ua->UA_sense_buffer,
400 sizeof(ua->UA_sense_buffer),
401 SCST_SENSE_ALL_VALID,
402 SCST_LOAD_SENSE(scst_sense_reset_UA))) {
403 scst_set_sense(ua->UA_sense_buffer,
404 sizeof(ua->UA_sense_buffer),
405 tgt_dev->dev->d_sense,
409 "The first UA isn't RESET UA");
411 PRINT_ERROR("%s", "There's no RESET UA to "
413 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
417 mutex_unlock(&scst_mutex);
422 EXPORT_SYMBOL(scst_set_initial_UA);
424 static struct scst_aen *scst_alloc_aen(struct scst_session *sess,
425 uint64_t unpacked_lun)
427 struct scst_aen *aen;
431 aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
433 PRINT_ERROR("AEN memory allocation failed. Corresponding "
434 "event notification will not be performed (initiator "
435 "%s)", sess->initiator_name);
438 memset(aen, 0, sizeof(*aen));
443 aen->lun = scst_pack_lun(unpacked_lun);
446 TRACE_EXIT_HRES((unsigned long)aen);
450 static void scst_free_aen(struct scst_aen *aen)
454 scst_sess_put(aen->sess);
455 mempool_free(aen, scst_aen_mempool);
461 /* Must be called under scst_mutex */
462 void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
463 int key, int asc, int ascq)
465 struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
466 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
470 if (tgtt->report_aen != NULL) {
471 struct scst_aen *aen;
474 aen = scst_alloc_aen(tgt_dev->sess, tgt_dev->lun);
478 aen->event_fn = SCST_AEN_SCSI;
479 aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
480 scst_set_sense(aen->aen_sense, aen->aen_sense_len,
481 tgt_dev->dev->d_sense, key, asc, ascq);
483 TRACE_DBG("Calling target's %s report_aen(%p)",
485 rc = tgtt->report_aen(aen);
486 TRACE_DBG("Target's %s report_aen(%p) returned %d",
487 tgtt->name, aen, rc);
488 if (rc == SCST_AEN_RES_SUCCESS)
495 TRACE_MGMT_DBG("AEN not supported, queuing plain UA (tgt_dev %p)",
497 scst_set_sense(sense_buffer, sizeof(sense_buffer),
498 tgt_dev->dev->d_sense, key, asc, ascq);
499 scst_check_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);
507 void scst_capacity_data_changed(struct scst_device *dev)
509 struct scst_tgt_dev *tgt_dev;
513 if (dev->type != TYPE_DISK) {
514 TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
515 "CHANGED UA", dev->type);
519 TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);
521 mutex_lock(&scst_mutex);
523 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
524 dev_tgt_dev_list_entry) {
525 scst_gen_aen_or_ua(tgt_dev,
526 SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
529 mutex_unlock(&scst_mutex);
535 EXPORT_SYMBOL(scst_capacity_data_changed);
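/*
 * Usage note (an assumption, not stated in the original file): a dev handler
 * would typically call scst_capacity_data_changed() after resizing its
 * backing storage, so that every initiator seeing this disk-type device gets
 * a CAPACITY DATA CHANGED AEN or UA via scst_gen_aen_or_ua() above.
 */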
537 static inline bool scst_is_report_luns_changed_type(int type)
548 case TYPE_MEDIUM_CHANGER:
557 /* scst_mutex supposed to be held */
558 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
561 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
562 struct list_head *shead;
563 struct scst_tgt_dev *tgt_dev;
568 TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
573 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
574 shead = &sess->sess_tgt_dev_list_hash[i];
576 list_for_each_entry(tgt_dev, shead,
577 sess_tgt_dev_list_entry) {
578 /* Lockdep triggers a false positive here.. */
579 spin_lock(&tgt_dev->tgt_dev_lock);
583 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
584 shead = &sess->sess_tgt_dev_list_hash[i];
586 list_for_each_entry(tgt_dev, shead,
587 sess_tgt_dev_list_entry) {
588 if (!scst_is_report_luns_changed_type(
592 scst_set_sense(sense_buffer, sizeof(sense_buffer),
593 tgt_dev->dev->d_sense,
594 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
596 __scst_check_set_UA(tgt_dev, sense_buffer,
597 sizeof(sense_buffer),
598 flags | SCST_SET_UA_FLAG_GLOBAL);
602 for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
603 shead = &sess->sess_tgt_dev_list_hash[i];
605 list_for_each_entry_reverse(tgt_dev,
606 shead, sess_tgt_dev_list_entry) {
607 spin_unlock(&tgt_dev->tgt_dev_lock);
617 /* The activity supposed to be suspended and scst_mutex held */
618 static void scst_report_luns_changed_sess(struct scst_session *sess)
621 struct scst_tgt_template *tgtt = sess->tgt->tgtt;
627 TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);
629 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
630 struct list_head *shead;
631 struct scst_tgt_dev *tgt_dev;
633 shead = &sess->sess_tgt_dev_list_hash[i];
635 list_for_each_entry(tgt_dev, shead,
636 sess_tgt_dev_list_entry) {
637 if (scst_is_report_luns_changed_type(
638 tgt_dev->dev->type)) {
640 d_sense = tgt_dev->dev->d_sense;
647 if (tgtt->report_aen != NULL) {
648 struct scst_aen *aen;
651 aen = scst_alloc_aen(sess, lun);
655 aen->event_fn = SCST_AEN_SCSI;
656 aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
657 scst_set_sense(aen->aen_sense, aen->aen_sense_len, d_sense,
658 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
660 TRACE_DBG("Calling target's %s report_aen(%p)",
662 rc = tgtt->report_aen(aen);
663 TRACE_DBG("Target's %s report_aen(%p) returned %d",
664 tgtt->name, aen, rc);
665 if (rc == SCST_AEN_RES_SUCCESS)
672 scst_queue_report_luns_changed_UA(sess, 0);
679 /* The activity supposed to be suspended and scst_mutex held */
680 void scst_report_luns_changed(struct scst_acg *acg)
682 struct scst_session *sess;
686 TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);
688 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
689 scst_report_luns_changed_sess(sess);
696 void scst_aen_done(struct scst_aen *aen)
700 TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
701 aen->event_fn, aen->sess->initiator_name);
703 if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
706 if (aen->event_fn != SCST_AEN_SCSI)
709 TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
710 aen->sess->initiator_name);
712 if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
713 SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
714 scst_sense_reported_luns_data_changed))) {
715 mutex_lock(&scst_mutex);
716 scst_queue_report_luns_changed_UA(aen->sess,
717 SCST_SET_UA_FLAG_AT_HEAD);
718 mutex_unlock(&scst_mutex);
720 struct list_head *shead;
721 struct scst_tgt_dev *tgt_dev;
724 lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));
726 mutex_lock(&scst_mutex);
728 /* tgt_dev might have been removed, so we need to look it up again */
729 shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
730 list_for_each_entry(tgt_dev, shead,
731 sess_tgt_dev_list_entry) {
732 if (tgt_dev->lun == lun) {
733 TRACE_MGMT_DBG("Requeuing failed AEN UA for "
734 "tgt_dev %p", tgt_dev);
735 scst_check_set_UA(tgt_dev, aen->aen_sense,
737 SCST_SET_UA_FLAG_AT_HEAD);
742 mutex_unlock(&scst_mutex);
751 EXPORT_SYMBOL(scst_aen_done);
753 void scst_requeue_ua(struct scst_cmd *cmd)
757 if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
758 SCST_SENSE_ALL_VALID,
759 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
760 TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
761 "for delivery failed cmd %p", cmd);
762 mutex_lock(&scst_mutex);
763 scst_queue_report_luns_changed_UA(cmd->sess,
764 SCST_SET_UA_FLAG_AT_HEAD);
765 mutex_unlock(&scst_mutex);
767 TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
768 scst_check_set_UA(cmd->tgt_dev, cmd->sense,
769 cmd->sense_bufflen, SCST_SET_UA_FLAG_AT_HEAD);
776 /* The activity supposed to be suspended and scst_mutex held */
777 static void scst_check_reassign_sess(struct scst_session *sess)
779 struct scst_acg *acg, *old_acg;
780 struct scst_acg_dev *acg_dev;
782 struct list_head *shead;
783 struct scst_tgt_dev *tgt_dev;
784 bool luns_changed = false;
785 bool add_failed, something_freed, not_needed_freed = false;
789 TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
790 sess, sess->initiator_name);
792 acg = scst_find_acg(sess);
793 if (acg == sess->acg) {
794 TRACE_MGMT_DBG("No reassignment for sess %p", sess);
798 TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
799 sess, sess->acg->acg_name, acg->acg_name);
802 sess->acg = NULL; /* to catch implicit dependencies earlier */
806 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
807 unsigned int inq_changed_ua_needed = 0;
809 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
810 shead = &sess->sess_tgt_dev_list_hash[i];
812 list_for_each_entry(tgt_dev, shead,
813 sess_tgt_dev_list_entry) {
814 if ((tgt_dev->dev == acg_dev->dev) &&
815 (tgt_dev->lun == acg_dev->lun) &&
816 (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
817 TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
818 "LUN %lld stays the same",
820 (unsigned long long)tgt_dev->lun);
821 tgt_dev->acg_dev = acg_dev;
823 } else if (tgt_dev->lun == acg_dev->lun)
824 inq_changed_ua_needed = 1;
830 TRACE_MGMT_DBG("sess %p: Allocing new tgt_dev for LUN %lld",
831 sess, (unsigned long long)acg_dev->lun);
833 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
834 if (tgt_dev == NULL) {
839 tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
845 something_freed = false;
846 not_needed_freed = true;
847 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
848 struct scst_tgt_dev *t;
849 shead = &sess->sess_tgt_dev_list_hash[i];
851 list_for_each_entry_safe(tgt_dev, t, shead,
852 sess_tgt_dev_list_entry) {
853 if (tgt_dev->acg_dev->acg != acg) {
854 TRACE_MGMT_DBG("sess %p: Deleting unused "
855 "tgt_dev %p for LUN %lld",
857 (unsigned long long)tgt_dev->lun);
859 something_freed = true;
860 scst_free_tgt_dev(tgt_dev);
865 if (add_failed && something_freed) {
866 TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
872 TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
873 old_acg->acg_name, acg->acg_name);
874 list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);
877 scst_report_luns_changed_sess(sess);
879 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
880 shead = &sess->sess_tgt_dev_list_hash[i];
882 list_for_each_entry(tgt_dev, shead,
883 sess_tgt_dev_list_entry) {
884 if (tgt_dev->inq_changed_ua_needed) {
885 TRACE_MGMT_DBG("sess %p: Setting "
886 "INQUIRY DATA HAS CHANGED UA "
887 "(tgt_dev %p)", sess, tgt_dev);
889 tgt_dev->inq_changed_ua_needed = 0;
891 scst_gen_aen_or_ua(tgt_dev,
892 SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
903 /* The activity supposed to be suspended and scst_mutex held */
904 void scst_check_reassign_sessions(void)
906 struct scst_tgt_template *tgtt;
910 list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
911 struct scst_tgt *tgt;
912 list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
913 struct scst_session *sess;
914 list_for_each_entry(sess, &tgt->sess_list,
916 scst_check_reassign_sess(sess);
925 int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
931 switch (cmd->state) {
932 case SCST_CMD_STATE_INIT_WAIT:
933 case SCST_CMD_STATE_INIT:
934 case SCST_CMD_STATE_PRE_PARSE:
935 case SCST_CMD_STATE_DEV_PARSE:
936 case SCST_CMD_STATE_DEV_DONE:
938 res = SCST_CMD_STATE_FINISHED_INTERNAL;
940 res = SCST_CMD_STATE_PRE_XMIT_RESP;
943 case SCST_CMD_STATE_PRE_DEV_DONE:
944 case SCST_CMD_STATE_MODE_SELECT_CHECKS:
945 res = SCST_CMD_STATE_DEV_DONE;
948 case SCST_CMD_STATE_PRE_XMIT_RESP:
949 res = SCST_CMD_STATE_XMIT_RESP;
952 case SCST_CMD_STATE_PREPROCESS_DONE:
953 case SCST_CMD_STATE_PREPARE_SPACE:
954 case SCST_CMD_STATE_RDY_TO_XFER:
955 case SCST_CMD_STATE_DATA_WAIT:
956 case SCST_CMD_STATE_TGT_PRE_EXEC:
957 case SCST_CMD_STATE_SEND_FOR_EXEC:
958 case SCST_CMD_STATE_LOCAL_EXEC:
959 case SCST_CMD_STATE_REAL_EXEC:
960 case SCST_CMD_STATE_REAL_EXECUTING:
961 res = SCST_CMD_STATE_PRE_DEV_DONE;
965 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
966 cmd->state, cmd, cmd->cdb[0]);
968 /* Invalid state to suppress compiler warning */
969 res = SCST_CMD_STATE_LAST_ACTIVE;
975 EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
977 void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
981 #ifdef CONFIG_SCST_EXTRACHECKS
982 switch (cmd->state) {
983 case SCST_CMD_STATE_XMIT_RESP:
984 case SCST_CMD_STATE_FINISHED:
985 case SCST_CMD_STATE_FINISHED_INTERNAL:
986 case SCST_CMD_STATE_XMIT_WAIT:
987 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
988 cmd->state, cmd, cmd->cdb[0]);
993 cmd->state = scst_get_cmd_abnormal_done_state(cmd);
995 #ifdef CONFIG_SCST_EXTRACHECKS
996 if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
997 (cmd->tgt_dev == NULL) && !cmd->internal) {
998 PRINT_CRIT_ERROR("Wrong state %d of uninitialized cmd (cmd %p, "
999 "op %x)", cmd->state, cmd, cmd->cdb[0]);
1007 EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
1009 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
1015 scst_check_restore_sg_buff(cmd);
1016 cmd->resp_data_len = resp_data_len;
1018 if (resp_data_len == cmd->bufflen)
1022 for (i = 0; i < cmd->sg_cnt; i++) {
1023 l += cmd->sg[i].length;
1024 if (l >= resp_data_len) {
1025 int left = resp_data_len - (l - cmd->sg[i].length);
1026 #ifdef CONFIG_SCST_DEBUG
1027 TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
1028 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
1030 cmd, (long long unsigned int)cmd->tag,
1032 cmd->sg[i].length, left);
1034 cmd->orig_sg_cnt = cmd->sg_cnt;
1035 cmd->orig_sg_entry = i;
1036 cmd->orig_entry_len = cmd->sg[i].length;
1037 cmd->sg_cnt = (left > 0) ? i+1 : i;
1038 cmd->sg[i].length = left;
1039 cmd->sg_buff_modified = 1;
1048 EXPORT_SYMBOL(scst_set_resp_data_len);
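/*
 * Worked example (illustration only): with three 4 KiB SG entries and a
 * resp_data_len of 6 KiB, the loop above stops at the second entry with
 * left == 2048, so sg_cnt is truncated to 2 and sg[1].length shrunk to
 * 2048. The original values are saved in orig_sg_cnt/orig_sg_entry/
 * orig_entry_len so that scst_check_restore_sg_buff() can undo the
 * truncation later.
 */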
1051 int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
1053 struct scst_tgt *tgt = cmd->tgt;
1055 unsigned long flags;
1059 spin_lock_irqsave(&tgt->tgt_lock, flags);
1062 * A memory barrier is needed here, because we rely on the exact ordering
1063 * of the write to retry_cmds and the read of finished_cmds, so that we do
1064 * not miss the case when a command finishes while we are queuing this one
1065 * for retry after the finished_cmds check.
1068 TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
1070 if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
1071 /* At least one cmd finished, so try again */
1073 TRACE_RETRY("Some command(s) finished, direct retry "
1074 "(finished_cmds=%d, tgt->finished_cmds=%d, "
1075 "retry_cmds=%d)", finished_cmds,
1076 atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
1078 goto out_unlock_tgt;
1081 TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
1082 list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
1084 if (!tgt->retry_timer_active) {
1085 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
1086 add_timer(&tgt->retry_timer);
1087 tgt->retry_timer_active = 1;
1091 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1093 TRACE_EXIT_RES(res);
1097 /* Returns 0 to continue, >0 to restart, <0 to break */
1098 static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
1099 unsigned long cur_time, unsigned long max_time,
1100 struct scst_session *sess, unsigned long *flags,
1101 struct scst_tgt_template *tgtt)
1103 int res = -1; /* break */
1105 TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
1106 "pending time %ld", cmd, cmd->cmd_hw_pending,
1107 (long)(cur_time - cmd->start_time) / HZ,
1108 (long)(cur_time - cmd->hw_pending_start) / HZ);
1110 if (time_before_eq(cur_time, cmd->start_time + max_time)) {
1111 /* Cmds are ordered, so no need to check more */
1115 if (!cmd->cmd_hw_pending) {
1116 res = 0; /* continue */
1120 if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
1121 /* Cmds are ordered, so no need to check more */
1125 TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
1126 cmd, (cur_time - cmd->hw_pending_start) / HZ,
1129 cmd->cmd_hw_pending = 0;
1131 spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
1132 tgtt->on_hw_pending_cmd_timeout(cmd);
1133 spin_lock_irqsave(&sess->sess_list_lock, *flags);
1135 res = 1; /* restart */
1138 TRACE_EXIT_RES(res);
1142 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
1143 static void scst_hw_pending_work_fn(void *p)
1145 static void scst_hw_pending_work_fn(struct delayed_work *work)
1148 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
1149 struct scst_session *sess = (struct scst_session *)p;
1151 struct scst_session *sess = container_of(work, struct scst_session,
1154 struct scst_tgt_template *tgtt = sess->tgt->tgtt;
1155 struct scst_cmd *cmd;
1156 unsigned long cur_time = jiffies;
1157 unsigned long flags;
1158 unsigned long max_time = tgtt->max_hw_pending_time * HZ;
1162 TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);
1164 clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
1166 spin_lock_irqsave(&sess->sess_list_lock, flags);
1169 list_for_each_entry(cmd, &sess->search_cmd_list,
1170 sess_cmd_list_entry) {
1173 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
1184 list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
1185 sess_cmd_list_entry) {
1188 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
1198 if (!list_empty(&sess->search_cmd_list) ||
1199 !list_empty(&sess->after_pre_xmit_cmd_list)) {
1201 * If there is no activity, stuck cmds might need one more run to be
1202 * released, so reschedule once again.
1204 TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
1205 sess, tgtt->max_hw_pending_time);
1206 set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
1207 schedule_delayed_work(&sess->hw_pending_work,
1208 tgtt->max_hw_pending_time * HZ);
1211 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
1217 /* Called under scst_mutex and suspended activity */
1218 int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
1220 struct scst_device *dev;
1222 static int dev_num; /* protected by scst_mutex */
1226 dev = kzalloc(sizeof(*dev), gfp_mask);
1228 TRACE(TRACE_OUT_OF_MEM, "%s",
1229 "Allocation of scst_device failed");
1234 dev->handler = &scst_null_devtype;
1235 dev->p_cmd_lists = &scst_main_cmd_lists;
1236 atomic_set(&dev->dev_cmd_count, 0);
1237 atomic_set(&dev->write_cmd_count, 0);
1238 scst_init_mem_lim(&dev->dev_mem_lim);
1239 spin_lock_init(&dev->dev_lock);
1240 atomic_set(&dev->on_dev_count, 0);
1241 INIT_LIST_HEAD(&dev->blocked_cmd_list);
1242 INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
1243 INIT_LIST_HEAD(&dev->dev_acg_dev_list);
1244 INIT_LIST_HEAD(&dev->threads_list);
1245 init_waitqueue_head(&dev->on_dev_waitQ);
1246 dev->dev_double_ua_possible = 1;
1247 dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
1248 dev->dev_num = dev_num++;
1250 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && defined(SCST_IO_CONTEXT)
1251 #if defined(CONFIG_BLOCK)
1252 dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1253 if (dev->dev_io_ctx == NULL) {
1254 TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
1265 TRACE_EXIT_RES(res);
1269 /* Called under scst_mutex and suspended activity */
1270 void scst_free_device(struct scst_device *dev)
1274 #ifdef CONFIG_SCST_EXTRACHECKS
1275 if (!list_empty(&dev->dev_tgt_dev_list) ||
1276 !list_empty(&dev->dev_acg_dev_list)) {
1277 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
1278 "is not empty!", __func__);
1283 __exit_io_context(dev->dev_io_ctx);
1291 void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
1293 atomic_set(&mem_lim->alloced_pages, 0);
1294 mem_lim->max_allowed_pages =
1295 ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
1297 EXPORT_SYMBOL(scst_init_mem_lim);
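/*
 * Note (illustration, assuming scst_max_dev_cmd_mem is given in MB): the
 * expression above first converts MB to KB (<< 10) and then KB to pages
 * (>> (PAGE_SHIFT - 10)); e.g. with 4 KiB pages a 256 MB limit becomes
 * 256 * 1024 / 4 = 65536 pages.
 */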
1299 static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
1300 struct scst_device *dev, uint64_t lun)
1302 struct scst_acg_dev *res;
1306 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1307 res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
1309 res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
1312 TRACE(TRACE_OUT_OF_MEM,
1313 "%s", "Allocation of scst_acg_dev failed");
1316 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1317 memset(res, 0, sizeof(*res));
1325 TRACE_EXIT_HRES(res);
1329 /* The activity supposed to be suspended and scst_mutex held */
1330 static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
1334 TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
1336 list_del(&acg_dev->acg_dev_list_entry);
1337 list_del(&acg_dev->dev_acg_dev_list_entry);
1339 kmem_cache_free(scst_acgd_cachep, acg_dev);
1345 /* The activity supposed to be suspended and scst_mutex held */
1346 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
1348 struct scst_acg *acg;
1352 acg = kzalloc(sizeof(*acg), GFP_KERNEL);
1354 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
1358 INIT_LIST_HEAD(&acg->acg_dev_list);
1359 INIT_LIST_HEAD(&acg->acg_sess_list);
1360 INIT_LIST_HEAD(&acg->acn_list);
1361 acg->acg_name = acg_name;
1363 TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
1364 list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
1366 scst_check_reassign_sessions();
1369 TRACE_EXIT_HRES(acg);
1373 /* The activity supposed to be suspended and scst_mutex held */
1374 int scst_destroy_acg(struct scst_acg *acg)
1376 struct scst_acn *n, *nn;
1377 struct scst_acg_dev *acg_dev, *acg_dev_tmp;
1382 if (!list_empty(&acg->acg_sess_list)) {
1383 PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
1388 TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
1389 list_del(&acg->scst_acg_list_entry);
1391 /* Freeing acg_devs */
1392 list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
1393 acg_dev_list_entry) {
1394 struct scst_tgt_dev *tgt_dev, *tt;
1395 list_for_each_entry_safe(tgt_dev, tt,
1396 &acg_dev->dev->dev_tgt_dev_list,
1397 dev_tgt_dev_list_entry) {
1398 if (tgt_dev->acg_dev == acg_dev)
1399 scst_free_tgt_dev(tgt_dev);
1401 scst_free_acg_dev(acg_dev);
1405 list_for_each_entry_safe(n, nn, &acg->acn_list,
1407 list_del(&n->acn_list_entry);
1411 INIT_LIST_HEAD(&acg->acn_list);
1415 TRACE_EXIT_RES(res);
1420 * scst_mutex supposed to be held, there must not be parallel activity in this
1423 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
1424 struct scst_acg_dev *acg_dev)
1426 int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
1427 struct scst_tgt_dev *tgt_dev, *t = NULL;
1428 struct scst_device *dev = acg_dev->dev;
1429 struct list_head *sess_tgt_dev_list_head;
1430 struct scst_tgt_template *vtt = sess->tgt->tgtt;
1432 bool share_io_ctx = false;
1433 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1437 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1438 tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
1440 tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
1442 if (tgt_dev == NULL) {
1443 TRACE(TRACE_OUT_OF_MEM, "%s",
1444 "Allocation of scst_tgt_dev failed");
1447 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1448 memset(tgt_dev, 0, sizeof(*tgt_dev));
1452 tgt_dev->lun = acg_dev->lun;
1453 tgt_dev->acg_dev = acg_dev;
1454 tgt_dev->sess = sess;
1455 atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
1457 scst_sgv_pool_use_norm(tgt_dev);
1459 if (dev->scsi_dev != NULL) {
1460 ini_sg = dev->scsi_dev->host->sg_tablesize;
1461 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
1462 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
1465 ini_sg = (1 << 15) /* infinite */;
1466 ini_unchecked_isa_dma = 0;
1467 ini_use_clustering = 0;
1469 tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
1471 if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
1472 !sess->tgt->tgtt->no_clustering)
1473 scst_sgv_pool_use_norm_clust(tgt_dev);
1475 if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
1476 scst_sgv_pool_use_dma(tgt_dev);
1478 if (dev->scsi_dev != NULL) {
1479 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
1480 "SCST lun=%lld", dev->scsi_dev->host->host_no,
1481 dev->scsi_dev->channel, dev->scsi_dev->id,
1483 (long long unsigned int)tgt_dev->lun);
1485 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
1486 dev->virt_name, (long long unsigned int)tgt_dev->lun);
1489 spin_lock_init(&tgt_dev->tgt_dev_lock);
1490 INIT_LIST_HEAD(&tgt_dev->UA_list);
1491 spin_lock_init(&tgt_dev->thr_data_lock);
1492 INIT_LIST_HEAD(&tgt_dev->thr_data_list);
1493 spin_lock_init(&tgt_dev->sn_lock);
1494 INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
1495 INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
1496 tgt_dev->expected_sn = 1;
1497 tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
1498 tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
1499 for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
1500 atomic_set(&tgt_dev->sn_slots[i], 0);
1502 if (dev->handler->parse_atomic &&
1503 (sess->tgt->tgtt->preprocessing_done == NULL)) {
1504 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1505 __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
1506 &tgt_dev->tgt_dev_flags);
1507 if (dev->handler->exec_atomic)
1508 __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
1509 &tgt_dev->tgt_dev_flags);
1511 if (dev->handler->exec_atomic) {
1512 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1513 __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
1514 &tgt_dev->tgt_dev_flags);
1515 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
1516 &tgt_dev->tgt_dev_flags);
1517 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1518 &tgt_dev->tgt_dev_flags);
1520 if (dev->handler->dev_done_atomic &&
1521 sess->tgt->tgtt->xmit_response_atomic) {
1522 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1523 &tgt_dev->tgt_dev_flags);
1526 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1527 dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
1528 scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);
1530 tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
1532 if (tgt_dev->sess->initiator_name != NULL) {
1533 spin_lock_bh(&dev->dev_lock);
1534 list_for_each_entry(t, &dev->dev_tgt_dev_list,
1535 dev_tgt_dev_list_entry) {
1536 TRACE_DBG("t name %s (tgt_dev name %s)",
1537 t->sess->initiator_name,
1538 tgt_dev->sess->initiator_name);
1539 if (t->sess->initiator_name == NULL)
1541 if (strcmp(t->sess->initiator_name,
1542 tgt_dev->sess->initiator_name) == 0) {
1543 share_io_ctx = true;
1547 spin_unlock_bh(&dev->dev_lock);
1551 TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
1552 t->tgt_dev_io_ctx, tgt_dev,
1553 tgt_dev->sess->initiator_name);
1554 tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
1556 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && defined(SCST_IO_CONTEXT)
1557 #if defined(CONFIG_BLOCK)
1558 tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1559 if (tgt_dev->tgt_dev_io_ctx == NULL) {
1560 TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
1561 "context for dev %s (initiator %s)",
1562 dev->virt_name, sess->initiator_name);
1569 if (vtt->threads_num > 0) {
1571 if (dev->handler->threads_num > 0)
1572 rc = scst_add_dev_threads(dev, vtt->threads_num);
1573 else if (dev->handler->threads_num == 0)
1574 rc = scst_add_global_threads(vtt->threads_num);
1579 if (dev->handler && dev->handler->attach_tgt) {
1580 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1582 rc = dev->handler->attach_tgt(tgt_dev);
1583 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1585 PRINT_ERROR("Device handler's %s attach_tgt() "
1586 "failed: %d", dev->handler->name, rc);
1591 spin_lock_bh(&dev->dev_lock);
1592 list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
1593 if (dev->dev_reserved)
1594 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
1595 spin_unlock_bh(&dev->dev_lock);
1597 sess_tgt_dev_list_head =
1598 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
1599 list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
1600 sess_tgt_dev_list_head);
1607 if (vtt->threads_num > 0) {
1608 if (dev->handler->threads_num > 0)
1609 scst_del_dev_threads(dev, vtt->threads_num);
1610 else if (dev->handler->threads_num == 0)
1611 scst_del_global_threads(vtt->threads_num);
1615 scst_free_all_UA(tgt_dev);
1616 __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1618 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1623 /* No other locks supposed to be held, but scst_mutex must be held */
1624 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1628 scst_clear_reservation(tgt_dev);
1630 /* With activity suspended the lock isn't needed, but let's be safe */
1631 spin_lock_bh(&tgt_dev->tgt_dev_lock);
1632 scst_free_all_UA(tgt_dev);
1633 memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1634 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1637 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1638 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1639 tgt_dev->dev->d_sense,
1640 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1641 scst_check_set_UA(tgt_dev, sense_buffer,
1642 sizeof(sense_buffer), 0);
1650 * scst_mutex supposed to be held, there must not be parallel activity in this
1653 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1655 struct scst_device *dev = tgt_dev->dev;
1656 struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1660 tm_dbg_deinit_tgt_dev(tgt_dev);
1662 spin_lock_bh(&dev->dev_lock);
1663 list_del(&tgt_dev->dev_tgt_dev_list_entry);
1664 spin_unlock_bh(&dev->dev_lock);
1666 list_del(&tgt_dev->sess_tgt_dev_list_entry);
1668 scst_clear_reservation(tgt_dev);
1669 scst_free_all_UA(tgt_dev);
1671 if (dev->handler && dev->handler->detach_tgt) {
1672 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1674 dev->handler->detach_tgt(tgt_dev);
1675 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1678 if (vtt->threads_num > 0) {
1679 if (dev->handler->threads_num > 0)
1680 scst_del_dev_threads(dev, vtt->threads_num);
1681 else if (dev->handler->threads_num == 0)
1682 scst_del_global_threads(vtt->threads_num);
1685 __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1687 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1693 /* scst_mutex supposed to be held */
1694 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1697 struct scst_acg_dev *acg_dev;
1698 struct scst_tgt_dev *tgt_dev;
1702 list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1703 acg_dev_list_entry) {
1704 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1705 if (tgt_dev == NULL) {
1716 scst_sess_free_tgt_devs(sess);
1721 * scst_mutex supposed to be held, there must not be parallel activity in this
1724 static void scst_sess_free_tgt_devs(struct scst_session *sess)
1727 struct scst_tgt_dev *tgt_dev, *t;
1731 /* The session is going down, no users, so no locks */
1732 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1733 struct list_head *sess_tgt_dev_list_head =
1734 &sess->sess_tgt_dev_list_hash[i];
1735 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1736 sess_tgt_dev_list_entry) {
1737 scst_free_tgt_dev(tgt_dev);
1739 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1746 /* The activity supposed to be suspended and scst_mutex held */
1747 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1748 uint64_t lun, int read_only, bool gen_scst_report_luns_changed)
1751 struct scst_acg_dev *acg_dev;
1752 struct scst_tgt_dev *tgt_dev;
1753 struct scst_session *sess;
1754 LIST_HEAD(tmp_tgt_dev_list);
1758 INIT_LIST_HEAD(&tmp_tgt_dev_list);
1760 acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1761 if (acg_dev == NULL) {
1765 acg_dev->rd_only = read_only;
1767 TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1769 list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1770 list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1772 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1773 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1774 if (tgt_dev == NULL) {
1778 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1782 if (gen_scst_report_luns_changed)
1783 scst_report_luns_changed(acg);
1785 if (dev->virt_name != NULL) {
1786 PRINT_INFO("Added device %s to group %s (LUN %lld, "
1787 "rd_only %d)", dev->virt_name, acg->acg_name,
1788 (long long unsigned int)lun,
1791 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
1792 "%lld, rd_only %d)",
1793 dev->scsi_dev->host->host_no,
1794 dev->scsi_dev->channel, dev->scsi_dev->id,
1795 dev->scsi_dev->lun, acg->acg_name,
1796 (long long unsigned int)lun,
1801 TRACE_EXIT_RES(res);
1805 list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1806 extra_tgt_dev_list_entry) {
1807 scst_free_tgt_dev(tgt_dev);
1809 scst_free_acg_dev(acg_dev);
1813 /* The activity supposed to be suspended and scst_mutex held */
1814 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev,
1815 bool gen_scst_report_luns_changed)
1818 struct scst_acg_dev *acg_dev = NULL, *a;
1819 struct scst_tgt_dev *tgt_dev, *tt;
1823 list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1824 if (a->dev == dev) {
1830 if (acg_dev == NULL) {
1831 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
1836 list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1837 dev_tgt_dev_list_entry) {
1838 if (tgt_dev->acg_dev == acg_dev)
1839 scst_free_tgt_dev(tgt_dev);
1841 scst_free_acg_dev(acg_dev);
1843 if (gen_scst_report_luns_changed)
1844 scst_report_luns_changed(acg);
1846 if (dev->virt_name != NULL) {
1847 PRINT_INFO("Removed device %s from group %s",
1848 dev->virt_name, acg->acg_name);
1850 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
1851 dev->scsi_dev->host->host_no,
1852 dev->scsi_dev->channel, dev->scsi_dev->id,
1853 dev->scsi_dev->lun, acg->acg_name);
1857 TRACE_EXIT_RES(res);
1861 /* The activity supposed to be suspended and scst_mutex held */
1862 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1871 list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1872 if (strcmp(n->name, name) == 0) {
1873 PRINT_ERROR("Name %s already exists in group %s",
1874 name, acg->acg_name);
1880 n = kmalloc(sizeof(*n), GFP_KERNEL);
1882 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1888 nm = kmalloc(len + 1, GFP_KERNEL);
1890 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1898 list_add_tail(&n->acn_list_entry, &acg->acn_list);
1902 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1903 scst_check_reassign_sessions();
1906 TRACE_EXIT_RES(res);
1914 /* scst_mutex supposed to be held */
1915 void __scst_acg_remove_acn(struct scst_acn *n)
1919 list_del(&n->acn_list_entry);
1927 /* The activity supposed to be suspended and scst_mutex held */
1928 int scst_acg_remove_name(struct scst_acg *acg, const char *name, bool reassign)
1935 list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1936 if (strcmp(n->name, name) == 0) {
1937 __scst_acg_remove_acn(n);
1944 PRINT_INFO("Removed name %s from group %s", name,
1947 scst_check_reassign_sessions();
1949 PRINT_ERROR("Unable to find name %s in group %s", name,
1952 TRACE_EXIT_RES(res);
1956 static struct scst_cmd *scst_create_prepare_internal_cmd(
1957 struct scst_cmd *orig_cmd, int bufsize)
1959 struct scst_cmd *res;
1960 gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1964 res = scst_alloc_cmd(gfp_mask);
1968 res->cmd_lists = orig_cmd->cmd_lists;
1969 res->sess = orig_cmd->sess;
1970 res->atomic = scst_cmd_atomic(orig_cmd);
1972 res->tgtt = orig_cmd->tgtt;
1973 res->tgt = orig_cmd->tgt;
1974 res->dev = orig_cmd->dev;
1975 res->tgt_dev = orig_cmd->tgt_dev;
1976 res->lun = orig_cmd->lun;
1977 res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1978 res->data_direction = SCST_DATA_UNKNOWN;
1979 res->orig_cmd = orig_cmd;
1980 res->bufflen = bufsize;
1982 scst_sess_get(res->sess);
1983 if (res->tgt_dev != NULL)
1986 res->state = SCST_CMD_STATE_PRE_PARSE;
1989 TRACE_EXIT_HRES((unsigned long)res);
1993 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1996 static const uint8_t request_sense[6] =
1997 { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
1998 struct scst_cmd *rs_cmd;
2002 if (orig_cmd->sense != NULL) {
2003 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
2004 orig_cmd->sense, orig_cmd);
2005 mempool_free(orig_cmd->sense, scst_sense_mempool);
2006 orig_cmd->sense = NULL;
2009 rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
2010 SCST_SENSE_BUFFERSIZE);
2014 memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
2015 rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
2016 rs_cmd->cdb_len = sizeof(request_sense);
2017 rs_cmd->data_direction = SCST_DATA_READ;
2018 rs_cmd->expected_data_direction = rs_cmd->data_direction;
2019 rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
2020 rs_cmd->expected_values_set = 1;
2022 TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
2023 "cmd list", rs_cmd);
2024 spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2025 list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
2026 wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
2027 spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2030 TRACE_EXIT_RES(res);
2038 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
2040 struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
2046 sBUG_ON(orig_cmd == NULL);
2048 len = scst_get_buf_first(req_cmd, &buf);
2050 if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
2051 SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
2052 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
2054 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
2057 PRINT_ERROR("%s", "Unable to get the sense via "
2058 "REQUEST SENSE, returning HARDWARE ERROR");
2059 scst_set_cmd_error(orig_cmd,
2060 SCST_LOAD_SENSE(scst_sense_hardw_error));
2064 scst_put_buf(req_cmd, buf);
2066 TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
2067 "cmd list", orig_cmd);
2068 spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2069 list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
2070 wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
2071 spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2077 int scst_finish_internal_cmd(struct scst_cmd *cmd)
2083 sBUG_ON(!cmd->internal);
2085 if (cmd->cdb[0] == REQUEST_SENSE)
2086 scst_complete_request_sense(cmd);
2088 __scst_cmd_put(cmd);
2090 res = SCST_CMD_STATE_RES_CONT_NEXT;
2092 TRACE_EXIT_HRES(res);
2096 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2097 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
2099 struct scsi_request *req;
2104 req = scsi_cmd->sc_request;
2106 if (req->sr_bufflen)
2107 kfree(req->sr_buffer);
2108 scsi_release_request(req);
2116 static void scst_send_release(struct scst_device *dev)
2118 struct scsi_request *req;
2119 struct scsi_device *scsi_dev;
2124 if (dev->scsi_dev == NULL)
2127 scsi_dev = dev->scsi_dev;
2129 req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
2131 PRINT_ERROR("Allocation of scsi_request failed: unable "
2132 "to RELEASE device %d:%d:%d:%d",
2133 scsi_dev->host->host_no, scsi_dev->channel,
2134 scsi_dev->id, scsi_dev->lun);
2138 memset(cdb, 0, sizeof(cdb));
2140 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2141 ((scsi_dev->lun << 5) & 0xe0) : 0;
2142 memcpy(req->sr_cmnd, cdb, sizeof(cdb));
2143 req->sr_cmd_len = sizeof(cdb);
2144 req->sr_data_direction = SCST_DATA_NONE;
2146 req->sr_bufflen = 0;
2147 req->sr_buffer = NULL;
2148 req->sr_request->rq_disk = dev->rq_disk;
2149 req->sr_sense_buffer[0] = 0;
2151 TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
2153 scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
2154 scst_req_done, 15, 3);
2160 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2161 static void scst_send_release(struct scst_device *dev)
2163 struct scsi_device *scsi_dev;
2164 unsigned char cdb[6];
2165 uint8_t sense[SCSI_SENSE_BUFFERSIZE];
2170 if (dev->scsi_dev == NULL)
2173 scsi_dev = dev->scsi_dev;
2175 for (i = 0; i < 5; i++) {
2176 memset(cdb, 0, sizeof(cdb));
2178 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2179 ((scsi_dev->lun << 5) & 0xe0) : 0;
2181 memset(sense, 0, sizeof(sense));
2183 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
2185 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
2187 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
2191 TRACE_DBG("RELEASE done: %x", rc);
2193 if (scsi_status_is_good(rc)) {
2196 PRINT_ERROR("RELEASE failed: %d", rc);
2197 PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
2198 scst_check_internal_sense(dev, rc, sense,
2207 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
2209 /* scst_mutex supposed to be held */
2210 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
2212 struct scst_device *dev = tgt_dev->dev;
2217 spin_lock_bh(&dev->dev_lock);
2218 if (dev->dev_reserved &&
2219 !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
2220 /* This is the one holding the reservation */
2221 struct scst_tgt_dev *tgt_dev_tmp;
2222 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
2223 dev_tgt_dev_list_entry) {
2224 clear_bit(SCST_TGT_DEV_RESERVED,
2225 &tgt_dev_tmp->tgt_dev_flags);
2227 dev->dev_reserved = 0;
2230 spin_unlock_bh(&dev->dev_lock);
2233 scst_send_release(dev);
2239 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
2240 const char *initiator_name)
2242 struct scst_session *sess;
2249 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2250 sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2252 sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2255 TRACE(TRACE_OUT_OF_MEM, "%s",
2256 "Allocation of scst_session failed");
2259 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2260 memset(sess, 0, sizeof(*sess));
2263 sess->init_phase = SCST_SESS_IPH_INITING;
2264 sess->shut_phase = SCST_SESS_SPH_READY;
2265 atomic_set(&sess->refcnt, 0);
2266 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2267 struct list_head *sess_tgt_dev_list_head =
2268 &sess->sess_tgt_dev_list_hash[i];
2269 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2271 spin_lock_init(&sess->sess_list_lock);
2272 INIT_LIST_HEAD(&sess->search_cmd_list);
2273 INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2275 INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2276 INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2277 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2278 INIT_DELAYED_WORK(&sess->hw_pending_work,
2279 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2281 INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2284 #ifdef CONFIG_SCST_MEASURE_LATENCY
2285 spin_lock_init(&sess->meas_lock);
2288 len = strlen(initiator_name);
2289 nm = kmalloc(len + 1, gfp_mask);
2291 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2295 strcpy(nm, initiator_name);
2296 sess->initiator_name = nm;
2303 kmem_cache_free(scst_sess_cachep, sess);
2308 void scst_free_session(struct scst_session *sess)
2312 mutex_lock(&scst_mutex);
2314 TRACE_DBG("Removing sess %p from the list", sess);
2315 list_del(&sess->sess_list_entry);
2316 TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2317 list_del(&sess->acg_sess_list_entry);
2319 scst_sess_free_tgt_devs(sess);
2321 wake_up_all(&sess->tgt->unreg_waitQ);
2323 mutex_unlock(&scst_mutex);
2325 kfree(sess->initiator_name);
2326 kmem_cache_free(scst_sess_cachep, sess);
2332 void scst_free_session_callback(struct scst_session *sess)
2334 struct completion *c;
2338 TRACE_DBG("Freeing session %p", sess);
2340 cancel_delayed_work_sync(&sess->hw_pending_work);
2342 c = sess->shutdown_compl;
2344 if (sess->unreg_done_fn) {
2345 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2346 sess->unreg_done_fn(sess);
2347 TRACE_DBG("%s", "unreg_done_fn() returned");
2349 scst_free_session(sess);
2358 void scst_sched_session_free(struct scst_session *sess)
2360 unsigned long flags;
2364 if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2365 PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
2366 "shut phase %lx", sess, sess->shut_phase);
2370 spin_lock_irqsave(&scst_mgmt_lock, flags);
2371 TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2372 list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2373 spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2375 wake_up(&scst_mgmt_waitQ);
2381 void scst_cmd_get(struct scst_cmd *cmd)
2383 __scst_cmd_get(cmd);
2385 EXPORT_SYMBOL(scst_cmd_get);
2387 void scst_cmd_put(struct scst_cmd *cmd)
2389 __scst_cmd_put(cmd);
2391 EXPORT_SYMBOL(scst_cmd_put);
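/*
 * Reference counting sketch (derived from the surrounding code): a command
 * is allocated with cmd_ref set to 1 in scst_alloc_cmd() below;
 * scst_cmd_get()/scst_cmd_put() just wrap __scst_cmd_get()/__scst_cmd_put(),
 * and scst_free_cmd() ends up in scst_destroy_put_cmd() once the command may
 * be destroyed.
 */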
2393 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2395 struct scst_cmd *cmd;
2399 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2400 cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2402 cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2405 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2408 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2409 memset(cmd, 0, sizeof(*cmd));
2412 cmd->state = SCST_CMD_STATE_INIT_WAIT;
2413 cmd->start_time = jiffies;
2414 atomic_set(&cmd->cmd_ref, 1);
2415 cmd->cmd_lists = &scst_main_cmd_lists;
2416 INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2417 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2418 cmd->timeout = SCST_DEFAULT_TIMEOUT;
2421 cmd->is_send_status = 1;
2422 cmd->resp_data_len = -1;
2424 cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2425 cmd->dbl_ua_orig_resp_data_len = -1;
2432 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2434 scst_sess_put(cmd->sess);
2437 * At this point tgt_dev can be dead, but the pointer remains non-NULL
2439 if (likely(cmd->tgt_dev != NULL))
2442 scst_destroy_cmd(cmd);
2446 /* No locks supposed to be held */
2447 void scst_free_cmd(struct scst_cmd *cmd)
2453 TRACE_DBG("Freeing cmd %p (tag %llu)",
2454 cmd, (long long unsigned int)cmd->tag);
2456 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2457 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2458 cmd, atomic_read(&scst_cmd_count));
2461 sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2462 cmd->dec_on_dev_needed);
2464 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2465 #if defined(CONFIG_SCST_EXTRACHECKS)
2466 if (cmd->scsi_req) {
2467 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2469 scst_release_request(cmd);
2475 * The target driver may have already freed the sg buffer before calling
2476 * scst_tgt_cmd_done(). E.g., scst_local has to do that.
2478 if (!cmd->tgt_data_buf_alloced)
2479 scst_check_restore_sg_buff(cmd);
2481 if (cmd->tgtt->on_free_cmd != NULL) {
2482 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2483 cmd->tgtt->on_free_cmd(cmd);
2484 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2487 if (likely(cmd->dev != NULL)) {
2488 struct scst_dev_type *handler = cmd->dev->handler;
2489 if (handler->on_free_cmd != NULL) {
2490 TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2491 handler->name, cmd);
2492 handler->on_free_cmd(cmd);
2493 TRACE_DBG("Dev handler %s on_free_cmd() returned",
2498 scst_release_space(cmd);
2500 if (unlikely(cmd->sense != NULL)) {
2501 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2502 mempool_free(cmd->sense, scst_sense_mempool);
2506 if (likely(cmd->tgt_dev != NULL)) {
2507 #ifdef CONFIG_SCST_EXTRACHECKS
2508 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2509 PRINT_ERROR("Finishing not executed cmd %p (opcode "
2510 "%d, target %s, LUN %lld, sn %ld, expected_sn %ld)",
2511 cmd, cmd->cdb[0], cmd->tgtt->name,
2512 (long long unsigned int)cmd->lun,
2513 cmd->sn, cmd->tgt_dev->expected_sn);
2514 scst_unblock_deferred(cmd->tgt_dev, cmd);
2518 if (unlikely(cmd->out_of_sn)) {
2519 TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2521 (long long unsigned int)cmd->tag,
2523 destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2528 if (likely(destroy))
2529 scst_destroy_put_cmd(cmd);
2535 /* No locks supposed to be held. */
2536 void scst_check_retries(struct scst_tgt *tgt)
2538 int need_wake_up = 0;
2543 * We don't worry about overflow of finished_cmds, because we check
2544 * only for its change.
2546 atomic_inc(&tgt->finished_cmds);
2547 /* See comment in scst_queue_retry_cmd() */
2548 smp_mb__after_atomic_inc();
2549 if (unlikely(tgt->retry_cmds > 0)) {
2550 struct scst_cmd *c, *tc;
2551 unsigned long flags;
2553 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2556 spin_lock_irqsave(&tgt->tgt_lock, flags);
2557 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2561 TRACE_RETRY("Moving retry cmd %p to head of active "
2562 "cmd list (retry_cmds left %d)",
2563 c, tgt->retry_cmds);
2564 spin_lock(&c->cmd_lists->cmd_list_lock);
2565 list_move(&c->cmd_list_entry,
2566 &c->cmd_lists->active_cmd_list);
2567 wake_up(&c->cmd_lists->cmd_list_waitQ);
2568 spin_unlock(&c->cmd_lists->cmd_list_lock);
2571 if (need_wake_up >= 2) /* "slow start" */
2574 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2581 void scst_tgt_retry_timer_fn(unsigned long arg)
2583 struct scst_tgt *tgt = (struct scst_tgt *)arg;
2584 unsigned long flags;
2586 TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2588 spin_lock_irqsave(&tgt->tgt_lock, flags);
2589 tgt->retry_timer_active = 0;
2590 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2592 scst_check_retries(tgt);
2598 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2600 struct scst_mgmt_cmd *mcmd;
2604 mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2606 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2607 "failed; some commands and their data could leak");
2610 memset(mcmd, 0, sizeof(*mcmd));
2617 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2619 unsigned long flags;
2623 spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2624 atomic_dec(&mcmd->sess->sess_cmd_count);
2625 spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2627 scst_sess_put(mcmd->sess);
2629 if (mcmd->mcmd_tgt_dev != NULL)
2632 mempool_free(mcmd, scst_mgmt_mempool);
2638 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2639 int scst_alloc_request(struct scst_cmd *cmd)
2642 struct scsi_request *req;
2643 int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2647 /* cmd->dev->scsi_dev must be non-NULL here */
2648 req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2650 TRACE(TRACE_OUT_OF_MEM, "%s",
2651 "Allocation of scsi_request failed");
2656 cmd->scsi_req = req;
2658 memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2659 req->sr_cmd_len = cmd->cdb_len;
2660 req->sr_data_direction = cmd->data_direction;
2661 req->sr_use_sg = cmd->sg_cnt;
2662 req->sr_bufflen = cmd->bufflen;
2663 req->sr_buffer = cmd->sg;
2664 req->sr_request->rq_disk = cmd->dev->rq_disk;
2665 req->sr_sense_buffer[0] = 0;
2667 cmd->scsi_req->upper_private_data = cmd;
2674 void scst_release_request(struct scst_cmd *cmd)
2676 scsi_release_request(cmd->scsi_req);
2677 cmd->scsi_req = NULL;
2681 static bool is_report_sg_limitation(void)
2683 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2684 return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2690 int scst_alloc_space(struct scst_cmd *cmd)
2694 int atomic = scst_cmd_atomic(cmd);
2696 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2701 gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2703 flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2705 flags |= SGV_POOL_ALLOC_NO_CACHED;
2707 cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2708 &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2709 if (cmd->sg == NULL)
2712 if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2713 if ((ll < 10) || is_report_sg_limitation()) {
2714 PRINT_INFO("Unable to complete command due to "
2715 "SG IO count limitation (requested %d, "
2716 "available %d, tgt lim %d)", cmd->sg_cnt,
2717 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2723 if (cmd->data_direction != SCST_DATA_BIDI)
2726 cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2727 flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2728 &cmd->dev->dev_mem_lim, NULL);
2729 if (cmd->in_sg == NULL)
2732 if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2733 if ((ll < 10) || is_report_sg_limitation()) {
2734 PRINT_INFO("Unable to complete command due to "
2735 "SG IO count limitation (IN buffer, requested "
2736 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2737 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2740 goto out_in_sg_free;
2751 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2757 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2764 static void scst_release_space(struct scst_cmd *cmd)
2768 if (cmd->sgv == NULL)
2771 if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2772 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2776 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2783 if (cmd->in_sgv != NULL) {
2784 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2788 cmd->in_bufflen = 0;
2796 #if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED))
2799 * May switch to the next dst_sg element, so, to copy strictly into only
2800 * one dst_sg element, that element must either be the last one in the
2801 * chain, or copy_len must equal dst_sg->length.
2803 static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2804 size_t *pdst_offs, struct scatterlist *src_sg,
2806 enum km_type d_km_type, enum km_type s_km_type)
2809 struct scatterlist *dst_sg;
2810 size_t src_len, dst_len, src_offs, dst_offs;
2811 struct page *src_page, *dst_page;
2814 dst_len = *pdst_len;
2815 dst_offs = *pdst_offs;
2816 dst_page = sg_page(dst_sg);
2818 src_page = sg_page(src_sg);
2819 src_len = src_sg->length;
2820 src_offs = src_sg->offset;
2823 void *saddr, *daddr;
2826 saddr = kmap_atomic(src_page +
2827 (src_offs >> PAGE_SHIFT), s_km_type) +
2828 (src_offs & ~PAGE_MASK);
2829 daddr = kmap_atomic(dst_page +
2830 (dst_offs >> PAGE_SHIFT), d_km_type) +
2831 (dst_offs & ~PAGE_MASK);
2833 if (((src_offs & ~PAGE_MASK) == 0) &&
2834 ((dst_offs & ~PAGE_MASK) == 0) &&
2835 (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2836 (copy_len >= PAGE_SIZE)) {
2837 copy_page(daddr, saddr);
2840 n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2841 PAGE_SIZE - (src_offs & ~PAGE_MASK));
2842 n = min(n, src_len);
2843 n = min(n, dst_len);
2844 n = min_t(size_t, n, copy_len);
2845 memcpy(daddr, saddr, n);
2850 kunmap_atomic(saddr, s_km_type);
2851 kunmap_atomic(daddr, d_km_type);
2861 dst_sg = sg_next(dst_sg);
2864 dst_page = sg_page(dst_sg);
2865 dst_len = dst_sg->length;
2866 dst_offs = dst_sg->offset;
2868 } while (src_len > 0);
2872 *pdst_len = dst_len;
2873 *pdst_offs = dst_offs;
2878 * sg_copy - copy one SG vector to another
2879 * @dst_sg: destination SG
2880 * @src_sg: source SG
2881 * @nents_to_copy: maximum number of entries to copy
2882 * @copy_len: maximum amount of data to copy. If 0, then copy all.
2883 * @d_km_type: kmap_atomic type for the destination SG
2884 * @s_km_type: kmap_atomic type for the source SG
2887 * Data from the source SG vector will be copied to the destination SG
2888 * vector. The end of each vector is determined by sg_next() returning
2889 * NULL. Returns the number of bytes copied.
2891 static int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2892 int nents_to_copy, size_t copy_len,
2893 enum km_type d_km_type, enum km_type s_km_type)
2896 size_t dst_len, dst_offs;
2899 copy_len = 0x7FFFFFFF; /* copy all */
2901 if (nents_to_copy == 0)
2902 nents_to_copy = 0x7FFFFFFF; /* copy all */
2904 dst_len = dst_sg->length;
2905 dst_offs = dst_sg->offset;
2908 int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2909 src_sg, copy_len, d_km_type, s_km_type);
2912 if ((copy_len == 0) || (dst_sg == NULL))
2916 if (nents_to_copy == 0)
2919 src_sg = sg_next(src_sg);
2920 } while (src_sg != NULL);
2926 #endif /* !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
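/*
 * Illustrative sketch only (not part of the original code): one way the
 * local sg_copy() helper above could be used to duplicate a kmalloc'ed
 * buffer through single-entry SG vectors. Assumes process context (hence
 * the KM_USER0/KM_USER1 kmap types); the helper name sg_copy_example()
 * is made up for this example.
 */
#if 0
static void sg_copy_example(void *dst_buf, void *src_buf, size_t len)
{
	struct scatterlist src, dst;
	int copied;

	sg_init_one(&src, src_buf, len);
	sg_init_one(&dst, dst_buf, len);

	/* copy_len == 0 and nents_to_copy == 0 both mean "copy everything" */
	copied = sg_copy(&dst, &src, 0, 0, KM_USER0, KM_USER1);
	WARN_ON((size_t)copied != len);
}
#endif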
2928 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) && !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED))
2930 #include <linux/pfn.h>
2932 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
2933 static inline int object_is_on_stack(void *obj)
2935 void *stack = task_stack_page(current);
2937 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2941 struct blk_kern_sg_work {
2942 atomic_t bios_inflight; /* bios not yet completed; bw is freed when this hits zero */
2943 struct sg_table sg_table; /* bounce SG table backed by freshly allocated pages */
2944 struct scatterlist *src_sgl; /* caller's original sgl; data is copied back to it on READ */
2947 static void blk_rq_unmap_kern_sg(struct request *rq, int err);
2949 static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
2951 TRACE_DBG("Freeing bw %p", bw);
2952 sg_free_table(&bw->sg_table);
2957 static void blk_bio_map_kern_endio(struct bio *bio, int err)
2959 struct blk_kern_sg_work *bw = bio->bi_private;
2961 TRACE_DBG("bio %p finished", bio);
2964 /* Decrement the count of bios still in flight and free bw once it reaches zero */
2965 BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
2966 if (atomic_dec_and_test(&bw->bios_inflight)) {
2967 TRACE_DBG("sgl %p, new_sgl %p, new_sgl_nents %d",
2968 bw->src_sgl, bw->sg_table.sgl,
2969 bw->sg_table.nents);
2970 if ((bio_data_dir(bio) == READ) && (err == 0)) {
2971 unsigned long flags;
2973 TRACE_DBG("Copying sgl %p (nents %d) to "
2974 "orig_sgl %p", bw->sg_table.sgl,
2975 bw->sg_table.nents, bw->src_sgl);
2977 local_irq_save(flags); /* to protect KMs */
2978 sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
2979 KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
2980 local_irq_restore(flags);
2982 blk_free_kern_sg_work(bw);
2990 static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
2991 int nents, struct blk_kern_sg_work **pbw,
2992 gfp_t gfp, gfp_t page_gfp)
2995 struct scatterlist *sg;
2996 struct scatterlist *new_sgl;
2998 size_t len = 0, to_copy;
2999 struct blk_kern_sg_work *bw;
3001 bw = kzalloc(sizeof(*bw), gfp);
3003 PRINT_ERROR("%s", "Unable to alloc blk_kern_sg_work");
3009 for_each_sg(sgl, sg, nents, i)
3013 new_sgl_nents = PFN_UP(len);
3015 res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
3017 PRINT_ERROR("Unable to alloc copy sg table (nents %d)",
3022 new_sgl = bw->sg_table.sgl;
3024 TRACE_DBG("sgl %p, nents %d, to_copy %lld, new_sgl %p, new_sgl_nents %d",
3025 sgl, nents, (long long)to_copy, new_sgl, new_sgl_nents);
3027 for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3030 pg = alloc_page(page_gfp);
3032 PRINT_ERROR("Unable to alloc copy page (left %lld)",
3034 goto err_free_new_sgl;
3037 sg_assign_page(sg, pg);
3038 sg->length = min_t(size_t, PAGE_SIZE, len);
3043 if (rq_data_dir(rq) == WRITE) {
3045 * We need to limit the amount of copied data to to_copy, because
3046 * the last element of sgl might not be marked as last in
3049 TRACE_DBG("Copying sgl %p (nents %d) to new_sgl %p "
3050 "(new_sgl_nents %d), to_copy %lld", sgl, nents,
3051 new_sgl, new_sgl_nents, (long long)to_copy);
3052 sg_copy(new_sgl, sgl, 0, to_copy,
3053 KM_USER0, KM_USER1);
3058 * REQ_COPY_USER name is misleading. It should be something like
3059 * REQ_HAS_TAIL_SPACE_FOR_PADDING.
3061 rq->cmd_flags |= REQ_COPY_USER;
3067 for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3068 struct page *pg = sg_page(sg);
3073 sg_free_table(&bw->sg_table);
3081 static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3082 int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
3085 struct request_queue *q = rq->q;
3086 int rw = rq_data_dir(rq);
3090 struct scatterlist *sg, *prev_sg = NULL;
3091 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
3094 if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
3101 * Let's keep each bio allocation inside a single page to decrease the
3102 * probability of allocation failure.
3104 max_nr_vecs = min_t(size_t,
3105 ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
3108 TRACE_DBG("sgl %p, nents %d, bw %p, max_nr_vecs %d", sgl, nents, bw,
3111 need_new_bio = true;
3114 for_each_sg(sgl, sg, nents, i) {
3115 struct page *page = sg_page(sg);
3116 void *page_addr = page_address(page);
3117 size_t len = sg->length, l;
3118 size_t offset = sg->offset;
3124 * Each segment must be aligned on a DMA boundary and must
3125 * not live on the stack. The last one may have an unaligned
3126 * length as long as the total length is aligned to the
3127 * DMA padding alignment.
3133 if (((sg->offset | l) & queue_dma_alignment(q)) ||
3134 (page_addr && object_is_on_stack(page_addr + sg->offset))) {
3135 TRACE_DBG("%s", "DMA alignment or offset don't match");
3145 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
3146 bio = bio_kmalloc(gfp, max_nr_vecs);
3148 bio = bio_alloc(gfp, max_nr_vecs);
3151 PRINT_ERROR("%s", "Unable to alloc bio");
3156 TRACE_DBG("bio %p alloced", bio);
3159 bio->bi_rw |= 1 << BIO_RW;
3162 bio->bi_private = bw;
3163 bio->bi_end_io = blk_bio_map_kern_endio;
3168 tbio = tbio->bi_next = bio;
3171 bytes = min_t(size_t, len, PAGE_SIZE - offset);
3173 rc = bio_add_pc_page(q, bio, page, bytes, offset);
3175 if (unlikely(need_new_bio || (rc < 0))) {
3180 PRINT_ERROR("bio_add_pc_page() failed: "
3184 need_new_bio = true;
3191 need_new_bio = false;
3194 page = nth_page(page, 1);
3203 /* Total length must be aligned on DMA padding alignment */
3204 if ((tot_len & q->dma_pad_mask) &&
3205 !(rq->cmd_flags & REQ_COPY_USER)) {
3206 TRACE_DBG("Total len %lld doesn't match DMA pad mask %x",
3207 (long long)tot_len, q->dma_pad_mask);
3213 atomic_set(&bw->bios_inflight, bios);
3215 while (hbio != NULL) {
3217 hbio = hbio->bi_next;
3218 bio->bi_next = NULL;
3220 blk_queue_bounce(q, &bio);
3222 res = blk_rq_append_bio(q, rq, bio);
3223 if (unlikely(res != 0)) {
3224 PRINT_ERROR("blk_rq_append_bio() failed: %d", res);
3225 bio->bi_next = hbio;
3227 /* We can have one or more bios bounced */
3228 goto out_unmap_bios;
3232 rq->buffer = rq->data = NULL;
3237 while (hbio != NULL) {
3239 hbio = hbio->bi_next;
3245 blk_rq_unmap_kern_sg(rq, res);
3250 * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
3251 * @rq: request to fill
3253 * @nents: number of elements in @sgl
3254 * @gfp: memory allocation flags
3257 * Data will be mapped directly if possible. Otherwise a bounce
3258 * buffer will be used.
3260 static int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3261 int nents, gfp_t gfp)
3265 res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
3266 if (unlikely(res != 0)) {
3267 struct blk_kern_sg_work *bw = NULL;
3269 TRACE_DBG("__blk_rq_map_kern_sg() failed: %d", res);
3271 res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
3272 gfp, rq->q->bounce_gfp | gfp);
3273 if (unlikely(res != 0))
3276 res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
3277 bw->sg_table.nents, bw, gfp);
3279 TRACE_DBG("Copied __blk_rq_map_kern_sg() failed: %d",
3281 blk_free_kern_sg_work(bw);
3286 rq->buffer = rq->data = NULL;
3293 * blk_rq_unmap_kern_sg - unmap a request with kernel sg
3294 * @rq: request to unmap
3295 * @err: non-zero error code
3298 * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
3299 * only in case of an error!
3301 static void blk_rq_unmap_kern_sg(struct request *rq, int err)
3303 struct bio *bio = rq->bio;
3306 struct bio *b = bio;
3308 b->bi_end_io(b, err);
3315 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) && !(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
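/*
 * Illustrative sketch only (not part of the original code): the calling
 * pattern for the local blk_rq_map_kern_sg()/blk_rq_unmap_kern_sg() helpers
 * above, mirroring the error handling used by scst_scsi_exec_async() below.
 * The helper name map_sg_to_rq_example() and the GFP_KERNEL context are
 * assumptions made for this example.
 */
#if 0
static int map_sg_to_rq_example(struct request *rq, struct scatterlist *sgl,
	int nents)
{
	int res;

	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res != 0) {
		/* Undo whatever bios were built/bounced, then drop the rq */
		blk_rq_unmap_kern_sg(rq, res);
		blk_put_request(rq);
	}
	return res;
}
#endif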
3317 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
3318 static void scsi_end_async(struct request *req, int error)
3320 struct scsi_io_context *sioc = req->end_io_data;
3322 TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
3325 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3327 if (!sioc->full_cdb_used)
3328 kmem_cache_free(scsi_io_context_cache, sioc);
3332 __blk_put_request(req->q, req);
3337 * scst_scsi_exec_async - executes a SCSI command in pass-through mode
3338 * @cmd: scst command
3339 * @done: callback function invoked when the command completes
3341 int scst_scsi_exec_async(struct scst_cmd *cmd,
3342 void (*done)(void *, char *, int, int))
3345 struct request_queue *q = cmd->dev->scsi_dev->request_queue;
3347 struct scsi_io_context *sioc;
3348 int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
3349 gfp_t gfp = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
3350 int cmd_len = cmd->cdb_len;
3352 if (cmd->ext_cdb_len == 0) {
3353 TRACE_DBG("Simple CDB (cmd_len %d)", cmd_len);
3354 sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
3360 cmd_len += cmd->ext_cdb_len;
3362 TRACE_DBG("Extended CDB (cmd_len %d)", cmd_len);
3364 sioc = kzalloc(sizeof(*sioc) + cmd_len, gfp);
3370 sioc->full_cdb_used = 1;
3372 memcpy(sioc->full_cdb, cmd->cdb, cmd->cdb_len);
3373 memcpy(&sioc->full_cdb[cmd->cdb_len], cmd->ext_cdb,
3377 rq = blk_get_request(q, write, gfp);
3383 rq->cmd_type = REQ_TYPE_BLOCK_PC;
3384 rq->cmd_flags |= REQ_QUIET;
3386 if (cmd->sg != NULL) {
3387 res = blk_rq_map_kern_sg(rq, cmd->sg, cmd->sg_cnt, gfp);
3389 TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
3394 if (cmd->data_direction == SCST_DATA_BIDI) {
3395 struct request *next_rq;
3397 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
3399 goto out_free_unmap;
3402 next_rq = blk_get_request(q, READ, gfp);
3403 if (next_rq == NULL) {
3405 goto out_free_unmap;
3407 rq->next_rq = next_rq;
3408 next_rq->cmd_type = rq->cmd_type;
3410 res = blk_rq_map_kern_sg(next_rq, cmd->in_sg,
3411 cmd->in_sg_cnt, gfp);
3413 goto out_free_unmap;
3416 TRACE_DBG("sioc %p, cmd %p", sioc, cmd);
3421 rq->cmd_len = cmd_len;
3422 if (cmd->ext_cdb_len == 0) {
3423 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3424 memcpy(rq->cmd, cmd->cdb, cmd->cdb_len);
3426 rq->cmd = sioc->full_cdb;
3428 rq->sense = sioc->sense;
3429 rq->sense_len = sizeof(sioc->sense);
3430 rq->timeout = cmd->timeout;
3431 rq->retries = cmd->retries;
3432 rq->end_io_data = sioc;
3434 blk_execute_rq_nowait(rq->q, NULL, rq,
3435 (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE), scsi_end_async);
3440 if (rq->next_rq != NULL) {
3441 blk_put_request(rq->next_rq);
3444 blk_rq_unmap_kern_sg(rq, res);
3447 blk_put_request(rq);
3450 if (!sioc->full_cdb_used)
3451 kmem_cache_free(scsi_io_context_cache, sioc);
3457 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) */
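/*
 * Illustrative sketch only (not part of the original code): the shape of the
 * completion callback expected by scst_scsi_exec_async() above. As seen in
 * scsi_end_async(), the first argument is the value stored in sioc->data
 * (the scst_cmd), followed by the sense buffer, the result and the residual.
 * The name example_exec_async_done() is made up for this example.
 */
#if 0
static void example_exec_async_done(void *data, char *sense, int result,
	int resid)
{
	struct scst_cmd *cmd = data;

	TRACE_DBG("cmd %p finished: result %d, resid %d", cmd, result, resid);
	if (result != 0)
		PRINT_BUFFER("Pass-through sense", sense, SCST_SENSE_BUFFERSIZE);
}

/* ... and then, from the caller: */
/* rc = scst_scsi_exec_async(cmd, example_exec_async_done); */
#endif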
3459 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3461 struct scatterlist *src_sg, *dst_sg;
3462 unsigned int to_copy;
3463 int atomic = scst_cmd_atomic(cmd);
3467 if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3468 if (cmd->data_direction != SCST_DATA_BIDI) {
3469 src_sg = cmd->tgt_sg;
3471 to_copy = cmd->bufflen;
3473 TRACE_MEM("BIDI cmd %p", cmd);
3474 src_sg = cmd->tgt_in_sg;
3475 dst_sg = cmd->in_sg;
3476 to_copy = cmd->in_bufflen;
3480 dst_sg = cmd->tgt_sg;
3481 to_copy = cmd->resp_data_len;
3484 TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, to_copy %lld",
3485 cmd, copy_dir, src_sg, dst_sg, (long long)to_copy);
3487 if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3489 * It can happen, e.g., with scst_user for a cmd with delayed
3490 * alloc that failed with a Check Condition.
3495 sg_copy(dst_sg, src_sg, 0, to_copy,
3496 atomic ? KM_SOFTIRQ0 : KM_USER0,
3497 atomic ? KM_SOFTIRQ1 : KM_USER1);
3504 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3506 #define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
3507 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
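/*
 * Example (illustration only): READ(10) has opcode 0x28, so
 * SCST_CDB_GROUP(0x28) == (0x28 >> 5) & 0x7 == 1 and
 * SCST_GET_CDB_LEN(0x28) == SCST_CDB_LENGTH[1] == 10 bytes.
 * The -1 entries (groups 3, 6 and 7) mean the CDB length is unknown.
 */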
3509 int scst_get_cdb_len(const uint8_t *cdb)
3511 return SCST_GET_CDB_LEN(cdb[0]);
3514 /* get_trans_len_x extracts x bytes from the CDB as the length, starting at offset off */
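/*
 * For instance (illustration only), a CDB whose length field occupies two
 * bytes at offset 7, such as the transfer length of READ(10), would be
 * described by get_trans_len_2 with off == 7, which yields
 * cmd->bufflen = ((u32)cdb[7] << 8) | cdb[8].
 */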
3516 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3523 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3529 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3531 cmd->bufflen = READ_CAP_LEN;
3535 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3541 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3542 cmd->op_name = "READ CAPACITY(16)";
3543 cmd->bufflen = READ_CAP16_LEN;
3544 cmd->op_flags |= SCST_IMPLICIT_HQ;
3546 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3548 TRACE_EXIT_RES(res);
3552 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3558 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3560 uint8_t *p = (uint8_t *)cmd->cdb + off;
3564 cmd->bufflen |= ((u32)p[0]) << 8;
3565 cmd->bufflen |= ((u32)p[1]);
3567 switch (cmd->cdb[1] & 0x1f) {
3571 if (cmd->bufflen != 0) {
3572 PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3573 "allocation length for service action %x",
3574 cmd->bufflen, cmd->cdb[1] & 0x1f);
3580 switch (cmd->cdb[1] & 0x1f) {
3589 cmd->bufflen = max(28, cmd->bufflen);
3592 PRINT_ERROR("READ POSITION: Invalid service action %x",
3593 cmd->cdb[1] & 0x1f);
3601 scst_set_cmd_error(cmd,
3602 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3607 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3609 cmd->bufflen = (u32)cmd->cdb[off];
3613 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3615 cmd->bufflen = (u32)cmd->cdb[off];
3616 if (cmd->bufflen == 0)
3621 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3623 const uint8_t *p = cmd->cdb + off;
3626 cmd->bufflen |= ((u32)p[0]) << 8;
3627 cmd->bufflen |= ((u32)p[1]);
3632 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3634 const uint8_t *p = cmd->cdb + off;
3637 cmd->bufflen |= ((u32)p[0]) << 16;
3638 cmd->bufflen |= ((u32)p[1]) << 8;
3639 cmd->bufflen |= ((u32)p[2]);
3644 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3646 const uint8_t *p = cmd->cdb + off;
3649 cmd->bufflen |= ((u32)p[0]) << 24;
3650 cmd->bufflen |= ((u32)p[1]) << 16;
3651 cmd->bufflen |= ((u32)p[2]) << 8;
3652 cmd->bufflen |= ((u32)p[3]);
3657 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3663 int scst_get_cdb_info(struct scst_cmd *cmd)
3665 int dev_type = cmd->dev->type;
3668 const struct scst_sdbops *ptr = NULL;
3672 op = cmd->cdb[0]; /* get the opcode byte */
3674 TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3675 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3678 i = scst_scsi_op_list[op];
3679 while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3680 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3681 ptr = &scst_scsi_op_table[i];
3682 TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3683 ptr->ops, ptr->devkey[0], /* disk */
3684 ptr->devkey[1], /* tape */
3685 ptr->devkey[2], /* printer */
3686 ptr->devkey[3], /* cpu */
3687 ptr->devkey[4], /* cdr */
3688 ptr->devkey[5], /* cdrom */
3689 ptr->devkey[6], /* scanner */
3690 ptr->devkey[7], /* worm */
3691 ptr->devkey[8], /* changer */
3692 ptr->devkey[9], /* commdev */
3694 TRACE_DBG("direction=%d flags=%d off=%d",
3703 if (unlikely(ptr == NULL)) {
3704 /* opcode not found or currently not used */
3705 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3708 cmd->op_flags = SCST_INFO_NOT_FOUND;
3712 cmd->cdb_len = SCST_GET_CDB_LEN(op);
3713 cmd->op_name = ptr->op_name;
3714 cmd->data_direction = ptr->direction;
3715 cmd->op_flags = ptr->flags;
3716 res = (*ptr->get_trans_len)(cmd, ptr->off);
3719 TRACE_EXIT_RES(res);
3722 EXPORT_SYMBOL(scst_get_cdb_info);
3724 /* Packs SCST LUN back to SCSI form using peripheral device addressing method */
3725 uint64_t scst_pack_lun(const uint64_t lun)
3728 uint16_t *p = (uint16_t *)&res;
3731 *p = cpu_to_be16(*p);
3733 TRACE_EXIT_HRES((unsigned long)res);
3738 * Routine to extract a LUN number from an 8-byte LUN structure
3739 * in network byte order (big endian)
3740 * (see SAM-2, Section 4.12.3, page 40).
3741 * Supports three LUN unpacking methods: peripheral, flat space and logical unit.
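 *
 * Worked example (illustration only): the 8-byte field 00 05 00 00 00 00 00 00
 * uses the peripheral addressing method (the top two bits of byte 0 are 00)
 * and unpacks to LUN 5; 40 05 00 00 00 00 00 00 uses flat space addressing
 * (top two bits 01) and also unpacks to LUN 5 via
 * res = lun[1] | ((lun[0] & 0x3f) << 8).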
3743 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3745 uint64_t res = NO_SUCH_LUN;
3750 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3752 if (unlikely(len < 2)) {
3753 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3761 if ((*((uint64_t *)lun) &
3762 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3766 if (*((uint16_t *)&lun[2]) != 0)
3770 if (*((uint32_t *)&lun[2]) != 0)
3778 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
3779 switch (address_method) {
3780 case 0: /* peripheral device addressing method */
3783 PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
3784 "peripheral device addressing method 0x%02x, "
3785 "expected 0", *lun);
3792 * Looks like it's legal to use it as flat space addressing
3799 case 1: /* flat space addressing method */
3800 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3803 case 2: /* logical unit addressing method */
3805 PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
3806 "addressing method 0x%02x, expected 0",
3810 if (*(lun + 1) & 0xe0) {