4 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2009 ID7 Ltd.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/kthread.h>
27 #include <linux/cdrom.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <asm/kmap_types.h>
33 #include "scst_priv.h"
36 #include "scst_cdbprobe.h"
38 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
39 static void scst_check_internal_sense(struct scst_device *dev, int result,
40 uint8_t *sense, int sense_len);
41 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
43 static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
44 const uint8_t *sense, int sense_len, int flags);
45 static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
46 const uint8_t *sense, int sense_len, int flags);
47 static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
48 static void scst_release_space(struct scst_cmd *cmd);
49 static void scst_sess_free_tgt_devs(struct scst_session *sess);
50 static void scst_unblock_cmds(struct scst_device *dev);
51 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
52 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
53 struct scst_acg_dev *acg_dev);
55 #ifdef CONFIG_SCST_DEBUG_TM
56 static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
57 struct scst_acg_dev *acg_dev);
58 static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
60 static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
61 struct scst_acg_dev *acg_dev) {}
62 static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
63 #endif /* CONFIG_SCST_DEBUG_TM */
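/*
 * Allocates cmd->sense from scst_sense_mempool (GFP_ATOMIC in atomic
 * context, GFP_KERNEL|__GFP_NOFAIL otherwise) and zeroes it. If the
 * allocation fails, the sense data for this command is lost and a
 * critical error is logged.
 */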
65 int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
68 gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
72 if (cmd->sense != NULL)
75 cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
76 if (cmd->sense == NULL) {
77 PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
78 "The sense data will be lost!!", cmd->cdb[0]);
84 cmd->sense_bufflen = SCST_SENSE_BUFFERSIZE;
85 memset(cmd->sense, 0, SCST_SENSE_BUFFERSIZE);
91 EXPORT_SYMBOL(scst_alloc_sense);
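/*
 * Allocates the sense buffer via scst_alloc_sense() and copies the
 * supplied sense data into it, truncated to cmd->sense_bufflen if needed.
 */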
93 int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
94 const uint8_t *sense, unsigned int len)
100 res = scst_alloc_sense(cmd, atomic);
102 PRINT_BUFFER("Lost sense", sense, len);
106 memcpy(cmd->sense, sense, min((int)len, (int)cmd->sense_bufflen));
107 TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);
113 EXPORT_SYMBOL(scst_alloc_set_sense);
115 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
119 cmd->status = status;
120 cmd->host_status = DID_OK;
122 cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
123 cmd->dbl_ua_orig_data_direction = cmd->data_direction;
125 cmd->data_direction = SCST_DATA_NONE;
126 cmd->resp_data_len = 0;
127 cmd->is_send_status = 1;
134 EXPORT_SYMBOL(scst_set_cmd_error_status);
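/*
 * Sets SAM_STAT_CHECK_CONDITION on the command and builds sense data from
 * the given key/asc/ascq, using the sense format selected by the device's
 * D_SENSE setting. A typical (purely illustrative) call from a dev handler
 * that hit an internal failure:
 *
 *	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
 */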
136 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
142 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
144 rc = scst_alloc_sense(cmd, 1);
146 PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
151 scst_set_sense(cmd->sense, cmd->sense_bufflen,
152 scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
153 TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_bufflen);
159 EXPORT_SYMBOL(scst_set_cmd_error);
161 void scst_set_sense(uint8_t *buffer, int len, bool d_sense,
162 int key, int asc, int ascq)
166 memset(buffer, 0, len);
169 /* Descriptor format */
171 PRINT_ERROR("Length %d of sense buffer too small to "
172 "fit sense %x:%x:%x", len, key, asc, ascq);
175 buffer[0] = 0x72; /* Response Code */
177 buffer[1] = key; /* Sense Key */
179 buffer[2] = asc; /* ASC */
181 buffer[3] = ascq; /* ASCQ */
185 PRINT_ERROR("Length %d of sense buffer too small to "
186 "fit sense %x:%x:%x", len, key, asc, ascq);
189 buffer[0] = 0x70; /* Response Code */
191 buffer[2] = key; /* Sense Key */
193 buffer[7] = 0x0a; /* Additional Sense Length */
195 buffer[12] = asc; /* ASC */
197 buffer[13] = ascq; /* ASCQ */
200 TRACE_BUFFER("Sense set", buffer, len);
203 EXPORT_SYMBOL(scst_set_sense);
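/*
 * scst_set_sense() above can be used on any caller-provided buffer, e.g.
 * (illustrative sketch):
 *
 *	uint8_t sb[SCST_STANDARD_SENSE_LEN];
 *	scst_set_sense(sb, sizeof(sb), false,
 *		SCST_LOAD_SENSE(scst_sense_hardw_error));
 *
 * scst_analyze_sense() below checks whether a sense buffer matches the
 * given key/ASC/ASCQ, honoring valid_mask and both the fixed (0x70/0x71)
 * and descriptor (0x72/0x73) formats.
 */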
205 bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
206 int key, int asc, int ascq)
211 if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
215 PRINT_ERROR("Sense too small to analyze (%d, "
221 if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[2] != key))
225 if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[12] != asc))
229 if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[13] != ascq))
231 } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
232 /* Descriptor format */
235 PRINT_ERROR("Sense too small to analyze (%d, "
236 "type descriptor)", len);
241 if ((valid_mask & SCST_SENSE_KEY_VALID) && (sense[1] != key))
245 if ((valid_mask & SCST_SENSE_ASC_VALID) && (sense[2] != asc))
249 if ((valid_mask & SCST_SENSE_ASCQ_VALID) && (sense[3] != ascq))
257 TRACE_EXIT_RES((int)res);
260 EXPORT_SYMBOL(scst_analyze_sense);
262 bool scst_is_ua_sense(const uint8_t *sense, int len)
264 if (SCST_SENSE_VALID(sense))
265 return scst_analyze_sense(sense, len,
266 SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
270 EXPORT_SYMBOL(scst_is_ua_sense);
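/*
 * Returns true for Unit Attentions that must be delivered on all LUNs of
 * a session rather than on a single tgt_dev; currently that is only
 * REPORTED LUNS DATA CHANGED.
 */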
272 bool scst_is_ua_global(const uint8_t *sense, int len)
276 /* When changing this, don't forget to change scst_requeue_ua() as well!! */
278 if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
279 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed)))
287 void scst_check_convert_sense(struct scst_cmd *cmd)
293 if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
296 d_sense = scst_get_cmd_dev_d_sense(cmd);
297 if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
298 TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
300 if (cmd->sense_bufflen < 14) {
301 PRINT_ERROR("Sense too small to convert (%d, "
302 "type fixed)", cmd->sense_bufflen);
305 scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
306 cmd->sense[2], cmd->sense[12], cmd->sense[13]);
307 } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
308 (cmd->sense[0] == 0x73))) {
309 TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
311 if (cmd->sense_bufflen < 4) {
312 PRINT_ERROR("Sense too small to convert (%d, "
313 "type descryptor)", cmd->sense_bufflen);
316 scst_set_sense(cmd->sense, cmd->sense_bufflen, d_sense,
317 cmd->sense[1], cmd->sense[2], cmd->sense[3]);
324 EXPORT_SYMBOL(scst_check_convert_sense);
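/*
 * Internal helper: sets CHECK CONDITION and attaches the supplied,
 * already formatted sense buffer to the command.
 */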
326 static void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
331 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
332 scst_alloc_set_sense(cmd, 1, sense, len);
338 void scst_set_busy(struct scst_cmd *cmd)
340 int c = atomic_read(&cmd->sess->sess_cmd_count);
344 if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
345 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
346 TRACE(TRACE_MGMT_MINOR, "Sending BUSY status to initiator %s "
347 "(cmds count %d, queue_type %x, sess->init_phase %d)",
348 cmd->sess->initiator_name, c,
349 cmd->queue_type, cmd->sess->init_phase);
351 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
352 TRACE(TRACE_MGMT_MINOR, "Sending QUEUE_FULL status to "
353 "initiator %s (cmds count %d, queue_type %x, "
354 "sess->init_phase %d)", cmd->sess->initiator_name, c,
355 cmd->queue_type, cmd->sess->init_phase);
361 EXPORT_SYMBOL(scst_set_busy);
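/*
 * Rewrites the pending RESET Unit Attention of every tgt_dev in the
 * session with the supplied key/asc/ascq, so the initiator sees the
 * desired initial UA instead of the generic reset one; complains if a
 * tgt_dev has no RESET UA queued first.
 */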
363 void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
369 TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
372 /* Protect sess_tgt_dev_list_hash */
373 mutex_lock(&scst_mutex);
375 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
376 struct list_head *sess_tgt_dev_list_head =
377 &sess->sess_tgt_dev_list_hash[i];
378 struct scst_tgt_dev *tgt_dev;
380 list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
381 sess_tgt_dev_list_entry) {
382 spin_lock_bh(&tgt_dev->tgt_dev_lock);
383 if (!list_empty(&tgt_dev->UA_list)) {
384 struct scst_tgt_dev_UA *ua;
386 ua = list_entry(tgt_dev->UA_list.next,
387 typeof(*ua), UA_list_entry);
388 if (scst_analyze_sense(ua->UA_sense_buffer,
389 sizeof(ua->UA_sense_buffer),
390 SCST_SENSE_ALL_VALID,
391 SCST_LOAD_SENSE(scst_sense_reset_UA))) {
392 scst_set_sense(ua->UA_sense_buffer,
393 sizeof(ua->UA_sense_buffer),
394 tgt_dev->dev->d_sense,
398 "The first UA isn't RESET UA");
400 PRINT_ERROR("%s", "There's no RESET UA to "
402 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
406 mutex_unlock(&scst_mutex);
411 EXPORT_SYMBOL(scst_set_initial_UA);
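/*
 * Allocates a SCSI AEN from scst_aen_mempool, takes a reference on the
 * session and records the packed LUN of the originating tgt_dev. If the
 * mempool allocation fails, the event notification is skipped and an
 * error is logged.
 */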
413 static struct scst_aen *scst_alloc_aen(struct scst_tgt_dev *tgt_dev)
415 struct scst_aen *aen;
419 aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
421 PRINT_ERROR("AEN memory allocation failed. Corresponding "
422 "event notification will not be performed (initiator "
423 "%s)", tgt_dev->sess->initiator_name);
426 memset(aen, 0, sizeof(*aen));
428 aen->sess = tgt_dev->sess;
429 scst_sess_get(aen->sess);
431 aen->lun = scst_pack_lun(tgt_dev->lun);
434 TRACE_EXIT_HRES((unsigned long)aen);
438 static void scst_free_aen(struct scst_aen *aen)
442 scst_sess_put(aen->sess);
443 mempool_free(aen, scst_aen_mempool);
450 void scst_capacity_data_changed(struct scst_device *dev)
452 struct scst_tgt_dev *tgt_dev;
453 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
457 if (dev->type != TYPE_DISK) {
458 TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
459 "CHANGED UA", dev->type);
463 TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);
465 mutex_lock(&scst_mutex);
467 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
468 dev_tgt_dev_list_entry) {
469 struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
471 if (tgtt->report_aen != NULL) {
472 struct scst_aen *aen;
475 aen = scst_alloc_aen(tgt_dev);
479 aen->event_fn = SCST_AEN_SCSI;
480 aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
481 scst_set_sense(aen->aen_sense, aen->aen_sense_len,
482 tgt_dev->dev->d_sense,
483 SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
485 TRACE_DBG("Calling target's %s report_aen(%p)",
487 rc = tgtt->report_aen(aen);
488 TRACE_DBG("Target's %s report_aen(%p) returned %d",
489 tgtt->name, aen, rc);
490 if (rc == SCST_AEN_RES_SUCCESS)
496 TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED UA (tgt_dev %p)",
498 scst_set_sense(sense_buffer, sizeof(sense_buffer),
499 tgt_dev->dev->d_sense,
500 SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
501 scst_check_set_UA(tgt_dev, sense_buffer,
502 sizeof(sense_buffer), 0);
505 mutex_unlock(&scst_mutex);
511 EXPORT_SYMBOL(scst_capacity_data_changed);
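/*
 * A dev handler would typically call scst_capacity_data_changed() right
 * after a (virtual) disk has been resized, e.g. (illustrative only, the
 * variable name is hypothetical):
 *
 *	scst_capacity_data_changed(virt_dev->dev);
 *
 * Initiators are then notified via a SCSI AEN where the target driver
 * supports report_aen(), otherwise via a CAPACITY DATA HAS CHANGED UA.
 */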
513 static inline bool scst_is_report_luns_changed_type(int type)
524 case TYPE_MEDIUM_CHANGER:
533 /* scst_mutex supposed to be held */
534 static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
537 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
538 struct list_head *shead;
539 struct scst_tgt_dev *tgt_dev;
544 TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
549 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
550 shead = &sess->sess_tgt_dev_list_hash[i];
552 list_for_each_entry(tgt_dev, shead,
553 sess_tgt_dev_list_entry) {
554 /* Lockdep triggers a false positive here. */
555 spin_lock(&tgt_dev->tgt_dev_lock);
559 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
560 shead = &sess->sess_tgt_dev_list_hash[i];
562 list_for_each_entry(tgt_dev, shead,
563 sess_tgt_dev_list_entry) {
564 if (!scst_is_report_luns_changed_type(
568 scst_set_sense(sense_buffer, sizeof(sense_buffer),
569 tgt_dev->dev->d_sense,
570 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
572 __scst_check_set_UA(tgt_dev, sense_buffer,
573 sizeof(sense_buffer),
574 flags | SCST_SET_UA_FLAG_GLOBAL);
578 for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
579 shead = &sess->sess_tgt_dev_list_hash[i];
581 list_for_each_entry_reverse(tgt_dev,
582 shead, sess_tgt_dev_list_entry) {
583 spin_unlock(&tgt_dev->tgt_dev_lock);
593 /* Activity is supposed to be suspended and scst_mutex held */
594 static void scst_report_luns_changed_sess(struct scst_session *sess)
597 struct list_head *shead;
598 struct scst_tgt_dev *tgt_dev;
599 struct scst_tgt_template *tgtt = sess->tgt->tgtt;
603 TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);
605 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
606 shead = &sess->sess_tgt_dev_list_hash[i];
608 list_for_each_entry(tgt_dev, shead,
609 sess_tgt_dev_list_entry) {
610 if (scst_is_report_luns_changed_type(
615 TRACE_MGMT_DBG("Not found a device capable REPORTED "
616 "LUNS DATA CHANGED UA (sess %p)", sess);
620 if (tgtt->report_aen != NULL) {
621 struct scst_aen *aen;
624 aen = scst_alloc_aen(tgt_dev);
628 aen->event_fn = SCST_AEN_SCSI;
629 aen->aen_sense_len = SCST_STANDARD_SENSE_LEN;
630 scst_set_sense(aen->aen_sense, aen->aen_sense_len,
631 tgt_dev->dev->d_sense,
632 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
634 TRACE_DBG("Calling target's %s report_aen(%p)",
636 rc = tgtt->report_aen(aen);
637 TRACE_DBG("Target's %s report_aen(%p) returned %d",
638 tgtt->name, aen, rc);
639 if (rc == SCST_AEN_RES_SUCCESS)
646 scst_queue_report_luns_changed_UA(sess, 0);
653 /* Activity is supposed to be suspended and scst_mutex held */
654 void scst_report_luns_changed(struct scst_acg *acg)
656 struct scst_session *sess;
660 TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);
662 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
663 scst_report_luns_changed_sess(sess);
670 void scst_aen_done(struct scst_aen *aen)
674 TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
675 aen->event_fn, aen->sess->initiator_name);
677 if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
680 if (aen->event_fn != SCST_AEN_SCSI)
683 TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
684 aen->sess->initiator_name);
686 if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
687 SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
688 scst_sense_reported_luns_data_changed))) {
689 mutex_lock(&scst_mutex);
690 scst_queue_report_luns_changed_UA(aen->sess,
691 SCST_SET_UA_FLAG_AT_HEAD);
692 mutex_unlock(&scst_mutex);
693 } else if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
694 SCST_SENSE_ALL_VALID,
695 SCST_LOAD_SENSE(scst_sense_capacity_data_changed))) {
696 /* tgt_dev might have gone away, so we need to look it up again */
697 struct list_head *shead;
698 struct scst_tgt_dev *tgt_dev;
701 lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));
703 mutex_lock(&scst_mutex);
705 shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
706 list_for_each_entry(tgt_dev, shead,
707 sess_tgt_dev_list_entry) {
708 if (tgt_dev->lun == lun) {
709 TRACE_MGMT_DBG("Queuing CAPACITY DATA CHANGED "
710 "UA (tgt_dev %p)", tgt_dev);
711 scst_check_set_UA(tgt_dev, aen->aen_sense,
713 SCST_SET_UA_FLAG_AT_HEAD);
718 mutex_unlock(&scst_mutex);
720 PRINT_ERROR("%s", "Unknown SCSI AEN");
728 EXPORT_SYMBOL(scst_aen_done);
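/*
 * Requeues the Unit Attention of a command whose delivery failed:
 * REPORTED LUNS DATA CHANGED is requeued for the whole session, anything
 * else goes back to the command's tgt_dev, in both cases at the head of
 * the UA list.
 */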
730 void scst_requeue_ua(struct scst_cmd *cmd)
734 if (scst_analyze_sense(cmd->sense, cmd->sense_bufflen,
735 SCST_SENSE_ALL_VALID,
736 SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
737 TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
738 "for delivery failed cmd %p", cmd);
739 mutex_lock(&scst_mutex);
740 scst_queue_report_luns_changed_UA(cmd->sess,
741 SCST_SET_UA_FLAG_AT_HEAD);
742 mutex_unlock(&scst_mutex);
744 TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
745 scst_check_set_UA(cmd->tgt_dev, cmd->sense,
746 cmd->sense_bufflen, SCST_SET_UA_FLAG_AT_HEAD);
753 /* Activity is supposed to be suspended and scst_mutex held */
754 static void scst_check_reassign_sess(struct scst_session *sess)
756 struct scst_acg *acg, *old_acg;
757 struct scst_acg_dev *acg_dev;
759 struct list_head *shead;
760 struct scst_tgt_dev *tgt_dev;
761 bool luns_changed = false;
762 bool add_failed, something_freed, not_needed_freed = false;
766 TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
767 sess, sess->initiator_name);
769 acg = scst_find_acg(sess);
770 if (acg == sess->acg) {
771 TRACE_MGMT_DBG("No reassignment for sess %p", sess);
775 TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
776 sess, sess->acg->acg_name, acg->acg_name);
779 sess->acg = NULL; /* to catch implicit dependencies earlier */
783 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
784 unsigned int inq_changed_ua_needed = 0;
786 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
787 shead = &sess->sess_tgt_dev_list_hash[i];
789 list_for_each_entry(tgt_dev, shead,
790 sess_tgt_dev_list_entry) {
791 if ((tgt_dev->dev == acg_dev->dev) &&
792 (tgt_dev->lun == acg_dev->lun) &&
793 (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
794 TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
795 "LUN %lld stays the same",
797 (unsigned long long)tgt_dev->lun);
798 tgt_dev->acg_dev = acg_dev;
800 } else if (tgt_dev->lun == acg_dev->lun)
801 inq_changed_ua_needed = 1;
807 TRACE_MGMT_DBG("sess %p: Allocing new tgt_dev for LUN %lld",
808 sess, (unsigned long long)acg_dev->lun);
810 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
811 if (tgt_dev == NULL) {
816 tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
822 something_freed = false;
823 not_needed_freed = true;
824 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
825 struct scst_tgt_dev *t;
826 shead = &sess->sess_tgt_dev_list_hash[i];
828 list_for_each_entry_safe(tgt_dev, t, shead,
829 sess_tgt_dev_list_entry) {
830 if (tgt_dev->acg_dev->acg != acg) {
831 TRACE_MGMT_DBG("sess %p: Deleting not used "
832 "tgt_dev %p for LUN %lld",
834 (unsigned long long)tgt_dev->lun);
836 something_freed = true;
837 scst_free_tgt_dev(tgt_dev);
842 if (add_failed && something_freed) {
843 TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
849 TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
850 old_acg->acg_name, acg->acg_name);
851 list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);
854 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
856 scst_report_luns_changed_sess(sess);
858 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
859 shead = &sess->sess_tgt_dev_list_hash[i];
861 list_for_each_entry(tgt_dev, shead,
862 sess_tgt_dev_list_entry) {
863 if (tgt_dev->inq_changed_ua_needed) {
864 TRACE_MGMT_DBG("sess %p: Setting "
865 "INQUIRY DATA HAS CHANGED UA "
866 "(tgt_dev %p)", sess, tgt_dev);
868 tgt_dev->inq_changed_ua_needed = 0;
870 scst_set_sense(sense_buffer,
871 sizeof(sense_buffer),
872 tgt_dev->dev->d_sense,
873 SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
875 scst_check_set_UA(tgt_dev, sense_buffer,
876 sizeof(sense_buffer), 0);
887 /* Activity is supposed to be suspended and scst_mutex held */
888 void scst_check_reassign_sessions(void)
890 struct scst_tgt_template *tgtt;
894 list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
895 struct scst_tgt *tgt;
896 list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
897 struct scst_session *sess;
898 list_for_each_entry(sess, &tgt->sess_list,
900 scst_check_reassign_sess(sess);
909 int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
915 switch (cmd->state) {
916 case SCST_CMD_STATE_INIT_WAIT:
917 case SCST_CMD_STATE_INIT:
918 case SCST_CMD_STATE_PRE_PARSE:
919 case SCST_CMD_STATE_DEV_PARSE:
920 case SCST_CMD_STATE_DEV_DONE:
922 res = SCST_CMD_STATE_FINISHED_INTERNAL;
924 res = SCST_CMD_STATE_PRE_XMIT_RESP;
927 case SCST_CMD_STATE_PRE_DEV_DONE:
928 case SCST_CMD_STATE_MODE_SELECT_CHECKS:
929 res = SCST_CMD_STATE_DEV_DONE;
932 case SCST_CMD_STATE_PRE_XMIT_RESP:
933 res = SCST_CMD_STATE_XMIT_RESP;
936 case SCST_CMD_STATE_PREPROCESS_DONE:
937 case SCST_CMD_STATE_PREPARE_SPACE:
938 case SCST_CMD_STATE_RDY_TO_XFER:
939 case SCST_CMD_STATE_DATA_WAIT:
940 case SCST_CMD_STATE_TGT_PRE_EXEC:
941 case SCST_CMD_STATE_SEND_FOR_EXEC:
942 case SCST_CMD_STATE_LOCAL_EXEC:
943 case SCST_CMD_STATE_REAL_EXEC:
944 case SCST_CMD_STATE_REAL_EXECUTING:
945 res = SCST_CMD_STATE_PRE_DEV_DONE;
949 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
950 cmd->state, cmd, cmd->cdb[0]);
952 /* Invalid state to suppress a compiler warning */
953 res = SCST_CMD_STATE_LAST_ACTIVE;
959 EXPORT_SYMBOL(scst_get_cmd_abnormal_done_state);
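/*
 * Moves the command straight to the "abnormal done" state computed by
 * scst_get_cmd_abnormal_done_state(); with CONFIG_SCST_EXTRACHECKS it
 * also complains about states from which such a transition makes no sense.
 */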
961 void scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
965 #ifdef CONFIG_SCST_EXTRACHECKS
966 switch (cmd->state) {
967 case SCST_CMD_STATE_XMIT_RESP:
968 case SCST_CMD_STATE_FINISHED:
969 case SCST_CMD_STATE_FINISHED_INTERNAL:
970 case SCST_CMD_STATE_XMIT_WAIT:
971 PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
972 cmd->state, cmd, cmd->cdb[0]);
977 cmd->state = scst_get_cmd_abnormal_done_state(cmd);
979 #ifdef CONFIG_SCST_EXTRACHECKS
980 if ((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
981 (cmd->tgt_dev == NULL) && !cmd->internal) {
982 PRINT_CRIT_ERROR("Wrong not inited cmd state %d (cmd %p, "
983 "op %x)", cmd->state, cmd, cmd->cdb[0]);
991 EXPORT_SYMBOL(scst_set_cmd_abnormal_done_state);
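/*
 * Truncates the response to resp_data_len: walks the SG list, shortens the
 * last needed entry and remembers the original count/length so that
 * scst_check_restore_sg_buff() can undo the change later.
 */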
993 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
999 scst_check_restore_sg_buff(cmd);
1000 cmd->resp_data_len = resp_data_len;
1002 if (resp_data_len == cmd->bufflen)
1006 for (i = 0; i < cmd->sg_cnt; i++) {
1007 l += cmd->sg[i].length;
1008 if (l >= resp_data_len) {
1009 int left = resp_data_len - (l - cmd->sg[i].length);
1010 #ifdef CONFIG_SCST_DEBUG
1011 TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
1012 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
1014 cmd, (long long unsigned int)cmd->tag,
1016 cmd->sg[i].length, left);
1018 cmd->orig_sg_cnt = cmd->sg_cnt;
1019 cmd->orig_sg_entry = i;
1020 cmd->orig_entry_len = cmd->sg[i].length;
1021 cmd->sg_cnt = (left > 0) ? i+1 : i;
1022 cmd->sg[i].length = left;
1023 cmd->sg_buff_modified = 1;
1032 EXPORT_SYMBOL(scst_set_resp_data_len);
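/*
 * Called on a QUEUE FULL condition in the target driver: if at least one
 * command has finished since the caller sampled finished_cmds, the command
 * is retried immediately; otherwise it is put on the target's retry list
 * and the retry timer is armed if not already running.
 */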
1035 int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
1037 struct scst_tgt *tgt = cmd->tgt;
1039 unsigned long flags;
1043 spin_lock_irqsave(&tgt->tgt_lock, flags);
1046 * A memory barrier is needed here, because we need exact ordering
1047 * between the write to retry_cmds and the read of finished_cmds, so
1048 * that we don't miss the case when a command finishes while we are
1049 * queuing this one for retry after the finished_cmds check.
1052 TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
1054 if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
1055 /* At least one cmd finished, so try again */
1057 TRACE_RETRY("Some command(s) finished, direct retry "
1058 "(finished_cmds=%d, tgt->finished_cmds=%d, "
1059 "retry_cmds=%d)", finished_cmds,
1060 atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
1062 goto out_unlock_tgt;
1065 TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
1066 list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
1068 if (!tgt->retry_timer_active) {
1069 tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
1070 add_timer(&tgt->retry_timer);
1071 tgt->retry_timer_active = 1;
1075 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1077 TRACE_EXIT_RES(res);
1081 /* Returns 0 to continue, >0 to restart, <0 to break */
1082 static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
1083 unsigned long cur_time, unsigned long max_time,
1084 struct scst_session *sess, unsigned long *flags,
1085 struct scst_tgt_template *tgtt)
1087 int res = -1; /* break */
1089 TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
1090 "pending time %ld", cmd, cmd->cmd_hw_pending,
1091 (long)(cur_time - cmd->start_time) / HZ,
1092 (long)(cur_time - cmd->hw_pending_start) / HZ);
1094 if (time_before_eq(cur_time, cmd->start_time + max_time)) {
1095 /* Cmds are ordered, so no need to check more */
1099 if (!cmd->cmd_hw_pending) {
1100 res = 0; /* continue */
1104 if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
1105 /* Cmds are ordered, so no need to check more */
1109 TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
1110 cmd, (cur_time - cmd->hw_pending_start) / HZ,
1113 cmd->cmd_hw_pending = 0;
1115 spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
1116 tgtt->on_hw_pending_cmd_timeout(cmd);
1117 spin_lock_irqsave(&sess->sess_list_lock, *flags);
1119 res = 1; /* restart */
1122 TRACE_EXIT_RES(res);
1126 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
1127 static void scst_hw_pending_work_fn(void *p)
1129 static void scst_hw_pending_work_fn(struct delayed_work *work)
1132 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
1133 struct scst_session *sess = (struct scst_session *)p;
1135 struct scst_session *sess = container_of(work, struct scst_session,
1138 struct scst_tgt_template *tgtt = sess->tgt->tgtt;
1139 struct scst_cmd *cmd;
1140 unsigned long cur_time = jiffies;
1141 unsigned long flags;
1142 unsigned long max_time = tgtt->max_hw_pending_time * HZ;
1146 TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);
1148 clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
1150 spin_lock_irqsave(&sess->sess_list_lock, flags);
1153 list_for_each_entry(cmd, &sess->search_cmd_list,
1154 sess_cmd_list_entry) {
1157 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
1168 list_for_each_entry(cmd, &sess->after_pre_xmit_cmd_list,
1169 sess_cmd_list_entry) {
1172 rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
1182 if (!list_empty(&sess->search_cmd_list) ||
1183 !list_empty(&sess->after_pre_xmit_cmd_list)) {
1185 * For stuck cmds, if there is no activity, we might need one more
1186 * run to release them, so reschedule once again.
1188 TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
1189 sess, tgtt->max_hw_pending_time);
1190 set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
1191 schedule_delayed_work(&sess->hw_pending_work,
1192 tgtt->max_hw_pending_time * HZ);
1195 spin_unlock_irqrestore(&sess->sess_list_lock, flags);
1201 /* Called under scst_mutex and suspended activity */
1202 int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
1204 struct scst_device *dev;
1206 static int dev_num; /* protected by scst_mutex */
1210 dev = kzalloc(sizeof(*dev), gfp_mask);
1212 TRACE(TRACE_OUT_OF_MEM, "%s",
1213 "Allocation of scst_device failed");
1218 dev->handler = &scst_null_devtype;
1219 dev->p_cmd_lists = &scst_main_cmd_lists;
1220 atomic_set(&dev->dev_cmd_count, 0);
1221 atomic_set(&dev->write_cmd_count, 0);
1222 scst_init_mem_lim(&dev->dev_mem_lim);
1223 spin_lock_init(&dev->dev_lock);
1224 atomic_set(&dev->on_dev_count, 0);
1225 INIT_LIST_HEAD(&dev->blocked_cmd_list);
1226 INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
1227 INIT_LIST_HEAD(&dev->dev_acg_dev_list);
1228 INIT_LIST_HEAD(&dev->threads_list);
1229 init_waitqueue_head(&dev->on_dev_waitQ);
1230 dev->dev_double_ua_possible = 1;
1231 dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
1232 dev->dev_num = dev_num++;
1234 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1235 #if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
1236 dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1237 if (dev->dev_io_ctx == NULL) {
1238 TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
1249 TRACE_EXIT_RES(res);
1253 /* Called under scst_mutex and suspended activity */
1254 void scst_free_device(struct scst_device *dev)
1258 #ifdef CONFIG_SCST_EXTRACHECKS
1259 if (!list_empty(&dev->dev_tgt_dev_list) ||
1260 !list_empty(&dev->dev_acg_dev_list)) {
1261 PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
1262 "is not empty!", __func__);
1267 __exit_io_context(dev->dev_io_ctx);
1275 void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
1277 atomic_set(&mem_lim->alloced_pages, 0);
1278 mem_lim->max_allowed_pages =
1279 ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
1281 EXPORT_SYMBOL(scst_init_mem_lim);
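/*
 * scst_init_mem_lim() above converts the scst_max_dev_cmd_mem limit, given
 * in megabytes, into a page budget for the memory limit structure.
 * scst_alloc_acg_dev() below allocates the ACG<->device binding object for
 * the given LUN.
 */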
1283 static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
1284 struct scst_device *dev, uint64_t lun)
1286 struct scst_acg_dev *res;
1290 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1291 res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
1293 res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
1296 TRACE(TRACE_OUT_OF_MEM,
1297 "%s", "Allocation of scst_acg_dev failed");
1300 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1301 memset(res, 0, sizeof(*res));
1309 TRACE_EXIT_HRES(res);
1313 /* Activity is supposed to be suspended and scst_mutex held */
1314 static void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
1318 TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
1320 list_del(&acg_dev->acg_dev_list_entry);
1321 list_del(&acg_dev->dev_acg_dev_list_entry);
1323 kmem_cache_free(scst_acgd_cachep, acg_dev);
1329 /* Activity is supposed to be suspended and scst_mutex held */
1330 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
1332 struct scst_acg *acg;
1336 acg = kzalloc(sizeof(*acg), GFP_KERNEL);
1338 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
1342 INIT_LIST_HEAD(&acg->acg_dev_list);
1343 INIT_LIST_HEAD(&acg->acg_sess_list);
1344 INIT_LIST_HEAD(&acg->acn_list);
1345 acg->acg_name = acg_name;
1347 TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
1348 list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
1350 scst_check_reassign_sessions();
1353 TRACE_EXIT_HRES(acg);
1357 /* Activity is supposed to be suspended and scst_mutex held */
1358 int scst_destroy_acg(struct scst_acg *acg)
1360 struct scst_acn *n, *nn;
1361 struct scst_acg_dev *acg_dev, *acg_dev_tmp;
1366 if (!list_empty(&acg->acg_sess_list)) {
1367 PRINT_ERROR("%s: acg_sess_list is not empty!", __func__);
1372 TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
1373 list_del(&acg->scst_acg_list_entry);
1375 /* Freeing acg_devs */
1376 list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
1377 acg_dev_list_entry) {
1378 struct scst_tgt_dev *tgt_dev, *tt;
1379 list_for_each_entry_safe(tgt_dev, tt,
1380 &acg_dev->dev->dev_tgt_dev_list,
1381 dev_tgt_dev_list_entry) {
1382 if (tgt_dev->acg_dev == acg_dev)
1383 scst_free_tgt_dev(tgt_dev);
1385 scst_free_acg_dev(acg_dev);
1389 list_for_each_entry_safe(n, nn, &acg->acn_list,
1391 list_del(&n->acn_list_entry);
1395 INIT_LIST_HEAD(&acg->acn_list);
1399 TRACE_EXIT_RES(res);
1404 * scst_mutex is supposed to be held, and there must be no parallel activity in this
1407 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
1408 struct scst_acg_dev *acg_dev)
1410 int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
1411 struct scst_tgt_dev *tgt_dev, *t = NULL;
1412 struct scst_device *dev = acg_dev->dev;
1413 struct list_head *sess_tgt_dev_list_head;
1414 struct scst_tgt_template *vtt = sess->tgt->tgtt;
1416 bool share_io_ctx = false;
1417 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1421 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1422 tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
1424 tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
1426 if (tgt_dev == NULL) {
1427 TRACE(TRACE_OUT_OF_MEM, "%s",
1428 "Allocation of scst_tgt_dev failed");
1431 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
1432 memset(tgt_dev, 0, sizeof(*tgt_dev));
1436 tgt_dev->lun = acg_dev->lun;
1437 tgt_dev->acg_dev = acg_dev;
1438 tgt_dev->sess = sess;
1439 atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
1441 scst_sgv_pool_use_norm(tgt_dev);
1443 if (dev->scsi_dev != NULL) {
1444 ini_sg = dev->scsi_dev->host->sg_tablesize;
1445 ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
1446 ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
1449 ini_sg = (1 << 15) /* infinite */;
1450 ini_unchecked_isa_dma = 0;
1451 ini_use_clustering = 0;
1453 tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
1455 if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
1456 !sess->tgt->tgtt->no_clustering)
1457 scst_sgv_pool_use_norm_clust(tgt_dev);
1459 if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
1460 scst_sgv_pool_use_dma(tgt_dev);
1462 if (dev->scsi_dev != NULL) {
1463 TRACE_MGMT_DBG("host=%d, channel=%d, id=%d, lun=%d, "
1464 "SCST lun=%lld", dev->scsi_dev->host->host_no,
1465 dev->scsi_dev->channel, dev->scsi_dev->id,
1467 (long long unsigned int)tgt_dev->lun);
1469 TRACE_MGMT_DBG("Virtual device %s on SCST lun=%lld",
1470 dev->virt_name, (long long unsigned int)tgt_dev->lun);
1473 spin_lock_init(&tgt_dev->tgt_dev_lock);
1474 INIT_LIST_HEAD(&tgt_dev->UA_list);
1475 spin_lock_init(&tgt_dev->thr_data_lock);
1476 INIT_LIST_HEAD(&tgt_dev->thr_data_list);
1477 spin_lock_init(&tgt_dev->sn_lock);
1478 INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
1479 INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
1480 tgt_dev->expected_sn = 1;
1481 tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
1482 tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
1483 for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
1484 atomic_set(&tgt_dev->sn_slots[i], 0);
1486 if (dev->handler->parse_atomic &&
1487 (sess->tgt->tgtt->preprocessing_done == NULL)) {
1488 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1489 __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
1490 &tgt_dev->tgt_dev_flags);
1491 if (dev->handler->exec_atomic)
1492 __set_bit(SCST_TGT_DEV_AFTER_INIT_OTH_ATOMIC,
1493 &tgt_dev->tgt_dev_flags);
1495 if (dev->handler->exec_atomic) {
1496 if (sess->tgt->tgtt->rdy_to_xfer_atomic)
1497 __set_bit(SCST_TGT_DEV_AFTER_RESTART_WR_ATOMIC,
1498 &tgt_dev->tgt_dev_flags);
1499 __set_bit(SCST_TGT_DEV_AFTER_RESTART_OTH_ATOMIC,
1500 &tgt_dev->tgt_dev_flags);
1501 __set_bit(SCST_TGT_DEV_AFTER_RX_DATA_ATOMIC,
1502 &tgt_dev->tgt_dev_flags);
1504 if (dev->handler->dev_done_atomic &&
1505 sess->tgt->tgtt->xmit_response_atomic) {
1506 __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
1507 &tgt_dev->tgt_dev_flags);
1510 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1511 dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
1512 scst_alloc_set_UA(tgt_dev, sense_buffer, sizeof(sense_buffer), 0);
1514 tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
1516 if (tgt_dev->sess->initiator_name != NULL) {
1517 spin_lock_bh(&dev->dev_lock);
1518 list_for_each_entry(t, &dev->dev_tgt_dev_list,
1519 dev_tgt_dev_list_entry) {
1520 TRACE_DBG("t name %s (tgt_dev name %s)",
1521 t->sess->initiator_name,
1522 tgt_dev->sess->initiator_name);
1523 if (t->sess->initiator_name == NULL)
1525 if (strcmp(t->sess->initiator_name,
1526 tgt_dev->sess->initiator_name) == 0) {
1527 share_io_ctx = true;
1531 spin_unlock_bh(&dev->dev_lock);
1535 TRACE_MGMT_DBG("Sharing IO context %p (tgt_dev %p, ini %s)",
1536 t->tgt_dev_io_ctx, tgt_dev,
1537 tgt_dev->sess->initiator_name);
1538 tgt_dev->tgt_dev_io_ctx = ioc_task_link(t->tgt_dev_io_ctx);
1540 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1541 #if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
1542 tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
1543 if (tgt_dev->tgt_dev_io_ctx == NULL) {
1544 TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO "
1545 "context for dev %s (initiator %s)",
1546 dev->virt_name, sess->initiator_name);
1553 if (vtt->threads_num > 0) {
1555 if (dev->handler->threads_num > 0)
1556 rc = scst_add_dev_threads(dev, vtt->threads_num);
1557 else if (dev->handler->threads_num == 0)
1558 rc = scst_add_global_threads(vtt->threads_num);
1563 if (dev->handler && dev->handler->attach_tgt) {
1564 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1566 rc = dev->handler->attach_tgt(tgt_dev);
1567 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1569 PRINT_ERROR("Device handler's %s attach_tgt() "
1570 "failed: %d", dev->handler->name, rc);
1575 spin_lock_bh(&dev->dev_lock);
1576 list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
1577 if (dev->dev_reserved)
1578 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
1579 spin_unlock_bh(&dev->dev_lock);
1581 sess_tgt_dev_list_head =
1582 &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
1583 list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
1584 sess_tgt_dev_list_head);
1591 if (vtt->threads_num > 0) {
1592 if (dev->handler->threads_num > 0)
1593 scst_del_dev_threads(dev, vtt->threads_num);
1594 else if (dev->handler->threads_num == 0)
1595 scst_del_global_threads(vtt->threads_num);
1599 scst_free_all_UA(tgt_dev);
1600 __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1602 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1607 /* No locks are supposed to be held, except scst_mutex, which must be held */
1608 void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
1612 scst_clear_reservation(tgt_dev);
1614 /* With activity suspended the lock isn't needed, but let's be safe */
1615 spin_lock_bh(&tgt_dev->tgt_dev_lock);
1616 scst_free_all_UA(tgt_dev);
1617 memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
1618 spin_unlock_bh(&tgt_dev->tgt_dev_lock);
1621 uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
1622 scst_set_sense(sense_buffer, sizeof(sense_buffer),
1623 tgt_dev->dev->d_sense,
1624 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
1625 scst_check_set_UA(tgt_dev, sense_buffer,
1626 sizeof(sense_buffer), 0);
1634 * scst_mutex is supposed to be held, and there must be no parallel activity in this
1637 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
1639 struct scst_device *dev = tgt_dev->dev;
1640 struct scst_tgt_template *vtt = tgt_dev->sess->tgt->tgtt;
1644 tm_dbg_deinit_tgt_dev(tgt_dev);
1646 spin_lock_bh(&dev->dev_lock);
1647 list_del(&tgt_dev->dev_tgt_dev_list_entry);
1648 spin_unlock_bh(&dev->dev_lock);
1650 list_del(&tgt_dev->sess_tgt_dev_list_entry);
1652 scst_clear_reservation(tgt_dev);
1653 scst_free_all_UA(tgt_dev);
1655 if (dev->handler && dev->handler->detach_tgt) {
1656 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1658 dev->handler->detach_tgt(tgt_dev);
1659 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1662 if (vtt->threads_num > 0) {
1663 if (dev->handler->threads_num > 0)
1664 scst_del_dev_threads(dev, vtt->threads_num);
1665 else if (dev->handler->threads_num == 0)
1666 scst_del_global_threads(vtt->threads_num);
1669 __exit_io_context(tgt_dev->tgt_dev_io_ctx);
1671 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
1677 /* scst_mutex supposed to be held */
1678 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
1681 struct scst_acg_dev *acg_dev;
1682 struct scst_tgt_dev *tgt_dev;
1686 list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
1687 acg_dev_list_entry) {
1688 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1689 if (tgt_dev == NULL) {
1700 scst_sess_free_tgt_devs(sess);
1705 * scst_mutex is supposed to be held, and there must be no parallel activity in this
1708 static void scst_sess_free_tgt_devs(struct scst_session *sess)
1711 struct scst_tgt_dev *tgt_dev, *t;
1715 /* The session is going down, no users, so no locks */
1716 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
1717 struct list_head *sess_tgt_dev_list_head =
1718 &sess->sess_tgt_dev_list_hash[i];
1719 list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
1720 sess_tgt_dev_list_entry) {
1721 scst_free_tgt_dev(tgt_dev);
1723 INIT_LIST_HEAD(sess_tgt_dev_list_head);
1730 /* Activity is supposed to be suspended and scst_mutex held */
1731 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev,
1732 uint64_t lun, int read_only)
1735 struct scst_acg_dev *acg_dev;
1736 struct scst_tgt_dev *tgt_dev;
1737 struct scst_session *sess;
1738 LIST_HEAD(tmp_tgt_dev_list);
1742 INIT_LIST_HEAD(&tmp_tgt_dev_list);
1744 #ifdef CONFIG_SCST_EXTRACHECKS
1745 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
1746 if (acg_dev->dev == dev) {
1747 PRINT_ERROR("Device is already in group %s",
1755 acg_dev = scst_alloc_acg_dev(acg, dev, lun);
1756 if (acg_dev == NULL) {
1760 acg_dev->rd_only = read_only;
1762 TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
1764 list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
1765 list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
1767 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
1768 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
1769 if (tgt_dev == NULL) {
1773 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1777 scst_report_luns_changed(acg);
1779 if (dev->virt_name != NULL) {
1780 PRINT_INFO("Added device %s to group %s (LUN %lld, "
1781 "rd_only %d)", dev->virt_name, acg->acg_name,
1782 (long long unsigned int)lun,
1785 PRINT_INFO("Added device %d:%d:%d:%d to group %s (LUN "
1786 "%lld, rd_only %d)",
1787 dev->scsi_dev->host->host_no,
1788 dev->scsi_dev->channel, dev->scsi_dev->id,
1789 dev->scsi_dev->lun, acg->acg_name,
1790 (long long unsigned int)lun,
1795 TRACE_EXIT_RES(res);
1799 list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
1800 extra_tgt_dev_list_entry) {
1801 scst_free_tgt_dev(tgt_dev);
1803 scst_free_acg_dev(acg_dev);
1807 /* Activity is supposed to be suspended and scst_mutex held */
1808 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
1811 struct scst_acg_dev *acg_dev = NULL, *a;
1812 struct scst_tgt_dev *tgt_dev, *tt;
1816 list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
1817 if (a->dev == dev) {
1823 if (acg_dev == NULL) {
1824 PRINT_ERROR("Device is not found in group %s", acg->acg_name);
1829 list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
1830 dev_tgt_dev_list_entry) {
1831 if (tgt_dev->acg_dev == acg_dev)
1832 scst_free_tgt_dev(tgt_dev);
1834 scst_free_acg_dev(acg_dev);
1836 scst_report_luns_changed(acg);
1838 if (dev->virt_name != NULL) {
1839 PRINT_INFO("Removed device %s from group %s",
1840 dev->virt_name, acg->acg_name);
1842 PRINT_INFO("Removed device %d:%d:%d:%d from group %s",
1843 dev->scsi_dev->host->host_no,
1844 dev->scsi_dev->channel, dev->scsi_dev->id,
1845 dev->scsi_dev->lun, acg->acg_name);
1849 TRACE_EXIT_RES(res);
1853 /* Activity is supposed to be suspended and scst_mutex held */
1854 int scst_acg_add_name(struct scst_acg *acg, const char *name)
1863 list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1864 if (strcmp(n->name, name) == 0) {
1865 PRINT_ERROR("Name %s already exists in group %s",
1866 name, acg->acg_name);
1872 n = kmalloc(sizeof(*n), GFP_KERNEL);
1874 PRINT_ERROR("%s", "Unable to allocate scst_acn");
1880 nm = kmalloc(len + 1, GFP_KERNEL);
1882 PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
1890 list_add_tail(&n->acn_list_entry, &acg->acn_list);
1894 PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
1895 scst_check_reassign_sessions();
1898 TRACE_EXIT_RES(res);
1906 /* scst_mutex supposed to be held */
1907 void __scst_acg_remove_acn(struct scst_acn *n)
1911 list_del(&n->acn_list_entry);
1919 /* Activity is supposed to be suspended and scst_mutex held */
1920 int scst_acg_remove_name(struct scst_acg *acg, const char *name, bool reassign)
1927 list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
1928 if (strcmp(n->name, name) == 0) {
1929 __scst_acg_remove_acn(n);
1936 PRINT_INFO("Removed name %s from group %s", name,
1939 scst_check_reassign_sessions();
1941 PRINT_ERROR("Unable to find name %s in group %s", name,
1944 TRACE_EXIT_RES(res);
1948 static struct scst_cmd *scst_create_prepare_internal_cmd(
1949 struct scst_cmd *orig_cmd, int bufsize)
1951 struct scst_cmd *res;
1952 gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
1956 res = scst_alloc_cmd(gfp_mask);
1960 res->cmd_lists = orig_cmd->cmd_lists;
1961 res->sess = orig_cmd->sess;
1962 res->atomic = scst_cmd_atomic(orig_cmd);
1964 res->tgtt = orig_cmd->tgtt;
1965 res->tgt = orig_cmd->tgt;
1966 res->dev = orig_cmd->dev;
1967 res->tgt_dev = orig_cmd->tgt_dev;
1968 res->lun = orig_cmd->lun;
1969 res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
1970 res->data_direction = SCST_DATA_UNKNOWN;
1971 res->orig_cmd = orig_cmd;
1972 res->bufflen = bufsize;
1974 scst_sess_get(res->sess);
1975 if (res->tgt_dev != NULL)
1978 res->state = SCST_CMD_STATE_PRE_PARSE;
1981 TRACE_EXIT_HRES((unsigned long)res);
1985 int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
1988 static const uint8_t request_sense[6] =
1989 { REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0 };
1990 struct scst_cmd *rs_cmd;
1994 if (orig_cmd->sense != NULL) {
1995 TRACE_MEM("Releasing sense %p (orig_cmd %p)",
1996 orig_cmd->sense, orig_cmd);
1997 mempool_free(orig_cmd->sense, scst_sense_mempool);
1998 orig_cmd->sense = NULL;
2001 rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
2002 SCST_SENSE_BUFFERSIZE);
2006 memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
2007 rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
2008 rs_cmd->cdb_len = sizeof(request_sense);
2009 rs_cmd->data_direction = SCST_DATA_READ;
2010 rs_cmd->expected_data_direction = rs_cmd->data_direction;
2011 rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
2012 rs_cmd->expected_values_set = 1;
2014 TRACE(TRACE_MGMT_MINOR, "Adding REQUEST SENSE cmd %p to head of active "
2015 "cmd list", rs_cmd);
2016 spin_lock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2017 list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_lists->active_cmd_list);
2018 wake_up(&rs_cmd->cmd_lists->cmd_list_waitQ);
2019 spin_unlock_irq(&rs_cmd->cmd_lists->cmd_list_lock);
2022 TRACE_EXIT_RES(res);
2030 static void scst_complete_request_sense(struct scst_cmd *req_cmd)
2032 struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
2038 sBUG_ON(orig_cmd == NULL);
2040 len = scst_get_buf_first(req_cmd, &buf);
2042 if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
2043 SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
2044 PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
2046 scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
2049 PRINT_ERROR("%s", "Unable to get the sense via "
2050 "REQUEST SENSE, returning HARDWARE ERROR");
2051 scst_set_cmd_error(orig_cmd,
2052 SCST_LOAD_SENSE(scst_sense_hardw_error));
2056 scst_put_buf(req_cmd, buf);
2058 TRACE(TRACE_MGMT_MINOR, "Adding orig cmd %p to head of active "
2059 "cmd list", orig_cmd);
2060 spin_lock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2061 list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_lists->active_cmd_list);
2062 wake_up(&orig_cmd->cmd_lists->cmd_list_waitQ);
2063 spin_unlock_irq(&orig_cmd->cmd_lists->cmd_list_lock);
2069 int scst_finish_internal_cmd(struct scst_cmd *cmd)
2075 sBUG_ON(!cmd->internal);
2077 if (cmd->cdb[0] == REQUEST_SENSE)
2078 scst_complete_request_sense(cmd);
2080 __scst_cmd_put(cmd);
2082 res = SCST_CMD_STATE_RES_CONT_NEXT;
2084 TRACE_EXIT_HRES(res);
2088 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2089 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
2091 struct scsi_request *req;
2095 if (scsi_cmd && (req = scsi_cmd->sc_request)) {
2097 if (req->sr_bufflen)
2098 kfree(req->sr_buffer);
2099 scsi_release_request(req);
2107 static void scst_send_release(struct scst_device *dev)
2109 struct scsi_request *req;
2110 struct scsi_device *scsi_dev;
2115 if (dev->scsi_dev == NULL)
2118 scsi_dev = dev->scsi_dev;
2120 req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
2122 PRINT_ERROR("Allocation of scsi_request failed: unable "
2123 "to RELEASE device %d:%d:%d:%d",
2124 scsi_dev->host->host_no, scsi_dev->channel,
2125 scsi_dev->id, scsi_dev->lun);
2129 memset(cdb, 0, sizeof(cdb));
2131 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2132 ((scsi_dev->lun << 5) & 0xe0) : 0;
2133 memcpy(req->sr_cmnd, cdb, sizeof(cdb));
2134 req->sr_cmd_len = sizeof(cdb);
2135 req->sr_data_direction = SCST_DATA_NONE;
2137 req->sr_bufflen = 0;
2138 req->sr_buffer = NULL;
2139 req->sr_request->rq_disk = dev->rq_disk;
2140 req->sr_sense_buffer[0] = 0;
2142 TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
2144 scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
2145 scst_req_done, 15, 3);
2151 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
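/*
 * On 2.6.18+ kernels the RELEASE is issued synchronously via scsi_execute()
 * and retried up to 5 times; a failure is logged together with the returned
 * sense data.
 */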
2152 static void scst_send_release(struct scst_device *dev)
2154 struct scsi_device *scsi_dev;
2155 unsigned char cdb[6];
2156 uint8_t sense[SCSI_SENSE_BUFFERSIZE];
2161 if (dev->scsi_dev == NULL)
2164 scsi_dev = dev->scsi_dev;
2166 for (i = 0; i < 5; i++) {
2167 memset(cdb, 0, sizeof(cdb));
2169 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
2170 ((scsi_dev->lun << 5) & 0xe0) : 0;
2172 memset(sense, 0, sizeof(sense));
2174 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
2176 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
2178 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
2182 TRACE_DBG("MODE_SENSE done: %x", rc);
2184 if (scsi_status_is_good(rc)) {
2187 PRINT_ERROR("RELEASE failed: %d", rc);
2188 PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
2189 scst_check_internal_sense(dev, rc, sense,
2198 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
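/*
 * Drops a SCSI-2 reservation seen through this tgt_dev: if the device is
 * reserved and this tgt_dev is the holder, the RESERVED flag is cleared on
 * all tgt_devs of the device and, if needed, a RELEASE is sent to the
 * underlying SCSI device.
 */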
2200 /* scst_mutex supposed to be held */
2201 static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
2203 struct scst_device *dev = tgt_dev->dev;
2208 spin_lock_bh(&dev->dev_lock);
2209 if (dev->dev_reserved &&
2210 !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
2211 /* This tgt_dev is the one holding the reservation */
2212 struct scst_tgt_dev *tgt_dev_tmp;
2213 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
2214 dev_tgt_dev_list_entry) {
2215 clear_bit(SCST_TGT_DEV_RESERVED,
2216 &tgt_dev_tmp->tgt_dev_flags);
2218 dev->dev_reserved = 0;
2221 spin_unlock_bh(&dev->dev_lock);
2224 scst_send_release(dev);
2230 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
2231 const char *initiator_name)
2233 struct scst_session *sess;
2240 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2241 sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
2243 sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
2246 TRACE(TRACE_OUT_OF_MEM, "%s",
2247 "Allocation of scst_session failed");
2250 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2251 memset(sess, 0, sizeof(*sess));
2254 sess->init_phase = SCST_SESS_IPH_INITING;
2255 sess->shut_phase = SCST_SESS_SPH_READY;
2256 atomic_set(&sess->refcnt, 0);
2257 for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
2258 struct list_head *sess_tgt_dev_list_head =
2259 &sess->sess_tgt_dev_list_hash[i];
2260 INIT_LIST_HEAD(sess_tgt_dev_list_head);
2262 spin_lock_init(&sess->sess_list_lock);
2263 INIT_LIST_HEAD(&sess->search_cmd_list);
2264 INIT_LIST_HEAD(&sess->after_pre_xmit_cmd_list);
2266 INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
2267 INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
2268 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
2269 INIT_DELAYED_WORK(&sess->hw_pending_work,
2270 (void (*)(struct work_struct *))scst_hw_pending_work_fn);
2272 INIT_WORK(&sess->hw_pending_work, scst_hw_pending_work_fn, sess);
2275 #ifdef CONFIG_SCST_MEASURE_LATENCY
2276 spin_lock_init(&sess->meas_lock);
2279 len = strlen(initiator_name);
2280 nm = kmalloc(len + 1, gfp_mask);
2282 PRINT_ERROR("%s", "Unable to allocate sess->initiator_name");
2286 strcpy(nm, initiator_name);
2287 sess->initiator_name = nm;
2294 kmem_cache_free(scst_sess_cachep, sess);
2299 void scst_free_session(struct scst_session *sess)
2303 mutex_lock(&scst_mutex);
2305 TRACE_DBG("Removing sess %p from the list", sess);
2306 list_del(&sess->sess_list_entry);
2307 TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
2308 list_del(&sess->acg_sess_list_entry);
2310 scst_sess_free_tgt_devs(sess);
2312 wake_up_all(&sess->tgt->unreg_waitQ);
2314 mutex_unlock(&scst_mutex);
2316 kfree(sess->initiator_name);
2317 kmem_cache_free(scst_sess_cachep, sess);
2323 void scst_free_session_callback(struct scst_session *sess)
2325 struct completion *c;
2329 TRACE_DBG("Freeing session %p", sess);
2331 cancel_delayed_work_sync(&sess->hw_pending_work);
2333 c = sess->shutdown_compl;
2335 if (sess->unreg_done_fn) {
2336 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
2337 sess->unreg_done_fn(sess);
2338 TRACE_DBG("%s", "unreg_done_fn() returned");
2340 scst_free_session(sess);
2349 void scst_sched_session_free(struct scst_session *sess)
2351 unsigned long flags;
2355 if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
2356 PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
2357 "shut phase %lx", sess, sess->shut_phase);
2361 spin_lock_irqsave(&scst_mgmt_lock, flags);
2362 TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
2363 list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
2364 spin_unlock_irqrestore(&scst_mgmt_lock, flags);
2366 wake_up(&scst_mgmt_waitQ);
2372 void scst_cmd_get(struct scst_cmd *cmd)
2374 __scst_cmd_get(cmd);
2376 EXPORT_SYMBOL(scst_cmd_get);
2378 void scst_cmd_put(struct scst_cmd *cmd)
2380 __scst_cmd_put(cmd);
2382 EXPORT_SYMBOL(scst_cmd_put);
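/*
 * Allocates a command from scst_cmd_cachep and sets the defaults: initial
 * state INIT_WAIT, SIMPLE queue type, default timeout, reference count 1
 * and the global scst_main_cmd_lists.
 */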
2384 struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
2386 struct scst_cmd *cmd;
2390 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2391 cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
2393 cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
2396 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
2399 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
2400 memset(cmd, 0, sizeof(*cmd));
2403 cmd->state = SCST_CMD_STATE_INIT_WAIT;
2404 cmd->start_time = jiffies;
2405 atomic_set(&cmd->cmd_ref, 1);
2406 cmd->cmd_lists = &scst_main_cmd_lists;
2407 INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
2408 cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
2409 cmd->timeout = SCST_DEFAULT_TIMEOUT;
2412 cmd->is_send_status = 1;
2413 cmd->resp_data_len = -1;
2415 cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
2416 cmd->dbl_ua_orig_resp_data_len = -1;
2423 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
2425 scst_sess_put(cmd->sess);
2428 * At this point tgt_dev can be dead, but the pointer remains non-NULL
2430 if (likely(cmd->tgt_dev != NULL))
2433 scst_destroy_cmd(cmd);
2437 /* No locks supposed to be held */
2438 void scst_free_cmd(struct scst_cmd *cmd)
2444 TRACE_DBG("Freeing cmd %p (tag %llu)",
2445 cmd, (long long unsigned int)cmd->tag);
2447 if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
2448 TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
2449 cmd, atomic_read(&scst_cmd_count));
2452 sBUG_ON(cmd->inc_blocking || cmd->needs_unblocking ||
2453 cmd->dec_on_dev_needed);
2455 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2456 #if defined(CONFIG_SCST_EXTRACHECKS)
2457 if (cmd->scsi_req) {
2458 PRINT_ERROR("%s: %s", __func__, "Cmd with unfreed "
2460 scst_release_request(cmd);
2466 * The target driver may have already freed the sg buffer before calling
2467 * scst_tgt_cmd_done(); e.g., scst_local has to do that.
2469 if (!cmd->tgt_data_buf_alloced)
2470 scst_check_restore_sg_buff(cmd);
2472 if (cmd->tgtt->on_free_cmd != NULL) {
2473 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
2474 cmd->tgtt->on_free_cmd(cmd);
2475 TRACE_DBG("%s", "Target's on_free_cmd() returned");
2478 if (likely(cmd->dev != NULL)) {
2479 struct scst_dev_type *handler = cmd->dev->handler;
2480 if (handler->on_free_cmd != NULL) {
2481 TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
2482 handler->name, cmd);
2483 handler->on_free_cmd(cmd);
2484 TRACE_DBG("Dev handler %s on_free_cmd() returned",
2489 scst_release_space(cmd);
2491 if (unlikely(cmd->sense != NULL)) {
2492 TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
2493 mempool_free(cmd->sense, scst_sense_mempool);
2497 if (likely(cmd->tgt_dev != NULL)) {
2498 #ifdef CONFIG_SCST_EXTRACHECKS
2499 if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
2500 PRINT_ERROR("Finishing not executed cmd %p (opcode "
2501 "%d, target %s, LUN %lld, sn %ld, expected_sn %ld)",
2502 cmd, cmd->cdb[0], cmd->tgtt->name,
2503 (long long unsigned int)cmd->lun,
2504 cmd->sn, cmd->tgt_dev->expected_sn);
2505 scst_unblock_deferred(cmd->tgt_dev, cmd);
2509 if (unlikely(cmd->out_of_sn)) {
2510 TRACE_SN("Out of SN cmd %p (tag %llu, sn %ld), "
2512 (long long unsigned int)cmd->tag,
2514 destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
2519 if (likely(destroy))
2520 scst_destroy_put_cmd(cmd);
2526 /* No locks supposed to be held. */
2527 void scst_check_retries(struct scst_tgt *tgt)
2529 int need_wake_up = 0;
2534 * We don't worry about overflow of finished_cmds, because we check
2535 * only for its change.
2537 atomic_inc(&tgt->finished_cmds);
2538 /* See comment in scst_queue_retry_cmd() */
2539 smp_mb__after_atomic_inc();
2540 if (unlikely(tgt->retry_cmds > 0)) {
2541 struct scst_cmd *c, *tc;
2542 unsigned long flags;
2544 TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
2547 spin_lock_irqsave(&tgt->tgt_lock, flags);
2548 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
2552 TRACE_RETRY("Moving retry cmd %p to head of active "
2553 "cmd list (retry_cmds left %d)",
2554 c, tgt->retry_cmds);
2555 spin_lock(&c->cmd_lists->cmd_list_lock);
2556 list_move(&c->cmd_list_entry,
2557 &c->cmd_lists->active_cmd_list);
2558 wake_up(&c->cmd_lists->cmd_list_waitQ);
2559 spin_unlock(&c->cmd_lists->cmd_list_lock);
2562 if (need_wake_up >= 2) /* "slow start" */
2565 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2572 void scst_tgt_retry_timer_fn(unsigned long arg)
2574 struct scst_tgt *tgt = (struct scst_tgt *)arg;
2575 unsigned long flags;
2577 TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
2579 spin_lock_irqsave(&tgt->tgt_lock, flags);
2580 tgt->retry_timer_active = 0;
2581 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
2583 scst_check_retries(tgt);
2589 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
2591 struct scst_mgmt_cmd *mcmd;
2595 mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
2597 PRINT_CRIT_ERROR("%s", "Allocation of management command "
2598 "failed, some commands and their data could leak");
2601 memset(mcmd, 0, sizeof(*mcmd));
2608 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
2610 unsigned long flags;
2614 spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
2615 atomic_dec(&mcmd->sess->sess_cmd_count);
2616 spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
2618 scst_sess_put(mcmd->sess);
2620 if (mcmd->mcmd_tgt_dev != NULL)
2623 mempool_free(mcmd, scst_mgmt_mempool);
2629 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
2630 int scst_alloc_request(struct scst_cmd *cmd)
2633 struct scsi_request *req;
2634 int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
2638 /* cmd->dev->scsi_dev must be non-NULL here */
2639 req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
2641 TRACE(TRACE_OUT_OF_MEM, "%s",
2642 "Allocation of scsi_request failed");
2647 cmd->scsi_req = req;
2649 memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
2650 req->sr_cmd_len = cmd->cdb_len;
2651 req->sr_data_direction = cmd->data_direction;
2652 req->sr_use_sg = cmd->sg_cnt;
2653 req->sr_bufflen = cmd->bufflen;
2654 req->sr_buffer = cmd->sg;
2655 req->sr_request->rq_disk = cmd->dev->rq_disk;
2656 req->sr_sense_buffer[0] = 0;
2658 cmd->scsi_req->upper_private_data = cmd;
2665 void scst_release_request(struct scst_cmd *cmd)
2667 scsi_release_request(cmd->scsi_req);
2668 cmd->scsi_req = NULL;
2672 static bool is_report_sg_limitation(void)
2674 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
2675 return (trace_flag & TRACE_OUT_OF_MEM) != 0;
2681 int scst_alloc_space(struct scst_cmd *cmd)
2685 int atomic = scst_cmd_atomic(cmd);
2687 struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
2692 gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
2694 flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
2696 flags |= SGV_POOL_ALLOC_NO_CACHED;
2698 cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
2699 &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
2700 if (cmd->sg == NULL)
2703 if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
2704 if ((ll < 10) || is_report_sg_limitation()) {
2705 PRINT_INFO("Unable to complete command due to "
2706 "SG IO count limitation (requested %d, "
2707 "available %d, tgt lim %d)", cmd->sg_cnt,
2708 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2714 if (cmd->data_direction != SCST_DATA_BIDI)
2717 cmd->in_sg = sgv_pool_alloc(tgt_dev->pool, cmd->in_bufflen, gfp_mask,
2718 flags, &cmd->in_sg_cnt, &cmd->in_sgv,
2719 &cmd->dev->dev_mem_lim, NULL);
2720 if (cmd->in_sg == NULL)
2723 if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
2724 if ((ll < 10) || is_report_sg_limitation()) {
2725 PRINT_INFO("Unable to complete command due to "
2726 "SG IO count limitation (IN buffer, requested "
2727 "%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
2728 tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
2731 goto out_in_sg_free;
2742 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2748 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2755 static void scst_release_space(struct scst_cmd *cmd)
2759 if (cmd->sgv == NULL)
2762 if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
2763 TRACE_MEM("%s", "*data_buf_alloced set, returning");
2767 sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
2774 if (cmd->in_sgv != NULL) {
2775 sgv_pool_free(cmd->in_sgv, &cmd->dev->dev_mem_lim);
2779 cmd->in_bufflen = 0;
2787 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2790 * This function can switch to the next dst_sg element, so, to copy into
2791 * strictly one dst_sg element, that element must either be the last one
2792 * in the chain, or copy_len must equal dst_sg->length.
2794 static int __sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
2795 size_t *pdst_offs, struct scatterlist *src_sg,
2797 enum km_type d_km_type, enum km_type s_km_type)
2800 struct scatterlist *dst_sg;
2801 size_t src_len, dst_len, src_offs, dst_offs;
2802 struct page *src_page, *dst_page;
2805 copy_len = 0x7FFFFFFF; /* copy all */
2808 dst_len = *pdst_len;
2809 dst_offs = *pdst_offs;
2810 dst_page = sg_page(dst_sg);
2812 src_page = sg_page(src_sg);
2813 src_len = src_sg->length;
2814 src_offs = src_sg->offset;
2817 void *saddr, *daddr;
2820 saddr = kmap_atomic(src_page +
2821 (src_offs >> PAGE_SHIFT), s_km_type) +
2822 (src_offs & ~PAGE_MASK);
2823 daddr = kmap_atomic(dst_page +
2824 (dst_offs >> PAGE_SHIFT), d_km_type) +
2825 (dst_offs & ~PAGE_MASK);
2827 if (((src_offs & ~PAGE_MASK) == 0) &&
2828 ((dst_offs & ~PAGE_MASK) == 0) &&
2829 (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
2830 (copy_len >= PAGE_SIZE)) {
2831 copy_page(daddr, saddr);
2834 n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
2835 PAGE_SIZE - (src_offs & ~PAGE_MASK));
2836 n = min(n, src_len);
2837 n = min(n, dst_len);
2838 n = min_t(size_t, n, copy_len);
2839 memcpy(daddr, saddr, n);
2844 kunmap_atomic(saddr, s_km_type);
2845 kunmap_atomic(daddr, d_km_type);
2855 dst_sg = sg_next(dst_sg);
2858 dst_page = sg_page(dst_sg);
2859 dst_len = dst_sg->length;
2860 dst_offs = dst_sg->offset;
2862 } while (src_len > 0);
2866 *pdst_len = dst_len;
2867 *pdst_offs = dst_offs;
2872 * sg_copy_elem - copy one SG element to another
2873 * @dst_sg: destination SG element
2874 * @src_sg: source SG element
2875 * @copy_len: maximum amount of data to copy. If 0, then copy all.
2876 * @d_km_type: kmap_atomic type for the destination SG
2877 * @s_km_type: kmap_atomic type for the source SG
2880 * Data from the source SG element will be copied to the destination SG
2881 * element. Returns the number of bytes copied. The copy can switch to the
2882 * next dst_sg element, so to copy into strictly one dst_sg element, that
2883 * element must be either the last in the chain, or copy_len == dst_sg->length.
2885 int sg_copy_elem(struct scatterlist *dst_sg, struct scatterlist *src_sg,
2886 size_t copy_len, enum km_type d_km_type,
2887 enum km_type s_km_type)
2889 size_t dst_len = dst_sg->length, dst_offs = dst_sg->offset;
2891 return __sg_copy_elem(&dst_sg, &dst_len, &dst_offs, src_sg,
2892 copy_len, d_km_type, s_km_type);
2897 * sg_copy - copy one SG vector to another
2898 * @dst_sg: destination SG
2899 * @src_sg: source SG
2900 * @copy_len: maximum amount of data to copy. If 0, then copy all.
2901 * @d_km_type: kmap_atomic type for the destination SG
2902 * @s_km_type: kmap_atomic type for the source SG
2905 * Data from the source SG vector will be copied to the destination SG
2906 * vector. The end of each vector is determined by sg_next() returning
2907 * NULL. Returns the number of bytes copied.
2909 int sg_copy(struct scatterlist *dst_sg,
2910 struct scatterlist *src_sg, size_t copy_len,
2911 enum km_type d_km_type, enum km_type s_km_type)
2914 size_t dst_len, dst_offs;
2917 copy_len = 0x7FFFFFFF; /* copy all */
2919 dst_len = dst_sg->length;
2920 dst_offs = dst_sg->offset;
2923 copy_len -= __sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
2924 src_sg, copy_len, d_km_type, s_km_type);
2925 if ((copy_len == 0) || (dst_sg == NULL))
2928 src_sg = sg_next(src_sg);
2929 } while (src_sg != NULL);
2934 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
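
/*
 * Editorial sketch (not part of the original file): a minimal illustration
 * of the compatibility sg_copy() defined above. It copies a two-element
 * source SG vector into a two-element destination vector. The function and
 * page-array names are made up for the example, and it assumes the
 * <linux/scatterlist.h> helpers sg_init_table()/sg_set_page() (2.6.24+).
 */
static inline void example_sg_copy(struct page *src_pages[2],
	struct page *dst_pages[2])
{
	struct scatterlist src[2], dst[2];
	int copied;

	sg_init_table(src, 2);
	sg_set_page(&src[0], src_pages[0], PAGE_SIZE, 0);
	sg_set_page(&src[1], src_pages[1], PAGE_SIZE, 0);

	sg_init_table(dst, 2);
	sg_set_page(&dst[0], dst_pages[0], PAGE_SIZE, 0);
	sg_set_page(&dst[1], dst_pages[1], PAGE_SIZE, 0);

	/* copy_len == 0 means "copy everything", see the kernel-doc above */
	copied = sg_copy(dst, src, 0, KM_USER0, KM_USER1);
	/* copied should be 2 * PAGE_SIZE here */
}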
2936 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
2937 #include <linux/pfn.h>
2939 struct blk_kern_sg_hdr {
2940 struct scatterlist *orig_sgp;
2942 struct sg_table new_sg_table;
2943 struct scatterlist *saved_sg;
2948 #define BLK_KERN_SG_HDR_ENTRIES (1 + (sizeof(struct blk_kern_sg_hdr) - 1) / \
2949 sizeof(struct scatterlist))
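
/*
 * Editorial note (not in the original file): BLK_KERN_SG_HDR_ENTRIES is a
 * ceiling division, i.e. the number of scatterlist slots that must be
 * reserved at the head of an SG table to hold one struct blk_kern_sg_hdr.
 * The check below illustrates the invariant this relies on; the function
 * name is made up for the example.
 */
static inline void example_blk_kern_sg_hdr_entries_check(void)
{
	BUILD_BUG_ON(BLK_KERN_SG_HDR_ENTRIES * sizeof(struct scatterlist) <
		     sizeof(struct blk_kern_sg_hdr));
}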
2952 * blk_rq_unmap_kern_sg - "unmaps" data buffers in the request
2953 * @req: request to unmap
2954 * @do_copy: whether to copy data back from bounce buffers, if any were used
2957 * It frees all additional buffers allocated for SG->BIO mapping.
2959 void blk_rq_unmap_kern_sg(struct request *req, int do_copy)
2961 struct blk_kern_sg_hdr *hdr = (struct blk_kern_sg_hdr *)req->end_io_data;
2966 if (hdr->tail_only) {
2967 /* Only the tail element was copied */
2968 struct scatterlist *saved_sg = hdr->saved_sg;
2969 struct scatterlist *tail_sg = hdr->orig_sgp;
2971 if ((rq_data_dir(req) == READ) && do_copy)
2972 sg_copy_elem(saved_sg, tail_sg, tail_sg->length,
2973 KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
2975 __free_pages(sg_page(tail_sg), get_order(tail_sg->length));
2976 *tail_sg = *saved_sg;
2979 /* The whole SG was copied */
2980 struct sg_table new_sg_table = hdr->new_sg_table;
2981 struct scatterlist *new_sgl = new_sg_table.sgl +
2982 BLK_KERN_SG_HDR_ENTRIES;
2983 struct scatterlist *orig_sgl = hdr->orig_sgp;
2985 if ((rq_data_dir(req) == READ) && do_copy)
2986 sg_copy(orig_sgl, new_sgl, 0, KM_BIO_DST_IRQ,
2989 sg_free_table(&new_sg_table);
2996 static int blk_rq_handle_align_tail_only(struct request *rq,
2997 struct scatterlist *sg_to_copy,
2998 gfp_t gfp, gfp_t page_gfp)
3001 struct scatterlist *tail_sg = sg_to_copy;
3002 struct scatterlist *saved_sg;
3003 struct blk_kern_sg_hdr *hdr;
3007 saved_sg_nents = 1 + BLK_KERN_SG_HDR_ENTRIES;
3009 saved_sg = kmalloc(sizeof(*saved_sg) * saved_sg_nents, gfp);
3010 if (saved_sg == NULL)
3013 sg_init_table(saved_sg, saved_sg_nents);
3015 hdr = (struct blk_kern_sg_hdr *)saved_sg;
3016 saved_sg += BLK_KERN_SG_HDR_ENTRIES;
3017 saved_sg_nents -= BLK_KERN_SG_HDR_ENTRIES;
3019 hdr->tail_only = true;
3020 hdr->orig_sgp = tail_sg;
3021 hdr->saved_sg = saved_sg;
3023 *saved_sg = *tail_sg;
3025 pg = alloc_pages(page_gfp, get_order(tail_sg->length));
3027 goto err_free_saved_sg;
3029 sg_assign_page(tail_sg, pg);
3030 tail_sg->offset = 0;
3032 if (rq_data_dir(rq) == WRITE)
3033 sg_copy_elem(tail_sg, saved_sg, saved_sg->length,
3034 KM_USER1, KM_USER0);
3036 rq->end_io_data = hdr;
3037 rq->cmd_flags |= REQ_COPY_USER;
3050 static int blk_rq_handle_align(struct request *rq, struct scatterlist **psgl,
3051 int *pnents, struct scatterlist *sgl_to_copy,
3052 int nents_to_copy, gfp_t gfp, gfp_t page_gfp)
3055 struct scatterlist *sgl = *psgl;
3056 int nents = *pnents;
3057 struct sg_table sg_table;
3058 struct scatterlist *sg;
3059 struct scatterlist *new_sgl;
3060 size_t len = 0, to_copy;
3062 struct blk_kern_sg_hdr *hdr;
3064 if (sgl != sgl_to_copy) {
3065 /* copy only the last element */
3066 res = blk_rq_handle_align_tail_only(rq, sgl_to_copy,
3070 /* otherwise the whole SG vector needs bouncing; fall through */
3073 for_each_sg(sgl, sg, nents, i)
3077 new_sgl_nents = PFN_UP(len) + BLK_KERN_SG_HDR_ENTRIES;
3079 res = sg_alloc_table(&sg_table, new_sgl_nents, gfp);
3083 new_sgl = sg_table.sgl;
3084 hdr = (struct blk_kern_sg_hdr *)new_sgl;
3085 new_sgl += BLK_KERN_SG_HDR_ENTRIES;
3086 new_sgl_nents -= BLK_KERN_SG_HDR_ENTRIES;
3088 hdr->tail_only = false;
3089 hdr->orig_sgp = sgl;
3090 hdr->new_sg_table = sg_table;
3092 for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3095 pg = alloc_page(page_gfp);
3097 goto err_free_new_sgl;
3099 sg_assign_page(sg, pg);
3100 sg->length = min_t(size_t, PAGE_SIZE, len);
3105 if (rq_data_dir(rq) == WRITE) {
3107 * We need to limit amount of copied data to to_copy, because
3108 * sgl might have the last element not marked as last in
3111 sg_copy(new_sgl, sgl, to_copy, KM_USER0, KM_USER1);
3114 rq->end_io_data = hdr;
3115 rq->cmd_flags |= REQ_COPY_USER;
3118 *pnents = new_sgl_nents;
3124 for_each_sg(new_sgl, sg, new_sgl_nents, i) {
3125 struct page *pg = sg_page(sg);
3130 sg_free_table(&sg_table);
3136 static void bio_map_kern_endio(struct bio *bio, int err)
3141 static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3142 int nents, gfp_t gfp, struct scatterlist **sgl_to_copy,
3146 struct request_queue *q = rq->q;
3147 int rw = rq_data_dir(rq);
3151 struct scatterlist *sg, *prev_sg = NULL;
3152 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
3154 *sgl_to_copy = NULL;
3156 if (unlikely((sgl == NULL) || (nents <= 0))) {
3163 * Let's keep each bio allocation inside a single page to decrease
3164 * probability of failure.
3166 max_nr_vecs = min_t(size_t,
3167 ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
3170 need_new_bio = true;
3172 for_each_sg(sgl, sg, nents, i) {
3173 struct page *page = sg_page(sg);
3174 void *page_addr = page_address(page);
3175 size_t len = sg->length, l;
3176 size_t offset = sg->offset;
3182 * Each segment must be aligned on a DMA boundary and
3183 * must not be on the stack. The last one may have an
3184 * unaligned length as long as the total length is
3185 * aligned to the DMA padding alignment.
3191 if (((sg->offset | l) & queue_dma_alignment(q)) ||
3192 (page_addr && object_is_on_stack(page_addr + sg->offset))) {
3202 bio = bio_kmalloc(gfp, max_nr_vecs);
3209 bio->bi_rw |= 1 << BIO_RW;
3211 bio->bi_end_io = bio_map_kern_endio;
3216 tbio = tbio->bi_next = bio;
3219 bytes = min_t(size_t, len, PAGE_SIZE - offset);
3221 rc = bio_add_pc_page(q, bio, page, bytes, offset);
3223 if (unlikely(need_new_bio || (rc < 0))) {
3230 need_new_bio = true;
3237 need_new_bio = false;
3240 page = nth_page(page, 1);
3249 /* The total length must be aligned to the DMA padding alignment */
3250 if ((tot_len & q->dma_pad_mask) &&
3251 !(rq->cmd_flags & REQ_COPY_USER)) {
3253 if (sgl->offset == 0) {
3254 *sgl_to_copy = prev_sg;
3261 while (hbio != NULL) {
3263 hbio = hbio->bi_next;
3264 bio->bi_next = NULL;
3266 blk_queue_bounce(q, &bio);
3268 res = blk_rq_append_bio(q, rq, bio);
3269 if (unlikely(res != 0)) {
3270 bio->bi_next = hbio;
3276 rq->buffer = rq->data = NULL;
3283 *nents_to_copy = nents;
3286 while (hbio != NULL) {
3288 hbio = hbio->bi_next;
3295 * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
3296 * @rq: request to fill
3298 * @nents: number of elements in @sgl
3299 * @gfp: memory allocation flags
3302 * Data will be mapped directly if possible. Otherwise a bounce
3303 * buffer will be used.
3305 int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
3306 int nents, gfp_t gfp)
3309 struct scatterlist *sg_to_copy = NULL;
3310 int nents_to_copy = 0;
3312 if (unlikely((sgl == NULL) || (sgl->length == 0) ||
3313 (nents <= 0) || (rq->end_io_data != NULL))) {
3319 res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3321 if (unlikely(res != 0)) {
3322 if (sg_to_copy == NULL)
3325 res = blk_rq_handle_align(rq, &sgl, &nents, sg_to_copy,
3326 nents_to_copy, gfp, rq->q->bounce_gfp | gfp);
3327 if (unlikely(res != 0))
3330 res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
3333 blk_rq_unmap_kern_sg(rq, 0);
3338 rq->buffer = rq->data = NULL;
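
/*
 * Editorial sketch (assumption, not from the original file): the typical
 * map -> execute -> unmap sequence for a synchronous caller of
 * blk_rq_map_kern_sg()/blk_rq_unmap_kern_sg(). The function name and the
 * timeout are made up for the example; scsi_execute_async() below is the
 * asynchronous variant actually used by this file.
 */
static inline int example_sync_sg_io(struct request_queue *q,
	unsigned char *cdb, int cdb_len, struct scatterlist *sgl, int nents)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->timeout = 30 * HZ;

	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res == 0) {
		blk_execute_rq(q, NULL, rq, 0);
		res = rq->errors;
		/* do_copy != 0: copy bounced data back for a READ */
		blk_rq_unmap_kern_sg(rq, 1);
	}

	blk_put_request(rq);
	return res;
}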
3344 struct scsi_io_context {
3347 void (*done)(void *data, char *sense, int result, int resid);
3348 char sense[SCSI_SENSE_BUFFERSIZE];
3351 static void scsi_end_async(struct request *req, int error)
3353 struct scsi_io_context *sioc = req->end_io_data;
3355 req->end_io_data = sioc->blk_data;
3356 blk_rq_unmap_kern_sg(req, (error == 0));
3359 sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
3362 __blk_put_request(req->q, req);
3366 * scsi_execute_async - insert request
3367 * @sdev: scsi device
3368 * @cmd: scsi command
3369 * @cmd_len: length of scsi cdb
3370 * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
3371 * @sgl: data buffer scatterlist
3372 * @nents: number of elements in the sgl
3373 * @timeout: request timeout in seconds
3374 * @retries: number of times to retry request
3375 * @privdata: data passed to done()
3376 * @done: callback function when done
3377 * @gfp: memory allocation flags
3378 * @flags: one or more SCSI_ASYNC_EXEC_FLAG_* flags
3380 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
3381 int cmd_len, int data_direction, struct scatterlist *sgl,
3382 int nents, int timeout, int retries, void *privdata,
3383 void (*done)(void *, char *, int, int), gfp_t gfp,
3386 struct request *req;
3387 struct scsi_io_context *sioc;
3389 int write = (data_direction == DMA_TO_DEVICE);
3391 sioc = kzalloc(sizeof(*sioc), gfp);
3393 return DRIVER_ERROR << 24;
3395 req = blk_get_request(sdev->request_queue, write, gfp);
3398 req->cmd_type = REQ_TYPE_BLOCK_PC;
3399 req->cmd_flags |= REQ_QUIET;
3401 if (flags & SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING)
3402 req->cmd_flags |= REQ_COPY_USER;
3405 err = blk_rq_map_kern_sg(req, sgl, nents, gfp);
3410 sioc->blk_data = req->end_io_data;
3411 sioc->data = privdata;
3414 req->cmd_len = cmd_len;
3415 memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
3416 memcpy(req->cmd, cmd, req->cmd_len);
3417 req->sense = sioc->sense;
3419 req->timeout = timeout;
3420 req->retries = retries;
3421 req->end_io_data = sioc;
3423 blk_execute_rq_nowait(req->q, NULL, req,
3424 flags & SCSI_ASYNC_EXEC_FLAG_AT_HEAD, scsi_end_async);
3428 blk_put_request(req);
3432 return DRIVER_ERROR << 24;
3434 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
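
/*
 * Editorial sketch (assumption, not from the original file): issuing a
 * READ CAPACITY(10) through the scsi_execute_async() replacement defined
 * above. The function names and the 8-byte buffer handling are made up for
 * the example; the timeout value is passed straight through to the request.
 */
static void example_read_capacity_done(void *data, char *sense, int result,
	int resid)
{
	if (result != 0)
		printk(KERN_INFO "example: READ CAPACITY failed, result %x\n",
			result);
}

static inline int example_read_capacity(struct scsi_device *sdev, void *buf8)
{
	unsigned char cdb[10] = { 0x25, 0, };	/* READ CAPACITY(10) */
	struct scatterlist sg;

	sg_init_one(&sg, buf8, 8);

	return scsi_execute_async(sdev, cdb, sizeof(cdb), DMA_FROM_DEVICE,
			&sg, 1, 60 * HZ, 3, NULL, example_read_capacity_done,
			GFP_KERNEL, 0);
}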
3436 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
3438 struct scatterlist *src_sg, *dst_sg;
3439 unsigned int to_copy;
3440 int atomic = scst_cmd_atomic(cmd);
3444 if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
3445 if (cmd->data_direction != SCST_DATA_BIDI) {
3446 src_sg = cmd->tgt_sg;
3448 to_copy = cmd->bufflen;
3450 TRACE_MEM("BIDI cmd %p", cmd);
3451 src_sg = cmd->tgt_in_sg;
3452 dst_sg = cmd->in_sg;
3453 to_copy = cmd->in_bufflen;
3457 dst_sg = cmd->tgt_sg;
3458 to_copy = cmd->resp_data_len;
3461 TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, "
3462 "to_copy %d", cmd, copy_dir, src_sg, dst_sg, to_copy);
3464 if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
3466 * This can happen, e.g., with scst_user for a cmd with delayed
3467 * alloc that failed with a Check Condition.
3472 sg_copy(dst_sg, src_sg, to_copy, atomic ? KM_SOFTIRQ0 : KM_USER0,
3473 atomic ? KM_SOFTIRQ1 : KM_USER1);
3480 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
3482 #define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
3483 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
3485 int scst_get_cdb_len(const uint8_t *cdb)
3487 return SCST_GET_CDB_LEN(cdb[0]);
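
/*
 * Editorial sketch (not in the original file): the CDB group (bits 7..5 of
 * the opcode) selects the CDB length from SCST_CDB_LENGTH[]. For example,
 * READ(6) 0x08 is group 0 -> 6 bytes, READ(10) 0x28 is group 1 -> 10 bytes
 * and READ(16) 0x88 is group 4 -> 16 bytes. The function name is made up
 * for the example.
 */
static inline void example_cdb_len(void)
{
	static const uint8_t read6[]  = { 0x08 };
	static const uint8_t read10[] = { 0x28 };
	static const uint8_t read16[] = { 0x88 };

	BUG_ON(scst_get_cdb_len(read6)  != 6);
	BUG_ON(scst_get_cdb_len(read10) != 10);
	BUG_ON(scst_get_cdb_len(read16) != 16);
}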
3490 /* get_trans_len_x extracts x bytes from the CDB as a length, starting at offset off */
3492 static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
3499 static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
3505 static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
3507 cmd->bufflen = READ_CAP_LEN;
3511 static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
3517 if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
3518 cmd->op_name = "READ CAPACITY(16)";
3519 cmd->bufflen = READ_CAP16_LEN;
3520 cmd->op_flags |= SCST_IMPLICIT_HQ;
3522 cmd->op_flags |= SCST_UNKNOWN_LENGTH;
3524 TRACE_EXIT_RES(res);
3528 static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
3534 static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
3536 uint8_t *p = (uint8_t *)cmd->cdb + off;
3540 cmd->bufflen |= ((u32)p[0]) << 8;
3541 cmd->bufflen |= ((u32)p[1]);
3543 switch (cmd->cdb[1] & 0x1f) {
3547 if (cmd->bufflen != 0) {
3548 PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
3549 "allocation length for service action %x",
3550 cmd->bufflen, cmd->cdb[1] & 0x1f);
3556 switch (cmd->cdb[1] & 0x1f) {
3565 cmd->bufflen = max(28, cmd->bufflen);
3568 PRINT_ERROR("READ POSITION: Invalid service action %x",
3569 cmd->cdb[1] & 0x1f);
3577 scst_set_cmd_error(cmd,
3578 SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
3583 static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
3585 cmd->bufflen = (u32)cmd->cdb[off];
3589 static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
3591 cmd->bufflen = (u32)cmd->cdb[off];
3592 if (cmd->bufflen == 0)
3597 static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
3599 const uint8_t *p = cmd->cdb + off;
3602 cmd->bufflen |= ((u32)p[0]) << 8;
3603 cmd->bufflen |= ((u32)p[1]);
3608 static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
3610 const uint8_t *p = cmd->cdb + off;
3613 cmd->bufflen |= ((u32)p[0]) << 16;
3614 cmd->bufflen |= ((u32)p[1]) << 8;
3615 cmd->bufflen |= ((u32)p[2]);
3620 static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
3622 const uint8_t *p = cmd->cdb + off;
3625 cmd->bufflen |= ((u32)p[0]) << 24;
3626 cmd->bufflen |= ((u32)p[1]) << 16;
3627 cmd->bufflen |= ((u32)p[2]) << 8;
3628 cmd->bufflen |= ((u32)p[3]);
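
/*
 * Editorial sketch (not in the original file): the get_trans_len_N()
 * helpers above read an N-byte big-endian length field from the CDB at
 * offset 'off'. The same arithmetic on a local buffer; the function name
 * and field values are made up for the example.
 */
static inline void example_be_length(void)
{
	static const uint8_t field[4] = { 0x00, 0x12, 0x34, 0x56 };
	u32 len = 0;

	len |= ((u32)field[0]) << 24;
	len |= ((u32)field[1]) << 16;
	len |= ((u32)field[2]) << 8;
	len |= ((u32)field[3]);

	/* the value get_trans_len_4() would accumulate into cmd->bufflen */
	BUG_ON(len != 0x00123456);
}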
3633 static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
3639 int scst_get_cdb_info(struct scst_cmd *cmd)
3641 int dev_type = cmd->dev->type;
3644 const struct scst_sdbops *ptr = NULL;
3648 op = cmd->cdb[0]; /* get the opcode */
3650 TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
3651 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
3654 i = scst_scsi_op_list[op];
3655 while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
3656 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
3657 ptr = &scst_scsi_op_table[i];
3658 TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
3659 ptr->ops, ptr->devkey[0], /* disk */
3660 ptr->devkey[1], /* tape */
3661 ptr->devkey[2], /* printer */
3662 ptr->devkey[3], /* cpu */
3663 ptr->devkey[4], /* cdr */
3664 ptr->devkey[5], /* cdrom */
3665 ptr->devkey[6], /* scanner */
3666 ptr->devkey[7], /* worm */
3667 ptr->devkey[8], /* changer */
3668 ptr->devkey[9], /* commdev */
3670 TRACE_DBG("direction=%d flags=%d off=%d",
3679 if (unlikely(ptr == NULL)) {
3680 /* opcode not found or not currently used */
3681 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
3684 cmd->op_flags = SCST_INFO_NOT_FOUND;
3688 cmd->cdb_len = SCST_GET_CDB_LEN(op);
3689 cmd->op_name = ptr->op_name;
3690 cmd->data_direction = ptr->direction;
3691 cmd->op_flags = ptr->flags;
3692 res = (*ptr->get_trans_len)(cmd, ptr->off);
3695 TRACE_EXIT_RES(res);
3698 EXPORT_SYMBOL(scst_get_cdb_info);
3700 /* Packs an SCST LUN back into SCSI form using the peripheral device addressing method */
3701 uint64_t scst_pack_lun(const uint64_t lun)
3704 uint16_t *p = (uint16_t *)&res;
3707 *p = cpu_to_be16(*p);
3709 TRACE_EXIT_HRES((unsigned long)res);
3714 * Routine to extract a LUN number from an 8-byte LUN structure in
3715 * network byte order (BE).
3716 * (see SAM-2, Section 4.12.3 page 40)
3717 * Supports three LUN addressing methods: peripheral, flat space and logical unit.
3719 uint64_t scst_unpack_lun(const uint8_t *lun, int len)
3721 uint64_t res = NO_SUCH_LUN;
3726 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
3728 if (unlikely(len < 2)) {
3729 PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
3737 if ((*((uint64_t *)lun) &
3738 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
3742 if (*((uint16_t *)&lun[2]) != 0)
3746 if (*((uint32_t *)&lun[2]) != 0)
3754 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
3755 switch (address_method) {
3756 case 0: /* peripheral device addressing method */
3759 PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
3760 "peripheral device addressing method 0x%02x, "
3761 "expected 0", *lun);
3768 * Looks like it's legal to handle it as the flat space addressing method
3775 case 1: /* flat space addressing method */
3776 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
3779 case 2: /* logical unit addressing method */
3781 PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
3782 "addressing method 0x%02x, expected 0",
3786 if (*(lun + 1) & 0xe0) {
3787 PRINT_ERROR("Illegal TARGET in LUN logical unit "
3788 "addressing method 0x%02x, expected 0",
3789 (*(lun + 1) & 0xf8) >> 5);
3792 res = *(lun + 1) & 0x1f;
3795 case 3: /* extended logical unit addressing method */
3797 PRINT_ERROR("Unimplemented LUN addressing method %u",