4 * Copyright (C) 2004-2006 Vladislav Bolkhovitin <vst@vlnb.net>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, version 2
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/list.h>
22 #include <linux/spinlock.h>
23 #include <linux/slab.h>
24 #include <linux/sched.h>
25 #include <asm/unistd.h>
26 #include <asm/string.h>
29 #include <linux/highmem.h>
32 #include "scst_debug.h"
34 #include "scst_priv.h"
37 #include "scst_cdbprobe.h"
39 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
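/*
 * Note: masked_status below is kept per the old Linux SCSI mid-layer
 * convention of status >> 1 (e.g. SAM_STAT_CHECK_CONDITION, 0x02,
 * becomes CHECK_CONDITION, 0x01).
 */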
41 void scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
46 cmd->masked_status = status >> 1;
47 cmd->host_status = DID_OK;
49 cmd->data_direction = SCST_DATA_NONE;
50 cmd->tgt_resp_flags = SCST_TSC_FLAG_STATUS;
51 cmd->resp_data_len = 0;
57 void scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
61 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
62 scst_set_sense(cmd->sense_buffer, sizeof(cmd->sense_buffer),
64 TRACE_BUFFER("Sense set", cmd->sense_buffer, sizeof(cmd->sense_buffer));
70 void scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
75 scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
77 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
78 memcpy(cmd->sense_buffer, sense, min((unsigned long)len,
79 (unsigned long)sizeof(cmd->sense_buffer)));
80 TRACE_BUFFER("Sense set", cmd->sense_buffer, sizeof(cmd->sense_buffer));
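/*
 * Per SAM, TASK SET FULL is the appropriate response when the initiator
 * already has other commands outstanding; plain BUSY is used otherwise.
 * The init_phase check below also keeps us on BUSY while the session is
 * still initializing, since the command count is not yet meaningful then.
 */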
86 void scst_set_busy(struct scst_cmd *cmd)
90 if ((cmd->sess->sess_cmd_count <= 1) ||
91 (cmd->sess->init_phase != SCST_SESS_IPH_READY))
93 scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
94 TRACE_MGMT_DBG("Sending BUSY status to initiator %s "
95 "(cmds count %d, queue_type %x, sess->init_phase %d)",
96 cmd->sess->initiator_name, cmd->sess->sess_cmd_count,
97 cmd->queue_type, cmd->sess->init_phase);
99 scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
100 TRACE_MGMT_DBG("Sending QUEUE_FULL status to initiator %s "
101 "(cmds count %d, queue_type %x, sess->init_phase %d)",
102 cmd->sess->initiator_name, cmd->sess->sess_cmd_count,
103 cmd->queue_type, cmd->sess->init_phase);
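/*
 * Shortens the SG list so that exactly resp_data_len bytes are visible:
 * the entry where the limit falls is truncated and the original
 * count/length are saved, to be restored later by
 * scst_check_restore_sg_buff().
 */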
110 void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
116 scst_check_restore_sg_buff(cmd);
117 cmd->resp_data_len = resp_data_len;
119 if (resp_data_len == cmd->bufflen)
123 for (i = 0; i < cmd->sg_cnt; i++) {
124 l += cmd->sg[i].length;
125 if (l >= resp_data_len) {
126 int left = resp_data_len - (l - cmd->sg[i].length);
127 TRACE(TRACE_SG, "cmd %p (tag %d), "
128 "resp_data_len %d, i %d, cmd->sg[i].length %d, "
129 "left %d", cmd, cmd->tag, resp_data_len, i,
130 cmd->sg[i].length, left);
131 cmd->orig_sg_cnt = cmd->sg_cnt;
132 cmd->orig_sg_entry = i;
133 cmd->orig_entry_len = cmd->sg[i].length;
135 cmd->sg[i].length = left;
136 cmd->sg_buff_modified = 1;
146 struct scst_device *scst_alloc_device(int gfp_mask)
148 struct scst_device *dev;
152 dev = kzalloc(sizeof(*dev), gfp_mask);
153 TRACE_MEM("kzalloc() for dev (%zd): %p", sizeof(*dev), dev);
155 TRACE(TRACE_OUT_OF_MEM, "%s",
156 "Allocation of scst_device failed");
160 spin_lock_init(&dev->dev_lock);
161 atomic_set(&dev->on_dev_count, 0);
162 INIT_LIST_HEAD(&dev->blocked_cmd_list);
163 INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
164 INIT_LIST_HEAD(&dev->dev_acg_dev_list);
165 init_waitqueue_head(&dev->on_dev_waitQ);
166 dev->dev_double_ua_possible = 1;
167 dev->dev_serialized = 1;
170 TRACE_EXIT_HRES(dev);
174 void scst_free_device(struct scst_device *dev)
179 if (!list_empty(&dev->dev_tgt_dev_list) ||
180 !list_empty(&dev->dev_acg_dev_list))
182 PRINT_ERROR_PR("%s: dev_tgt_dev_list or dev_acg_dev_list "
183 "is not empty!", __FUNCTION__);
188 TRACE_MEM("kfree for dev: %p", dev);
195 struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
196 struct scst_device *dev, lun_t lun)
198 struct scst_acg_dev *res;
202 res = kmem_cache_alloc(scst_acgd_cachep, GFP_KERNEL);
203 TRACE_MEM("kmem_cache_alloc() for acg_dev (%zd): %p", sizeof(*res), res);
205 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_acg_dev failed");
208 memset(res, 0, sizeof(*res));
215 TRACE_EXIT_HRES(res);
219 /* scst_mutex supposed to be held */
220 void scst_free_acg_dev(struct scst_acg_dev *acg_dev)
224 TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
226 list_del(&acg_dev->acg_dev_list_entry);
227 list_del(&acg_dev->dev_acg_dev_list_entry);
229 TRACE_MEM("kfree for acg_dev: %p", acg_dev);
230 kmem_cache_free(scst_acgd_cachep, acg_dev);
236 /* scst_mutex supposed to be held */
237 struct scst_acg *scst_alloc_add_acg(const char *acg_name)
239 struct scst_acg *acg;
243 acg = kzalloc(sizeof(*acg), GFP_KERNEL);
244 TRACE_MEM("kzalloc() for acg (%zd): %p", sizeof(*acg), acg);
246 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of acg failed");
250 INIT_LIST_HEAD(&acg->acg_dev_list);
251 INIT_LIST_HEAD(&acg->acg_sess_list);
252 INIT_LIST_HEAD(&acg->acn_list);
253 acg->acg_name = acg_name;
255 TRACE_DBG("Adding acg %s to scst_acg_list", acg_name);
256 list_add_tail(&acg->scst_acg_list_entry, &scst_acg_list);
259 TRACE_EXIT_HRES(acg);
263 /* scst_mutex supposed to be held */
264 int scst_destroy_acg(struct scst_acg *acg)
266 struct scst_acn *n, *nn;
267 struct scst_acg_dev *acg_dev, *acg_dev_tmp;
272 if (!list_empty(&acg->acg_sess_list)) {
273 PRINT_ERROR_PR("%s: acg_sess_list is not empty!", __FUNCTION__);
278 __scst_suspend_activity();
280 TRACE_DBG("Removing acg %s from scst_acg_list", acg->acg_name);
281 list_del(&acg->scst_acg_list_entry);
283 /* Freeing acg_devs */
284 list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
287 struct scst_tgt_dev *tgt_dev, *tt;
288 list_for_each_entry_safe(tgt_dev, tt,
289 &acg_dev->dev->dev_tgt_dev_list,
290 dev_tgt_dev_list_entry)
292 if (tgt_dev->acg_dev == acg_dev)
293 scst_free_tgt_dev(tgt_dev);
295 scst_free_acg_dev(acg_dev);
298 __scst_resume_activity();
301 list_for_each_entry_safe(n, nn, &acg->acn_list,
304 list_del(&n->acn_list_entry);
305 TRACE_MEM("kfree() for scst_acn->name: %p", n->name);
307 TRACE_MEM("kfree() for scst_acn: %p", n);
310 INIT_LIST_HEAD(&acg->acn_list);
312 TRACE_MEM("kfree for acg: %p", acg);
321 * No spin locks supposed to be held, scst_mutex - held.
322 * The activity is suspended.
324 static struct scst_tgt_dev *scst_alloc_add_tgt_dev(struct scst_session *sess,
325 struct scst_acg_dev *acg_dev)
327 struct scst_tgt_dev *tgt_dev;
328 struct scst_device *dev = acg_dev->dev;
333 tgt_dev = kmem_cache_alloc(scst_tgtd_cachep, GFP_KERNEL);
334 TRACE_MEM("kmem_cache_alloc(GFP_KERNEL) for tgt_dev (%zd): %p",
335 sizeof(*tgt_dev), tgt_dev);
337 memset(tgt_dev, 0, sizeof(*tgt_dev));
339 TRACE(TRACE_OUT_OF_MEM, "%s",
340 "Allocation of scst_tgt_dev failed");
344 tgt_dev->acg_dev = acg_dev;
345 tgt_dev->sess = sess;
346 tgt_dev->cmd_count = 0;
348 if (dev->scsi_dev != NULL) {
349 TRACE(TRACE_DEBUG, "host=%d, channel=%d, id=%d, lun=%d, "
350 "SCST lun=%Ld", dev->scsi_dev->host->host_no,
351 dev->scsi_dev->channel, dev->scsi_dev->id,
352 dev->scsi_dev->lun, (uint64_t)tgt_dev->acg_dev->lun);
355 TRACE(TRACE_MINOR, "Virtual device SCST lun=%Ld",
356 (uint64_t)tgt_dev->acg_dev->lun);
359 spin_lock_init(&tgt_dev->tgt_dev_lock);
360 INIT_LIST_HEAD(&tgt_dev->UA_list);
361 spin_lock_init(&tgt_dev->sn_lock);
362 INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
363 INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
365 spin_lock_bh(&scst_temp_UA_lock);
366 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
367 SCST_LOAD_SENSE(scst_sense_reset_UA));
368 scst_alloc_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA));
369 spin_unlock_bh(&scst_temp_UA_lock);
371 tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
373 if (dev->handler && dev->handler->attach_tgt) {
374 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
376 res = dev->handler->attach_tgt(tgt_dev);
377 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
379 PRINT_ERROR_PR("Device handler's %s attach_tgt() "
380 "failed: %d", dev->handler->name, res);
385 list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
386 if (dev->dev_reserved)
387 __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
389 list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
390 &sess->sess_tgt_dev_list);
397 TRACE_MEM("kfree for tgt_dev: %p", tgt_dev);
398 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
403 static void scst_send_release(struct scst_tgt_dev *tgt_dev);
406 * No locks supposed to be held, scst_mutex - held.
407 * The activity is suspended.
409 void scst_reset_tgt_dev(struct scst_tgt_dev *tgt_dev, int nexus_loss)
411 struct scst_device *dev = tgt_dev->acg_dev->dev;
413 if (dev->dev_reserved &&
414 !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags))
416 /* This is the one that holds the reservation */
417 struct scst_tgt_dev *tgt_dev_tmp;
418 list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
419 dev_tgt_dev_list_entry)
421 clear_bit(SCST_TGT_DEV_RESERVED,
422 &tgt_dev_tmp->tgt_dev_flags);
424 dev->dev_reserved = 0;
426 scst_send_release(tgt_dev);
429 spin_lock_bh(&scst_temp_UA_lock);
431 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
432 SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
434 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
435 SCST_LOAD_SENSE(scst_sense_reset_UA));
437 scst_check_set_UA(tgt_dev, scst_temp_UA, sizeof(scst_temp_UA));
438 spin_unlock_bh(&scst_temp_UA_lock);
442 * No locks supposed to be held, scst_mutex - held.
443 * The activity is suspended.
445 static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
447 struct scst_device *dev = tgt_dev->acg_dev->dev;
451 tm_dbg_deinit_tgt_dev(tgt_dev);
453 list_del(&tgt_dev->dev_tgt_dev_list_entry);
454 list_del(&tgt_dev->sess_tgt_dev_list_entry);
456 scst_reset_tgt_dev(tgt_dev, 0);
457 scst_free_all_UA(tgt_dev);
459 if (dev->handler && dev->handler->detach_tgt) {
460 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
462 dev->handler->detach_tgt(tgt_dev);
463 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
466 TRACE_MEM("kfree for tgt_dev: %p", tgt_dev);
467 kmem_cache_free(scst_tgtd_cachep, tgt_dev);
473 /* scst_mutex supposed to be held */
474 int scst_sess_alloc_tgt_devs(struct scst_session *sess)
477 struct scst_acg_dev *acg_dev;
478 struct scst_tgt_dev *tgt_dev;
482 __scst_suspend_activity();
484 INIT_LIST_HEAD(&sess->sess_tgt_dev_list);
485 list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
488 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
489 if (tgt_dev == NULL) {
496 __scst_resume_activity();
502 scst_sess_free_tgt_devs(sess);
506 /* scst_mutex supposed to be held and activity suspended */
507 void scst_sess_free_tgt_devs(struct scst_session *sess)
509 struct scst_tgt_dev *tgt_dev, *t;
513 /* The session is going down, no users, so no locks */
514 list_for_each_entry_safe(tgt_dev, t, &sess->sess_tgt_dev_list,
515 sess_tgt_dev_list_entry)
517 scst_free_tgt_dev(tgt_dev);
519 INIT_LIST_HEAD(&sess->sess_tgt_dev_list);
525 /* scst_mutex supposed to be held */
526 int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
530 struct scst_acg_dev *acg_dev;
531 struct scst_tgt_dev *tgt_dev;
532 struct scst_session *sess;
533 LIST_HEAD(tmp_tgt_dev_list);
537 INIT_LIST_HEAD(&tmp_tgt_dev_list);
540 list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
541 if (acg_dev->dev == dev) {
542 PRINT_ERROR_PR("Device is already in group %s",
550 acg_dev = scst_alloc_acg_dev(acg, dev, lun);
551 if (acg_dev == NULL) {
555 acg_dev->rd_only_flag = read_only;
557 __scst_suspend_activity();
559 TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
561 list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
562 list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
564 list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry)
566 tgt_dev = scst_alloc_add_tgt_dev(sess, acg_dev);
567 if (tgt_dev == NULL) {
571 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
576 __scst_resume_activity();
583 list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
584 extra_tgt_dev_list_entry)
586 scst_free_tgt_dev(tgt_dev);
588 scst_free_acg_dev(acg_dev);
592 /* scst_mutex supposed to be held */
593 int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
596 struct scst_acg_dev *acg_dev = NULL, *a;
597 struct scst_tgt_dev *tgt_dev, *tt;
601 list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
608 if (acg_dev == NULL) {
609 PRINT_ERROR_PR("Device is not found in group %s", acg->acg_name);
614 __scst_suspend_activity();
616 list_for_each_entry_safe(tgt_dev, tt, &dev->dev_tgt_dev_list,
617 dev_tgt_dev_list_entry)
619 if (tgt_dev->acg_dev == acg_dev)
620 scst_free_tgt_dev(tgt_dev);
622 scst_free_acg_dev(acg_dev);
624 __scst_resume_activity();
631 /* scst_mutex supposed to be held */
632 int scst_acg_add_name(struct scst_acg *acg, const char *name)
641 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
643 if (strcmp(n->name, name) == 0) {
644 PRINT_ERROR_PR("Name %s already exists in access "
645 "control group %s", name, acg->acg_name);
651 n = kmalloc(sizeof(*n), GFP_KERNEL);
652 TRACE_MEM("kmalloc(GFP_KERNEL) for scst_acn (%zd): %p", sizeof(*n), n);
654 PRINT_ERROR_PR("%s", "Unable to allocate scst_acn");
660 nm = kmalloc(len + 1, GFP_KERNEL);
661 TRACE_MEM("kmalloc(GFP_KERNEL) for scst_acn->name (%d): %p",
664 PRINT_ERROR_PR("%s", "Unable to allocate scst_acn->name");
672 list_add_tail(&n->acn_list_entry, &acg->acn_list);
679 TRACE_MEM("kfree() for scst_acn: %p", n);
684 /* scst_mutex supposed to be held */
685 int scst_acg_remove_name(struct scst_acg *acg, const char *name)
692 list_for_each_entry(n, &acg->acn_list, acn_list_entry)
694 if (strcmp(n->name, name) == 0) {
695 list_del(&n->acn_list_entry);
696 TRACE_MEM("kfree() for scst_acn->name: %p", n->name);
698 TRACE_MEM("kfree() for scst_acn: %p", n);
706 PRINT_ERROR_PR("Unable to find name %s in access control "
707 "group %s", name, acg->acg_name);
714 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
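/*
 * On older kernels RELEASE is sent through the legacy scsi_request API
 * here; the scsi_execute()-based variant for 2.6.18+ follows below.
 */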
715 static void scst_req_done(struct scsi_cmnd *scsi_cmd)
717 struct scsi_request *req;
721 if (scsi_cmd && (req = scsi_cmd->sc_request)) {
723 if (req->sr_bufflen) {
724 TRACE_MEM("kfree for req->sr_buffer: %p",
726 kfree(req->sr_buffer);
728 scsi_release_request(req);
736 static void scst_send_release(struct scst_tgt_dev *tgt_dev)
738 struct scsi_request *req;
739 struct scsi_device *scsi_dev;
744 if (tgt_dev->acg_dev->dev->scsi_dev == NULL)
747 scsi_dev = tgt_dev->acg_dev->dev->scsi_dev;
749 req = scsi_allocate_request(scsi_dev, GFP_KERNEL);
751 PRINT_ERROR_PR("Allocation of scsi_request failed: unable "
752 "to RELEASE device %d:%d:%d:%d",
753 scsi_dev->host->host_no, scsi_dev->channel,
754 scsi_dev->id, scsi_dev->lun);
758 memset(cdb, 0, sizeof(cdb));
760 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
761 ((scsi_dev->lun << 5) & 0xe0) : 0;
762 memcpy(req->sr_cmnd, cdb, sizeof(cdb));
763 req->sr_cmd_len = sizeof(cdb);
764 req->sr_data_direction = SCST_DATA_NONE;
767 req->sr_buffer = NULL;
768 req->sr_request->rq_disk = tgt_dev->acg_dev->dev->rq_disk;
769 req->sr_sense_buffer[0] = 0;
771 TRACE(TRACE_DEBUG | TRACE_SCSI, "Sending RELEASE req %p to SCSI "
773 scst_do_req(req, req->sr_cmnd, (void *)req->sr_buffer, req->sr_bufflen,
774 scst_req_done, SCST_DEFAULT_TIMEOUT, 3);
780 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
781 static void scst_send_release(struct scst_tgt_dev *tgt_dev)
783 struct scsi_device *scsi_dev;
784 unsigned char cdb[6];
785 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
790 if (tgt_dev->acg_dev->dev->scsi_dev == NULL)
793 scsi_dev = tgt_dev->acg_dev->dev->scsi_dev;
795 memset(cdb, 0, sizeof(cdb));
797 cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
798 ((scsi_dev->lun << 5) & 0xe0) : 0;
800 TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to SCSI "
802 rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
803 sense, SCST_DEFAULT_TIMEOUT,
806 PRINT_INFO_PR("scsi_execute() failed: %d", rc);
814 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
816 struct scst_session *scst_alloc_session(struct scst_tgt *tgt, int gfp_mask,
817 const char *initiator_name)
819 struct scst_session *sess;
825 sess = kmem_cache_alloc(scst_sess_cachep, gfp_mask);
826 TRACE_MEM("kmem_cache_alloc() for sess (%zd): %p", sizeof(*sess), sess);
828 memset(sess, 0, sizeof(*sess));
830 TRACE(TRACE_OUT_OF_MEM, "%s",
831 "Allocation of scst_session failed");
835 sess->init_phase = SCST_SESS_IPH_INITING;
836 atomic_set(&sess->refcnt, 0);
837 INIT_LIST_HEAD(&sess->sess_tgt_dev_list);
838 INIT_LIST_HEAD(&sess->search_cmd_list);
840 INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
841 INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
843 len = strlen(initiator_name);
844 nm = kmalloc(len + 1, gfp_mask);
845 TRACE_MEM("kmalloc(GFP_KERNEL) for sess->initiator_name (%d): %p",
848 PRINT_ERROR_PR("%s", "Unable to allocate sess->initiator_name");
852 strcpy(nm, initiator_name);
853 sess->initiator_name = nm;
860 TRACE_MEM("kfree() for sess: %p", sess);
861 kmem_cache_free(scst_sess_cachep, sess);
866 void scst_free_session(struct scst_session *sess)
871 TRACE_DBG("Removing sess %p from the list", sess);
872 list_del(&sess->sess_list_entry);
873 TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
874 list_del(&sess->acg_sess_list_entry);
876 __scst_suspend_activity();
877 scst_sess_free_tgt_devs(sess);
878 __scst_resume_activity();
880 wake_up_all(&sess->tgt->unreg_waitQ);
884 TRACE_MEM("kfree for sess->initiator_name: %p", sess->initiator_name);
885 kfree(sess->initiator_name);
887 TRACE_MEM("kfree for sess: %p", sess);
888 kmem_cache_free(scst_sess_cachep, sess);
894 void scst_free_session_callback(struct scst_session *sess)
896 struct semaphore *shm;
900 TRACE_DBG("Freeing session %p", sess);
902 shm = sess->shutdown_mutex;
904 if (sess->unreg_done_fn) {
905 TRACE_DBG("Calling unreg_done_fn(%p)", sess);
906 sess->unreg_done_fn(sess);
907 TRACE_DBG("%s", "unreg_done_fn() returned");
909 scst_free_session(sess);
918 void scst_sched_session_free(struct scst_session *sess)
924 spin_lock_irqsave(&scst_mgmt_lock, flags);
925 TRACE_DBG("Adding sess %p to scst_sess_mgmt_list", sess);
926 list_add_tail(&sess->sess_mgmt_list_entry, &scst_sess_mgmt_list);
927 spin_unlock_irqrestore(&scst_mgmt_lock, flags);
929 wake_up(&scst_mgmt_waitQ);
935 struct scst_cmd *scst_alloc_cmd(int gfp_mask)
937 struct scst_cmd *cmd;
941 cmd = kmem_cache_alloc(scst_cmd_cachep, gfp_mask);
942 TRACE_MEM("kmem_cache_alloc() for cmd (%zd): %p", sizeof(*cmd), cmd);
944 memset(cmd, 0, sizeof(*cmd));
946 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
950 cmd->queue_type = SCST_CMD_QUEUE_UNTAGGED;
951 cmd->timeout = SCST_DEFAULT_TIMEOUT;
952 cmd->retries = SCST_DEFAULT_RETRIES;
954 cmd->tgt_resp_flags = SCST_TSC_FLAG_STATUS;
955 cmd->resp_data_len = -1;
962 static void scst_destroy_put_cmd(struct scst_cmd *cmd)
964 scst_sess_put(cmd->sess);
966 /* At this point tgt_dev can be dead, but the pointer remains non-NULL */
967 if (likely(cmd->tgt_dev != NULL))
968 scst_dec_cmd_count();
970 scst_destroy_cmd(cmd);
974 /* No locks supposed to be held. Must be called only from scst_finish_cmd()! */
975 void scst_free_cmd(struct scst_cmd *cmd)
981 BUG_ON(cmd->blocking);
983 #if defined(EXTRACHECKS) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
985 PRINT_ERROR_PR("%s: %s", __FUNCTION__, "Cmd with unfreed "
987 scst_release_request(cmd);
991 TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
992 cmd->tgtt->on_free_cmd(cmd);
993 TRACE_DBG("%s", "Target's on_free_cmd() returned");
995 if (likely(cmd->dev != NULL)) {
996 struct scst_dev_type *handler = cmd->dev->handler;
997 if (handler->on_free_cmd != NULL) {
998 TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
1000 handler->on_free_cmd(cmd);
1001 TRACE_DBG("Dev handler %s on_free_cmd() returned",
1006 scst_release_space(cmd);
1008 if (likely(cmd->tgt_dev != NULL)) {
1010 if (cmd->sent_to_midlev == 0) {
1011 PRINT_ERROR_PR("Finishing not executed cmd (opcode %d, "
1012 "target %s, lun %Ld, sn %d, expected_sn %d)",
1013 cmd->cdb[0], cmd->tgtt->name, (uint64_t)cmd->lun,
1014 cmd->sn, cmd->tgt_dev->expected_sn);
1015 scst_inc_expected_sn_unblock(cmd->tgt_dev, cmd, 0);
1018 if (unlikely(test_bit(SCST_CMD_OUT_OF_SN,
1021 spin_lock_bh(&cmd->tgt_dev->sn_lock);
1022 set_bit(SCST_CMD_CAN_BE_DESTROYED,
1024 barrier(); /* to reread SCST_CMD_OUT_OF_SN */
1025 destroy = !test_bit(SCST_CMD_OUT_OF_SN,
1027 TRACE(TRACE_SCSI_SERIALIZING, "Out of SN "
1028 "cmd %p (tag %d, sn %d), destroy=%d", cmd,
1029 cmd->tag, cmd->sn, destroy);
1030 spin_unlock_bh(&cmd->tgt_dev->sn_lock);
1034 if (likely(destroy))
1035 scst_destroy_put_cmd(cmd);
1041 /* No locks supposed to be held. */
1042 void scst_check_retries(struct scst_tgt *tgt, int processible_env)
1044 int need_wake_up = 0;
1049 * We don't worry about finished_cmds overflowing, because we only
1050 * check whether it has changed
1052 atomic_inc(&tgt->finished_cmds);
1053 smp_mb__after_atomic_inc();
1054 if (unlikely(tgt->retry_cmds > 0))
1056 struct scst_cmd *c, *tc;
1057 unsigned long flags;
1059 TRACE(TRACE_RETRY, "Checking retry cmd list (retry_cmds %d)",
1062 spin_lock_irqsave(&tgt->tgt_lock, flags);
1063 spin_lock(&scst_list_lock);
1065 list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
1070 TRACE(TRACE_RETRY, "Moving retry cmd %p to active cmd "
1071 "list (retry_cmds left %d)", c, tgt->retry_cmds);
1072 list_move(&c->cmd_list_entry, &scst_active_cmd_list);
1075 if (need_wake_up >= 2) /* "slow start" */
1079 spin_unlock(&scst_list_lock);
1080 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1083 if (need_wake_up && !processible_env)
1084 wake_up(&scst_list_waitQ);
1090 void scst_tgt_retry_timer_fn(unsigned long arg)
1092 struct scst_tgt *tgt = (struct scst_tgt*)arg;
1093 unsigned long flags;
1095 TRACE(TRACE_RETRY, "Retry timer expired (retry_cmds %d)",
1098 spin_lock_irqsave(&tgt->tgt_lock, flags);
1099 tgt->retry_timer_active = 0;
1100 spin_unlock_irqrestore(&tgt->tgt_lock, flags);
1102 scst_check_retries(tgt, 0);
1108 struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(int gfp_mask)
1110 struct scst_mgmt_cmd *mcmd;
1114 mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
1115 TRACE_MEM("mempool_alloc() for mgmt cmd (%zd): %p", sizeof(*mcmd),
1118 PRINT_ERROR("%s", "Allocation of management command "
1119 "failed, some commands and their data could leak");
1122 memset(mcmd, 0, sizeof(*mcmd));
1129 void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd, int del)
1131 unsigned long flags;
1135 spin_lock_irqsave(&scst_list_lock, flags);
1137 list_del(&mcmd->mgmt_cmd_list_entry);
1138 mcmd->sess->sess_cmd_count--;
1139 spin_unlock_irqrestore(&scst_list_lock, flags);
1141 scst_sess_put(mcmd->sess);
1143 if (mcmd->mcmd_tgt_dev != NULL)
1144 scst_dec_cmd_count();
1146 TRACE_MEM("mempool_free for mgmt cmd: %p", mcmd);
1147 mempool_free(mcmd, scst_mgmt_mempool);
1153 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
1154 int scst_alloc_request(struct scst_cmd *cmd)
1157 struct scsi_request *req;
1158 int gm = scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL;
1162 /* cmd->dev->scsi_dev must be non-NULL here */
1163 req = scsi_allocate_request(cmd->dev->scsi_dev, gm);
1165 TRACE(TRACE_OUT_OF_MEM, "%s",
1166 "Allocation of scsi_request failed");
1171 cmd->scsi_req = req;
1173 memcpy(req->sr_cmnd, cmd->cdb, cmd->cdb_len);
1174 req->sr_cmd_len = cmd->cdb_len;
1175 req->sr_data_direction = cmd->data_direction;
1176 req->sr_use_sg = cmd->sg_cnt;
1177 req->sr_bufflen = cmd->bufflen;
1178 req->sr_buffer = cmd->sg;
1179 req->sr_request->rq_disk = cmd->dev->rq_disk;
1180 req->sr_sense_buffer[0] = 0;
1182 cmd->scsi_req->upper_private_data = cmd;
1189 void scst_release_request(struct scst_cmd *cmd)
1191 scsi_release_request(cmd->scsi_req);
1192 cmd->scsi_req = NULL;
1196 int scst_alloc_space(struct scst_cmd *cmd)
1198 int tgt_sg = cmd->tgt->sg_tablesize;
1202 int ini_unchecked_isa_dma, ini_use_clustering;
1203 struct sgv_pool *pool;
1204 struct sgv_pool_obj *sgv;
1208 if (cmd->data_buf_alloced) {
1209 TRACE_MEM("%s", "data_buf_alloced set, returning");
1210 BUG_ON(cmd->sg == NULL);
1215 gfp_mask = __GFP_NOWARN;
1216 gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
1217 pool = &scst_sgv.norm;
1219 if (cmd->dev->scsi_dev != NULL) {
1220 ini_sg = cmd->dev->scsi_dev->host->sg_tablesize;
1221 ini_unchecked_isa_dma =
1222 cmd->dev->scsi_dev->host->unchecked_isa_dma;
1223 ini_use_clustering =
1224 (cmd->dev->scsi_dev->host->use_clustering ==
1228 ini_sg = (1 << 15) /* infinite */;
1229 ini_unchecked_isa_dma = 0;
1230 ini_use_clustering = 0;
1233 if (cmd->tgtt->use_clustering || ini_use_clustering)
1235 TRACE_MEM("%s", "Use clustering");
1236 pool = &scst_sgv.norm_clust;
1239 if (cmd->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma) {
1240 TRACE_MEM("%s", "Use ISA DMA memory");
1241 gfp_mask |= GFP_DMA;
1242 pool = &scst_sgv.dma;
1245 gfp_mask |= __GFP_HIGHMEM;
1246 pool = &scst_sgv.highmem;
1250 sgv = sgv_pool_alloc(pool, cmd->bufflen, gfp_mask, &cmd->sg_cnt);
1254 if (unlikely(cmd->sg_cnt > ini_sg)) {
1257 PRINT_INFO("Unable to complete command due to "
1258 "underlying device SG IO count limitation "
1259 "(requested %d, available %d)", cmd->sg_cnt,
1266 if (unlikely(cmd->sg_cnt > tgt_sg)) {
1269 PRINT_INFO("Unable to complete command due to "
1270 "target device %s SG IO count limitation "
1271 "(requested %d, available %d)", cmd->tgtt->name,
1272 cmd->sg_cnt, tgt_sg);
1279 cmd->sg = sgv_pool_sg(sgv);
1293 void scst_release_space(struct scst_cmd *cmd)
1297 if (cmd->data_buf_alloced) {
1298 TRACE_MEM("%s", "data_buf_alloced set, returning");
1303 scst_check_restore_sg_buff(cmd);
1304 sgv_pool_free(cmd->sgv);
1318 int __scst_get_buf(struct scst_cmd *cmd, uint8_t **buf)
1321 struct scatterlist *sg = cmd->sg;
1322 int i = cmd->get_sg_buf_entry_num;
1328 if (i >= cmd->sg_cnt)
1332 * HIGHMEM pages are not merged (clustered), so if it's not a
1333 * HIGHMEM page, kmap() is the same as page_address()
1335 if (scst_cmd_atomic(cmd)) {
1341 *buf = kmap_atomic(sg[i].page, km);
1343 *buf = kmap(sg[i].page);
1345 *buf = page_address(sg[i].page);
1348 cmd->get_sg_buf_entry_num++;
1351 TRACE_EXIT_RES(res);
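/*
 * A sketch of the intended call pattern, assuming the usual
 * scst_get_buf_first()/scst_get_buf_next()/scst_put_buf() wrappers
 * around __scst_get_buf():
 *
 *	int len;
 *	uint8_t *buf;
 *
 *	len = scst_get_buf_first(cmd, &buf);
 *	while (len > 0) {
 *		... process len bytes at buf ...
 *		scst_put_buf(cmd, buf);
 *		len = scst_get_buf_next(cmd, &buf);
 *	}
 */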
1355 static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, -1, 16, 12, -1, -1 };
1357 #define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
1358 #define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
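/*
 * Example: READ(10) has opcode 0x28, so its group is (0x28 >> 5) & 0x7 = 1
 * and SCST_CDB_LENGTH[1] gives a 10-byte CDB.
 */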
1360 int scst_get_cdb_len(const uint8_t *cdb)
1362 return SCST_GET_CDB_LEN(cdb[0]);
1365 int scst_get_cdb_info(const uint8_t *cdb_p, int dev_type,
1366 struct scst_info_cdb *info_p)
1370 const struct scst_sdbops *ptr = NULL;
1374 memset(info_p, 0, sizeof(*info_p));
1375 info_p->direction = SCST_DATA_NONE;
1376 info_p->op_name = "NOOP";
1377 op = *cdb_p; /* get clear opcode */
1379 TRACE(TRACE_SCSI, "opcode=%02x, cdblen=%d bytes, tblsize=%zd, "
1380 "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
1383 i = scst_scsi_op_list[op];
1384 while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
1385 if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
1386 ptr = &scst_scsi_op_table[i];
1388 TRACE(TRACE_SCSI, "op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
1389 ptr->ops, ptr->devkey[0], /* disk */
1390 ptr->devkey[1], /* tape */
1391 ptr->devkey[2], /* printer */
1392 ptr->devkey[3], /* cpu */
1393 ptr->devkey[4], /* cdr */
1394 ptr->devkey[5], /* cdrom */
1395 ptr->devkey[6], /* scanner */
1396 ptr->devkey[7], /* worm */
1397 ptr->devkey[8], /* changer */
1398 ptr->devkey[9], /* commdev */
1402 "direction=%d size_field_len=%d fixed=%d flag1=%d flag2=%d",
1404 ptr->size_field_len,
1405 ptr->fixed, ptr->flag1, ptr->flag2);
1413 /* opcode not found or no longer used */
1414 TRACE(TRACE_SCSI, "Unknown opcode 0x%x for type %d", op,
1420 info_p->cdb_len = SCST_GET_CDB_LEN(op);
1421 info_p->op_name = ptr->op_name;
1423 info_p->direction = ptr->direction;
1424 if (info_p->direction == SCST_DATA_NONE)
1428 info_p->flags = ptr->fixed;
1431 * The CDB length is needed, because we must know the offsets:
1432 * 1) for 6-byte CDBs the length field is 1 or 3 bytes (if a real transfer exists)
1433 * 2) for 10-byte CDBs it is 1 or 2 bytes (3 for 0x24/0x25)
1434 * 3) for 12-byte CDBs it is 1 or 4 bytes
1437 /* 3. transfer_len */
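/*
 * Example (6-byte CDB, 1-byte length field): READ(6) carries its
 * transfer length in CDB byte 4, so cdb_p[4] == 0x08 yields
 * transfer_len = 8.
 */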
1438 if (SCST_GET_CDB_LEN(op) == 6) {
1439 if (ptr->size_field_len == 3) {
1440 /* length = 3 bytes */
1441 info_p->transfer_len = (((*(cdb_p + 2)) & 0xff) << 16) +
1442 (((*(cdb_p + 3)) & 0xff) << 8) +
1443 ((*(cdb_p + 4)) & 0xff);
1444 info_p->transfer_len &= 0xffffff;
1445 } else if (ptr->size_field_len == 1) {
1447 * Warning: the READ BLOCK LIMITS CDB always
1448 * returns a 6-byte block with the limits
1449 * info_p->transfer_len = (int)(*(cdb_p + 4));
1451 info_p->transfer_len = ((op == READ_BLOCK_LIMITS) ?
1452 SCST_BLOCK_LIMIT_LEN :
1453 *(cdb_p + 4)) & 0xff;
1455 } else if (SCST_GET_CDB_LEN(op) == 10) {
1456 if (ptr->size_field_len == 3)
1458 * SET/GET WINDOW (0x24/0x25) use a 3-byte length field
1459 * if ((uint8_t)ptr->ops == 0x24 || 0x25)
1462 info_p->transfer_len = (((*(cdb_p + 6)) & 0xff) << 16) +
1463 (((*(cdb_p + 7)) & 0xff) << 8) +
1464 ((*(cdb_p + 8)) & 0xff);
1465 info_p->transfer_len &= 0xffffff;
1466 } else if (ptr->size_field_len == 2) {
1467 info_p->transfer_len = (((*(cdb_p + 7)) & 0xff) << 8) +
1468 ((*(cdb_p + 8)) & 0xff);
1469 info_p->transfer_len &= 0xffff;
1470 } else if (ptr->size_field_len == 1) {
1471 info_p->transfer_len = (*(cdb_p + 8));
1473 /* opcode = READ-WRITE UPDATED BLOCK */
1474 if ((ptr->ops == 0x5d) ||
1475 (ptr->ops == UPDATE_BLOCK) ||
1476 (ptr->ops == WRITE_SAME)) {
1477 /* the opcode always returns 1 block */
1478 info_p->flags |= SCST_TRANSFER_LEN_TYPE_FIXED;
1479 info_p->transfer_len = 1;
1482 if ((ptr->ops == COMPARE) || (ptr->ops == COPY_VERIFY)) {
1483 /* uses another place in the CDB: [3,4],5 */
1484 info_p->transfer_len = (*(cdb_p + 5));
1487 info_p->transfer_len &= 0xff;
1489 } else if (SCST_GET_CDB_LEN(op) == 12) {
1490 if (ptr->size_field_len == 4) {
1491 info_p->transfer_len = (((*(cdb_p + 6)) & 0xff) << 24) +
1492 (((*(cdb_p + 7)) & 0xff) << 16) +
1493 (((*(cdb_p + 8)) & 0xff) << 8) +
1494 ((*(cdb_p + 9)) & 0xff);
1495 info_p->transfer_len &= 0xffffffff;
1496 } else if (ptr->size_field_len == 3) {
1497 info_p->transfer_len = (((*(cdb_p + 7)) & 0xff) << 16) +
1498 (((*(cdb_p + 8)) & 0xff) << 8) +
1499 ((*(cdb_p + 9)) & 0xff);
1500 info_p->transfer_len &= 0xffffff;
1501 } else if (ptr->size_field_len == 2) {
1502 info_p->transfer_len = (((*(cdb_p + 8)) & 0xff) << 8) +
1503 ((*(cdb_p + 9)) & 0xff);
1504 info_p->transfer_len &= 0xffff;
1506 if (ptr->size_field_len == 1) {
1507 info_p->transfer_len = (*(cdb_p + 9));
1508 info_p->transfer_len &= 0xff;
1511 } else if (SCST_GET_CDB_LEN(op) == 16) {
1512 if (ptr->size_field_len == 4) {
1513 info_p->transfer_len =
1514 (((*(cdb_p + 10)) & 0xff) << 24) +
1515 (((*(cdb_p + 11)) & 0xff) << 16) +
1516 (((*(cdb_p + 12)) & 0xff) << 8) +
1517 ((*(cdb_p + 13)) & 0xff);
1520 if (!info_p->transfer_len) {
1522 "Warning! transfer_len 0, direction %d change on " "%d",
1523 info_p->direction, SCST_DATA_NONE);
1524 info_p->direction = SCST_DATA_NONE;
1532 void scst_scsi_op_list_init(void)
1539 for (i = 0; i < 256; i++)
1540 scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
1542 for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
1543 if (scst_scsi_op_table[i].ops != op) {
1544 op = scst_scsi_op_table[i].ops;
1545 scst_scsi_op_list[op] = i;
1554 * Routine to extract a LUN number from an 8-byte LUN structure
1555 * in network byte order (BE)
1556 * (see SAM-2, Section 4.12.3 page 40).
1557 * Supports three types of LUN unpacking: peripheral, flat space and logical unit.
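/*
 * Examples: the 8-byte fields {0x00, 0x05, 0, ...} (peripheral
 * addressing) and {0x40, 0x05, 0, ...} (flat space addressing) both
 * unpack to LUN 5.
 */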
1559 lun_t scst_unpack_lun(const uint8_t *lun, int len)
1561 lun_t res = (lun_t)-1;
1566 TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
1569 PRINT_ERROR_PR("Illegal lun length %d, expected 2 bytes or "
1578 if ((*((uint64_t*)lun) &
1579 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1584 if (*((uint16_t*)&lun[2]) != 0)
1588 if (*((uint32_t*)&lun[2]) != 0)
1596 address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
1597 switch (address_method) {
1598 case 0: /* peripheral device addressing method */
1600 PRINT_ERROR_PR("Illegal BUS INDENTIFIER in LUN "
1601 "peripheral device addressing method 0x%02x, "
1602 "expected 0", *lun);
1608 case 1: /* flat space addressing method */
1609 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1612 case 2: /* logical unit addressing method */
1614 PRINT_ERROR_PR("Illegal BUS NUMBER in LUN logical unit "
1615 "addressing method 0x%02x, expected 0",
1619 if (*(lun + 1) & 0xe0) {
1620 PRINT_ERROR_PR("Illegal TARGET in LUN logical unit "
1621 "addressing method 0x%02x, expected 0",
1622 (*(lun + 1) & 0xf8) >> 5);
1625 res = *(lun + 1) & 0x1f;
1628 case 3: /* extended logical unit addressing method */
1630 PRINT_ERROR_PR("Unimplemented LUN addressing method %u",
1636 TRACE_EXIT_RES((int)res);
1640 PRINT_ERROR_PR("%s", "Multi-level LUN unimplemented");
1644 /* Called under dev_lock and BH off */
1645 void scst_process_reset(struct scst_device *dev,
1646 struct scst_session *originator, struct scst_cmd *exclude_cmd,
1647 struct scst_mgmt_cmd *mcmd)
1649 struct scst_tgt_dev *tgt_dev;
1650 struct scst_cmd *cmd, *tcmd;
1655 /* Clear RESERVE'ation, if necessary */
1656 if (dev->dev_reserved) {
1657 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1658 dev_tgt_dev_list_entry)
1660 TRACE(TRACE_MGMT, "Clearing RESERVE'ation for tgt_dev "
1661 "lun %d", tgt_dev->acg_dev->lun);
1662 clear_bit(SCST_TGT_DEV_RESERVED,
1663 &tgt_dev->tgt_dev_flags);
1665 dev->dev_reserved = 0;
1667 * There is no need to send RELEASE, since the device is going
1672 dev->dev_double_ua_possible = 1;
1673 dev->dev_serialized = 1;
1675 /* BH already off */
1676 spin_lock(&scst_temp_UA_lock);
1677 scst_set_sense(scst_temp_UA, sizeof(scst_temp_UA),
1678 SCST_LOAD_SENSE(scst_sense_reset_UA));
1679 __scst_process_UA(dev, exclude_cmd, scst_temp_UA, sizeof(scst_temp_UA),
1681 spin_unlock(&scst_temp_UA_lock);
1683 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1684 dev_tgt_dev_list_entry)
1686 struct scst_session *sess = tgt_dev->sess;
1688 spin_lock_irq(&scst_list_lock);
1690 TRACE_DBG("Searching in search cmd list (sess=%p)", sess);
1691 list_for_each_entry(cmd, &sess->search_cmd_list,
1692 search_cmd_list_entry) {
1693 if (cmd == exclude_cmd)
1695 if ((cmd->tgt_dev == tgt_dev) ||
1696 ((cmd->tgt_dev == NULL) &&
1697 (cmd->lun == tgt_dev->acg_dev->lun))) {
1698 scst_abort_cmd(cmd, mcmd,
1699 (tgt_dev->sess != originator), 0);
1702 spin_unlock_irq(&scst_list_lock);
1705 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
1706 blocked_cmd_list_entry) {
1707 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
1708 list_del(&cmd->blocked_cmd_list_entry);
1709 TRACE_MGMT_DBG("Moving aborted blocked cmd %p "
1710 "to active cmd list", cmd);
1711 spin_lock_irq(&scst_list_lock);
1712 list_move_tail(&cmd->cmd_list_entry,
1713 &scst_active_cmd_list);
1714 spin_unlock_irq(&scst_list_lock);
1720 wake_up(&scst_list_waitQ);
1726 int scst_set_pending_UA(struct scst_cmd *cmd)
1729 struct scst_tgt_dev_UA *UA_entry;
1733 TRACE(TRACE_MGMT, "Setting pending UA cmd %p", cmd);
1735 spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
1737 /* UA list could be cleared behind us, so retest */
1738 if (list_empty(&cmd->tgt_dev->UA_list)) {
1740 "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
1745 UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
1748 TRACE_DBG("next %p UA_entry %p",
1749 cmd->tgt_dev->UA_list.next, UA_entry);
1751 scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
1752 sizeof(UA_entry->UA_sense_buffer));
1756 list_del(&UA_entry->UA_list_entry);
1759 TRACE_MEM("mempool_free for UA_entry: %p", UA_entry);
1760 mempool_free(UA_entry, scst_ua_mempool);
1762 if (list_empty(&cmd->tgt_dev->UA_list)) {
1763 clear_bit(SCST_TGT_DEV_UA_PENDING,
1764 &cmd->tgt_dev->tgt_dev_flags);
1767 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
1770 TRACE_EXIT_RES(res);
1774 spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
1778 /* Called under dev_lock, tgt_dev_lock and BH off */
1779 void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
1780 const uint8_t *sense, int sense_len)
1782 struct scst_tgt_dev_UA *UA_entry = NULL;
1786 UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
1787 TRACE_MEM("mempool_alloc(GFP_ATOMIC) for UA_entry (%zd): %p",
1788 sizeof(*UA_entry), UA_entry);
1789 if (UA_entry == NULL) {
1790 PRINT_ERROR_PR("%s", "UNIT ATTENTION memory "
1791 "allocation failed. The UNIT ATTENTION "
1792 "on some sessions will be missed");
1795 memset(UA_entry, 0, sizeof(*UA_entry));
1797 if (sense_len > sizeof(UA_entry->UA_sense_buffer))
1798 sense_len = sizeof(UA_entry->UA_sense_buffer);
1799 memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
1800 set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
1801 smp_mb__after_set_bit();
1802 list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
1809 /* Called under dev_lock and BH off */
1810 void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
1811 const uint8_t *sense, int sense_len)
1814 struct scst_tgt_dev_UA *UA_entry_tmp;
1818 spin_lock(&tgt_dev->tgt_dev_lock);
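	/* UAs with the same ASC (byte 12 of fixed-format sense) are duplicates */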
1820 list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
1823 if (sense[12] == UA_entry_tmp->UA_sense_buffer[12]) {
1830 scst_alloc_set_UA(tgt_dev, sense, sense_len);
1832 spin_unlock(&tgt_dev->tgt_dev_lock);
1838 /* Called under dev_lock and BH off */
1839 void __scst_process_UA(struct scst_device *dev,
1840 struct scst_cmd *exclude, const uint8_t *sense, int sense_len,
1843 struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
1847 TRACE(TRACE_MGMT, "Processing UA dev %p", dev);
1849 if (exclude != NULL)
1850 exclude_tgt_dev = exclude->tgt_dev;
1852 /* Check for reset UA */
1853 if (!internal && (sense[12] == SCST_SENSE_ASC_UA_RESET)) {
1854 scst_process_reset(dev, (exclude != NULL) ? exclude->sess : NULL,
1858 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1859 dev_tgt_dev_list_entry) {
1860 if (tgt_dev != exclude_tgt_dev)
1861 scst_check_set_UA(tgt_dev, sense, sense_len);
1868 /* Called under tgt_dev_lock or when tgt_dev is unused */
1869 void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
1871 struct scst_tgt_dev_UA *UA_entry, *t;
1875 list_for_each_entry_safe(UA_entry, t, &tgt_dev->UA_list, UA_list_entry) {
1876 TRACE_MGMT_DBG("Clearing UA for tgt_dev lun %d",
1877 tgt_dev->acg_dev->lun);
1878 list_del(&UA_entry->UA_list_entry);
1879 TRACE_MEM("kfree for UA_entry: %p", UA_entry);
1882 INIT_LIST_HEAD(&tgt_dev->UA_list);
1883 clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
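/*
 * Two queues are scanned below: deferred_cmd_list holds commands waiting
 * for their SN to become the expected one, while skipped_sn_list holds
 * commands that already ran out of order and whose SN must still be
 * consumed to advance expected_sn.
 */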
1889 struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev,
1892 struct scst_cmd *cmd = NULL, *tcmd;
1894 if (tgt_dev->def_cmd_count == 0)
1897 spin_lock_bh(&tgt_dev->sn_lock);
1900 list_for_each_entry(tcmd, &tgt_dev->deferred_cmd_list,
1901 sn_cmd_list_entry) {
1902 if (tcmd->sn == expected_sn) {
1903 TRACE(TRACE_SCSI_SERIALIZING,
1904 "Deferred command sn %d found", tcmd->sn);
1905 tgt_dev->def_cmd_count--;
1906 list_del(&tcmd->sn_cmd_list_entry);
1912 list_for_each_entry(tcmd, &tgt_dev->skipped_sn_list,
1913 sn_cmd_list_entry) {
1914 if (tcmd->sn == expected_sn) {
1916 * !! At this point any pointer in tcmd, except !!
1917 * !! sn_cmd_list_entry, could be already destroyed !!
1919 TRACE(TRACE_SCSI_SERIALIZING,
1920 "cmd %p (tag %d) with skipped sn %d found", tcmd,
1921 tcmd->tag, tcmd->sn);
1922 tgt_dev->def_cmd_count--;
1923 list_del(&tcmd->sn_cmd_list_entry);
1924 if (test_bit(SCST_CMD_CAN_BE_DESTROYED,
1925 &tcmd->cmd_flags)) {
1926 scst_destroy_put_cmd(tcmd);
1928 smp_mb__before_clear_bit();
1929 clear_bit(SCST_CMD_OUT_OF_SN, &tcmd->cmd_flags);
1931 expected_sn = __scst_inc_expected_sn(tgt_dev);
1937 spin_unlock_bh(&tgt_dev->sn_lock);
1944 int scst_inc_on_dev_cmd(struct scst_cmd *cmd)
1947 struct scst_device *dev = cmd->dev;
1949 BUG_ON(cmd->blocking);
1951 atomic_inc(&dev->on_dev_count);
1953 #ifdef STRICT_SERIALIZING
1954 spin_lock_bh(&dev->dev_lock);
1955 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
1957 if (dev->block_count > 0) {
1958 scst_dec_on_dev_cmd(cmd);
1959 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or serializing"
1960 "(tag %d, dev %p)", cmd, cmd->tag, dev);
1961 list_add_tail(&cmd->blocked_cmd_list_entry,
1962 &dev->blocked_cmd_list);
1965 __scst_block_dev(cmd->dev);
1968 spin_unlock_bh(&dev->dev_lock);
1972 if (unlikely(dev->block_count > 0)) {
1973 spin_lock_bh(&dev->dev_lock);
1974 if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
1976 barrier(); /* to reread block_count */
1977 if (dev->block_count > 0) {
1978 scst_dec_on_dev_cmd(cmd);
1979 TRACE_MGMT_DBG("Delaying cmd %p due to blocking or "
1980 "serializing (tag %d, dev %p)", cmd,
1982 list_add_tail(&cmd->blocked_cmd_list_entry,
1983 &dev->blocked_cmd_list);
1985 spin_unlock_bh(&dev->dev_lock);
1988 TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
1991 spin_unlock_bh(&dev->dev_lock);
1993 if (unlikely(cmd->dev->dev_serialized)) {
1994 spin_lock_bh(&dev->dev_lock);
1995 barrier(); /* to reread block_count */
1996 if (cmd->dev->block_count == 0) {
1997 TRACE_MGMT_DBG("cmd %p (tag %d), blocking further "
1998 "cmds due to serializing (dev %p)", cmd,
2000 __scst_block_dev(cmd->dev);
2003 spin_unlock_bh(&dev->dev_lock);
2006 spin_unlock_bh(&dev->dev_lock);
2014 spin_unlock_bh(&dev->dev_lock);
2018 /* Called under dev_lock */
2019 void scst_unblock_cmds(struct scst_device *dev)
2021 #ifdef STRICT_SERIALIZING
2022 struct scst_cmd *cmd;
2027 list_for_each_entry(cmd, &dev->blocked_cmd_list,
2028 blocked_cmd_list_entry) {
2030 * Since only one cmd at a time is being executed, expected_sn
2031 * can't change behind us, if the corresponding cmd is in
2034 if ((cmd->tgt_dev && (cmd->sn == cmd->tgt_dev->expected_sn)) ||
2035 (unlikely(cmd->internal) || unlikely(cmd->retry))) {
2036 unsigned long flags;
2037 list_del(&cmd->blocked_cmd_list_entry);
2038 TRACE_MGMT_DBG("Moving cmd %p to active cmd list", cmd);
2039 spin_lock_irqsave(&scst_list_lock, flags);
2040 list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2041 spin_unlock_irqrestore(&scst_list_lock, flags);
2042 wake_up(&scst_list_waitQ);
2048 if (!found && !list_empty(&dev->blocked_cmd_list)) {
2049 TRACE(TRACE_MINOR, "%s", "No commands unblocked when "
2050 "blocked cmd list is not empty");
2053 #else /* STRICT_SERIALIZING */
2054 struct scst_cmd *cmd, *tcmd;
2055 unsigned long flags;
2059 spin_lock_irqsave(&scst_list_lock, flags);
2060 list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
2061 blocked_cmd_list_entry) {
2062 list_del(&cmd->blocked_cmd_list_entry);
2063 TRACE_MGMT_DBG("Moving blocked cmd %p to active cmd list", cmd);
2064 list_move_tail(&cmd->cmd_list_entry, &scst_active_cmd_list);
2065 wake_up(&scst_list_waitQ);
2067 spin_unlock_irqrestore(&scst_list_lock, flags);
2068 #endif /* STRICT_SERIALIZING */
2074 static struct scst_cmd *scst_inc_expected_sn(
2075 struct scst_tgt_dev *tgt_dev, struct scst_cmd *out_of_sn_cmd)
2077 struct scst_cmd *res = NULL;
2079 if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
2080 __scst_inc_expected_sn(tgt_dev);
2082 spin_lock_bh(&tgt_dev->sn_lock);
2083 tgt_dev->def_cmd_count++;
2084 set_bit(SCST_CMD_OUT_OF_SN, &out_of_sn_cmd->cmd_flags);
2085 list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
2086 &tgt_dev->skipped_sn_list);
2087 TRACE(TRACE_SCSI_SERIALIZING, "out_of_sn_cmd %p with sn %d "
2088 "added to skipped_sn_list (expected_sn %d)",
2089 out_of_sn_cmd, out_of_sn_cmd->sn, tgt_dev->expected_sn);
2090 spin_unlock_bh(&tgt_dev->sn_lock);
2091 smp_mb(); /* just in case; we need the new value of tgt_dev->expected_sn */
2093 res = scst_check_deferred_commands(tgt_dev, tgt_dev->expected_sn);
2097 void scst_inc_expected_sn_unblock(struct scst_tgt_dev *tgt_dev,
2098 struct scst_cmd *cmd_sn, int locked)
2100 struct scst_cmd *cmd;
2104 cmd = scst_inc_expected_sn(tgt_dev, cmd_sn);
2106 unsigned long flags = 0;
2108 spin_lock_irqsave(&scst_list_lock, flags);
2109 TRACE(TRACE_SCSI_SERIALIZING, "cmd %p with sn %d "
2110 "moved to active cmd list", cmd, cmd->sn);
2111 list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2113 spin_unlock_irqrestore(&scst_list_lock, flags);
2114 if (!cmd_sn->processible_env)
2115 wake_up(&scst_list_waitQ);
2123 /* Original taken from the XFS code */
2124 unsigned long scst_random(void)
2127 static unsigned long RandomValue;
2128 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
2129 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
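	/*
	 * Park-Miller "minimal standard" PRNG: x = 16807 * x mod (2^31 - 1),
	 * computed with Schrage's decomposition to avoid 32-bit overflow
	 * (2147483647 = 16807 * 127773 + 2836, so hi/lo are presumably the
	 * quotient and remainder of the state divided by 127773).
	 */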
2133 unsigned long flags;
2135 spin_lock_irqsave(&lock, flags);
2137 RandomValue = jiffies;
2143 rv = 16807 * lo - 2836 * hi;
2144 if (rv <= 0) rv += 2147483647;
2146 spin_unlock_irqrestore(&lock, flags);
2153 #define TM_DBG_STATE_ABORT 0
2154 #define TM_DBG_STATE_RESET 1
2155 #define TM_DBG_STATE_OFFLINE 2
2157 #define INIT_TM_DBG_STATE TM_DBG_STATE_ABORT
2159 static void tm_dbg_timer_fn(unsigned long arg);
2161 /* All serialized by scst_list_lock */
2162 static int tm_dbg_release;
2163 static int tm_dbg_blocked;
2164 static LIST_HEAD(tm_dbg_delayed_cmd_list);
2165 static int tm_dbg_delayed_cmds_count;
2166 static int tm_dbg_passed_cmds_count;
2167 static int tm_dbg_state;
2168 static int tm_dbg_on_state_passes;
2169 static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
2171 static const int tm_dbg_on_state_num_passes[] = { 10, 1, 0x7ffffff };
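/*
 * The debug state machine cycles ABORT -> RESET -> ABORT (or parks in
 * OFFLINE when TM_DBG_GO_OFFLINE is set); per the table above it spends
 * 10 passes in ABORT, 1 in RESET and effectively forever (0x7ffffff)
 * in OFFLINE.
 */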
2173 void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev,
2174 struct scst_acg_dev *acg_dev)
2176 if ((acg_dev->acg == scst_default_acg) && (acg_dev->lun == 0)) {
2177 /* Do TM debugging only for LUN 0 */
2178 tm_dbg_state = INIT_TM_DBG_STATE;
2179 tm_dbg_on_state_passes =
2180 tm_dbg_on_state_num_passes[tm_dbg_state];
2181 __set_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags);
2182 PRINT_INFO("LUN 0 connected from initiator %s is under "
2183 "TM debugging", tgt_dev->sess->tgt->tgtt->name);
2187 void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
2189 if (test_bit(SCST_TGT_DEV_UNDER_TM_DBG, &tgt_dev->tgt_dev_flags))
2190 del_timer_sync(&tm_dbg_timer);
2193 static void tm_dbg_timer_fn(unsigned long arg)
2195 TRACE_MGMT_DBG("%s: delayed cmd timer expired", __func__);
2198 wake_up_all(&scst_list_waitQ);
2201 /* Called under scst_list_lock */
2202 static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
2204 switch(tm_dbg_state) {
2205 case TM_DBG_STATE_ABORT:
2206 if (tm_dbg_delayed_cmds_count == 0) {
2207 unsigned long d = 58*HZ + (scst_random() % (4*HZ));
2208 TRACE_MGMT_DBG("%s: delaying timed cmd %p (tag %d) "
2209 "for %ld.%ld seconds (%ld HZ)", __func__, cmd, cmd->tag,
2210 d/HZ, (d%HZ)*100/HZ, d);
2211 mod_timer(&tm_dbg_timer, jiffies + d);
2216 TRACE_MGMT_DBG("%s: delaying another timed cmd %p "
2217 "(tag %d), delayed_cmds_count=%d", __func__, cmd,
2218 cmd->tag, tm_dbg_delayed_cmds_count);
2219 if (tm_dbg_delayed_cmds_count == 2)
2224 case TM_DBG_STATE_RESET:
2225 case TM_DBG_STATE_OFFLINE:
2226 TRACE_MGMT_DBG("%s: delaying cmd %p "
2227 "(tag %d), delayed_cmds_count=%d", __func__, cmd,
2228 cmd->tag, tm_dbg_delayed_cmds_count);
2235 list_move_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
2236 cmd->tm_dbg_delayed = 1;
2237 tm_dbg_delayed_cmds_count++;
2241 /* Called under scst_list_lock */
2242 void tm_dbg_check_released_cmds(void)
2244 if (tm_dbg_release) {
2245 struct scst_cmd *cmd, *tc;
2246 list_for_each_entry_safe_reverse(cmd, tc,
2247 &tm_dbg_delayed_cmd_list, cmd_list_entry) {
2248 TRACE_MGMT_DBG("%s: Releasing timed cmd %p "
2249 "(tag %d), delayed_cmds_count=%d", __func__,
2250 cmd, cmd->tag, tm_dbg_delayed_cmds_count);
2251 list_move(&cmd->cmd_list_entry, &scst_active_cmd_list);
2257 static void tm_dbg_change_state(void)
2260 if (--tm_dbg_on_state_passes == 0) {
2261 switch(tm_dbg_state) {
2262 case TM_DBG_STATE_ABORT:
2263 TRACE_MGMT_DBG("%s", "Changing "
2264 "tm_dbg_state to RESET");
2269 case TM_DBG_STATE_RESET:
2270 case TM_DBG_STATE_OFFLINE:
2271 if (TM_DBG_GO_OFFLINE) {
2272 TRACE_MGMT_DBG("%s", "Changing "
2273 "tm_dbg_state to OFFLINE");
2275 TM_DBG_STATE_OFFLINE;
2277 TRACE_MGMT_DBG("%s", "Changing "
2278 "tm_dbg_state to ABORT");
2286 tm_dbg_on_state_passes =
2287 tm_dbg_on_state_num_passes[tm_dbg_state];
2290 TRACE_MGMT_DBG("%s", "Deleting timer");
2291 del_timer(&tm_dbg_timer);
2294 /* Called under scst_list_lock */
2295 int tm_dbg_check_cmd(struct scst_cmd *cmd)
2299 if (cmd->tm_dbg_immut)
2302 if (cmd->tm_dbg_delayed) {
2303 TRACE_MGMT_DBG("Processing delayed cmd %p (tag %d), "
2304 "delayed_cmds_count=%d", cmd, cmd->tag,
2305 tm_dbg_delayed_cmds_count);
2307 cmd->tm_dbg_immut = 1;
2308 tm_dbg_delayed_cmds_count--;
2309 if ((tm_dbg_delayed_cmds_count == 0) &&
2310 (tm_dbg_state == TM_DBG_STATE_ABORT))
2311 tm_dbg_change_state();
2313 } else if (cmd->tgt_dev && test_bit(SCST_TGT_DEV_UNDER_TM_DBG,
2314 &cmd->tgt_dev->tgt_dev_flags)) {
2315 /* Delay every 50th command */
2316 if (tm_dbg_blocked || (++tm_dbg_passed_cmds_count % 50) == 0) {
2317 tm_dbg_delay_cmd(cmd);
2320 cmd->tm_dbg_immut = 1;
2327 /* Called under scst_list_lock */
2328 void tm_dbg_release_cmd(struct scst_cmd *cmd)
2331 list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
2334 TRACE_MGMT_DBG("Abort request for "
2335 "delayed cmd %p (tag=%d), moving it to "
2336 "active cmd list (delayed_cmds_count=%d)",
2337 c, c->tag, tm_dbg_delayed_cmds_count);
2338 list_move(&c->cmd_list_entry, &scst_active_cmd_list);
2339 wake_up_all(&scst_list_waitQ);
2345 /* Called under scst_list_lock */
2346 void tm_dbg_task_mgmt(const char *fn)
2348 if (tm_dbg_state != TM_DBG_STATE_OFFLINE) {
2349 TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
2350 tm_dbg_delayed_cmds_count);
2351 tm_dbg_change_state();
2354 wake_up_all(&scst_list_waitQ);
2356 TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
2360 int tm_dbg_is_release(void)
2362 return tm_dbg_release;
2364 #endif /* DEBUG_TM */