/*
 * Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/unistd.h>
#include <asm/string.h>
#include <linux/kthread.h>

#include "scst_priv.h"

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
#warning HIGHMEM kernel configurations are fully supported, but not \
	recommended for performance reasons. Consider changing the VMSPLIT \
	option or using a 64-bit configuration instead. See the README file \
	for details.

#error The SCST_HIGHMEM configuration is broken and not supported, because \
	there is no real point in supporting it; it definitely isn't worth \
	the effort. Use a non-HIGHMEM kernel with the VMSPLIT option or a \
	64-bit configuration instead. See the README file for details.

#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && !defined(STRICT_SERIALIZING)
#warning Patch scst_exec_req_fifo-<kernel-version>.patch was not applied to \
	your kernel and STRICT_SERIALIZING isn't defined. Pass-through dev \
	handlers will not be supported.

/*
 * All management of targets, devices and dev_types is done under this mutex.
 *
 * It must NOT be taken from any work item (schedule_work(), etc.), because
 * otherwise a deadlock (a double lock, actually) is possible, e.g. with
 * scst_user's detach_tgt(), which is called under scst_mutex and calls
 * flush_scheduled_work().
 */
DEFINE_MUTEX(scst_mutex);

LIST_HEAD(scst_template_list);
LIST_HEAD(scst_dev_list);
LIST_HEAD(scst_dev_type_list);

spinlock_t scst_main_lock = SPIN_LOCK_UNLOCKED;

struct kmem_cache *scst_mgmt_cachep;
mempool_t *scst_mgmt_mempool;
struct kmem_cache *scst_ua_cachep;
mempool_t *scst_ua_mempool;
struct kmem_cache *scst_tgtd_cachep;
struct kmem_cache *scst_sess_cachep;
struct kmem_cache *scst_acgd_cachep;

LIST_HEAD(scst_acg_list);
struct scst_acg *scst_default_acg;

spinlock_t scst_init_lock = SPIN_LOCK_UNLOCKED;
DECLARE_WAIT_QUEUE_HEAD(scst_init_cmd_list_waitQ);
LIST_HEAD(scst_init_cmd_list);
unsigned int scst_init_poll_cnt;

struct kmem_cache *scst_cmd_cachep;

#if defined(DEBUG) || defined(TRACING)
unsigned long scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
#endif

unsigned long scst_flags;
atomic_t scst_cmd_count = ATOMIC_INIT(0);

spinlock_t scst_cmd_mem_lock = SPIN_LOCK_UNLOCKED;
unsigned long scst_cur_cmd_mem, scst_cur_max_cmd_mem;
unsigned long scst_max_cmd_mem;

struct scst_cmd_lists scst_main_cmd_lists;

struct scst_tasklet scst_tasklets[NR_CPUS];

spinlock_t scst_mcmd_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(scst_active_mgmt_cmd_list);
LIST_HEAD(scst_delayed_mgmt_cmd_list);
DECLARE_WAIT_QUEUE_HEAD(scst_mgmt_cmd_list_waitQ);

DECLARE_WAIT_QUEUE_HEAD(scst_mgmt_waitQ);
spinlock_t scst_mgmt_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(scst_sess_init_list);
LIST_HEAD(scst_sess_shut_list);

DECLARE_WAIT_QUEUE_HEAD(scst_dev_cmd_waitQ);

DEFINE_MUTEX(scst_suspend_mutex);
LIST_HEAD(scst_cmd_lists_list); /* protected by scst_suspend_mutex */

static int scst_threads;
struct scst_threads_info_t scst_threads_info;

static int suspend_count;

int scst_virt_dev_last_id = 1; /* protected by scst_mutex */

/*
 * This buffer and lock are intended to avoid memory allocations that
 * could fail at inconvenient places.
 */
spinlock_t scst_temp_UA_lock = SPIN_LOCK_UNLOCKED;
uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];

module_param_named(scst_threads, scst_threads, int, 0);
MODULE_PARM_DESC(scst_threads, "SCSI target threads count");

module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, long, 0);
MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
	"SCST commands at any given time, in MB");

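/*
 * scst_register_target_template() - register a target driver's template.
 *
 * Verifies that the mandatory detect(), release() and xmit_response()
 * callbacks are provided and that threads_num is not negative, builds the
 * driver's /proc entries (unless no_proc_entry is set), checks under
 * scst_mutex that no template with the same name is already on
 * scst_template_list, calls the driver's detect() and, on success, adds the
 * template to the list.
 *
 * A minimal, hypothetical registration sketch (illustrative only; my_detect,
 * my_release and my_xmit_response are placeholders, and a real driver also
 * fills the other callbacks its command path needs):
 *
 *	static struct scst_tgt_template my_tgt_template = {
 *		.name = "my_tgt",
 *		.sg_tablesize = 128,
 *		.detect = my_detect,
 *		.release = my_release,
 *		.xmit_response = my_xmit_response,
 *	};
 *
 *	if (scst_register_target_template(&my_tgt_template) != 0)
 *		return -ENODEV;
 */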
int scst_register_target_template(struct scst_tgt_template *vtt)
	struct scst_tgt_template *t;
	static DEFINE_MUTEX(m);

	INIT_LIST_HEAD(&vtt->tgt_list);

	PRINT_ERROR_PR("Target driver %s doesn't have a "
		"detect() method.", vtt->name);

	PRINT_ERROR_PR("Target driver %s doesn't have a "
		"release() method.", vtt->name);

	if (!vtt->xmit_response) {
		PRINT_ERROR_PR("Target driver %s doesn't have an "
			"xmit_response() method.", vtt->name);

	if (vtt->threads_num < 0) {
		PRINT_ERROR_PR("Wrong threads_num value %d for "
			"target \"%s\"", vtt->threads_num,

	if (!vtt->no_proc_entry) {
		res = scst_build_proc_target_dir_entries(vtt);

	if (vtt->preprocessing_done == NULL)
		vtt->preprocessing_done_atomic = 1;

	if (mutex_lock_interruptible(&m) != 0)

	if (mutex_lock_interruptible(&scst_mutex) != 0)

	list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
		if (strcmp(t->name, vtt->name) == 0) {
			PRINT_ERROR_PR("Target driver %s already registered",
			mutex_unlock(&scst_mutex);

	mutex_unlock(&scst_mutex);

	TRACE_DBG("%s", "Calling target driver's detect()");
	res = vtt->detect(vtt);
	TRACE_DBG("Target driver's detect() returned %d", res);
	PRINT_ERROR_PR("%s", "The detect() routine failed");

	mutex_lock(&scst_mutex);
	list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
	mutex_unlock(&scst_mutex);

	PRINT_INFO_PR("Target template %s registered successfully", vtt->name);

	scst_cleanup_proc_target_dir_entries(vtt);

	PRINT_ERROR_PR("Failed to register target template %s", vtt->name);

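/*
 * scst_unregister_target_template() - the reverse operation: looks the
 * template up on scst_template_list, unregisters every target still on its
 * tgt_list via scst_unregister() (dropping scst_mutex around each call),
 * removes the template from the list and cleans up its /proc entries.
 */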
void scst_unregister_target_template(struct scst_tgt_template *vtt)
	struct scst_tgt *tgt;
	struct scst_tgt_template *t;

	mutex_lock(&scst_mutex);

	list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
		if (strcmp(t->name, vtt->name) == 0) {

	PRINT_ERROR_PR("Target driver %s isn't registered", vtt->name);

	list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
		mutex_unlock(&scst_mutex);
		scst_unregister(tgt);
		mutex_lock(&scst_mutex);

	list_del(&vtt->scst_template_list_entry);

	PRINT_INFO_PR("Target template %s unregistered successfully", vtt->name);

	mutex_unlock(&scst_mutex);

	scst_cleanup_proc_target_dir_entries(vtt);

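/*
 * scst_register() - create and register a target for the given template.
 * Allocates the scst_tgt, initializes its session list, retry machinery and
 * retry timer, builds the default ACG name from SCST_DEFAULT_ACG_NAME and
 * the supplied target name, creates the target's /proc entries and links it
 * into the template's tgt_list, all with activity suspended and scst_mutex
 * held.
 */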
struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
	const char *target_name)
	struct scst_tgt *tgt;

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	TRACE(TRACE_OUT_OF_MEM, "%s", "kzalloc() failed");

	INIT_LIST_HEAD(&tgt->sess_list);
	init_waitqueue_head(&tgt->unreg_waitQ);

	tgt->sg_tablesize = vtt->sg_tablesize;
	spin_lock_init(&tgt->tgt_lock);
	INIT_LIST_HEAD(&tgt->retry_cmd_list);
	atomic_set(&tgt->finished_cmds, 0);
	init_timer(&tgt->retry_timer);
	tgt->retry_timer.data = (unsigned long)tgt;
	tgt->retry_timer.function = scst_tgt_retry_timer_fn;

	scst_suspend_activity();
	mutex_lock(&scst_mutex);

	if (target_name != NULL) {
		int len = strlen(target_name) + 1 +
			strlen(SCST_DEFAULT_ACG_NAME) + 1;

		tgt->default_group_name = kmalloc(len, GFP_KERNEL);
		if (tgt->default_group_name == NULL) {
			TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
				"group name failed");

		sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,

	if (scst_build_proc_target_entries(tgt) < 0)

	list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	PRINT_INFO_PR("Target %s for template %s registered successfully",
		target_name, vtt->name);

	if (tgt->default_group_name)
		kfree(tgt->default_group_name);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	PRINT_ERROR_PR("Failed to register target for template %s", vtt->name);

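/*
 * Returns non-zero once the target's session list is empty; checked under
 * scst_mutex so it can serve as the wait_event() condition in
 * scst_unregister() below.
 */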
static inline int test_sess_list(struct scst_tgt *tgt)
	mutex_lock(&scst_mutex);
	res = list_empty(&tgt->sess_list);
	mutex_unlock(&scst_mutex);

void scst_unregister(struct scst_tgt *tgt)
	struct scst_session *sess;
	struct scst_tgt_template *vtt = tgt->tgtt;

	TRACE_DBG("%s", "Calling target driver's release()");
	tgt->tgtt->release(tgt);
	TRACE_DBG("%s", "Target driver's release() returned");

	mutex_lock(&scst_mutex);
	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
	mutex_unlock(&scst_mutex);

	TRACE_DBG("%s", "Waiting for sessions shutdown");
	wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
	TRACE_DBG("%s", "wait_event() returned");

	scst_suspend_activity();
	mutex_lock(&scst_mutex);

	list_del(&tgt->tgt_list_entry);

	scst_cleanup_proc_target_entries(tgt);

	if (tgt->default_group_name)
		kfree(tgt->default_group_name);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	del_timer_sync(&tgt->retry_timer);

	PRINT_INFO_PR("Target for template %s unregistered successfully",

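/*
 * scst_suspend_activity() - suspend command processing. Nested suspends are
 * counted in suspend_count under scst_suspend_mutex, so only the first
 * caller does the real work: it sets the SUSPENDING and SUSPENDED flags and
 * then waits twice for scst_cmd_count to drain to zero, once with SUSPENDING
 * set and once more after clearing it.
 */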
void scst_suspend_activity(void)
	mutex_lock(&scst_suspend_mutex);

	TRACE_MGMT_DBG("suspend_count %d", suspend_count);
	if (suspend_count > 1)

	set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	smp_mb__after_set_bit();

	TRACE_MGMT_DBG("Waiting for %d active commands to complete",
		atomic_read(&scst_cmd_count));
	wait_event(scst_dev_cmd_waitQ, atomic_read(&scst_cmd_count) == 0);
	TRACE_MGMT_DBG("%s", "wait_event() returned");

	clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	smp_mb__after_clear_bit();

	TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
		atomic_read(&scst_cmd_count));
	wait_event(scst_dev_cmd_waitQ, atomic_read(&scst_cmd_count) == 0);
	TRACE_MGMT_DBG("%s", "wait_event() returned");

	mutex_unlock(&scst_suspend_mutex);

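/*
 * scst_resume_activity() - undo scst_suspend_activity(). Once suspend_count
 * drops to zero, the SUSPENDED flag is cleared and the command, init and
 * management waitqueues are woken up; the first delayed management command,
 * if any, is moved back to the head of the active list.
 */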
void scst_resume_activity(void)
	struct scst_cmd_lists *l;

	mutex_lock(&scst_suspend_mutex);

	TRACE_MGMT_DBG("suspend_count %d", suspend_count);
	if (suspend_count > 0)

	clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	smp_mb__after_clear_bit();

	list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
		wake_up_all(&l->cmd_list_waitQ);
	wake_up_all(&scst_init_cmd_list_waitQ);

	spin_lock_irq(&scst_mcmd_lock);
	if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
		struct scst_mgmt_cmd *m;
		m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
			mgmt_cmd_list_entry);
		TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
		list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
	spin_unlock_irq(&scst_mcmd_lock);
	wake_up_all(&scst_mgmt_cmd_list_waitQ);

	mutex_unlock(&scst_suspend_mutex);

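/*
 * Called via the SCSI class interface (see scst_add() below) for every SCSI
 * device that appears: allocates an scst_device, binds it to the scsi_device,
 * adds it to scst_dev_list and assigns the first registered dev handler
 * whose type matches the device's SCSI type.
 */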
static int scst_register_device(struct scsi_device *scsidp)
	struct scst_device *dev;
	struct scst_dev_type *dt;

	scst_suspend_activity();
	mutex_lock(&scst_mutex);

	res = scst_alloc_device(GFP_KERNEL, &dev);

	dev->type = scsidp->type;

	dev->rq_disk = alloc_disk(1);
	if (dev->rq_disk == NULL) {
	dev->rq_disk->major = SCST_MAJOR;

	dev->scsi_dev = scsidp;

	list_add_tail(&dev->dev_list_entry, &scst_dev_list);

	list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
		if (dt->type == scsidp->type) {
			res = scst_assign_dev_handler(dev, dt);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	PRINT_INFO_PR("Attached SCSI target mid-level at "
		"scsi%d, channel %d, id %d, lun %d, type %d",
		scsidp->host->host_no, scsidp->channel, scsidp->id,
		scsidp->lun, scsidp->type);

	PRINT_ERROR_PR("Failed to attach SCSI target mid-level "
		"at scsi%d, channel %d, id %d, lun %d, type %d",
		scsidp->host->host_no, scsidp->channel, scsidp->id,
		scsidp->lun, scsidp->type);

	list_del(&dev->dev_list_entry);
	put_disk(dev->rq_disk);

	scst_free_device(dev);

static void scst_unregister_device(struct scsi_device *scsidp)
	struct scst_device *d, *dev = NULL;
	struct scst_acg_dev *acg_dev, *aa;

	scst_suspend_activity();
	mutex_lock(&scst_mutex);

	list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
		if (d->scsi_dev == scsidp) {
			TRACE_DBG("Target device %p found", dev);

	PRINT_ERROR_PR("%s", "Target device not found");

	list_del(&dev->dev_list_entry);

	list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
			dev_acg_dev_list_entry)
		scst_acg_remove_dev(acg_dev->acg, dev);

	scst_assign_dev_handler(dev, NULL);

	put_disk(dev->rq_disk);
	scst_free_device(dev);

	PRINT_INFO_PR("Detached SCSI target mid-level from scsi%d, channel %d, "
		"id %d, lun %d, type %d", scsidp->host->host_no,
		scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

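/*
 * Validates a dev handler before registration: parse() is mandatory, while
 * an absent exec() or dev_done() merely sets the corresponding exec_atomic
 * or dev_done_atomic flag.
 */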
static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
	if (dev_handler->parse == NULL) {
		PRINT_ERROR_PR("scst dev_type driver %s doesn't have a "
			"parse() method.", dev_handler->name);

	if (dev_handler->exec == NULL)
		dev_handler->exec_atomic = 1;

	if (dev_handler->dev_done == NULL)
		dev_handler->dev_done_atomic = 1;

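/*
 * scst_register_virtual_device() - register a device that is not backed by
 * a scsi_device (scsi_dev stays NULL): the device takes its type from the
 * handler, gets the caller-supplied name and a fresh virt_id, is added to
 * scst_dev_list and is immediately bound to the given handler.
 *
 * A minimal, hypothetical dev handler sketch (illustrative only; my_parse,
 * my_exec and my_dev_done are placeholders, and my_parse is the mandatory
 * parse() callback checked by scst_dev_handler_check() above):
 *
 *	static struct scst_dev_type my_vdev_devtype = {
 *		.name = "my_vdisk",
 *		.type = TYPE_DISK,
 *		.parse = my_parse,
 *		.exec = my_exec,
 *		.dev_done = my_dev_done,
 *	};
 *
 *	if (scst_register_virtual_dev_driver(&my_vdev_devtype) == 0)
 *		res = scst_register_virtual_device(&my_vdev_devtype, "disk0");
 */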
int scst_register_virtual_device(struct scst_dev_type *dev_handler,
	const char *dev_name)
	struct scst_device *dev = NULL;

	if (dev_handler == NULL) {
		PRINT_ERROR_PR("%s: valid device handler must be supplied",

	if (dev_name == NULL) {
		PRINT_ERROR_PR("%s: device name must be non-NULL", __FUNCTION__);

	res = scst_dev_handler_check(dev_handler);

	scst_suspend_activity();
	if (mutex_lock_interruptible(&scst_mutex) != 0) {

	res = scst_alloc_device(GFP_KERNEL, &dev);

	dev->type = dev_handler->type;
	dev->scsi_dev = NULL;
	dev->virt_name = dev_name;
	dev->virt_id = scst_virt_dev_last_id++;

	list_add_tail(&dev->dev_list_entry, &scst_dev_list);

	rc = scst_assign_dev_handler(dev, dev_handler);

	mutex_unlock(&scst_mutex);

	scst_resume_activity();

	PRINT_INFO_PR("Attached SCSI target mid-level to virtual "
		"device %s (id %d)", dev_name, dev->virt_id);

	PRINT_INFO_PR("Failed to attach SCSI target mid-level to "
		"virtual device %s", dev_name);

	list_del(&dev->dev_list_entry);
	scst_free_device(dev);

void scst_unregister_virtual_device(int id)
	struct scst_device *d, *dev = NULL;
	struct scst_acg_dev *acg_dev, *aa;

	scst_suspend_activity();
	mutex_lock(&scst_mutex);

	list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
		if (d->virt_id == id) {
			TRACE_DBG("Target device %p found", dev);

	PRINT_ERROR_PR("%s", "Target device not found");

	list_del(&dev->dev_list_entry);

	list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
			dev_acg_dev_list_entry)
		scst_acg_remove_dev(acg_dev->acg, dev);

	scst_assign_dev_handler(dev, NULL);

	PRINT_INFO_PR("Detached SCSI target mid-level from virtual device %s "
		"(id %d)", dev->virt_name, dev->virt_id);

	scst_free_device(dev);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

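/*
 * scst_register_dev_driver() - register a dev handler for real SCSI devices.
 * After validation (and, without the scst_exec_req_fifo patch or
 * STRICT_SERIALIZING, a check that the handler provides exec()), the handler
 * is added to scst_dev_type_list and attached to every already known
 * scsi_dev-backed device of the matching type that has no handler yet.
 */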
int scst_register_dev_driver(struct scst_dev_type *dev_type)
	struct scst_dev_type *dt;
	struct scst_device *dev;

	res = scst_dev_handler_check(dev_type);

#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && !defined(STRICT_SERIALIZING)
	if (dev_type->exec == NULL) {
		PRINT_ERROR_PR("Pass-through dev handlers (handler \"%s\") are "
			"not supported. Consider applying "
			"scst_exec_req_fifo-<kernel-version>.patch to your "
			"kernel or defining STRICT_SERIALIZING", dev_type->name);

	scst_suspend_activity();
	if (mutex_lock_interruptible(&scst_mutex) != 0) {

	list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
		if (strcmp(dt->name, dev_type->name) == 0) {
			PRINT_ERROR_PR("Device type handler \"%s\" already "

	res = scst_build_proc_dev_handler_dir_entries(dev_type);

	list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);

	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		if ((dev->scsi_dev == NULL) || (dev->handler != NULL))
		if (dev->scsi_dev->type == dev_type->type)
			scst_assign_dev_handler(dev, dev_type);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	PRINT_INFO_PR("Device handler \"%s\" for type %d registered "
		"successfully", dev_type->name, dev_type->type);

	mutex_unlock(&scst_mutex);

	scst_resume_activity();
	PRINT_ERROR_PR("Failed to register device handler \"%s\" for type %d",
		dev_type->name, dev_type->type);

void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
	struct scst_device *dev;
	struct scst_dev_type *dt;

	scst_suspend_activity();
	mutex_lock(&scst_mutex);

	list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
		if (strcmp(dt->name, dev_type->name) == 0) {

	PRINT_ERROR_PR("Dev handler \"%s\" isn't registered",

	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		if (dev->handler == dev_type) {
			scst_assign_dev_handler(dev, NULL);
			TRACE_DBG("Dev handler removed from device %p", dev);

	list_del(&dev_type->dev_type_list_entry);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	scst_cleanup_proc_dev_handler_dir_entries(dev_type);

	PRINT_INFO_PR("Device handler \"%s\" for type %d unloaded",
		dev_type->name, dev_type->type);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

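/*
 * scst_register_virtual_dev_driver()/scst_unregister_virtual_dev_driver() -
 * lighter-weight variants for virtual dev handlers: they only validate the
 * handler and create or remove its /proc entries (unless no_proc is set);
 * binding to devices happens separately via scst_register_virtual_device().
 */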
int scst_register_virtual_dev_driver(struct scst_dev_type *dev_type)
	res = scst_dev_handler_check(dev_type);

	if (!dev_type->no_proc) {
		res = scst_build_proc_dev_handler_dir_entries(dev_type);

	if (dev_type->type != -1) {
		PRINT_INFO_PR("Virtual device handler %s for type %d "
			"registered successfully", dev_type->name,
		PRINT_INFO_PR("Virtual device handler \"%s\" registered "
			"successfully", dev_type->name);

	PRINT_ERROR_PR("Failed to register virtual device handler \"%s\"",

void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
	if (!dev_type->no_proc)
		scst_cleanup_proc_dev_handler_dir_entries(dev_type);

	PRINT_INFO_PR("Device handler \"%s\" unloaded", dev_type->name);

/* Called under scst_mutex and with activity suspended */
int scst_add_dev_threads(struct scst_device *dev, int num)
	struct scst_cmd_thread_t *thr;

	list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {

	for (i = 0; i < num; i++) {
		thr = kmalloc(sizeof(*thr), GFP_KERNEL);
		PRINT_ERROR_PR("Failed to allocate thr %d", res);
		strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
		nm[ARRAY_SIZE(nm)-1] = '\0';
		thr->cmd_thread = kthread_run(scst_cmd_thread,
			&dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
		if (IS_ERR(thr->cmd_thread)) {
			res = PTR_ERR(thr->cmd_thread);
			PRINT_ERROR_PR("kthread_create() failed: %d", res);
		list_add(&thr->thread_list_entry, &dev->threads_list);

/* Called under scst_mutex and with activity suspended */
static int scst_create_dev_threads(struct scst_device *dev)
	if (dev->handler->threads_num <= 0)

	threads_num = dev->handler->threads_num;

	spin_lock_init(&dev->cmd_lists.cmd_list_lock);
	INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
	init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);

	res = scst_add_dev_threads(dev, threads_num);

	mutex_lock(&scst_suspend_mutex);
	list_add_tail(&dev->cmd_lists.lists_list_entry,
		&scst_cmd_lists_list);
	mutex_unlock(&scst_suspend_mutex);

	dev->p_cmd_lists = &dev->cmd_lists;

/* Called under scst_mutex and with activity suspended */
void scst_del_dev_threads(struct scst_device *dev, int num)
	struct scst_cmd_thread_t *ct, *tmp;

	list_for_each_entry_safe(ct, tmp, &dev->threads_list,
			thread_list_entry) {
		int rc = kthread_stop(ct->cmd_thread);
		TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
		list_del(&ct->thread_list_entry);
		if ((num > 0) && (++i >= num))

/* Called under scst_mutex and with activity suspended */
static void scst_stop_dev_threads(struct scst_device *dev)
	if (list_empty(&dev->threads_list))

	scst_del_dev_threads(dev, -1);

	if (dev->p_cmd_lists == &dev->cmd_lists) {
		mutex_lock(&scst_suspend_mutex);
		list_del(&dev->cmd_lists.lists_list_entry);
		mutex_unlock(&scst_suspend_mutex);

/* The activity is supposed to be suspended and scst_mutex held */
int scst_assign_dev_handler(struct scst_device *dev,
	struct scst_dev_type *handler)
	struct scst_tgt_dev *tgt_dev;
	LIST_HEAD(attached_tgt_devs);

	if (dev->handler == handler)

	if (dev->handler && dev->handler->detach_tgt) {
		list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			TRACE_DBG("Calling dev handler's detach_tgt(%p)",
			dev->handler->detach_tgt(tgt_dev);
			TRACE_DBG("%s", "Dev handler's detach_tgt() returned");

	if (dev->handler && dev->handler->detach) {
		TRACE_DBG("%s", "Calling dev handler's detach()");
		dev->handler->detach(dev);
		TRACE_DBG("%s", "Old handler's detach() returned");

	scst_stop_dev_threads(dev);

	dev->handler = handler;

	res = scst_create_dev_threads(dev);

	if (handler && handler->attach) {
		TRACE_DBG("Calling new dev handler's attach(%p)", dev);
		res = handler->attach(dev);
		TRACE_DBG("New dev handler's attach() returned %d", res);
		PRINT_ERROR_PR("New device handler's %s attach() "
			"failed: %d", handler->name, res);

	if (handler && handler->attach_tgt) {
		list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
				dev_tgt_dev_list_entry) {
			TRACE_DBG("Calling dev handler's attach_tgt(%p)",
			res = handler->attach_tgt(tgt_dev);
			TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
			PRINT_ERROR_PR("Device handler's %s attach_tgt() "
				"failed: %d", handler->name, res);
			goto out_err_detach_tgt;
			list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
				&attached_tgt_devs);

	scst_stop_dev_threads(dev);

	dev->handler = NULL;

	TRACE_EXIT_RES(res);

out_err_detach_tgt:
	if (handler && handler->detach_tgt) {
		list_for_each_entry(tgt_dev, &attached_tgt_devs,
				extra_tgt_dev_list_entry)
			TRACE_DBG("Calling handler's detach_tgt(%p)",
			handler->detach_tgt(tgt_dev);
			TRACE_DBG("%s", "Handler's detach_tgt() returned");

	if (handler && handler->detach) {
		TRACE_DBG("%s", "Calling handler's detach()");
		handler->detach(dev);
		TRACE_DBG("%s", "Handler's detach() returned");

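/*
 * The global command threads are tracked in scst_threads_info; the counter
 * and cmd_threads_list below are serialized by cmd_threads_mutex.
 */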
int scst_cmd_threads_count(void)
	/*
	 * Just to narrow the race window in which the caller could read a
	 * value that has just been changed.
	 */
	mutex_lock(&scst_threads_info.cmd_threads_mutex);
	i = scst_threads_info.nr_cmd_threads;
	mutex_unlock(&scst_threads_info.cmd_threads_mutex);

static void scst_threads_info_init(void)
	memset(&scst_threads_info, 0, sizeof(scst_threads_info));
	mutex_init(&scst_threads_info.cmd_threads_mutex);
	INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);

/* scst_threads_info.cmd_threads_mutex is supposed to be held */
void __scst_del_cmd_threads(int num)
	struct scst_cmd_thread_t *ct, *tmp;

	i = scst_threads_info.nr_cmd_threads;
	if (num <= 0 || num > i) {
		PRINT_ERROR_PR("cannot delete %d cmd threads out of %d", num, i);

	list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
			thread_list_entry) {
		res = kthread_stop(ct->cmd_thread);
		TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
		list_del(&ct->thread_list_entry);
		scst_threads_info.nr_cmd_threads--;

/* scst_threads_info.cmd_threads_mutex is supposed to be held */
int __scst_add_cmd_threads(int num)
	static int scst_thread_num = 0;

	for (i = 0; i < num; i++) {
		struct scst_cmd_thread_t *thr;

		thr = kmalloc(sizeof(*thr), GFP_KERNEL);
		PRINT_ERROR_PR("Failed to allocate thr %d", res);
		thr->cmd_thread = kthread_run(scst_cmd_thread,
			&scst_main_cmd_lists, "scsi_tgt%d",
		if (IS_ERR(thr->cmd_thread)) {
			res = PTR_ERR(thr->cmd_thread);
			PRINT_ERROR_PR("kthread_create() failed: %d", res);
		list_add(&thr->thread_list_entry,
			&scst_threads_info.cmd_threads_list);
		scst_threads_info.nr_cmd_threads++;

	TRACE_EXIT_RES(res);

	__scst_del_cmd_threads(i - 1);

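/*
 * Public wrappers around __scst_add_cmd_threads()/__scst_del_cmd_threads()
 * that take cmd_threads_mutex themselves; both are exported (see the
 * EXPORT_SYMBOL block at the end of this file).
 */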
int scst_add_cmd_threads(int num)
	mutex_lock(&scst_threads_info.cmd_threads_mutex);
	res = __scst_add_cmd_threads(num);
	mutex_unlock(&scst_threads_info.cmd_threads_mutex);

	TRACE_EXIT_RES(res);

void scst_del_cmd_threads(int num)
	mutex_lock(&scst_threads_info.cmd_threads_mutex);
	__scst_del_cmd_threads(num);
	mutex_unlock(&scst_threads_info.cmd_threads_mutex);

static void scst_stop_all_threads(void)
	mutex_lock(&scst_threads_info.cmd_threads_mutex);
	__scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
	if (scst_threads_info.mgmt_cmd_thread)
		kthread_stop(scst_threads_info.mgmt_cmd_thread);
	if (scst_threads_info.mgmt_thread)
		kthread_stop(scst_threads_info.mgmt_thread);
	if (scst_threads_info.init_cmd_thread)
		kthread_stop(scst_threads_info.init_cmd_thread);
	mutex_unlock(&scst_threads_info.cmd_threads_mutex);

static int scst_start_all_threads(int num)
	mutex_lock(&scst_threads_info.cmd_threads_mutex);
	res = __scst_add_cmd_threads(num);

	scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
		NULL, "scsi_tgt_init");
	if (IS_ERR(scst_threads_info.init_cmd_thread)) {
		res = PTR_ERR(scst_threads_info.init_cmd_thread);
		PRINT_ERROR_PR("kthread_create() for init cmd failed: %d", res);
		scst_threads_info.init_cmd_thread = NULL;

	scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
		NULL, "scsi_tgt_mc");
	if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
		res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
		PRINT_ERROR_PR("kthread_create() for mcmd failed: %d", res);
		scst_threads_info.mgmt_cmd_thread = NULL;

	scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
		NULL, "scsi_tgt_mgmt");
	if (IS_ERR(scst_threads_info.mgmt_thread)) {
		res = PTR_ERR(scst_threads_info.mgmt_thread);
		PRINT_ERROR_PR("kthread_create() for mgmt failed: %d", res);
		scst_threads_info.mgmt_thread = NULL;

	mutex_unlock(&scst_threads_info.cmd_threads_mutex);
	TRACE_EXIT_RES(res);

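/*
 * Hook into the SCSI core via a class interface: scst_add()/scst_remove()
 * below are called for every SCSI device that appears or disappears and
 * forward to scst_register_device()/scst_unregister_device(). The two
 * signatures cover kernels before and after 2.6.15.
 */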
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
static int scst_add(struct class_device *cdev)
#else
static int scst_add(struct class_device *cdev, struct class_interface *intf)
#endif
	struct scsi_device *scsidp;

	scsidp = to_scsi_device(cdev->dev);
	res = scst_register_device(scsidp);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
static void scst_remove(struct class_device *cdev)
#else
static void scst_remove(struct class_device *cdev, struct class_interface *intf)
#endif
	struct scsi_device *scsidp;

	scsidp = to_scsi_device(cdev->dev);
	scst_unregister_device(scsidp);

static struct class_interface scst_interface = {
	.add = scst_add,
	.remove = scst_remove,
};

static int __init init_scst(void)
	struct scst_cmd *cmd;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	struct scsi_request *req;
	BUILD_BUG_ON(sizeof(cmd->sense_buffer) !=
		sizeof(req->sr_sense_buffer));

	struct scsi_sense_hdr *shdr;
	BUILD_BUG_ON((sizeof(cmd->sense_buffer) < sizeof(*shdr)) &&
		(sizeof(cmd->sense_buffer) >= SCST_SENSE_BUFFERSIZE));

	struct scst_tgt_dev *t;
	BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
	BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));

	BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
	BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
	BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
	BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);

	spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
	INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
	init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
	list_add_tail(&scst_main_cmd_lists.lists_list_entry,
		&scst_cmd_lists_list);

	scst_num_cpus = num_online_cpus();

	/* ToDo: register_cpu_notifier() */

	if (scst_threads == 0)
		scst_threads = scst_num_cpus;

	if (scst_threads < scst_num_cpus) {
		PRINT_ERROR_PR("%s", "scst_threads cannot be less than "
		scst_threads = scst_num_cpus;

	scst_threads_info_init();

#define INIT_CACHEP(p, s, t, o) do { \
	p = kmem_cache_create(s, sizeof(struct t), 0, \
		SCST_SLAB_FLAGS, NULL, NULL); \
	TRACE_MEM("Slab create: %s at %p size %zd", s, p, \
		sizeof(struct t)); \
	if (p == NULL) { res = -ENOMEM; goto o; } \
} while (0)

	INIT_CACHEP(scst_mgmt_cachep, SCST_MGMT_CMD_CACHE_STRING,
		scst_mgmt_cmd, out);
	INIT_CACHEP(scst_ua_cachep, SCST_UA_CACHE_STRING,
		scst_tgt_dev_UA, out_destroy_mgmt_cache);
	INIT_CACHEP(scst_cmd_cachep, SCST_CMD_CACHE_STRING,
		scst_cmd, out_destroy_ua_cache);
	INIT_CACHEP(scst_sess_cachep, SCST_SESSION_CACHE_STRING,
		scst_session, out_destroy_cmd_cache);
	INIT_CACHEP(scst_tgtd_cachep, SCST_TGT_DEV_CACHE_STRING,
		scst_tgt_dev, out_destroy_sess_cache);
	INIT_CACHEP(scst_acgd_cachep, SCST_ACG_DEV_CACHE_STRING,
		scst_acg_dev, out_destroy_tgt_cache);

	scst_mgmt_mempool = mempool_create(10, mempool_alloc_slab,
		mempool_free_slab, scst_mgmt_cachep);
	if (scst_mgmt_mempool == NULL) {
		goto out_destroy_acg_cache;

	scst_ua_mempool = mempool_create(25, mempool_alloc_slab,
		mempool_free_slab, scst_ua_cachep);
	if (scst_ua_mempool == NULL) {
		goto out_destroy_mgmt_mempool;

	if (scst_max_cmd_mem == 0) {
#if BITS_PER_LONG == 32
		scst_max_cmd_mem = min(((uint64_t)si.totalram << PAGE_SHIFT) >> 2,
		scst_max_cmd_mem = (si.totalram << PAGE_SHIFT) >> 2;

	scst_max_cmd_mem <<= 20;

	res = scst_sgv_pools_init(scst_max_cmd_mem, 0);
	goto out_destroy_ua_mempool;

	scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
	if (scst_default_acg == NULL) {
		goto out_destroy_sgv_pool;

	res = scsi_register_interface(&scst_interface);

	scst_scsi_op_list_init();

	for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
		spin_lock_init(&scst_tasklets[i].tasklet_lock);
		INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
		tasklet_init(&scst_tasklets[i].tasklet, (void *)scst_cmd_tasklet,
			(unsigned long)&scst_tasklets[i]);

	TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,

	res = scst_start_all_threads(scst_threads);
	goto out_thread_free;

	res = scst_proc_init_module();
	goto out_thread_free;

	PRINT_INFO_PR("SCST version %s loaded successfully (max mem for "
		"commands %ld MB)", SCST_VERSION_STRING, scst_max_cmd_mem >> 20);

	TRACE_EXIT_RES(res);

	scst_stop_all_threads();

	scsi_unregister_interface(&scst_interface);

	scst_destroy_acg(scst_default_acg);

out_destroy_sgv_pool:
	scst_sgv_pools_deinit();

out_destroy_ua_mempool:
	mempool_destroy(scst_ua_mempool);

out_destroy_mgmt_mempool:
	mempool_destroy(scst_mgmt_mempool);

out_destroy_acg_cache:
	kmem_cache_destroy(scst_acgd_cachep);

out_destroy_tgt_cache:
	kmem_cache_destroy(scst_tgtd_cachep);

out_destroy_sess_cache:
	kmem_cache_destroy(scst_sess_cachep);

out_destroy_cmd_cache:
	kmem_cache_destroy(scst_cmd_cachep);

out_destroy_ua_cache:
	kmem_cache_destroy(scst_ua_cachep);

out_destroy_mgmt_cache:
	kmem_cache_destroy(scst_mgmt_cachep);

static void __exit exit_scst(void)
#ifdef CONFIG_LOCKDEP
	static /* To hide lockdep's warning about a non-static key */
#endif
	DECLARE_MUTEX_LOCKED(shm);

	/* ToDo: unregister_cpu_notifier() */

	scst_proc_cleanup_module();

	scst_stop_all_threads();

	scsi_unregister_interface(&scst_interface);
	scst_destroy_acg(scst_default_acg);

	scst_sgv_pools_deinit();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#define DEINIT_CACHEP(p, s) do { \
	if (kmem_cache_destroy(p)) { \
		PRINT_INFO_PR("kmem_cache_destroy of %s returned an "\

#define DEINIT_CACHEP(p, s) do { \
	kmem_cache_destroy(p); \

	mempool_destroy(scst_mgmt_mempool);
	mempool_destroy(scst_ua_mempool);

	DEINIT_CACHEP(scst_mgmt_cachep, SCST_MGMT_CMD_CACHE_STRING);
	DEINIT_CACHEP(scst_ua_cachep, SCST_UA_CACHE_STRING);
	DEINIT_CACHEP(scst_cmd_cachep, SCST_CMD_CACHE_STRING);
	DEINIT_CACHEP(scst_sess_cachep, SCST_SESSION_CACHE_STRING);
	DEINIT_CACHEP(scst_tgtd_cachep, SCST_TGT_DEV_CACHE_STRING);
	DEINIT_CACHEP(scst_acgd_cachep, SCST_ACG_DEV_CACHE_STRING);

	PRINT_INFO_PR("%s", "SCST unloaded");

/*
 * Device Handler Side (i.e. scst_vdisk)
 */
EXPORT_SYMBOL(scst_register_dev_driver);
EXPORT_SYMBOL(scst_unregister_dev_driver);
EXPORT_SYMBOL(scst_register);
EXPORT_SYMBOL(scst_unregister);

EXPORT_SYMBOL(scst_register_virtual_device);
EXPORT_SYMBOL(scst_unregister_virtual_device);
EXPORT_SYMBOL(scst_register_virtual_dev_driver);
EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);

EXPORT_SYMBOL(scst_set_busy);
EXPORT_SYMBOL(scst_set_cmd_error_status);
EXPORT_SYMBOL(scst_set_cmd_error);
EXPORT_SYMBOL(scst_set_resp_data_len);

EXPORT_SYMBOL(scst_process_active_cmd);

/*
 * Target Driver Side (i.e. HBA)
 */
EXPORT_SYMBOL(scst_register_session);
EXPORT_SYMBOL(scst_unregister_session);

EXPORT_SYMBOL(scst_register_target_template);
EXPORT_SYMBOL(scst_unregister_target_template);

EXPORT_SYMBOL(scst_cmd_init_done);
EXPORT_SYMBOL(scst_tgt_cmd_done);
EXPORT_SYMBOL(scst_restart_cmd);
EXPORT_SYMBOL(scst_rx_cmd);
EXPORT_SYMBOL(scst_rx_data);
EXPORT_SYMBOL(scst_rx_mgmt_fn);

EXPORT_SYMBOL(scst_find_cmd);
EXPORT_SYMBOL(scst_find_cmd_by_tag);

EXPORT_SYMBOL(scst_suspend_activity);
EXPORT_SYMBOL(scst_resume_activity);

EXPORT_SYMBOL(scst_add_cmd_threads);
EXPORT_SYMBOL(scst_del_cmd_threads);

#if defined(DEBUG) || defined(TRACING)
EXPORT_SYMBOL(scst_proc_log_entry_read);
EXPORT_SYMBOL(scst_proc_log_entry_write);
#endif

EXPORT_SYMBOL(scst_create_proc_entry);
EXPORT_SYMBOL(scst_single_seq_open);

EXPORT_SYMBOL(__scst_get_buf);
EXPORT_SYMBOL(scst_get);
EXPORT_SYMBOL(scst_put);

EXPORT_SYMBOL(scst_alloc);
EXPORT_SYMBOL(scst_free);

EXPORT_SYMBOL(scst_check_local_events);

/* Tgt_dev's thread-local storage */
EXPORT_SYMBOL(scst_add_thr_data);
EXPORT_SYMBOL(scst_del_all_thr_data);
EXPORT_SYMBOL(scst_dev_del_all_thr_data);
EXPORT_SYMBOL(scst_find_thr_data);

/* SGV pool routines */
EXPORT_SYMBOL(sgv_pool_create);
EXPORT_SYMBOL(sgv_pool_destroy);
EXPORT_SYMBOL(sgv_pool_set_allocator);
EXPORT_SYMBOL(sgv_pool_alloc);
EXPORT_SYMBOL(sgv_pool_free);
EXPORT_SYMBOL(sgv_get_priv);

/* Generic parse() routines */
EXPORT_SYMBOL(scst_calc_block_shift);
EXPORT_SYMBOL(scst_sbc_generic_parse);
EXPORT_SYMBOL(scst_cdrom_generic_parse);
EXPORT_SYMBOL(scst_modisk_generic_parse);
EXPORT_SYMBOL(scst_tape_generic_parse);
EXPORT_SYMBOL(scst_changer_generic_parse);
EXPORT_SYMBOL(scst_processor_generic_parse);
EXPORT_SYMBOL(scst_raid_generic_parse);

/* Generic dev_done() routines */
EXPORT_SYMBOL(scst_block_generic_dev_done);
EXPORT_SYMBOL(scst_tape_generic_dev_done);

EXPORT_SYMBOL(scst_get_cdb_info);
EXPORT_SYMBOL(scst_cmd_get_tgt_priv_lock);
EXPORT_SYMBOL(scst_cmd_set_tgt_priv_lock);

EXPORT_SYMBOL(scst_random);

module_init(init_scst);
module_exit(exit_scst);

MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCSI target core");
MODULE_VERSION(SCST_VERSION_STRING);