4 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2009 ID7 Ltd.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
33 #include "scst_priv.h"
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not\
38 recommended for performance reasons. Consider changing VMSPLIT\
39 option or use a 64-bit configuration instead. See README file for\
43 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
44 #if !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version> was not applied on\
46 your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined.\
47 Pass-through dev handlers will not work."
51 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
52 #if !defined(SCST_IO_CONTEXT)
53 #warning "Patch io_context-<kernel-version> was not applied\
54 on your kernel. SCST will be working with not the best performance."
57 #warning "There is no patch io_context-<kernel-version>\
58 for your kernel version. For performance reasons it is strongly recommended\
59 to upgrade your kernel to version >= 2.6.27.x."
63 ** SCST global variables. They are all uninitialized to have their layout in
64 ** memory be exactly as specified. Otherwise the compiler puts zero-initialized
65 ** variables separately from nonzero-initialized ones.
69 * All targets, devices and dev_types management is done under this mutex.
71 * It must NOT be used in any works (schedule_work(), etc.), because
72 * otherwise a deadlock (double lock, actually) is possible, e.g., with
73 * scst_user detach_tgt(), which is called under scst_mutex and calls
74 * flush_scheduled_work().
/* Big management mutex: serializes all target/device/dev-handler changes. */
76 struct mutex scst_mutex;
78 /* All 3 protected by scst_mutex */
79 struct list_head scst_template_list;
80 struct list_head scst_dev_list;
81 struct list_head scst_dev_type_list;
83 spinlock_t scst_main_lock;
/*
 * Slab caches and matching mempools for SCST's fixed-size objects
 * (mgmt cmds, mgmt stubs, UAs, sense buffers, AENs). The caches are
 * file-local; the mempools are shared with the rest of the core.
 */
85 static struct kmem_cache *scst_mgmt_cachep;
86 mempool_t *scst_mgmt_mempool;
87 static struct kmem_cache *scst_mgmt_stub_cachep;
88 mempool_t *scst_mgmt_stub_mempool;
89 static struct kmem_cache *scst_ua_cachep;
90 mempool_t *scst_ua_mempool;
91 static struct kmem_cache *scst_sense_cachep;
92 mempool_t *scst_sense_mempool;
93 static struct kmem_cache *scst_aen_cachep;
94 mempool_t *scst_aen_mempool;
95 struct kmem_cache *scst_tgtd_cachep;
96 struct kmem_cache *scst_sess_cachep;
97 struct kmem_cache *scst_acgd_cachep;
/* Access control groups; scst_default_acg is the fallback group. */
99 struct list_head scst_acg_list;
100 struct scst_acg *scst_default_acg;
/* Init-cmd queue: commands waiting for session/device initialization. */
102 spinlock_t scst_init_lock;
103 wait_queue_head_t scst_init_cmd_list_waitQ;
104 struct list_head scst_init_cmd_list;
105 unsigned int scst_init_poll_cnt;
107 struct kmem_cache *scst_cmd_cachep;
109 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
110 unsigned long scst_trace_flag;
113 unsigned long scst_flags;
/* Count of commands in flight; suspend logic waits for it to reach 0. */
114 atomic_t scst_cmd_count;
116 struct scst_cmd_lists scst_main_cmd_lists;
118 struct scst_tasklet scst_tasklets[NR_CPUS];
/* Task-management command queues, protected by scst_mcmd_lock. */
120 spinlock_t scst_mcmd_lock;
121 struct list_head scst_active_mgmt_cmd_list;
122 struct list_head scst_delayed_mgmt_cmd_list;
123 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
/* Session init/shutdown queues, protected by scst_mgmt_lock. */
125 wait_queue_head_t scst_mgmt_waitQ;
126 spinlock_t scst_mgmt_lock;
127 struct list_head scst_sess_init_list;
128 struct list_head scst_sess_shut_list;
/* Woken when scst_cmd_count drops, see scst_susp_wait(). */
130 wait_queue_head_t scst_dev_cmd_waitQ;
132 static struct mutex scst_suspend_mutex;
133 /* protected by scst_suspend_mutex */
134 static struct list_head scst_cmd_lists_list;
/* Global processing thread pool bookkeeping. */
136 static int scst_threads;
137 struct mutex scst_global_threads_mutex;
138 u32 scst_nr_global_threads;
139 static struct list_head scst_global_threads_list;
140 static struct task_struct *scst_init_cmd_thread;
141 static struct task_struct *scst_mgmt_thread;
142 static struct task_struct *scst_mgmt_cmd_thread;
/* Nesting depth of scst_suspend_activity() calls. */
144 static int suspend_count;
146 static int scst_virt_dev_last_id; /* protected by scst_mutex */
/* Memory limits (MB) for outstanding SCSI commands, settable at load time. */
148 static unsigned int scst_max_cmd_mem;
149 unsigned int scst_max_dev_cmd_mem;
151 module_param_named(scst_threads, scst_threads, int, 0);
152 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
154 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
155 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
156 "all SCSI commands of all devices at any given time in MB");
158 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
159 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
160 "by all SCSI commands of a device at any given time in MB");
/* "null" handler assigned to devices that have no real handler attached.
 * NOTE(review): initializer body elided in this copy. */
162 struct scst_dev_type scst_null_devtype = {
166 static void __scst_resume_activity(void);
/*
 * Register a target driver (template) with the SCST core.
 * Validates the interface version and mandatory callbacks, creates the
 * /proc entries, calls the driver's detect() and links the template on
 * scst_template_list. Returns 0 on success, negative error otherwise.
 * NOTE(review): several error-path lines (returns/labels) are elided in
 * this copy of the file.
 */
168 int __scst_register_target_template(struct scst_tgt_template *vtt,
172 struct scst_tgt_template *t;
/* Local mutex serializing concurrent template registrations. */
173 static DEFINE_MUTEX(m);
177 INIT_LIST_HEAD(&vtt->tgt_list);
/* Reject drivers built against a different SCST interface version. */
179 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
180 PRINT_ERROR("Incorrect version of target %s", vtt->name);
/* detect(), release() and xmit_response() are mandatory callbacks. */
186 PRINT_ERROR("Target driver %s doesn't have a "
187 "detect() method.", vtt->name);
193 PRINT_ERROR("Target driver %s doesn't have a "
194 "release() method.", vtt->name);
199 if (!vtt->xmit_response) {
200 PRINT_ERROR("Target driver %s doesn't have a "
201 "xmit_response() method.", vtt->name);
206 if (vtt->threads_num < 0) {
207 PRINT_ERROR("Wrong threads_num value %d for "
208 "target \"%s\"", vtt->threads_num,
214 if (!vtt->no_proc_entry) {
215 res = scst_build_proc_target_dir_entries(vtt);
/* No rdy_to_xfer() means data transfers may be flagged as atomic-capable. */
220 if (vtt->rdy_to_xfer == NULL)
221 vtt->rdy_to_xfer_atomic = 1;
223 if (mutex_lock_interruptible(&m) != 0)
226 if (mutex_lock_interruptible(&scst_mutex) != 0)
/* Reject duplicate registration by name. */
228 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
229 if (strcmp(t->name, vtt->name) == 0) {
230 PRINT_ERROR("Target driver %s already registered",
232 mutex_unlock(&scst_mutex);
236 mutex_unlock(&scst_mutex);
/* detect() is called without scst_mutex held, it may sleep/scan. */
238 TRACE_DBG("%s", "Calling target driver's detect()");
239 res = vtt->detect(vtt);
240 TRACE_DBG("Target driver's detect() returned %d", res);
242 PRINT_ERROR("%s", "The detect() routine failed");
247 mutex_lock(&scst_mutex);
248 list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
249 mutex_unlock(&scst_mutex);
253 PRINT_INFO("Target template %s registered successfully", vtt->name);
/* Error path: undo proc entries created above. */
262 scst_cleanup_proc_target_dir_entries(vtt);
268 PRINT_ERROR("Failed to register target template %s", vtt->name);
271 EXPORT_SYMBOL(__scst_register_target_template);
/*
 * Unregister a previously registered target template: unregister every
 * target still on its tgt_list, unlink the template and remove its /proc
 * entries. scst_mutex is dropped around scst_unregister() because that
 * call suspends activity / sleeps.
 */
273 void scst_unregister_target_template(struct scst_tgt_template *vtt)
275 struct scst_tgt *tgt;
276 struct scst_tgt_template *t;
281 mutex_lock(&scst_mutex);
/* Verify the template is actually registered. */
283 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
284 if (strcmp(t->name, vtt->name) == 0) {
290 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
/* Drop/retake scst_mutex around each target unregistration. */
295 list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
296 mutex_unlock(&scst_mutex);
297 scst_unregister(tgt);
298 mutex_lock(&scst_mutex);
301 list_del(&vtt->scst_template_list_entry);
303 PRINT_INFO("Target template %s unregistered successfully", vtt->name);
306 mutex_unlock(&scst_mutex);
308 scst_cleanup_proc_target_dir_entries(vtt);
313 EXPORT_SYMBOL(scst_unregister_target_template);
/*
 * Register one target instance for template vtt. Allocates and initializes
 * the scst_tgt, builds its default ACG name and /proc entries, and links it
 * on the template's tgt_list under suspended activity + scst_mutex.
 * Returns the new target, or NULL-ish error path on failure.
 * NOTE(review): error-path labels/returns are elided in this copy.
 */
315 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
316 const char *target_name)
318 struct scst_tgt *tgt;
323 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
325 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
330 INIT_LIST_HEAD(&tgt->sess_list);
331 init_waitqueue_head(&tgt->unreg_waitQ);
333 tgt->sg_tablesize = vtt->sg_tablesize;
334 spin_lock_init(&tgt->tgt_lock);
335 INIT_LIST_HEAD(&tgt->retry_cmd_list);
336 atomic_set(&tgt->finished_cmds, 0);
/* Retry timer re-queues commands rejected with BUSY/QUEUE FULL. */
337 init_timer(&tgt->retry_timer);
338 tgt->retry_timer.data = (unsigned long)tgt;
339 tgt->retry_timer.function = scst_tgt_retry_timer_fn;
/* Registration is done with all command activity suspended. */
341 rc = scst_suspend_activity(true);
343 goto out_free_tgt_err;
345 if (mutex_lock_interruptible(&scst_mutex) != 0) {
347 goto out_resume_free;
/* Default security group name: "<SCST_DEFAULT_ACG_NAME>_<target_name>". */
350 if (target_name != NULL) {
351 int len = strlen(target_name) + 1 +
352 strlen(SCST_DEFAULT_ACG_NAME) + 1;
354 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
355 if (tgt->default_group_name == NULL) {
356 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
357 "group name failed");
359 goto out_unlock_resume;
361 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
365 rc = scst_build_proc_target_entries(tgt);
369 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
371 mutex_unlock(&scst_mutex);
372 scst_resume_activity();
374 PRINT_INFO("Target %s (%p) for template %s registered successfully",
375 target_name, tgt, vtt->name);
/* Error unwinding: free name, unlock, resume, report. */
382 kfree(tgt->default_group_name);
385 mutex_unlock(&scst_mutex);
388 scst_resume_activity();
395 PRINT_ERROR("Failed to register target %s for template %s (error %d)",
396 target_name, vtt->name, rc);
399 EXPORT_SYMBOL(scst_register);
401 static inline int test_sess_list(struct scst_tgt *tgt)
404 mutex_lock(&scst_mutex);
405 res = list_empty(&tgt->sess_list);
406 mutex_unlock(&scst_mutex);
/*
 * Unregister a target: call the driver's release(), force-shutdown any
 * sessions still in READY phase, wait until all sessions are gone, then
 * unlink the target and free its resources under suspended activity.
 */
410 void scst_unregister(struct scst_tgt *tgt)
412 struct scst_session *sess;
413 struct scst_tgt_template *vtt = tgt->tgtt;
417 TRACE_DBG("%s", "Calling target driver's release()");
418 tgt->tgtt->release(tgt);
419 TRACE_DBG("%s", "Target driver's release() returned");
421 mutex_lock(&scst_mutex);
423 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
424 if (sess->shut_phase == SCST_SESS_SPH_READY) {
426 * Sometimes it's hard for target driver to track all
427 * its sessions (see scst_local, for example), so let's
430 mutex_unlock(&scst_mutex);
/* Drop scst_mutex: scst_unregister_session() sleeps. */
431 scst_unregister_session(sess, 0, NULL);
432 mutex_lock(&scst_mutex);
436 mutex_unlock(&scst_mutex);
/* Block until every session disappeared from tgt->sess_list. */
438 TRACE_DBG("%s", "Waiting for sessions shutdown");
439 wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
440 TRACE_DBG("%s", "wait_event() returned");
442 scst_suspend_activity(false);
443 mutex_lock(&scst_mutex);
445 list_del(&tgt->tgt_list_entry);
447 scst_cleanup_proc_target_entries(tgt);
449 kfree(tgt->default_group_name);
451 mutex_unlock(&scst_mutex);
452 scst_resume_activity();
/* Make sure the retry timer cannot fire on freed memory. */
454 del_timer_sync(&tgt->retry_timer);
456 PRINT_INFO("Target %p for template %s unregistered successfully",
464 EXPORT_SYMBOL(scst_unregister);
/*
 * Wait until scst_cmd_count drops to zero. In interruptible mode waits
 * with a timeout and rolls the suspend back via __scst_resume_activity()
 * on interruption/timeout; otherwise waits unconditionally.
 * NOTE(review): the branch structure and return statements between these
 * lines are elided in this copy.
 */
466 static int scst_susp_wait(bool interruptible)
473 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
474 (atomic_read(&scst_cmd_count) == 0),
475 SCST_SUSPENDING_TIMEOUT);
/* Interrupted/timed out: undo the partial suspend before returning. */
477 __scst_resume_activity();
483 wait_event(scst_dev_cmd_waitQ,
484 atomic_read(&scst_cmd_count) == 0);
486 TRACE_MGMT_DBG("wait_event() returned %d", res);
/*
 * Suspend all SCST command processing. Nestable (suspend_count); sets the
 * SUSPENDING/SUSPENDED flags and waits for all outstanding commands to
 * drain. With interruptible=true the caller may be interrupted by a
 * signal, in which case a negative error is returned.
 */
492 int scst_suspend_activity(bool interruptible)
500 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
505 mutex_lock(&scst_suspend_mutex);
507 TRACE_MGMT_DBG("suspend_count %d", suspend_count);
/* Nested suspend: activity already stopped, nothing more to do. */
509 if (suspend_count > 1)
512 set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
513 set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
515 * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
516 * ordered with scst_cmd_count. Otherwise lockless logic in
517 * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
519 smp_mb__after_set_bit();
522 * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
523 * information about scst_user behavior.
525 * ToDo: make the global suspending unneeded (switch to per-device
526 * reference counting? That would mean to switch off from lockless
527 * implementation of scst_translate_lun().. )
530 if (atomic_read(&scst_cmd_count) != 0) {
531 PRINT_INFO("Waiting for %d active commands to complete... This "
532 "might take few minutes for disks or few hours for "
533 "tapes, if you use long executed commands, like "
534 "REWIND or FORMAT. In case, if you have a hung user "
535 "space device (i.e. made using scst_user module) not "
536 "responding to any commands, if might take virtually "
537 "forever until the corresponding user space "
538 "program recovers and starts responding or gets "
539 "killed.", atomic_read(&scst_cmd_count));
/* First drain: wait for commands issued before SUSPENDING was visible. */
543 res = scst_susp_wait(interruptible);
547 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
548 /* See comment about smp_mb() above */
549 smp_mb__after_clear_bit();
/* Second drain: catch stragglers that raced with the flag transition. */
551 TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
552 atomic_read(&scst_cmd_count));
554 res = scst_susp_wait(interruptible);
559 PRINT_INFO("%s", "All active commands completed");
562 mutex_unlock(&scst_suspend_mutex);
/* Error path: clear SUSPENDING so the system is not left half-suspended. */
569 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
570 /* See comment about smp_mb() above */
571 smp_mb__after_clear_bit();
574 EXPORT_SYMBOL(scst_suspend_activity);
/*
 * Resume command processing (caller holds scst_suspend_mutex). Decrements
 * the nesting count; when it reaches zero clears SCST_FLAG_SUSPENDED and
 * wakes every per-device command list, the init list, and requeues one
 * delayed management command, waking the mgmt-cmd thread.
 */
576 static void __scst_resume_activity(void)
578 struct scst_cmd_lists *l;
583 TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
/* Still nested-suspended by another caller: keep activity stopped. */
584 if (suspend_count > 0)
587 clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
589 * The barrier is needed to make sure all woken up threads see the
590 * cleared flag. Not sure if it's really needed, but let's be safe.
592 smp_mb__after_clear_bit();
594 list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
595 wake_up_all(&l->cmd_list_waitQ);
597 wake_up_all(&scst_init_cmd_list_waitQ);
/* Move the first delayed mgmt cmd back to the active list, if any. */
599 spin_lock_irq(&scst_mcmd_lock);
600 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
601 struct scst_mgmt_cmd *m;
602 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
603 mgmt_cmd_list_entry);
604 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
606 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
608 spin_unlock_irq(&scst_mcmd_lock);
609 wake_up_all(&scst_mgmt_cmd_list_waitQ);
616 void scst_resume_activity(void)
620 mutex_lock(&scst_suspend_mutex);
621 __scst_resume_activity();
622 mutex_unlock(&scst_suspend_mutex);
627 EXPORT_SYMBOL(scst_resume_activity);
629 static int scst_register_device(struct scsi_device *scsidp)
632 struct scst_device *dev;
633 struct scst_dev_type *dt;
637 res = scst_suspend_activity(true);
641 if (mutex_lock_interruptible(&scst_mutex) != 0) {
646 res = scst_alloc_device(GFP_KERNEL, &dev);
650 dev->type = scsidp->type;
652 dev->rq_disk = alloc_disk(1);
653 if (dev->rq_disk == NULL) {
657 dev->rq_disk->major = SCST_MAJOR;
659 dev->scsi_dev = scsidp;
661 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
663 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
664 if (dt->type == scsidp->type) {
665 res = scst_assign_dev_handler(dev, dt);
673 mutex_unlock(&scst_mutex);
676 scst_resume_activity();
680 PRINT_INFO("Attached to scsi%d, channel %d, id %d, lun %d, "
681 "type %d", scsidp->host->host_no, scsidp->channel,
682 scsidp->id, scsidp->lun, scsidp->type);
684 PRINT_ERROR("Failed to to scsi%d, channel %d, id %d, lun %d, "
685 "type %d", scsidp->host->host_no, scsidp->channel,
686 scsidp->id, scsidp->lun, scsidp->type);
693 list_del(&dev->dev_list_entry);
694 put_disk(dev->rq_disk);
697 scst_free_device(dev);
/*
 * Detach SCST from a disappearing SCSI device (class-interface remove
 * hook): find its scst_device, unlink it, remove it from every ACG,
 * swap in the null handler and free it. Done under suspended activity.
 */
701 static void scst_unregister_device(struct scsi_device *scsidp)
703 struct scst_device *d, *dev = NULL;
704 struct scst_acg_dev *acg_dev, *aa;
708 scst_suspend_activity(false);
709 mutex_lock(&scst_mutex);
/* Look up the scst_device wrapping this scsi_device. */
711 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
712 if (d->scsi_dev == scsidp) {
714 TRACE_DBG("Target device %p found", dev);
719 PRINT_ERROR("%s", "Target device not found");
723 list_del(&dev->dev_list_entry);
/* Remove the device from all access-control groups referencing it. */
725 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
726 dev_acg_dev_list_entry) {
727 scst_acg_remove_dev(acg_dev->acg, dev, true);
/* Detach the current handler by assigning the null handler. */
730 scst_assign_dev_handler(dev, &scst_null_devtype);
732 put_disk(dev->rq_disk);
733 scst_free_device(dev);
735 PRINT_INFO("Detached from scsi%d, channel %d, id %d, lun %d, type %d",
736 scsidp->host->host_no, scsidp->channel, scsidp->id,
737 scsidp->lun, scsidp->type);
740 mutex_unlock(&scst_mutex);
741 scst_resume_activity();
/*
 * Sanity-check a dev handler before registration: parse() is mandatory;
 * derive the *_atomic capability flags for absent exec()/dev_done()
 * callbacks (SCST then executes those stages itself).
 */
747 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
751 if (dev_handler->parse == NULL) {
752 PRINT_ERROR("scst dev_type driver %s doesn't have a "
753 "parse() method.", dev_handler->name);
758 if (dev_handler->exec == NULL) {
/* Pass-through exec in soft-IRQ is only safe with this config option. */
759 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
760 dev_handler->exec_atomic = 1;
762 dev_handler->exec_atomic = 0;
766 if (dev_handler->dev_done == NULL)
767 dev_handler->dev_done_atomic = 1;
/*
 * Register a virtual (handler-backed, no scsi_device) device. Validates
 * arguments and the handler, allocates the scst_device, assigns a fresh
 * virt_id and attaches the given handler. Returns the positive virt_id
 * on success, negative error otherwise.
 */
774 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
775 const char *dev_name)
778 struct scst_device *dev = NULL;
782 if (dev_handler == NULL) {
783 PRINT_ERROR("%s: valid device handler must be supplied",
789 if (dev_name == NULL) {
790 PRINT_ERROR("%s: device name must be non-NULL", __func__);
795 res = scst_dev_handler_check(dev_handler);
799 res = scst_suspend_activity(true);
803 if (mutex_lock_interruptible(&scst_mutex) != 0) {
808 res = scst_alloc_device(GFP_KERNEL, &dev);
812 dev->type = dev_handler->type;
813 dev->scsi_dev = NULL;
/* NOTE(review): dev_name pointer is stored, not copied — the caller
 * must keep the string alive while the device is registered. */
814 dev->virt_name = dev_name;
815 dev->virt_id = scst_virt_dev_last_id++;
817 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
821 rc = scst_assign_dev_handler(dev, dev_handler);
828 mutex_unlock(&scst_mutex);
831 scst_resume_activity();
835 PRINT_INFO("Attached to virtual device %s (id %d)",
836 dev_name, dev->virt_id);
838 PRINT_INFO("Failed to attach to virtual device %s", dev_name);
/* Error path: unlink and free the half-constructed device. */
844 list_del(&dev->dev_list_entry);
845 scst_free_device(dev);
848 EXPORT_SYMBOL(scst_register_virtual_device);
/*
 * Unregister a virtual device by its virt_id: unlink it, remove it from
 * all ACGs, swap in the null handler and free it, all under suspended
 * activity + scst_mutex. Mirrors scst_unregister_device().
 */
850 void scst_unregister_virtual_device(int id)
852 struct scst_device *d, *dev = NULL;
853 struct scst_acg_dev *acg_dev, *aa;
857 scst_suspend_activity(false);
858 mutex_lock(&scst_mutex);
860 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
861 if (d->virt_id == id) {
863 TRACE_DBG("Target device %p (id %d) found", dev, id);
868 PRINT_ERROR("Target virtual device (id %d) not found", id);
872 list_del(&dev->dev_list_entry);
874 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
875 dev_acg_dev_list_entry)
877 scst_acg_remove_dev(acg_dev->acg, dev, true);
880 scst_assign_dev_handler(dev, &scst_null_devtype);
882 PRINT_INFO("Detached from virtual device %s (id %d)",
883 dev->virt_name, dev->virt_id);
885 scst_free_device(dev);
888 mutex_unlock(&scst_mutex);
889 scst_resume_activity();
894 EXPORT_SYMBOL(scst_unregister_virtual_device);
/*
 * Register a dev handler for real (pass-through) SCSI devices: version
 * check, handler sanity check, duplicate-name check, /proc entries, then
 * attach the handler to every already-known device of matching type that
 * currently has the null handler.
 */
896 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
899 struct scst_dev_type *dt;
900 struct scst_device *dev;
906 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
907 PRINT_ERROR("Incorrect version of dev handler %s",
913 res = scst_dev_handler_check(dev_type);
/* Older kernels need the exec-req-fifo patch for pass-through exec. */
917 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
918 #if !defined(CONFIG_SCST_STRICT_SERIALIZING)
919 if (dev_type->exec == NULL) {
920 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
921 "supported. Consider applying on your kernel patch "
922 "scst_exec_req_fifo-<kernel-version> or define "
923 "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
930 res = scst_suspend_activity(true);
934 if (mutex_lock_interruptible(&scst_mutex) != 0) {
/* Reject duplicate handler names. */
940 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
941 if (strcmp(dt->name, dev_type->name) == 0) {
942 PRINT_ERROR("Device type handler \"%s\" already "
951 res = scst_build_proc_dev_handler_dir_entries(dev_type);
955 list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
/* Attach to existing pass-through devices of the same SCSI type that
 * still carry the null handler. */
957 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
958 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
960 if (dev->scsi_dev->type == dev_type->type)
961 scst_assign_dev_handler(dev, dev_type);
964 mutex_unlock(&scst_mutex);
965 scst_resume_activity();
968 PRINT_INFO("Device handler \"%s\" for type %d registered "
969 "successfully", dev_type->name, dev_type->type);
977 mutex_unlock(&scst_mutex);
980 scst_resume_activity();
983 PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
984 dev_type->name, dev_type->type);
987 EXPORT_SYMBOL(__scst_register_dev_driver);
/*
 * Unregister a dev handler: verify it is registered, reassign the null
 * handler to every device still using it, unlink the handler and clean
 * up its /proc entries. Done under suspended activity + scst_mutex.
 */
989 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
991 struct scst_device *dev;
992 struct scst_dev_type *dt;
997 scst_suspend_activity(false);
998 mutex_lock(&scst_mutex);
1000 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
1001 if (strcmp(dt->name, dev_type->name) == 0) {
1007 PRINT_ERROR("Dev handler \"%s\" isn't registered",
/* Detach from every device currently served by this handler. */
1012 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
1013 if (dev->handler == dev_type) {
1014 scst_assign_dev_handler(dev, &scst_null_devtype);
1015 TRACE_DBG("Dev handler removed from device %p", dev);
1019 list_del(&dev_type->dev_type_list_entry);
1021 mutex_unlock(&scst_mutex);
1022 scst_resume_activity();
1024 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1026 PRINT_INFO("Device handler \"%s\" for type %d unloaded",
1027 dev_type->name, dev_type->type);
/* Error path: unlock/resume without touching handler state. */
1034 mutex_unlock(&scst_mutex);
1035 scst_resume_activity();
1038 EXPORT_SYMBOL(scst_unregister_dev_driver);
/*
 * Register a handler for virtual devices. Unlike real-device handlers it
 * is not linked on scst_dev_type_list here — only version/sanity checks
 * and optional /proc entries (skipped when no_proc is set).
 */
1040 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1041 const char *version)
1047 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1048 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1054 res = scst_dev_handler_check(dev_type);
1058 if (!dev_type->no_proc) {
1059 res = scst_build_proc_dev_handler_dir_entries(dev_type);
/* type == -1 means the handler serves no fixed SCSI device type. */
1064 if (dev_type->type != -1) {
1065 PRINT_INFO("Virtual device handler %s for type %d "
1066 "registered successfully", dev_type->name,
1069 PRINT_INFO("Virtual device handler \"%s\" registered "
1070 "successfully", dev_type->name);
1074 TRACE_EXIT_RES(res);
1078 PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1082 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
/*
 * Unregister a virtual-device handler: only removes its /proc entries
 * (when it created them) — virtual handlers are not on the global
 * dev-type list.
 */
1084 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1088 if (!dev_type->no_proc)
1089 scst_cleanup_proc_dev_handler_dir_entries(dev_type)
1091 PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1096 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1098 /* Called under scst_mutex */
/*
 * Spawn 'num' additional per-device command-processing kthreads, link
 * them on dev->threads_list and share the device's io_context with each.
 * On kthread/allocation failure the already-created threads are torn
 * down via scst_del_dev_threads().
 */
1099 int scst_add_dev_threads(struct scst_device *dev, int num)
1103 struct scst_cmd_thread_t *thr;
/* Count existing threads so new thread names continue the numbering. */
1108 list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1112 for (i = 0; i < num; i++) {
1113 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1116 PRINT_ERROR("Failed to allocate thr %d", res);
/* Thread name: "<handler>d<dev_num>_<seq>" (handler name truncated). */
1119 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1120 nm[ARRAY_SIZE(nm)-1] = '\0';
1121 thr->cmd_thread = kthread_create(scst_cmd_thread,
1122 &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1123 if (IS_ERR(thr->cmd_thread)) {
1124 res = PTR_ERR(thr->cmd_thread);
1125 PRINT_ERROR("kthread_create() failed: %d", res);
1130 list_add(&thr->thread_list_entry, &dev->threads_list);
1133 * ToDo: better to use tgt_dev_io_context instead, but we
1134 * are not ready for that yet.
/* Replace the fresh kthread's io_context with the device's shared one
 * so all threads of a device land in the same I/O scheduler context. */
1136 __exit_io_context(thr->cmd_thread->io_context);
1137 thr->cmd_thread->io_context = ioc_task_link(dev->dev_io_ctx);
1138 TRACE_DBG("Setting dev io ctx %p on thr %d", dev->dev_io_ctx,
1139 thr->cmd_thread->pid);
1141 wake_up_process(thr->cmd_thread);
1145 TRACE_EXIT_RES(res);
/* Error path: stop the i threads created so far. */
1149 scst_del_dev_threads(dev, i);
1153 /* Called under scst_mutex and suspended activity */
/*
 * Create the per-device thread pool when the handler requests dedicated
 * threads (threads_num > 0): initialize the device's private cmd_lists,
 * spawn the threads and register the list on scst_cmd_lists_list so
 * resume can wake it. Finally point p_cmd_lists at the private lists.
 */
1154 static int scst_create_dev_threads(struct scst_device *dev)
/* Handler uses the global thread pool — nothing to create. */
1161 if (dev->handler->threads_num <= 0)
1164 threads_num = dev->handler->threads_num;
1166 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1167 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1168 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1170 res = scst_add_dev_threads(dev, threads_num);
/* Make __scst_resume_activity() able to wake this device's threads. */
1174 mutex_lock(&scst_suspend_mutex);
1175 list_add_tail(&dev->cmd_lists.lists_list_entry,
1176 &scst_cmd_lists_list);
1177 mutex_unlock(&scst_suspend_mutex);
1179 dev->p_cmd_lists = &dev->cmd_lists;
1182 TRACE_EXIT_RES(res);
1186 /* Called under scst_mutex */
/*
 * Stop up to 'num' of the device's command threads (num < 0 = all),
 * newest first. Before stopping each thread, drop any per-tgt_dev
 * thread-local data that references it.
 */
1187 void scst_del_dev_threads(struct scst_device *dev, int num)
1189 struct scst_cmd_thread_t *ct, *tmp;
1197 list_for_each_entry_safe_reverse(ct, tmp, &dev->threads_list,
1198 thread_list_entry) {
1200 struct scst_tgt_dev *tgt_dev;
/* Release thread-local data owned by the thread being stopped. */
1202 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1203 dev_tgt_dev_list_entry) {
1204 struct scst_thr_data_hdr *td;
1205 td = __scst_find_thr_data(tgt_dev, ct->cmd_thread);
1207 scst_thr_data_put(td);
1212 rc = kthread_stop(ct->cmd_thread);
1214 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1216 list_del(&ct->thread_list_entry);
/* Stop early once 'num' threads have been removed (num > 0). */
1219 if ((num > 0) && (++i >= num))
1228 /* Called under scst_mutex and suspended activity */
/*
 * Tear down the whole per-device thread pool and, if the device was using
 * its private cmd_lists, unregister them from scst_cmd_lists_list.
 * Counterpart of scst_create_dev_threads().
 */
1229 static void scst_stop_dev_threads(struct scst_device *dev)
1233 if (list_empty(&dev->threads_list))
/* -1 = delete all threads of this device. */
1236 scst_del_dev_threads(dev, -1);
1238 if (dev->p_cmd_lists == &dev->cmd_lists) {
1239 mutex_lock(&scst_suspend_mutex);
1240 list_del(&dev->cmd_lists.lists_list_entry);
1241 mutex_unlock(&scst_suspend_mutex);
1249 /* The activity supposed to be suspended and scst_mutex held */
/*
 * Switch 'dev' from its current handler to 'handler':
 *   1. detach_tgt() for every tgt_dev, then detach() on the old handler;
 *   2. stop old per-device threads, set the new handler, start new ones;
 *   3. attach(), then attach_tgt() for every tgt_dev on the new handler.
 * On failure the already-attached tgt_devs are rolled back (see the
 * attached_tgt_devs list) and the null handler is installed.
 */
1250 int scst_assign_dev_handler(struct scst_device *dev,
1251 struct scst_dev_type *handler)
1254 struct scst_tgt_dev *tgt_dev;
/* Tracks tgt_devs successfully attached, for rollback on error. */
1255 LIST_HEAD(attached_tgt_devs);
1259 sBUG_ON(handler == NULL);
/* No-op if the handler is unchanged. */
1261 if (dev->handler == handler)
1264 if (dev->handler && dev->handler->detach_tgt) {
1265 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1266 dev_tgt_dev_list_entry) {
1267 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1269 dev->handler->detach_tgt(tgt_dev);
1270 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1274 if (dev->handler && dev->handler->detach) {
1275 TRACE_DBG("%s", "Calling dev handler's detach()");
1276 dev->handler->detach(dev);
1277 TRACE_DBG("%s", "Old handler's detach() returned");
1280 scst_stop_dev_threads(dev);
1282 dev->handler = handler;
1285 res = scst_create_dev_threads(dev);
1290 if (handler && handler->attach) {
1291 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1292 res = handler->attach(dev);
1293 TRACE_DBG("New dev handler's attach() returned %d", res);
1295 PRINT_ERROR("New device handler's %s attach() "
1296 "failed: %d", handler->name, res);
1301 if (handler && handler->attach_tgt) {
1302 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1303 dev_tgt_dev_list_entry) {
1304 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1306 res = handler->attach_tgt(tgt_dev);
1307 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1309 PRINT_ERROR("Device handler's %s attach_tgt() "
1310 "failed: %d", handler->name, res);
1311 goto out_err_detach_tgt;
1313 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1314 &attached_tgt_devs);
/* Error path: stop new threads and fall back to the null handler. */
1320 scst_stop_dev_threads(dev);
1324 dev->handler = &scst_null_devtype;
1327 TRACE_EXIT_RES(res);
/* Rollback: detach only the tgt_devs attached in this call. */
1331 if (handler && handler->detach_tgt) {
1332 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1333 extra_tgt_dev_list_entry)
1335 TRACE_DBG("Calling handler's detach_tgt(%p)",
1337 handler->detach_tgt(tgt_dev);
1338 TRACE_DBG("%s", "Handler's detach_tgt() returned");
1341 if (handler && handler->detach) {
1342 TRACE_DBG("%s", "Calling handler's detach()");
1343 handler->detach(dev);
1344 TRACE_DBG("%s", "Handler's detach() returned");
1349 int scst_global_threads_count(void)
1354 * Just to lower the race window, when user can get just changed value
1356 mutex_lock(&scst_global_threads_mutex);
1357 i = scst_nr_global_threads;
1358 mutex_unlock(&scst_global_threads_mutex);
1362 static void scst_threads_info_init(void)
1364 mutex_init(&scst_global_threads_mutex);
1365 INIT_LIST_HEAD(&scst_global_threads_list);
1368 /* scst_global_threads_mutex supposed to be held */
/*
 * Stop up to 'num' global processing threads (num < 0 = all), unlinking
 * each from scst_global_threads_list and decrementing the counter.
 */
1369 void __scst_del_global_threads(int num)
1371 struct scst_cmd_thread_t *ct, *tmp;
1378 list_for_each_entry_safe(ct, tmp, &scst_global_threads_list,
1379 thread_list_entry) {
1382 res = kthread_stop(ct->cmd_thread);
1384 TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1385 list_del(&ct->thread_list_entry);
1387 scst_nr_global_threads--;
1398 /* scst_global_threads_mutex supposed to be held */
/*
 * Create 'num' new global "scsi_tgt%d" processing threads serving
 * scst_main_cmd_lists. On failure the threads created so far in this
 * call are stopped again. Returns 0 or a negative error.
 */
1399 int __scst_add_global_threads(int num)
/* Monotonic suffix for thread names across all calls. */
1402 static int scst_thread_num;
1406 for (i = 0; i < num; i++) {
1407 struct scst_cmd_thread_t *thr;
1409 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1412 PRINT_ERROR("fail to allocate thr %d", res);
1415 thr->cmd_thread = kthread_create(scst_cmd_thread,
1416 &scst_main_cmd_lists, "scsi_tgt%d",
1418 if (IS_ERR(thr->cmd_thread)) {
1419 res = PTR_ERR(thr->cmd_thread);
1420 PRINT_ERROR("kthread_create() failed: %d", res);
1425 list_add(&thr->thread_list_entry, &scst_global_threads_list);
1426 scst_nr_global_threads++;
1428 wake_up_process(thr->cmd_thread);
1433 TRACE_EXIT_RES(res);
/* Error path: undo the i threads already created. */
1437 __scst_del_global_threads(i);
1441 int scst_add_global_threads(int num)
1447 mutex_lock(&scst_global_threads_mutex);
1448 res = __scst_add_global_threads(num);
1449 mutex_unlock(&scst_global_threads_mutex);
1451 TRACE_EXIT_RES(res);
1454 EXPORT_SYMBOL(scst_add_global_threads);
1456 void scst_del_global_threads(int num)
1460 mutex_lock(&scst_global_threads_mutex);
1461 __scst_del_global_threads(num);
1462 mutex_unlock(&scst_global_threads_mutex);
1467 EXPORT_SYMBOL(scst_del_global_threads);
/*
 * Module-teardown helper: stop every global processing thread plus the
 * three singleton service threads (TM, global mgmt, init), skipping any
 * that were never started (NULL pointers).
 */
1469 static void scst_stop_all_threads(void)
1473 mutex_lock(&scst_global_threads_mutex);
1474 __scst_del_global_threads(-1);
1475 if (scst_mgmt_cmd_thread)
1476 kthread_stop(scst_mgmt_cmd_thread);
1477 if (scst_mgmt_thread)
1478 kthread_stop(scst_mgmt_thread);
1479 if (scst_init_cmd_thread)
1480 kthread_stop(scst_init_cmd_thread);
1481 mutex_unlock(&scst_global_threads_mutex);
/*
 * Module-init helper: start 'num' global processing threads and the three
 * singleton service threads (init, TM, global mgmt). Each failed
 * kthread_run() records the error and NULLs the pointer so teardown can
 * safely skip it. NOTE(review): error gotos between these lines are
 * elided in this copy.
 */
1487 static int scst_start_all_threads(int num)
1493 mutex_lock(&scst_global_threads_mutex);
1494 res = __scst_add_global_threads(num);
1498 scst_init_cmd_thread = kthread_run(scst_init_thread,
1499 NULL, "scsi_tgt_init");
1500 if (IS_ERR(scst_init_cmd_thread)) {
1501 res = PTR_ERR(scst_init_cmd_thread);
1502 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1503 scst_init_cmd_thread = NULL;
1507 scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
1509 if (IS_ERR(scst_mgmt_cmd_thread)) {
1510 res = PTR_ERR(scst_mgmt_cmd_thread);
1511 PRINT_ERROR("kthread_create() for TM failed: %d", res);
1512 scst_mgmt_cmd_thread = NULL;
1516 scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
1517 NULL, "scsi_tgt_mgmt");
1518 if (IS_ERR(scst_mgmt_thread)) {
1519 res = PTR_ERR(scst_mgmt_thread);
1520 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1521 scst_mgmt_thread = NULL;
1526 mutex_unlock(&scst_global_threads_mutex);
1527 TRACE_EXIT_RES(res);
1535 EXPORT_SYMBOL(scst_get);
1541 EXPORT_SYMBOL(scst_put);
/*
 * SCSI class-interface "add" callback: resolve the scsi_device from the
 * class device (API differs before/after 2.6.26) and register it with
 * SCST, except for scst_local's own devices (would loop back).
 */
1543 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1544 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1546 static int scst_add(struct device *cdev, struct class_interface *intf)
1549 struct scsi_device *scsidp;
1554 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1555 scsidp = to_scsi_device(cdev->dev);
1557 scsidp = to_scsi_device(cdev->parent);
/* Skip devices exported by scst_local itself. */
1560 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1561 res = scst_register_device(scsidp);
/*
 * SCSI class-interface "remove" callback: mirror of scst_add() — resolve
 * the scsi_device and unregister it from SCST, again skipping scst_local
 * devices.
 */
1567 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1568 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1570 static void scst_remove(struct device *cdev, struct class_interface *intf)
1573 struct scsi_device *scsidp;
1577 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1578 scsidp = to_scsi_device(cdev->dev);
1580 scsidp = to_scsi_device(cdev->parent);
1583 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1584 scst_unregister_device(scsidp);
/*
 * Class interface hooking scst_add()/scst_remove() into SCSI device
 * hotplug; field names changed (.add/.remove -> .add_dev/.remove_dev)
 * in kernel 2.6.26.
 */
1590 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1591 static struct class_interface scst_interface = {
1593 .remove = scst_remove,
1596 static struct class_interface scst_interface = {
1597 .add_dev = scst_add,
1598 .remove_dev = scst_remove,
/*
 * scst_print_config() - log the compile-time SCST feature set.
 *
 * Builds a single comma-separated "Enabled features: ..." line in a
 * local buffer, appending one entry per enabled CONFIG_SCST_* option,
 * then prints it via PRINT_INFO().  Runs once at module init
 * (__init).
 *
 * NOTE(review): each entry uses "(j == i) ? "" : ", "" to decide
 * whether a separator is needed; j appears to be snapshotted from i
 * after the prefix is written (that assignment is on a line not
 * visible in this excerpt) — confirm against the full source.
 */
1602 static void __init scst_print_config(void)
1607 i = snprintf(buf, sizeof(buf), "Enabled features: ");
1610 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1611 i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1614 #ifdef CONFIG_SCST_EXTRACHECKS
1615 i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1616 (j == i) ? "" : ", ");
1619 #ifdef CONFIG_SCST_TRACING
1620 i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1621 (j == i) ? "" : ", ");
1624 #ifdef CONFIG_SCST_DEBUG
1625 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1626 (j == i) ? "" : ", ");
1629 #ifdef CONFIG_SCST_DEBUG_TM
1630 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1631 (j == i) ? "" : ", ");
1634 #ifdef CONFIG_SCST_DEBUG_RETRY
1635 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1636 (j == i) ? "" : ", ");
1639 #ifdef CONFIG_SCST_DEBUG_OOM
1640 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1641 (j == i) ? "" : ", ");
1644 #ifdef CONFIG_SCST_DEBUG_SN
1645 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1646 (j == i) ? "" : ", ");
1649 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1650 i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1651 (j == i) ? "" : ", ");
1654 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1655 i += snprintf(&buf[i], sizeof(buf) - i,
1656 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1657 (j == i) ? "" : ", ");
1660 #ifdef CONFIG_SCST_STRICT_SECURITY
1661 i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1662 (j == i) ? "" : ", ");
/* Emit the assembled feature line in one shot. */
1666 PRINT_INFO("%s", buf);
/*
 * init_scst() - module initialization entry point.
 *
 * Sequence: build-time sanity checks; runtime warning about the
 * io_context kernel patch; initialization of all SCST global locks,
 * lists and wait queues; creation of the slab caches and mempools;
 * sizing of the command-memory limits; SGV pool setup; default access
 * control group; registration of the SCSI class interface; per-CPU
 * tasklet setup; worker-thread startup; /proc registration.
 *
 * Returns 0 on success or a negative errno.  On failure, the goto
 * ladder at the bottom unwinds exactly the steps already completed,
 * in reverse order (mirrored by exit_scst()).
 */
1669 static int __init init_scst(void)
/*
 * Compile-time checks: the SCST sense buffer must be able to hold a
 * kernel sense buffer / parsed sense header, and the serial-number
 * fields compared across structures must have identical widths.
 */
1676 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1678 struct scsi_request *req;
1679 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1680 sizeof(req->sr_sense_buffer));
1684 struct scsi_sense_hdr *shdr;
1685 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1689 struct scst_tgt_dev *t;
1691 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
/* NOTE(review): 'c' (presumably a struct scst_cmd *) is declared on a
 * line not visible in this excerpt — verify against the full source. */
1692 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
/* Runtime counterpart of the #warning's in the header: tell the admin
 * whether the performance-related io_context patch is in effect. */
1695 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1696 #if !defined(SCST_IO_CONTEXT)
1697 PRINT_WARNING("%s", "Patch io_context was not applied on "
1698 "your kernel. SCST will be working with not the best "
1702 PRINT_WARNING("%s", "There is no patch io_context for your kernel "
1703 "version. For performance reasons it is strongly recommended "
1704 "to upgrade your kernel to version >= 2.6.27.x.")
/* Initialize every global lock, list head and wait queue before any
 * other subsystem can touch them. */
1707 mutex_init(&scst_mutex);
1708 INIT_LIST_HEAD(&scst_template_list);
1709 INIT_LIST_HEAD(&scst_dev_list);
1710 INIT_LIST_HEAD(&scst_dev_type_list);
1711 spin_lock_init(&scst_main_lock);
1712 INIT_LIST_HEAD(&scst_acg_list);
1713 spin_lock_init(&scst_init_lock);
1714 init_waitqueue_head(&scst_init_cmd_list_waitQ);
1715 INIT_LIST_HEAD(&scst_init_cmd_list);
1716 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1717 scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1719 atomic_set(&scst_cmd_count, 0);
1720 spin_lock_init(&scst_mcmd_lock);
1721 INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1722 INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1723 init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1724 init_waitqueue_head(&scst_mgmt_waitQ);
1725 spin_lock_init(&scst_mgmt_lock);
1726 INIT_LIST_HEAD(&scst_sess_init_list);
1727 INIT_LIST_HEAD(&scst_sess_shut_list);
1728 init_waitqueue_head(&scst_dev_cmd_waitQ);
1729 mutex_init(&scst_suspend_mutex);
1730 INIT_LIST_HEAD(&scst_cmd_lists_list);
/* Virtual device IDs start at 1 (0 apparently reserved). */
1731 scst_virt_dev_last_id = 1;
/* The main (default) command list joins the global list-of-lists. */
1732 spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1733 INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1734 init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1735 list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1736 &scst_cmd_lists_list);
1738 res = scst_lib_init();
/* Default the worker-thread count to the number of online CPUs and
 * clamp obviously bad (negative) module-parameter values. */
1742 scst_num_cpus = num_online_cpus();
1744 /* ToDo: register_cpu_notifier() */
1746 if (scst_threads == 0)
1747 scst_threads = scst_num_cpus;
1749 if (scst_threads < 1) {
1750 PRINT_ERROR("%s", "scst_threads can not be less than 1");
1751 scst_threads = scst_num_cpus;
1754 scst_threads_info_init();
/* Helper macro: create one slab cache, trace it, and (on the elided
 * continuation lines) jump to the 'o' error label on failure. */
1756 #define INIT_CACHEP(p, s, o) do { \
1757 p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
1758 TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
1759 sizeof(struct s)); \
1766 INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out_lib_exit);
1767 INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1768 out_destroy_mgmt_cache);
1769 INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1770 out_destroy_mgmt_stub_cache);
/* Local wrapper type so KMEM_CACHE() can size a plain sense buffer. */
1772 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1773 INIT_CACHEP(scst_sense_cachep, scst_sense,
1774 out_destroy_ua_cache);
1776 INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
1777 INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
1778 INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1779 INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1780 INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
/* Mempools guarantee forward progress under memory pressure; each is
 * backed by the matching slab cache created above. */
1782 scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1783 mempool_free_slab, scst_mgmt_cachep);
1784 if (scst_mgmt_mempool == NULL) {
1786 goto out_destroy_acg_cache;
1790 * All mgmt stubs, UAs and sense buffers are bursty and losing them
1791 * may have fatal consequences, so let's have big pools for them.
1794 scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1795 mempool_free_slab, scst_mgmt_stub_cachep);
1796 if (scst_mgmt_stub_mempool == NULL) {
1798 goto out_destroy_mgmt_mempool;
1801 scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
1802 mempool_free_slab, scst_ua_cachep);
1803 if (scst_ua_mempool == NULL) {
1805 goto out_destroy_mgmt_stub_mempool;
1808 scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
1809 mempool_free_slab, scst_sense_cachep);
1810 if (scst_sense_mempool == NULL) {
1812 goto out_destroy_ua_mempool;
1815 scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
1816 mempool_free_slab, scst_aen_cachep);
1817 if (scst_aen_mempool == NULL) {
1819 goto out_destroy_sense_mempool;
/* No explicit limit given: derive the command-memory cap (in MB) from
 * lowmem size; on 32-bit it is additionally capped (1 << 30 here is a
 * byte-scale bound from the original expression). */
1822 if (scst_max_cmd_mem == 0) {
1825 #if BITS_PER_LONG == 32
1826 scst_max_cmd_mem = min(
1827 (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
1828 >> 20) >> 2, (uint64_t)1 << 30);
1830 scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
/* Per-device limit: clamp an explicit value to the global cap, or
 * default to 2/5 of it. */
1835 if (scst_max_dev_cmd_mem != 0) {
1836 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1837 PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1838 "scst_max_cmd_mem (%d)",
1839 scst_max_dev_cmd_mem,
1841 scst_max_dev_cmd_mem = scst_max_cmd_mem;
1844 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
/* SGV pools are sized in pages: MB -> KB (<< 10), KB -> pages. */
1846 res = scst_sgv_pools_init(
1847 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1849 goto out_destroy_aen_mempool;
1851 scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1852 if (scst_default_acg == NULL) {
1854 goto out_destroy_sgv_pool;
/* From here on scst_add()/scst_remove() fire for every SCSI device. */
1857 res = scsi_register_interface(&scst_interface);
/* Per-CPU (one per scst_tasklets entry) tasklets for soft-IRQ command
 * processing. */
1861 for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1862 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1863 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1864 tasklet_init(&scst_tasklets[i].tasklet,
1865 (void *)scst_cmd_tasklet,
1866 (unsigned long)&scst_tasklets[i]);
1869 TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1872 res = scst_start_all_threads(scst_threads);
1874 goto out_thread_free;
1876 res = scst_proc_init_module();
1878 goto out_thread_free;
1881 PRINT_INFO("SCST version %s loaded successfully (max mem for "
1882 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1883 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1885 scst_print_config();
1888 TRACE_EXIT_RES(res);
/*
 * Error unwind ladder: each label releases exactly what was acquired
 * before the failure point, in reverse creation order.
 */
1892 scst_stop_all_threads();
1894 scsi_unregister_interface(&scst_interface);
1897 scst_destroy_acg(scst_default_acg);
1899 out_destroy_sgv_pool:
1900 scst_sgv_pools_deinit();
1902 out_destroy_aen_mempool:
1903 mempool_destroy(scst_aen_mempool);
1905 out_destroy_sense_mempool:
1906 mempool_destroy(scst_sense_mempool);
1908 out_destroy_ua_mempool:
1909 mempool_destroy(scst_ua_mempool);
1911 out_destroy_mgmt_stub_mempool:
1912 mempool_destroy(scst_mgmt_stub_mempool);
1914 out_destroy_mgmt_mempool:
1915 mempool_destroy(scst_mgmt_mempool);
1917 out_destroy_acg_cache:
1918 kmem_cache_destroy(scst_acgd_cachep);
1920 out_destroy_tgt_cache:
1921 kmem_cache_destroy(scst_tgtd_cachep);
1923 out_destroy_sess_cache:
1924 kmem_cache_destroy(scst_sess_cachep);
1926 out_destroy_cmd_cache:
1927 kmem_cache_destroy(scst_cmd_cachep);
1929 out_destroy_aen_cache:
1930 kmem_cache_destroy(scst_aen_cachep);
1932 out_destroy_sense_cache:
1933 kmem_cache_destroy(scst_sense_cachep);
1935 out_destroy_ua_cache:
1936 kmem_cache_destroy(scst_ua_cachep);
1938 out_destroy_mgmt_stub_cache:
1939 kmem_cache_destroy(scst_mgmt_stub_cachep);
1941 out_destroy_mgmt_cache:
1942 kmem_cache_destroy(scst_mgmt_cachep);
/*
 * exit_scst() - module unload path.
 *
 * Tears everything down in the reverse order of init_scst(): /proc
 * entries, worker threads, the SCSI class interface, the default ACG,
 * the SGV pools, then all mempools and finally all slab caches.
 */
1949 static void __exit exit_scst(void)
1953 /* ToDo: unregister_cpu_notifier() */
1955 scst_proc_cleanup_module();
1957 scst_stop_all_threads();
1959 scsi_unregister_interface(&scst_interface);
1960 scst_destroy_acg(scst_default_acg);
1962 scst_sgv_pools_deinit();
/* Helper macro: destroy one slab cache (tail of the macro body is on
 * continuation lines not visible in this excerpt). */
1964 #define DEINIT_CACHEP(p) do { \
1965 kmem_cache_destroy(p); \
1969 mempool_destroy(scst_mgmt_mempool);
1970 mempool_destroy(scst_mgmt_stub_mempool);
1971 mempool_destroy(scst_ua_mempool);
1972 mempool_destroy(scst_sense_mempool);
1973 mempool_destroy(scst_aen_mempool);
/* Caches must go after their mempools, which hold cached objects. */
1975 DEINIT_CACHEP(scst_mgmt_cachep);
1976 DEINIT_CACHEP(scst_mgmt_stub_cachep);
1977 DEINIT_CACHEP(scst_ua_cachep);
1978 DEINIT_CACHEP(scst_sense_cachep);
1979 DEINIT_CACHEP(scst_aen_cachep);
1980 DEINIT_CACHEP(scst_cmd_cachep);
1981 DEINIT_CACHEP(scst_sess_cachep);
1982 DEINIT_CACHEP(scst_tgtd_cachep);
1983 DEINIT_CACHEP(scst_acgd_cachep);
1987 PRINT_INFO("%s", "SCST unloaded");
/* Module entry/exit registration and standard modinfo metadata. */
1994 module_init(init_scst);
1995 module_exit(exit_scst);
1997 MODULE_AUTHOR("Vladislav Bolkhovitin");
1998 MODULE_LICENSE("GPL");
1999 MODULE_DESCRIPTION("SCSI target core");
2000 MODULE_VERSION(SCST_VERSION_STRING);