4 * Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2009 ID7 Ltd.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
33 #include "scst_priv.h"
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not\
38 recommended for performance reasons. Consider changing VMSPLIT\
39 option or use a 64-bit configuration instead. See README file for\
43 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
44 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
45 !defined(CONFIG_SCST_STRICT_SERIALIZING)
46 #warning "Patch scst_exec_req_fifo-<kernel-version> was not applied on\
47 your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined.\
48 Pass-through dev handlers will not work."
52 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
53 #if !defined(SCST_IO_CONTEXT)
54 #warning "Patch io_context-<kernel-version> was not applied\
55 on your kernel. SCST will be working with not the best performance."
58 #warning "There is no patch io_context-<kernel-version>\
59 for your kernel version. For performance reasons it is strongly recommended\
60 to upgrade your kernel to version >= 2.6.27.x."
64 ** SCST global variables. They are all uninitialized to have their layout in
65 ** memory be exactly as specified. Otherwise compiler puts zero-initialized
66 ** variable separately from nonzero-initialized ones.
70 * All targets, devices and dev_types management is done under this mutex.
72 * It must NOT be used in any works (schedule_work(), etc.), because
73 * otherwise a deadlock (double lock, actually) is possible, e.g., with
74 * scst_user detach_tgt(), which is called under scst_mutex and calls
75 * flush_scheduled_work().
/*
 * SCST core global state.  Per the comment above this region, these are
 * deliberately left uninitialized so the compiler keeps them in a single
 * (BSS) layout rather than splitting zero- and nonzero-initialized data.
 * NOTE(review): this listing is truncated (original line numbers jump);
 * compare against the full file before relying on completeness.
 */
77 struct mutex scst_mutex;
79 /* All 3 protected by scst_mutex */
80 struct list_head scst_template_list;
81 struct list_head scst_dev_list;
82 struct list_head scst_dev_type_list;
84 spinlock_t scst_main_lock;
/* kmem caches and their mempools for mgmt cmds, UAs, sense and AENs */
86 static struct kmem_cache *scst_mgmt_cachep;
87 mempool_t *scst_mgmt_mempool;
88 static struct kmem_cache *scst_mgmt_stub_cachep;
89 mempool_t *scst_mgmt_stub_mempool;
90 static struct kmem_cache *scst_ua_cachep;
91 mempool_t *scst_ua_mempool;
92 static struct kmem_cache *scst_sense_cachep;
93 mempool_t *scst_sense_mempool;
94 static struct kmem_cache *scst_aen_cachep;
95 mempool_t *scst_aen_mempool;
96 struct kmem_cache *scst_tgtd_cachep;
97 struct kmem_cache *scst_sess_cachep;
98 struct kmem_cache *scst_acgd_cachep;
100 struct list_head scst_acg_list;
101 struct scst_acg *scst_default_acg;
/* init-cmd processing state: lock, waitqueue and pending-cmd list */
103 spinlock_t scst_init_lock;
104 wait_queue_head_t scst_init_cmd_list_waitQ;
105 struct list_head scst_init_cmd_list;
106 unsigned int scst_init_poll_cnt;
108 struct kmem_cache *scst_cmd_cachep;
110 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
111 unsigned long scst_trace_flag;
114 unsigned long scst_flags;
/* count of in-flight commands; suspend logic waits for it to hit 0 */
115 atomic_t scst_cmd_count;
117 struct scst_cmd_lists scst_main_cmd_lists;
119 struct scst_tasklet scst_tasklets[NR_CPUS];
/* task-management command queues, protected by scst_mcmd_lock */
121 spinlock_t scst_mcmd_lock;
122 struct list_head scst_active_mgmt_cmd_list;
123 struct list_head scst_delayed_mgmt_cmd_list;
124 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
126 wait_queue_head_t scst_mgmt_waitQ;
127 spinlock_t scst_mgmt_lock;
128 struct list_head scst_sess_init_list;
129 struct list_head scst_sess_shut_list;
131 wait_queue_head_t scst_dev_cmd_waitQ;
133 static struct mutex scst_suspend_mutex;
134 /* protected by scst_suspend_mutex */
135 static struct list_head scst_cmd_lists_list;
/* global worker-thread bookkeeping (see scst_*_global_threads below) */
137 static int scst_threads;
138 struct mutex scst_global_threads_mutex;
139 u32 scst_nr_global_threads;
140 static struct list_head scst_global_threads_list;
141 static struct task_struct *scst_init_cmd_thread;
142 static struct task_struct *scst_mgmt_thread;
143 static struct task_struct *scst_mgmt_cmd_thread;
145 static int suspend_count;
147 static int scst_virt_dev_last_id; /* protected by scst_mutex */
149 static unsigned int scst_max_cmd_mem;
150 unsigned int scst_max_dev_cmd_mem;
152 module_param_named(scst_threads, scst_threads, int, 0);
153 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
155 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
156 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
157 "all SCSI commands of all devices at any given time in MB");
159 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
160 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
161 "by all SCSI commands of a device at any given time in MB");
/* placeholder handler assigned to devices with no real dev handler;
 * initializer body not visible in this listing */
163 struct scst_dev_type scst_null_devtype = {
167 static void __scst_resume_activity(void);
/*
 * Registers a target driver template with the SCST core.
 * Validates the interface version string and mandatory callbacks
 * (detect/release/xmit_response and a sane threads_num), builds the /proc
 * entries unless no_proc_entry is set, rejects duplicate names under
 * scst_mutex, then calls the driver's detect() and links the template
 * into scst_template_list.
 * NOTE(review): listing is truncated — error labels, gotos and returns
 * are missing; do not assume the visible lines are contiguous.
 */
169 int __scst_register_target_template(struct scst_tgt_template *vtt,
173 struct scst_tgt_template *t;
/* local mutex serializes concurrent registrations of the same name */
174 static DEFINE_MUTEX(m);
178 INIT_LIST_HEAD(&vtt->tgt_list);
180 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
181 PRINT_ERROR("Incorrect version of target %s", vtt->name);
187 PRINT_ERROR("Target driver %s doesn't have a "
188 "detect() method.", vtt->name);
194 PRINT_ERROR("Target driver %s doesn't have a "
195 "release() method.", vtt->name);
200 if (!vtt->xmit_response) {
201 PRINT_ERROR("Target driver %s doesn't have a "
202 "xmit_response() method.", vtt->name);
207 if (vtt->threads_num < 0) {
208 PRINT_ERROR("Wrong threads_num value %d for "
209 "target \"%s\"", vtt->threads_num,
215 if (!vtt->no_proc_entry) {
216 res = scst_build_proc_target_dir_entries(vtt);
/* no rdy_to_xfer callback => stage is a no-op, safe in atomic ctx */
221 if (vtt->rdy_to_xfer == NULL)
222 vtt->rdy_to_xfer_atomic = 1;
224 if (mutex_lock_interruptible(&m) != 0)
227 if (mutex_lock_interruptible(&scst_mutex) != 0)
229 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
230 if (strcmp(t->name, vtt->name) == 0) {
231 PRINT_ERROR("Target driver %s already registered",
233 mutex_unlock(&scst_mutex);
237 mutex_unlock(&scst_mutex);
/* detect() runs without scst_mutex held */
239 TRACE_DBG("%s", "Calling target driver's detect()");
240 res = vtt->detect(vtt);
241 TRACE_DBG("Target driver's detect() returned %d", res);
243 PRINT_ERROR("%s", "The detect() routine failed");
248 mutex_lock(&scst_mutex);
249 list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
250 mutex_unlock(&scst_mutex);
254 PRINT_INFO("Target template %s registered successfully", vtt->name);
/* error unwind path: remove /proc entries created above */
263 scst_cleanup_proc_target_dir_entries(vtt);
269 PRINT_ERROR("Failed to register target template %s", vtt->name);
272 EXPORT_SYMBOL(__scst_register_target_template);
/*
 * Unregisters a target template: finds it by name in scst_template_list,
 * unregisters every target still attached to it (dropping scst_mutex
 * around each scst_unregister() call to avoid recursion on the lock),
 * unlinks the template and cleans up its /proc entries.
 * NOTE(review): truncated listing — the "not found" early-return path
 * and braces are not visible here.
 */
274 void scst_unregister_target_template(struct scst_tgt_template *vtt)
276 struct scst_tgt *tgt;
277 struct scst_tgt_template *t;
282 mutex_lock(&scst_mutex);
284 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
285 if (strcmp(t->name, vtt->name) == 0) {
291 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
296 list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
/* scst_unregister() must run without scst_mutex held */
297 mutex_unlock(&scst_mutex);
298 scst_unregister(tgt);
299 mutex_lock(&scst_mutex);
302 list_del(&vtt->scst_template_list_entry);
304 PRINT_INFO("Target template %s unregistered successfully", vtt->name);
307 mutex_unlock(&scst_mutex);
309 scst_cleanup_proc_target_dir_entries(vtt);
314 EXPORT_SYMBOL(scst_unregister_target_template);
/*
 * Creates and registers one target instance for a registered template.
 * Allocates the scst_tgt, initializes its session list, retry machinery
 * and timer, suspends activity, builds the default ACG group name
 * ("<SCST_DEFAULT_ACG_NAME>_<target_name>") and /proc entries, and links
 * the target into the template's tgt_list.  Returns the new tgt or NULL
 * (the visible error paths free the allocation and resume activity).
 * NOTE(review): truncated listing — braces, gotos and the final returns
 * are missing from view.
 */
316 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
317 const char *target_name)
319 struct scst_tgt *tgt;
324 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
326 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
331 INIT_LIST_HEAD(&tgt->sess_list);
332 init_waitqueue_head(&tgt->unreg_waitQ);
334 tgt->sg_tablesize = vtt->sg_tablesize;
335 spin_lock_init(&tgt->tgt_lock);
336 INIT_LIST_HEAD(&tgt->retry_cmd_list);
337 atomic_set(&tgt->finished_cmds, 0);
/* legacy timer API (pre-timer_setup); fires scst_tgt_retry_timer_fn */
338 init_timer(&tgt->retry_timer);
339 tgt->retry_timer.data = (unsigned long)tgt;
340 tgt->retry_timer.function = scst_tgt_retry_timer_fn;
342 rc = scst_suspend_activity(true);
344 goto out_free_tgt_err;
346 if (mutex_lock_interruptible(&scst_mutex) != 0) {
348 goto out_resume_free;
351 if (target_name != NULL) {
/* +1 for the '_' separator, +1 for the NUL terminator */
352 int len = strlen(target_name) + 1 +
353 strlen(SCST_DEFAULT_ACG_NAME) + 1;
355 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
356 if (tgt->default_group_name == NULL) {
357 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
358 "group name failed");
360 goto out_unlock_resume;
362 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
366 rc = scst_build_proc_target_entries(tgt);
370 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
372 mutex_unlock(&scst_mutex);
373 scst_resume_activity();
375 PRINT_INFO("Target %s (%p) for template %s registered successfully",
376 target_name, tgt, vtt->name);
/* error unwind: free group name, unlock, resume, free tgt */
383 kfree(tgt->default_group_name);
386 mutex_unlock(&scst_mutex);
389 scst_resume_activity();
396 PRINT_ERROR("Failed to register target %s for template %s (error %d)",
397 target_name, vtt->name, rc);
400 EXPORT_SYMBOL(scst_register);
402 static inline int test_sess_list(struct scst_tgt *tgt)
405 mutex_lock(&scst_mutex);
406 res = list_empty(&tgt->sess_list);
407 mutex_unlock(&scst_mutex);
/*
 * Tears down one target: calls the driver's release(), force-unregisters
 * any session still in SCST_SESS_SPH_READY phase (dropping scst_mutex
 * around each unregister), waits until the session list is empty, then
 * under suspended activity unlinks the target, removes its /proc
 * entries, frees the default group name and stops the retry timer.
 * NOTE(review): truncated listing — braces, kfree(tgt) and TRACE lines
 * are not all visible.
 */
411 void scst_unregister(struct scst_tgt *tgt)
413 struct scst_session *sess;
414 struct scst_tgt_template *vtt = tgt->tgtt;
418 TRACE_DBG("%s", "Calling target driver's release()");
419 tgt->tgtt->release(tgt);
420 TRACE_DBG("%s", "Target driver's release() returned");
422 mutex_lock(&scst_mutex);
424 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
425 if (sess->shut_phase == SCST_SESS_SPH_READY) {
427 * Sometimes it's hard for target driver to track all
428 * its sessions (see scst_local, for example), so let's
431 mutex_unlock(&scst_mutex);
432 scst_unregister_session(sess, 0, NULL);
433 mutex_lock(&scst_mutex);
437 mutex_unlock(&scst_mutex);
/* block until every session has fully shut down */
439 TRACE_DBG("%s", "Waiting for sessions shutdown");
440 wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
441 TRACE_DBG("%s", "wait_event() returned");
443 scst_suspend_activity(false);
444 mutex_lock(&scst_mutex);
446 list_del(&tgt->tgt_list_entry);
448 scst_cleanup_proc_target_entries(tgt);
450 kfree(tgt->default_group_name);
452 mutex_unlock(&scst_mutex);
453 scst_resume_activity();
455 del_timer_sync(&tgt->retry_timer);
457 PRINT_INFO("Target %p for template %s unregistered successfully",
465 EXPORT_SYMBOL(scst_unregister);
/*
 * Waits for scst_cmd_count to drop to zero while suspending.
 * Interruptible mode uses a bounded wait (SCST_SUSPENDING_TIMEOUT) and
 * resumes activity on failure/timeout; otherwise falls through to an
 * unbounded uninterruptible wait.
 * NOTE(review): truncated listing — the branch structure between the two
 * waits and the return path are not visible; treat flow as approximate.
 */
467 static int scst_susp_wait(bool interruptible)
474 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
475 (atomic_read(&scst_cmd_count) == 0),
476 SCST_SUSPENDING_TIMEOUT);
/* undo the partial suspend if the interruptible wait did not finish */
478 __scst_resume_activity();
484 wait_event(scst_dev_cmd_waitQ,
485 atomic_read(&scst_cmd_count) == 0);
487 TRACE_MGMT_DBG("wait_event() returned %d", res);
/*
 * Globally suspends command processing.  Nested calls are counted via
 * suspend_count; only the first caller sets SCST_FLAG_SUSPENDING /
 * SCST_FLAG_SUSPENDED and waits (twice: once with SUSPENDING set, then
 * again after clearing it) for all in-flight commands to finish.
 * Returns 0 or -EINTR-style failure when 'interruptible' and the caller
 * is interrupted (exact value not visible in this truncated listing).
 */
493 int scst_suspend_activity(bool interruptible)
501 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
/* non-interruptible path takes the mutex unconditionally */
506 mutex_lock(&scst_suspend_mutex);
508 TRACE_MGMT_DBG("suspend_count %d", suspend_count);
510 if (suspend_count > 1)
513 set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
514 set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
516 * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
517 * ordered with scst_cmd_count. Otherwise lockless logic in
518 * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
520 smp_mb__after_set_bit();
523 * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
524 * information about scst_user behavior.
526 * ToDo: make the global suspending unneeded (switch to per-device
527 * reference counting? That would mean to switch off from lockless
528 * implementation of scst_translate_lun().. )
531 if (atomic_read(&scst_cmd_count) != 0) {
532 PRINT_INFO("Waiting for %d active commands to complete... This "
533 "might take few minutes for disks or few hours for "
534 "tapes, if you use long executed commands, like "
535 "REWIND or FORMAT. In case, if you have a hung user "
536 "space device (i.e. made using scst_user module) not "
537 "responding to any commands, if might take virtually "
538 "forever until the corresponding user space "
539 "program recovers and starts responding or gets "
540 "killed.", atomic_read(&scst_cmd_count));
544 res = scst_susp_wait(interruptible);
/* SUSPENDING cleared: new cmds are now queued instead of executed */
548 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
549 /* See comment about smp_mb() above */
550 smp_mb__after_clear_bit();
552 TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
553 atomic_read(&scst_cmd_count));
555 res = scst_susp_wait(interruptible);
560 PRINT_INFO("%s", "All active commands completed");
563 mutex_unlock(&scst_suspend_mutex);
/* error-path cleanup: clear SUSPENDING before returning failure */
570 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
571 /* See comment about smp_mb() above */
572 smp_mb__after_clear_bit();
575 EXPORT_SYMBOL(scst_suspend_activity);
/*
 * Resumes activity (caller holds scst_suspend_mutex).  Decrements
 * suspend_count and returns early while it is still positive; on the
 * last resume clears SCST_FLAG_SUSPENDED, wakes every registered cmd
 * list plus the init-cmd waitqueue, and promotes the first delayed mgmt
 * cmd (if any) back to the active list before waking the mgmt thread.
 * NOTE(review): the suspend_count-- itself is in a truncated line.
 */
577 static void __scst_resume_activity(void)
579 struct scst_cmd_lists *l;
584 TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
585 if (suspend_count > 0)
588 clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
590 * The barrier is needed to make sure all woken up threads see the
591 * cleared flag. Not sure if it's really needed, but let's be safe.
593 smp_mb__after_clear_bit();
595 list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
596 wake_up_all(&l->cmd_list_waitQ);
598 wake_up_all(&scst_init_cmd_list_waitQ);
600 spin_lock_irq(&scst_mcmd_lock);
601 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
602 struct scst_mgmt_cmd *m;
603 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
604 mgmt_cmd_list_entry);
605 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
607 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
609 spin_unlock_irq(&scst_mcmd_lock);
610 wake_up_all(&scst_mgmt_cmd_list_waitQ);
617 void scst_resume_activity(void)
621 mutex_lock(&scst_suspend_mutex);
622 __scst_resume_activity();
623 mutex_unlock(&scst_suspend_mutex);
628 EXPORT_SYMBOL(scst_resume_activity);
/*
 * Registers a real SCSI device with SCST (called from the class
 * interface add hook).  Under suspended activity and scst_mutex it
 * allocates a scst_device, attaches a gendisk stub (alloc_disk(1) with
 * SCST_MAJOR), links the device into scst_dev_list and assigns the
 * first dev handler whose type matches scsidp->type.
 * NOTE(review): truncated listing; also the failure message reads
 * "Failed to to scsi%d" — looks like a typo for "attach to", but a
 * doc-only pass cannot change runtime strings.
 */
630 static int scst_register_device(struct scsi_device *scsidp)
633 struct scst_device *dev;
634 struct scst_dev_type *dt;
638 res = scst_suspend_activity(true);
642 if (mutex_lock_interruptible(&scst_mutex) != 0) {
647 res = scst_alloc_device(GFP_KERNEL, &dev);
651 dev->type = scsidp->type;
653 dev->rq_disk = alloc_disk(1);
654 if (dev->rq_disk == NULL) {
658 dev->rq_disk->major = SCST_MAJOR;
660 dev->scsi_dev = scsidp;
662 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
/* first matching handler by SCSI type wins */
664 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
665 if (dt->type == scsidp->type) {
666 res = scst_assign_dev_handler(dev, dt);
674 mutex_unlock(&scst_mutex);
677 scst_resume_activity();
681 PRINT_INFO("Attached to scsi%d, channel %d, id %d, lun %d, "
682 "type %d", scsidp->host->host_no, scsidp->channel,
683 scsidp->id, scsidp->lun, scsidp->type);
685 PRINT_ERROR("Failed to to scsi%d, channel %d, id %d, lun %d, "
686 "type %d", scsidp->host->host_no, scsidp->channel,
687 scsidp->id, scsidp->lun, scsidp->type);
/* error unwind: unlink, drop disk, free device */
694 list_del(&dev->dev_list_entry);
695 put_disk(dev->rq_disk);
698 scst_free_device(dev);
/*
 * Unregisters a real SCSI device: finds the scst_device wrapping
 * scsidp, unlinks it, detaches it from every ACG, reassigns the null
 * dev handler, releases the gendisk and frees the device — all under
 * suspended activity and scst_mutex.
 * NOTE(review): truncated listing — the "found" assignment/break and
 * the not-found goto are not visible.
 */
702 static void scst_unregister_device(struct scsi_device *scsidp)
704 struct scst_device *d, *dev = NULL;
705 struct scst_acg_dev *acg_dev, *aa;
709 scst_suspend_activity(false);
710 mutex_lock(&scst_mutex);
712 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
713 if (d->scsi_dev == scsidp) {
715 TRACE_DBG("Target device %p found", dev);
720 PRINT_ERROR("%s", "Target device not found");
724 list_del(&dev->dev_list_entry);
/* detach from all access-control groups referencing this device */
726 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
727 dev_acg_dev_list_entry) {
728 scst_acg_remove_dev(acg_dev->acg, dev, true);
731 scst_assign_dev_handler(dev, &scst_null_devtype);
733 put_disk(dev->rq_disk);
734 scst_free_device(dev);
736 PRINT_INFO("Detached from scsi%d, channel %d, id %d, lun %d, type %d",
737 scsidp->host->host_no, scsidp->channel, scsidp->id,
738 scsidp->lun, scsidp->type);
741 mutex_unlock(&scst_mutex);
742 scst_resume_activity();
748 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
752 if (dev_handler->parse == NULL) {
753 PRINT_ERROR("scst dev_type driver %s doesn't have a "
754 "parse() method.", dev_handler->name);
759 if (dev_handler->exec == NULL) {
760 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
761 dev_handler->exec_atomic = 1;
763 dev_handler->exec_atomic = 0;
767 if (dev_handler->dev_done == NULL)
768 dev_handler->dev_done_atomic = 1;
/*
 * Registers a virtual (handler-backed, no scsi_device) device.
 * Validates the handler and name, allocates a scst_device, assigns it a
 * fresh virt_id from scst_virt_dev_last_id (scst_mutex-protected), links
 * it into scst_dev_list and binds the supplied handler.  Returns the
 * virt_id on success (per the "id %d" log) or a negative error.
 * NOTE(review): dev->virt_name aliases the caller's dev_name pointer —
 * caller must keep it alive; truncated listing hides the return lines.
 */
775 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
776 const char *dev_name)
779 struct scst_device *dev = NULL;
783 if (dev_handler == NULL) {
784 PRINT_ERROR("%s: valid device handler must be supplied",
790 if (dev_name == NULL) {
791 PRINT_ERROR("%s: device name must be non-NULL", __func__);
796 res = scst_dev_handler_check(dev_handler);
800 res = scst_suspend_activity(true);
804 if (mutex_lock_interruptible(&scst_mutex) != 0) {
809 res = scst_alloc_device(GFP_KERNEL, &dev);
813 dev->type = dev_handler->type;
814 dev->scsi_dev = NULL;
815 dev->virt_name = dev_name;
816 dev->virt_id = scst_virt_dev_last_id++;
818 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
822 rc = scst_assign_dev_handler(dev, dev_handler);
829 mutex_unlock(&scst_mutex);
832 scst_resume_activity();
836 PRINT_INFO("Attached to virtual device %s (id %d)",
837 dev_name, dev->virt_id);
839 PRINT_INFO("Failed to attach to virtual device %s", dev_name);
/* error unwind: unlink and free the half-registered device */
845 list_del(&dev->dev_list_entry);
846 scst_free_device(dev);
849 EXPORT_SYMBOL(scst_register_virtual_device);
/*
 * Unregisters a virtual device by the virt_id previously returned from
 * scst_register_virtual_device(): unlinks it, detaches it from all
 * ACGs, reassigns the null handler and frees it, all under suspended
 * activity and scst_mutex.
 * NOTE(review): truncated listing — the found-branch assignment/break
 * and the not-found goto are not visible.
 */
851 void scst_unregister_virtual_device(int id)
853 struct scst_device *d, *dev = NULL;
854 struct scst_acg_dev *acg_dev, *aa;
858 scst_suspend_activity(false);
859 mutex_lock(&scst_mutex);
861 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
862 if (d->virt_id == id) {
864 TRACE_DBG("Target device %p (id %d) found", dev, id);
869 PRINT_ERROR("Target virtual device (id %d) not found", id);
873 list_del(&dev->dev_list_entry);
875 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
876 dev_acg_dev_list_entry)
878 scst_acg_remove_dev(acg_dev->acg, dev, true);
881 scst_assign_dev_handler(dev, &scst_null_devtype);
/* log before freeing: dev->virt_name becomes invalid after free */
883 PRINT_INFO("Detached from virtual device %s (id %d)",
884 dev->virt_name, dev->virt_id);
886 scst_free_device(dev);
889 mutex_unlock(&scst_mutex);
890 scst_resume_activity();
895 EXPORT_SYMBOL(scst_unregister_virtual_device);
/*
 * Registers a dev handler for real (pass-through) SCSI devices.
 * Checks the interface version and handler callbacks, rejects
 * pass-through handlers on old unpatched kernels, refuses duplicate
 * names, builds /proc entries, links the handler into
 * scst_dev_type_list and attaches it to every already-known device of
 * matching SCSI type that currently has the null handler.
 * NOTE(review): truncated listing — gotos/labels/returns not visible.
 */
897 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
900 struct scst_dev_type *dt;
901 struct scst_device *dev;
907 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
908 PRINT_ERROR("Incorrect version of dev handler %s",
914 res = scst_dev_handler_check(dev_type);
918 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
919 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
920 !defined(CONFIG_SCST_STRICT_SERIALIZING)
921 if (dev_type->exec == NULL) {
922 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
923 "supported. Consider applying on your kernel patch "
924 "scst_exec_req_fifo-<kernel-version> or define "
925 "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
932 res = scst_suspend_activity(true);
936 if (mutex_lock_interruptible(&scst_mutex) != 0) {
942 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
943 if (strcmp(dt->name, dev_type->name) == 0) {
944 PRINT_ERROR("Device type handler \"%s\" already "
953 res = scst_build_proc_dev_handler_dir_entries(dev_type);
957 list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
/* adopt existing devices of this SCSI type still on the null handler */
959 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
960 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
962 if (dev->scsi_dev->type == dev_type->type)
963 scst_assign_dev_handler(dev, dev_type);
966 mutex_unlock(&scst_mutex);
967 scst_resume_activity();
970 PRINT_INFO("Device handler \"%s\" for type %d registered "
971 "successfully", dev_type->name, dev_type->type);
/* error unwind path */
979 mutex_unlock(&scst_mutex);
982 scst_resume_activity();
985 PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
986 dev_type->name, dev_type->type);
989 EXPORT_SYMBOL(__scst_register_dev_driver);
/*
 * Unregisters a dev handler: finds it by name, reassigns the null
 * handler to every device currently using it, unlinks it from
 * scst_dev_type_list and removes its /proc entries.
 * NOTE(review): truncated listing — the not-found early-out path and
 * braces are not visible.
 */
991 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
993 struct scst_device *dev;
994 struct scst_dev_type *dt;
999 scst_suspend_activity(false);
1000 mutex_lock(&scst_mutex);
1002 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
1003 if (strcmp(dt->name, dev_type->name) == 0) {
1009 PRINT_ERROR("Dev handler \"%s\" isn't registered",
/* demote every device bound to this handler back to the null handler */
1014 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
1015 if (dev->handler == dev_type) {
1016 scst_assign_dev_handler(dev, &scst_null_devtype);
1017 TRACE_DBG("Dev handler removed from device %p", dev);
1021 list_del(&dev_type->dev_type_list_entry);
1023 mutex_unlock(&scst_mutex);
1024 scst_resume_activity();
1026 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1028 PRINT_INFO("Device handler \"%s\" for type %d unloaded",
1029 dev_type->name, dev_type->type);
/* error-path unlock/resume */
1036 mutex_unlock(&scst_mutex);
1037 scst_resume_activity();
1040 EXPORT_SYMBOL(scst_unregister_dev_driver);
/*
 * Registers a virtual-device handler: checks the interface version and
 * handler callbacks, optionally builds /proc entries (unless no_proc),
 * and logs success with or without a type number depending on whether
 * dev_type->type is set (-1 means typeless).
 * NOTE(review): truncated listing — gotos/returns between the visible
 * lines are missing.
 */
1042 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1043 const char *version)
1049 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1050 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1056 res = scst_dev_handler_check(dev_type);
1060 if (!dev_type->no_proc) {
1061 res = scst_build_proc_dev_handler_dir_entries(dev_type);
1066 if (dev_type->type != -1) {
1067 PRINT_INFO("Virtual device handler %s for type %d "
1068 "registered successfully", dev_type->name,
1071 PRINT_INFO("Virtual device handler \"%s\" registered "
1072 "successfully", dev_type->name);
1076 TRACE_EXIT_RES(res);
1080 PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1084 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
1086 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1090 if (!dev_type->no_proc)
1091 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1093 PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1098 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1100 /* Called under scst_mutex */
/*
 * Spawns 'num' additional per-device worker threads named
 * "<handler>d<devnum>_<n>", links each into dev->threads_list, shares
 * the device's io_context with each thread, and wakes them.  On kthread
 * failure it unwinds by deleting the threads created so far.
 * NOTE(review): truncated listing — 'nm' and counters i/n/res are
 * declared in lines not visible here.
 */
1101 int scst_add_dev_threads(struct scst_device *dev, int num)
1105 struct scst_cmd_thread_t *thr;
1110 list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1114 for (i = 0; i < num; i++) {
1115 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1118 PRINT_ERROR("Failed to allocate thr %d", res);
/* bounded copy of the handler name into the thread-name buffer */
1121 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1122 nm[ARRAY_SIZE(nm)-1] = '\0';
1123 thr->cmd_thread = kthread_create(scst_cmd_thread,
1124 &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1125 if (IS_ERR(thr->cmd_thread)) {
1126 res = PTR_ERR(thr->cmd_thread);
1127 PRINT_ERROR("kthread_create() failed: %d", res);
1132 list_add(&thr->thread_list_entry, &dev->threads_list);
1135 * ToDo: better to use tgt_dev_io_context instead, but we
1136 * are not ready for that yet.
/* replace the kthread's fresh io_context with the device's shared one */
1138 __exit_io_context(thr->cmd_thread->io_context);
1139 thr->cmd_thread->io_context = ioc_task_link(dev->dev_io_ctx);
1140 TRACE_DBG("Setting dev io ctx %p on thr %d", dev->dev_io_ctx,
1141 thr->cmd_thread->pid);
1143 wake_up_process(thr->cmd_thread);
1147 TRACE_EXIT_RES(res);
/* error unwind: stop the i threads created before the failure */
1151 scst_del_dev_threads(dev, i);
1155 /* Called under scst_mutex and suspended activity */
/*
 * Gives a device its own private thread pool when its handler requests
 * one (threads_num > 0): initializes dev->cmd_lists, spawns the
 * threads, registers the cmd_lists with the global resume machinery
 * (scst_cmd_lists_list, under scst_suspend_mutex) and points
 * dev->p_cmd_lists at the private lists instead of the shared ones.
 */
1156 static int scst_create_dev_threads(struct scst_device *dev)
1163 if (dev->handler->threads_num <= 0)
1166 threads_num = dev->handler->threads_num;
1168 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1169 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1170 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1172 res = scst_add_dev_threads(dev, threads_num);
/* make __scst_resume_activity() wake these private threads too */
1176 mutex_lock(&scst_suspend_mutex);
1177 list_add_tail(&dev->cmd_lists.lists_list_entry,
1178 &scst_cmd_lists_list);
1179 mutex_unlock(&scst_suspend_mutex);
1181 dev->p_cmd_lists = &dev->cmd_lists;
1184 TRACE_EXIT_RES(res);
1188 /* Called under scst_mutex */
/*
 * Stops up to 'num' per-device threads, newest first (-1 appears to
 * mean "all", given the (num > 0) guard below and the -1 caller in
 * scst_stop_dev_threads — TODO confirm against full source).  Before
 * stopping each thread, drops any per-tgt_dev thread-local data that
 * references it.
 */
1189 void scst_del_dev_threads(struct scst_device *dev, int num)
1191 struct scst_cmd_thread_t *ct, *tmp;
1199 list_for_each_entry_safe_reverse(ct, tmp, &dev->threads_list,
1200 thread_list_entry) {
1202 struct scst_tgt_dev *tgt_dev;
1204 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1205 dev_tgt_dev_list_entry) {
1206 struct scst_thr_data_hdr *td;
1207 td = __scst_find_thr_data(tgt_dev, ct->cmd_thread);
1209 scst_thr_data_put(td);
1214 rc = kthread_stop(ct->cmd_thread);
1216 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1218 list_del(&ct->thread_list_entry);
/* num <= 0 means no limit: keep going until the list is empty */
1221 if ((num > 0) && (++i >= num))
1230 /* Called under scst_mutex and suspended activity */
/*
 * Tears down a device's private thread pool, if it has one: stops all
 * threads (-1 = all) and, when the device was using its own cmd_lists,
 * unregisters them from the global resume list under
 * scst_suspend_mutex.
 */
1231 static void scst_stop_dev_threads(struct scst_device *dev)
1235 if (list_empty(&dev->threads_list))
1238 scst_del_dev_threads(dev, -1);
1240 if (dev->p_cmd_lists == &dev->cmd_lists) {
1241 mutex_lock(&scst_suspend_mutex);
1242 list_del(&dev->cmd_lists.lists_list_entry);
1243 mutex_unlock(&scst_suspend_mutex);
1251 /* The activity supposed to be suspended and scst_mutex held */
/*
 * Switches a device to a new dev handler.  Sequence: detach_tgt() for
 * every tgt_dev then detach() on the old handler, stop its private
 * threads, install the new handler, (re)create threads, then attach()
 * and attach_tgt() on the new handler.  attach_tgt() failures unwind
 * only the tgt_devs collected in attached_tgt_devs; on deeper failure
 * the device falls back to scst_null_devtype.
 * NOTE(review): truncated listing — goto targets, labels and the
 * statements between visible lines are missing; the exact unwind order
 * cannot be confirmed from this view.
 */
1252 int scst_assign_dev_handler(struct scst_device *dev,
1253 struct scst_dev_type *handler)
1256 struct scst_tgt_dev *tgt_dev;
1257 LIST_HEAD(attached_tgt_devs);
1261 sBUG_ON(handler == NULL);
/* no-op if the handler is already assigned */
1263 if (dev->handler == handler)
1266 if (dev->handler && dev->handler->detach_tgt) {
1267 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1268 dev_tgt_dev_list_entry) {
1269 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1271 dev->handler->detach_tgt(tgt_dev);
1272 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1276 if (dev->handler && dev->handler->detach) {
1277 TRACE_DBG("%s", "Calling dev handler's detach()");
1278 dev->handler->detach(dev);
1279 TRACE_DBG("%s", "Old handler's detach() returned");
1282 scst_stop_dev_threads(dev);
1284 dev->handler = handler;
1287 res = scst_create_dev_threads(dev);
1292 if (handler && handler->attach) {
1293 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1294 res = handler->attach(dev);
1295 TRACE_DBG("New dev handler's attach() returned %d", res);
1297 PRINT_ERROR("New device handler's %s attach() "
1298 "failed: %d", handler->name, res);
1303 if (handler && handler->attach_tgt) {
1304 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1305 dev_tgt_dev_list_entry) {
1306 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1308 res = handler->attach_tgt(tgt_dev);
1309 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1311 PRINT_ERROR("Device handler's %s attach_tgt() "
1312 "failed: %d", handler->name, res);
1313 goto out_err_detach_tgt;
/* remember successfully attached tgt_devs for targeted unwind */
1315 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1316 &attached_tgt_devs);
/* failure cleanup: stop threads, fall back to the null handler */
1322 scst_stop_dev_threads(dev);
1326 dev->handler = &scst_null_devtype;
1329 TRACE_EXIT_RES(res);
/* out_err_detach_tgt: undo attach_tgt() only where it succeeded */
1333 if (handler && handler->detach_tgt) {
1334 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1335 extra_tgt_dev_list_entry)
1337 TRACE_DBG("Calling handler's detach_tgt(%p)",
1339 handler->detach_tgt(tgt_dev);
1340 TRACE_DBG("%s", "Handler's detach_tgt() returned");
1343 if (handler && handler->detach) {
1344 TRACE_DBG("%s", "Calling handler's detach()");
1345 handler->detach(dev);
1346 TRACE_DBG("%s", "Handler's detach() returned");
1351 int scst_global_threads_count(void)
1356 * Just to lower the race window, when user can get just changed value
1358 mutex_lock(&scst_global_threads_mutex);
1359 i = scst_nr_global_threads;
1360 mutex_unlock(&scst_global_threads_mutex);
1364 static void scst_threads_info_init(void)
1366 mutex_init(&scst_global_threads_mutex);
1367 INIT_LIST_HEAD(&scst_global_threads_list);
1370 /* scst_global_threads_mutex supposed to be held */
/*
 * Stops global worker threads and removes them from
 * scst_global_threads_list, decrementing scst_nr_global_threads per
 * thread.  Presumably num < 0 means "all" (mirrors
 * scst_del_dev_threads) — the loop-termination lines are truncated out
 * of this listing; confirm against the full source.
 */
1371 void __scst_del_global_threads(int num)
1373 struct scst_cmd_thread_t *ct, *tmp;
1380 list_for_each_entry_safe(ct, tmp, &scst_global_threads_list,
1381 thread_list_entry) {
1384 res = kthread_stop(ct->cmd_thread);
1386 TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1387 list_del(&ct->thread_list_entry);
1389 scst_nr_global_threads--;
1400 /* scst_global_threads_mutex supposed to be held */
/*
 * Spawns 'num' global worker threads ("scsi_tgt%d") bound to
 * scst_main_cmd_lists, links each into scst_global_threads_list,
 * increments scst_nr_global_threads, and wakes them.  On kthread
 * failure unwinds by deleting the i threads created so far.
 */
1401 int __scst_add_global_threads(int num)
/* monotonically increasing suffix for thread names */
1404 static int scst_thread_num;
1408 for (i = 0; i < num; i++) {
1409 struct scst_cmd_thread_t *thr;
1411 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1414 PRINT_ERROR("fail to allocate thr %d", res);
1417 thr->cmd_thread = kthread_create(scst_cmd_thread,
1418 &scst_main_cmd_lists, "scsi_tgt%d",
1420 if (IS_ERR(thr->cmd_thread)) {
1421 res = PTR_ERR(thr->cmd_thread);
1422 PRINT_ERROR("kthread_create() failed: %d", res);
1427 list_add(&thr->thread_list_entry, &scst_global_threads_list);
1428 scst_nr_global_threads++;
1430 wake_up_process(thr->cmd_thread);
1435 TRACE_EXIT_RES(res);
/* error unwind: stop the threads created before the failure */
1439 __scst_del_global_threads(i);
1443 int scst_add_global_threads(int num)
1449 mutex_lock(&scst_global_threads_mutex);
1450 res = __scst_add_global_threads(num);
1451 mutex_unlock(&scst_global_threads_mutex);
1453 TRACE_EXIT_RES(res);
1456 EXPORT_SYMBOL(scst_add_global_threads);
1458 void scst_del_global_threads(int num)
1462 mutex_lock(&scst_global_threads_mutex);
1463 __scst_del_global_threads(num);
1464 mutex_unlock(&scst_global_threads_mutex);
1469 EXPORT_SYMBOL(scst_del_global_threads);
1471 static void scst_stop_all_threads(void)
1475 mutex_lock(&scst_global_threads_mutex);
1476 __scst_del_global_threads(-1);
1477 if (scst_mgmt_cmd_thread)
1478 kthread_stop(scst_mgmt_cmd_thread);
1479 if (scst_mgmt_thread)
1480 kthread_stop(scst_mgmt_thread);
1481 if (scst_init_cmd_thread)
1482 kthread_stop(scst_init_cmd_thread);
1483 mutex_unlock(&scst_global_threads_mutex);
/*
 * Module-init helper: spawns 'num' global worker threads plus the three
 * singleton kthreads — "scsi_tgt_init" (scst_init_thread), the TM
 * thread (scst_tm_thread) and "scsi_tgt_mgmt"
 * (scst_global_mgmt_thread).  On each kthread failure the pointer is
 * NULLed so scst_stop_all_threads() can skip it.
 * NOTE(review): truncated listing — goto-out labels and the TM thread's
 * name argument are not visible.
 */
1489 static int scst_start_all_threads(int num)
1495 mutex_lock(&scst_global_threads_mutex);
1496 res = __scst_add_global_threads(num);
1500 scst_init_cmd_thread = kthread_run(scst_init_thread,
1501 NULL, "scsi_tgt_init");
1502 if (IS_ERR(scst_init_cmd_thread)) {
1503 res = PTR_ERR(scst_init_cmd_thread);
1504 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1505 scst_init_cmd_thread = NULL;
1509 scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
1511 if (IS_ERR(scst_mgmt_cmd_thread)) {
1512 res = PTR_ERR(scst_mgmt_cmd_thread);
1513 PRINT_ERROR("kthread_create() for TM failed: %d", res);
1514 scst_mgmt_cmd_thread = NULL;
1518 scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
1519 NULL, "scsi_tgt_mgmt");
1520 if (IS_ERR(scst_mgmt_thread)) {
1521 res = PTR_ERR(scst_mgmt_thread);
1522 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1523 scst_mgmt_thread = NULL;
1528 mutex_unlock(&scst_global_threads_mutex);
1529 TRACE_EXIT_RES(res);
1537 EXPORT_SYMBOL(scst_get);
1543 EXPORT_SYMBOL(scst_put);
/*
 * Class-interface "device added" hook.  Signature differs by kernel:
 * class_device before 2.6.26, plain device after.  Resolves the
 * scsi_device from the class device and registers it with SCST unless
 * the host is SCST's own loopback driver (SCST_LOCAL_NAME), which
 * would otherwise feed SCST's targets back into itself.
 * NOTE(review): truncated listing — #else/#endif pairing and the return
 * are not all visible.
 */
1545 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1546 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1548 static int scst_add(struct device *cdev, struct class_interface *intf)
1551 struct scsi_device *scsidp;
1556 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1557 scsidp = to_scsi_device(cdev->dev);
1559 scsidp = to_scsi_device(cdev->parent);
1562 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1563 res = scst_register_device(scsidp);
/*
 * Class-interface "device removed" hook; mirror image of scst_add():
 * resolves the scsi_device (kernel-version-dependent member) and
 * unregisters it from SCST, again skipping SCST_LOCAL_NAME hosts.
 */
1569 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1570 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1572 static void scst_remove(struct device *cdev, struct class_interface *intf)
1575 struct scsi_device *scsidp;
1579 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1580 scsidp = to_scsi_device(cdev->dev)
1582 scsidp = to_scsi_device(cdev->parent);
1585 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1586 scst_unregister_device(scsidp);
/*
 * SCSI-device class interface registration: pre-2.6.26 kernels use
 * .add/.remove (class_device based), newer ones .add_dev/.remove_dev.
 * NOTE(review): the pre-2.6.26 .add initializer line is truncated out
 * of this listing.
 */
1592 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1593 static struct class_interface scst_interface = {
1595 .remove = scst_remove,
1598 static struct class_interface scst_interface = {
1599 .add_dev = scst_add,
1600 .remove_dev = scst_remove,
/*
 * scst_print_config() - log the compile-time feature set.
 *
 * Builds a single "Enabled features: ..." line in a stack buffer,
 * appending one token per enabled CONFIG_SCST_* option, and emits it via
 * PRINT_INFO.  'j' apparently holds the buffer position right after the
 * prefix, so the very first token gets no leading ", "
 * (the "(j == i) ? \"\" : \", \"" idiom on every append).
 *
 * NOTE(review): excerpt -- braces, the 'j = i;' assignment, the #endif
 * lines and the final 'if (j != i)' guard are not visible here.
 * NOTE(review): if the buffer ever truncates, snprintf's return value
 * (the would-be length) can push 'i' past sizeof(buf), making the
 * size_t expression 'sizeof(buf) - i' wrap huge -- pre-existing hazard,
 * harmless only while the buffer stays large enough for all tokens.
 */
1604 static void __init scst_print_config(void)
1609 i = snprintf(buf, sizeof(buf), "Enabled features: ");
1612 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1613 i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1616 #ifdef CONFIG_SCST_EXTRACHECKS
1617 i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1618 (j == i) ? "" : ", ");
1621 #ifdef CONFIG_SCST_TRACING
1622 i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1623 (j == i) ? "" : ", ");
1626 #ifdef CONFIG_SCST_DEBUG
1627 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1628 (j == i) ? "" : ", ");
1631 #ifdef CONFIG_SCST_DEBUG_TM
1632 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1633 (j == i) ? "" : ", ");
1636 #ifdef CONFIG_SCST_DEBUG_RETRY
1637 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1638 (j == i) ? "" : ", ");
1641 #ifdef CONFIG_SCST_DEBUG_OOM
1642 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1643 (j == i) ? "" : ", ");
1646 #ifdef CONFIG_SCST_DEBUG_SN
1647 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1648 (j == i) ? "" : ", ");
1651 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1652 i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1653 (j == i) ? "" : ", ");
1656 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1657 i += snprintf(&buf[i], sizeof(buf) - i,
1658 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1659 (j == i) ? "" : ", ");
1662 #ifdef CONFIG_SCST_STRICT_SECURITY
1663 i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1664 (j == i) ? "" : ", ");
1668 PRINT_INFO("%s", buf);
/*
 * init_scst() - SCST module initialization (module_init entry point).
 *
 * Construction order (mirrored in reverse by the goto-unwind labels at
 * the bottom and by exit_scst()):
 *   compile-time sanity checks -> global locks/lists/waitqueues ->
 *   thread-count selection -> slab caches -> mempools -> command-memory
 *   limits -> SGV pools -> default ACG -> SCSI class interface ->
 *   per-CPU tasklets -> worker threads -> /proc entries.
 *
 * NOTE(review): this listing is an excerpt -- many lines (braces, gotos
 * after failed calls, #else/#endif, TRACE_ENTRY/EXIT, 'out*' label
 * bodies between the visible ones) are not shown.
 */
1671 static int __init init_scst(void)
/* Compile-time checks: SCST's sense buffer must be compatible with the
 * kernel's, and the serial-number fields compared against each other
 * must have identical widths. */
1678 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1680 struct scsi_request *req;
1681 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1682 sizeof(req->sr_sense_buffer));
1686 struct scsi_sense_hdr *shdr;
1687 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1691 struct scst_tgt_dev *t;
1693 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1694 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
/* Runtime counterpart of the #warning in the file header: nag about the
 * missing io_context patch. */
1697 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
1698 #if !defined(SCST_IO_CONTEXT)
1699 PRINT_WARNING("%s", "Patch io_context was not applied on "
1700 "your kernel. SCST will be working with not the best "
1704 PRINT_WARNING("%s", "There is no patch io_context for your kernel "
1705 "version. For performance reasons it is strongly recommended "
1706 "to upgrade your kernel to version >= 2.6.27.x.");
/* Initialize every global lock, list, waitqueue and counter.  Nothing
 * here can fail. */
1709 mutex_init(&scst_mutex);
1710 INIT_LIST_HEAD(&scst_template_list);
1711 INIT_LIST_HEAD(&scst_dev_list);
1712 INIT_LIST_HEAD(&scst_dev_type_list);
1713 spin_lock_init(&scst_main_lock);
1714 INIT_LIST_HEAD(&scst_acg_list);
1715 spin_lock_init(&scst_init_lock);
1716 init_waitqueue_head(&scst_init_cmd_list_waitQ);
1717 INIT_LIST_HEAD(&scst_init_cmd_list);
1718 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1719 scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1721 atomic_set(&scst_cmd_count, 0);
1722 spin_lock_init(&scst_mcmd_lock);
1723 INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1724 INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1725 init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1726 init_waitqueue_head(&scst_mgmt_waitQ);
1727 spin_lock_init(&scst_mgmt_lock);
1728 INIT_LIST_HEAD(&scst_sess_init_list);
1729 INIT_LIST_HEAD(&scst_sess_shut_list);
1730 init_waitqueue_head(&scst_dev_cmd_waitQ);
1731 mutex_init(&scst_suspend_mutex);
1732 INIT_LIST_HEAD(&scst_cmd_lists_list);
1733 scst_virt_dev_last_id = 1;
1734 spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1735 INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1736 init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1737 list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1738 &scst_cmd_lists_list);
/* Default worker-thread count: one per online CPU; sanitize a bogus
 * module-parameter value instead of failing the load. */
1740 scst_num_cpus = num_online_cpus();
1742 /* ToDo: register_cpu_notifier() */
1744 if (scst_threads == 0)
1745 scst_threads = scst_num_cpus;
1747 if (scst_threads < 1) {
1748 PRINT_ERROR("%s", "scst_threads can not be less than 1");
1749 scst_threads = scst_num_cpus;
1752 scst_threads_info_init();
/* Create slab cache for struct 's' into pointer 'p'; on allocation
 * failure control jumps to label 'o' (the NULL check is on macro lines
 * not visible in this excerpt). */
1754 #define INIT_CACHEP(p, s, o) do { \
1755 p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
1756 TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
1757 sizeof(struct s)); \
1764 INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1765 INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1766 out_destroy_mgmt_cache);
1767 INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1768 out_destroy_mgmt_stub_cache);
/* Local wrapper type so KMEM_CACHE() can size the sense-buffer cache. */
1770 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1771 INIT_CACHEP(scst_sense_cachep, scst_sense,
1772 out_destroy_ua_cache);
1774 INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
1775 INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
1776 INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1777 INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1778 INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
/* Mempools guarantee forward progress for these objects under memory
 * pressure; pool sizes are per the comment below. */
1780 scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1781 mempool_free_slab, scst_mgmt_cachep);
1782 if (scst_mgmt_mempool == NULL) {
1784 goto out_destroy_acg_cache;
1788 * All mgmt stubs, UAs and sense buffers are bursty and loosing them
1789 * may have fatal consequences, so let's have big pools for them.
1792 scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1793 mempool_free_slab, scst_mgmt_stub_cachep);
1794 if (scst_mgmt_stub_mempool == NULL) {
1796 goto out_destroy_mgmt_mempool;
1799 scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
1800 mempool_free_slab, scst_ua_cachep);
1801 if (scst_ua_mempool == NULL) {
1803 goto out_destroy_mgmt_stub_mempool;
1806 scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
1807 mempool_free_slab, scst_sense_cachep);
1808 if (scst_sense_mempool == NULL) {
1810 goto out_destroy_ua_mempool;
1813 scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
1814 mempool_free_slab, scst_aen_cachep);
1815 if (scst_aen_mempool == NULL) {
1817 goto out_destroy_sense_mempool;
/* Default command-memory limit (scst_max_cmd_mem is in MB): a quarter
 * of low memory.  NOTE(review): on the 32-bit branch the cap
 * '(uint64_t)1 << 30' would be 2^30 MB if taken literally -- possibly a
 * bytes-vs-MB mixup in the original; verify against upstream before
 * relying on it. */
1820 if (scst_max_cmd_mem == 0) {
1823 #if BITS_PER_LONG == 32
1824 scst_max_cmd_mem = min(
1825 (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
1826 >> 20) >> 2, (uint64_t)1 << 30);
1828 scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
/* Per-device limit: clamp an oversized module parameter, otherwise
 * default to 40% (2/5) of the global limit. */
1833 if (scst_max_dev_cmd_mem != 0) {
1834 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1835 PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1836 "scst_max_cmd_mem (%d)",
1837 scst_max_dev_cmd_mem,
1839 scst_max_dev_cmd_mem = scst_max_cmd_mem;
1842 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
/* SGV pools take the limit in pages: MB -> KB (<< 10), then KB -> pages
 * (>> (PAGE_SHIFT - 10)). */
1844 res = scst_sgv_pools_init(
1845 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1847 goto out_destroy_aen_mempool;
1849 scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1850 if (scst_default_acg == NULL) {
1852 goto out_destroy_sgv_pool;
/* From here on scst_add()/scst_remove() may be called for existing and
 * future SCSI devices. */
1855 res = scsi_register_interface(&scst_interface);
1859 scst_scsi_op_list_init();
/* One tasklet (plus its own lock and cmd list) per possible CPU slot. */
1861 for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1862 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1863 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1864 tasklet_init(&scst_tasklets[i].tasklet,
1865 (void *)scst_cmd_tasklet,
1866 (unsigned long)&scst_tasklets[i]);
1869 TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1872 res = scst_start_all_threads(scst_threads);
1874 goto out_thread_free;
1876 res = scst_proc_init_module();
1878 goto out_thread_free;
1881 PRINT_INFO("SCST version %s loaded successfully (max mem for "
1882 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1883 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1885 scst_print_config();
1888 TRACE_EXIT_RES(res);
/* Error unwinding: each label releases one resource, in exact reverse
 * order of acquisition above, then falls through to the next. */
1892 scst_stop_all_threads();
1894 scsi_unregister_interface(&scst_interface);
1897 scst_destroy_acg(scst_default_acg);
1899 out_destroy_sgv_pool:
1900 scst_sgv_pools_deinit();
1902 out_destroy_aen_mempool:
1903 mempool_destroy(scst_aen_mempool);
1905 out_destroy_sense_mempool:
1906 mempool_destroy(scst_sense_mempool);
1908 out_destroy_ua_mempool:
1909 mempool_destroy(scst_ua_mempool);
1911 out_destroy_mgmt_stub_mempool:
1912 mempool_destroy(scst_mgmt_stub_mempool);
1914 out_destroy_mgmt_mempool:
1915 mempool_destroy(scst_mgmt_mempool);
1917 out_destroy_acg_cache:
1918 kmem_cache_destroy(scst_acgd_cachep);
1920 out_destroy_tgt_cache:
1921 kmem_cache_destroy(scst_tgtd_cachep);
1923 out_destroy_sess_cache:
1924 kmem_cache_destroy(scst_sess_cachep);
1926 out_destroy_cmd_cache:
1927 kmem_cache_destroy(scst_cmd_cachep);
1929 out_destroy_aen_cache:
1930 kmem_cache_destroy(scst_aen_cachep);
1932 out_destroy_sense_cache:
1933 kmem_cache_destroy(scst_sense_cachep);
1935 out_destroy_ua_cache:
1936 kmem_cache_destroy(scst_ua_cachep);
1938 out_destroy_mgmt_stub_cache:
1939 kmem_cache_destroy(scst_mgmt_stub_cachep);
1941 out_destroy_mgmt_cache:
1942 kmem_cache_destroy(scst_mgmt_cachep);
/*
 * exit_scst() - module teardown (module_exit entry point).
 *
 * Releases everything init_scst() created, in exact reverse order of
 * construction: /proc entries -> threads -> SCSI class interface ->
 * default ACG -> SGV pools -> mempools -> slab caches.
 *
 * NOTE(review): excerpt -- braces and a few lines are not visible here.
 */
1946 static void __exit exit_scst(void)
1950 /* ToDo: unregister_cpu_notifier() */
1952 scst_proc_cleanup_module();
1954 scst_stop_all_threads();
1956 scsi_unregister_interface(&scst_interface);
1957 scst_destroy_acg(scst_default_acg);
1959 scst_sgv_pools_deinit();
/* Symmetric counterpart of INIT_CACHEP in init_scst(); presumably also
 * logs via TRACE_MEM on lines not shown -- confirm. */
1961 #define DEINIT_CACHEP(p) do { \
1962 kmem_cache_destroy(p); \
/* Mempools must go before the caches that back them. */
1966 mempool_destroy(scst_mgmt_mempool);
1967 mempool_destroy(scst_mgmt_stub_mempool);
1968 mempool_destroy(scst_ua_mempool);
1969 mempool_destroy(scst_sense_mempool);
1970 mempool_destroy(scst_aen_mempool);
1972 DEINIT_CACHEP(scst_mgmt_cachep);
1973 DEINIT_CACHEP(scst_mgmt_stub_cachep);
1974 DEINIT_CACHEP(scst_ua_cachep);
1975 DEINIT_CACHEP(scst_sense_cachep);
1976 DEINIT_CACHEP(scst_aen_cachep);
1977 DEINIT_CACHEP(scst_cmd_cachep);
1978 DEINIT_CACHEP(scst_sess_cachep);
1979 DEINIT_CACHEP(scst_tgtd_cachep);
1980 DEINIT_CACHEP(scst_acgd_cachep);
1982 PRINT_INFO("%s", "SCST unloaded");
/* Module entry/exit registration and modinfo metadata. */
1989 module_init(init_scst);
1990 module_exit(exit_scst);
1992 MODULE_AUTHOR("Vladislav Bolkhovitin");
1993 MODULE_LICENSE("GPL");
1994 MODULE_DESCRIPTION("SCSI target core");
1995 MODULE_VERSION(SCST_VERSION_STRING);