4 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2008 CMS Distribution Limited
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
33 #include "scst_priv.h"
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not\
38 recommended for performance reasons. Consider changing VMSPLIT\
39 option or use a 64-bit configuration instead. See README file for\
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44 !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied on\
46 your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined.\
47 Pass-through dev handlers will not work."
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
51 #if !defined(SCST_IO_CONTEXT)
52 #warning "Patch io_context-<kernel-version>.patch was not applied\
53 on your kernel. SCST will be working with not the best performance."
/*
 * NOTE(review): this file is a partial listing extract — each line begins
 * with its original line number and gaps in that numbering mark elided
 * lines.  Code below is kept byte-identical; only comments are added.
 */
58 ** SCST global variables. They are all uninitialized to have their layout in
59 ** memory be exactly as specified. Otherwise compiler puts zero-initialized
60 ** variable separately from nonzero-initialized ones.
64 * All targets, devices and dev_types management is done under this mutex.
66 * It must NOT be used in any works (schedule_work(), etc.), because
67 * otherwise a deadlock (double lock, actually) is possible, e.g., with
68 * scst_user detach_tgt(), which is called under scst_mutex and calls
69 * flush_scheduled_work().
71 struct mutex scst_mutex;
73 /* All 3 protected by scst_mutex */
74 static struct list_head scst_template_list;
75 struct list_head scst_dev_list;
76 struct list_head scst_dev_type_list;
78 spinlock_t scst_main_lock;
/* Slab caches with their backing mempools for allocation under pressure. */
80 static struct kmem_cache *scst_mgmt_cachep;
81 mempool_t *scst_mgmt_mempool;
82 static struct kmem_cache *scst_mgmt_stub_cachep;
83 mempool_t *scst_mgmt_stub_mempool;
84 static struct kmem_cache *scst_ua_cachep;
85 mempool_t *scst_ua_mempool;
86 static struct kmem_cache *scst_sense_cachep;
87 mempool_t *scst_sense_mempool;
88 static struct kmem_cache *scst_aen_cachep;
89 mempool_t *scst_aen_mempool;
90 struct kmem_cache *scst_tgtd_cachep;
91 struct kmem_cache *scst_sess_cachep;
92 struct kmem_cache *scst_acgd_cachep;
94 struct list_head scst_acg_list;
95 struct scst_acg *scst_default_acg;
/* Init-command queue: producers wake scst_init_cmd_list_waitQ. */
97 spinlock_t scst_init_lock;
98 wait_queue_head_t scst_init_cmd_list_waitQ;
99 struct list_head scst_init_cmd_list;
100 unsigned int scst_init_poll_cnt;
102 struct kmem_cache *scst_cmd_cachep;
104 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
105 unsigned long scst_trace_flag;
108 unsigned long scst_flags;
/* Count of in-flight commands; suspend logic waits for it to hit zero. */
109 atomic_t scst_cmd_count;
111 struct scst_cmd_lists scst_main_cmd_lists;
113 struct scst_tasklet scst_tasklets[NR_CPUS];
/* Management-command queues, protected by scst_mcmd_lock. */
115 spinlock_t scst_mcmd_lock;
116 struct list_head scst_active_mgmt_cmd_list;
117 struct list_head scst_delayed_mgmt_cmd_list;
118 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
120 wait_queue_head_t scst_mgmt_waitQ;
121 spinlock_t scst_mgmt_lock;
122 struct list_head scst_sess_init_list;
123 struct list_head scst_sess_shut_list;
125 wait_queue_head_t scst_dev_cmd_waitQ;
127 static struct mutex scst_suspend_mutex;
128 /* protected by scst_suspend_mutex */
129 static struct list_head scst_cmd_lists_list;
131 static int scst_threads;
/* Global command-thread pool bookkeeping, under scst_global_threads_mutex. */
132 struct mutex scst_global_threads_mutex;
133 u32 scst_nr_global_threads;
134 static struct list_head scst_global_threads_list;
135 static struct task_struct *scst_init_cmd_thread;
136 static struct task_struct *scst_mgmt_thread;
137 static struct task_struct *scst_mgmt_cmd_thread;
139 static int suspend_count;
141 static int scst_virt_dev_last_id; /* protected by scst_mutex */
143 static unsigned int scst_max_cmd_mem;
144 unsigned int scst_max_dev_cmd_mem;
146 module_param_named(scst_threads, scst_threads, int, 0);
147 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
149 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
150 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
151 "all SCSI commands of all devices at any given time in MB");
153 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
154 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
155 "by all SCSI commands of a device at any given time in MB");
/* Placeholder handler assigned to devices with no real dev handler;
 * initializer body elided from this extract. */
157 struct scst_dev_type scst_null_devtype = {
161 static void __scst_resume_activity(void);
/*
 * Register a target driver template: verify interface version, require the
 * detect()/release()/xmit_response() callbacks, create /proc entries, reject
 * duplicate names under scst_mutex, then run the driver's detect() before
 * linking the template into scst_template_list.
 * NOTE(review): error-path labels and returns are elided in this extract.
 */
163 int __scst_register_target_template(struct scst_tgt_template *vtt,
167 struct scst_tgt_template *t;
168 static DEFINE_MUTEX(m);
172 INIT_LIST_HEAD(&vtt->tgt_list);
174 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
175 PRINT_ERROR("Incorrect version of target %s", vtt->name);
181 PRINT_ERROR("Target driver %s doesn't have a "
182 "detect() method.", vtt->name);
188 PRINT_ERROR("Target driver %s doesn't have a "
189 "release() method.", vtt->name);
194 if (!vtt->xmit_response) {
195 PRINT_ERROR("Target driver %s doesn't have a "
196 "xmit_response() method.", vtt->name);
201 if (vtt->threads_num < 0) {
202 PRINT_ERROR("Wrong threads_num value %d for "
203 "target \"%s\"", vtt->threads_num,
209 if (!vtt->no_proc_entry) {
210 res = scst_build_proc_target_dir_entries(vtt);
/* No rdy_to_xfer callback means the stage can be done in atomic context. */
215 if (vtt->rdy_to_xfer == NULL)
216 vtt->rdy_to_xfer_atomic = 1;
/* Local mutex 'm' serializes concurrent registrations of templates. */
218 if (mutex_lock_interruptible(&m) != 0)
221 if (mutex_lock_interruptible(&scst_mutex) != 0)
223 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
224 if (strcmp(t->name, vtt->name) == 0) {
225 PRINT_ERROR("Target driver %s already registered",
227 mutex_unlock(&scst_mutex);
231 mutex_unlock(&scst_mutex);
/* detect() is called without scst_mutex held. */
233 TRACE_DBG("%s", "Calling target driver's detect()");
234 res = vtt->detect(vtt);
235 TRACE_DBG("Target driver's detect() returned %d", res);
237 PRINT_ERROR("%s", "The detect() routine failed");
242 mutex_lock(&scst_mutex);
243 list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
244 mutex_unlock(&scst_mutex);
248 PRINT_INFO("Target template %s registered successfully", vtt->name);
257 scst_cleanup_proc_target_dir_entries(vtt);
263 PRINT_ERROR("Failed to register target template %s", vtt->name);
266 EXPORT_SYMBOL(__scst_register_target_template);
/*
 * Unregister a target template: find it by name under scst_mutex, unregister
 * every target still on its tgt_list (dropping/retaking scst_mutex around
 * scst_unregister(), which itself takes the mutex), then unlink the template
 * and remove its /proc entries.
 */
268 void scst_unregister_target_template(struct scst_tgt_template *vtt)
270 struct scst_tgt *tgt;
271 struct scst_tgt_template *t;
276 mutex_lock(&scst_mutex);
278 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
279 if (strcmp(t->name, vtt->name) == 0) {
285 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
290 list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
/* Drop the mutex: scst_unregister() acquires it internally. */
291 mutex_unlock(&scst_mutex);
292 scst_unregister(tgt);
293 mutex_lock(&scst_mutex);
296 list_del(&vtt->scst_template_list_entry);
298 PRINT_INFO("Target template %s unregistered successfully", vtt->name);
301 mutex_unlock(&scst_mutex);
303 scst_cleanup_proc_target_dir_entries(vtt);
308 EXPORT_SYMBOL(scst_unregister_target_template);
/*
 * Register one target instance for a template: allocate and initialize the
 * scst_tgt (session list, retry list/timer), suspend activity, build the
 * default access-group name ("<SCST_DEFAULT_ACG_NAME>_<target_name>"),
 * create /proc entries and link the target into the template's tgt_list.
 * Returns the new tgt, or (per elided error paths) NULL-style failure.
 */
310 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
311 const char *target_name)
313 struct scst_tgt *tgt;
318 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
320 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
325 INIT_LIST_HEAD(&tgt->sess_list);
326 init_waitqueue_head(&tgt->unreg_waitQ);
328 tgt->sg_tablesize = vtt->sg_tablesize;
329 spin_lock_init(&tgt->tgt_lock);
330 INIT_LIST_HEAD(&tgt->retry_cmd_list);
331 atomic_set(&tgt->finished_cmds, 0);
332 init_timer(&tgt->retry_timer);
333 tgt->retry_timer.data = (unsigned long)tgt;
334 tgt->retry_timer.function = scst_tgt_retry_timer_fn;
/* All registration changes are made with activity suspended. */
336 rc = scst_suspend_activity(true);
338 goto out_free_tgt_err;
340 if (mutex_lock_interruptible(&scst_mutex) != 0) {
342 goto out_resume_free;
345 if (target_name != NULL) {
/* +1 for the '_' separator, +1 for the trailing NUL. */
346 int len = strlen(target_name) + 1 +
347 strlen(SCST_DEFAULT_ACG_NAME) + 1;
349 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
350 if (tgt->default_group_name == NULL) {
351 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
352 "group name failed");
354 goto out_unlock_resume;
356 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
360 rc = scst_build_proc_target_entries(tgt);
364 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
366 mutex_unlock(&scst_mutex);
367 scst_resume_activity();
369 PRINT_INFO("Target %s (%p) for template %s registered successfully",
370 target_name, tgt, vtt->name);
/* Error unwinding (labels elided): free name, unlock, resume, report. */
377 kfree(tgt->default_group_name);
380 mutex_unlock(&scst_mutex);
383 scst_resume_activity();
390 PRINT_ERROR("Failed to register target %s for template %s (error %d)",
391 target_name, vtt->name, rc);
394 EXPORT_SYMBOL(scst_register);
/* Returns nonzero when the target's session list is empty (checked under
 * scst_mutex); used as the wait_event() condition in scst_unregister(). */
396 static inline int test_sess_list(struct scst_tgt *tgt)
399 mutex_lock(&scst_mutex);
400 res = list_empty(&tgt->sess_list);
401 mutex_unlock(&scst_mutex);
/*
 * Unregister a target: call the driver's release(), kick shutdown of any
 * sessions still in SCST_SESS_SPH_READY, wait until the session list
 * drains, then (with activity suspended) unlink the target, clean up its
 * /proc entries and free its resources.
 */
405 void scst_unregister(struct scst_tgt *tgt)
407 struct scst_session *sess;
408 struct scst_tgt_template *vtt = tgt->tgtt;
412 TRACE_DBG("%s", "Calling target driver's release()");
413 tgt->tgtt->release(tgt);
414 TRACE_DBG("%s", "Target driver's release() returned");
416 mutex_lock(&scst_mutex);
418 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
419 if (sess->shut_phase == SCST_SESS_SPH_READY) {
421 * Sometimes it's hard for target driver to track all
422 * its sessions (see scst_local, for example), so let's
/* scst_unregister_session() needs scst_mutex dropped. */
425 mutex_unlock(&scst_mutex);
426 scst_unregister_session(sess, 0, NULL);
427 mutex_lock(&scst_mutex);
431 mutex_unlock(&scst_mutex);
433 TRACE_DBG("%s", "Waiting for sessions shutdown");
434 wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
435 TRACE_DBG("%s", "wait_event() returned");
437 scst_suspend_activity(false);
438 mutex_lock(&scst_mutex);
440 list_del(&tgt->tgt_list_entry);
442 scst_cleanup_proc_target_entries(tgt);
444 kfree(tgt->default_group_name);
446 mutex_unlock(&scst_mutex);
447 scst_resume_activity();
/* Timer may still be pending; sync-cancel before the tgt is freed. */
449 del_timer_sync(&tgt->retry_timer);
451 PRINT_INFO("Target %p for template %s unregistered successfully",
459 EXPORT_SYMBOL(scst_unregister);
/*
 * Wait until scst_cmd_count drops to zero.  In interruptible mode a bounded
 * interruptible wait is used and, on interruption/timeout (per the elided
 * branch), the suspend is rolled back via __scst_resume_activity(); the
 * non-interruptible path waits unconditionally.
 */
461 static int scst_susp_wait(bool interruptible)
468 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
469 (atomic_read(&scst_cmd_count) == 0),
470 SCST_SUSPENDING_TIMEOUT);
472 __scst_resume_activity();
478 wait_event(scst_dev_cmd_waitQ,
479 atomic_read(&scst_cmd_count) == 0);
481 TRACE_MGMT_DBG("wait_event() returned %d", res);
/*
 * Suspend all command processing.  Nestable via suspend_count (only the
 * first suspender does the real work).  Sets SCST_FLAG_SUSPENDING then
 * SCST_FLAG_SUSPENDED with a memory barrier so the lockless readers in
 * scst_translate_lun()/scst_mgmt_translate_lun() observe the flags before
 * this thread reads scst_cmd_count, then waits for in-flight commands.
 */
487 int scst_suspend_activity(bool interruptible)
495 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
500 mutex_lock(&scst_suspend_mutex);
502 TRACE_MGMT_DBG("suspend_count %d", suspend_count);
/* Already suspended by someone else — nothing more to do. */
504 if (suspend_count > 1)
507 set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
508 set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
510 * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
511 * ordered with scst_cmd_count. Otherwise lockless logic in
512 * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
514 smp_mb__after_set_bit();
517 * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
518 * information about scst_user behavior.
520 * ToDo: make the global suspending unneeded (switch to per-device
521 * reference counting? That would mean to switch off from lockless
522 * implementation of scst_translate_lun().. )
525 if (atomic_read(&scst_cmd_count) != 0) {
526 PRINT_INFO("Waiting for %d active commands to complete... This "
527 "might take few minutes for disks or few hours for "
528 "tapes, if you use long executed commands, like "
529 "REWIND or FORMAT. In case, if you have a hung user "
530 "space device (i.e. made using scst_user module) not "
531 "responding to any commands, if might take virtually "
532 "forever until the corresponding user space "
533 "program recovers and starts responding or gets "
534 "killed.", atomic_read(&scst_cmd_count));
538 res = scst_susp_wait(interruptible);
/* SUSPENDING cleared first; SUSPENDED stays set for the final drain. */
542 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
543 /* See comment about smp_mb() above */
544 smp_mb__after_clear_bit();
546 TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
547 atomic_read(&scst_cmd_count));
549 res = scst_susp_wait(interruptible);
554 PRINT_INFO("%s", "All active commands completed");
557 mutex_unlock(&scst_suspend_mutex);
/* Error path (label elided): undo SUSPENDING before returning. */
564 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
565 /* See comment about smp_mb() above */
566 smp_mb__after_clear_bit();
569 EXPORT_SYMBOL(scst_suspend_activity);
/*
 * Resume activity (caller holds scst_suspend_mutex).  Only the outermost
 * resume (suspend_count reaching 0, decrement elided in this extract)
 * clears SCST_FLAG_SUSPENDED and wakes every command list, the init-cmd
 * waiters, and — after requeueing one delayed mgmt cmd — the mgmt waiters.
 */
571 static void __scst_resume_activity(void)
573 struct scst_cmd_lists *l;
578 TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
579 if (suspend_count > 0)
582 clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
584 * The barrier is needed to make sure all woken up threads see the
585 * cleared flag. Not sure if it's really needed, but let's be safe.
587 smp_mb__after_clear_bit();
589 list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
590 wake_up_all(&l->cmd_list_waitQ);
592 wake_up_all(&scst_init_cmd_list_waitQ);
594 spin_lock_irq(&scst_mcmd_lock);
595 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
596 struct scst_mgmt_cmd *m;
/* Promote the first delayed mgmt cmd back onto the active list. */
597 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
598 mgmt_cmd_list_entry);
599 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
601 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
603 spin_unlock_irq(&scst_mcmd_lock);
604 wake_up_all(&scst_mgmt_cmd_list_waitQ);
/* Public wrapper: takes scst_suspend_mutex around the real work. */
611 void scst_resume_activity(void)
615 mutex_lock(&scst_suspend_mutex);
616 __scst_resume_activity();
617 mutex_unlock(&scst_suspend_mutex);
622 EXPORT_SYMBOL(scst_resume_activity);
/*
 * Attach SCST to a newly appeared scsi_device: with activity suspended and
 * scst_mutex held, allocate a scst_device, give it a gendisk shell (major
 * SCST_MAJOR), add it to scst_dev_list and assign the first registered dev
 * handler whose type matches the SCSI device type.
 */
624 static int scst_register_device(struct scsi_device *scsidp)
627 struct scst_device *dev;
628 struct scst_dev_type *dt;
632 res = scst_suspend_activity(true);
636 if (mutex_lock_interruptible(&scst_mutex) != 0) {
641 res = scst_alloc_device(GFP_KERNEL, &dev);
645 dev->type = scsidp->type;
647 dev->rq_disk = alloc_disk(1);
648 if (dev->rq_disk == NULL) {
652 dev->rq_disk->major = SCST_MAJOR;
654 dev->scsi_dev = scsidp;
656 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
658 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
659 if (dt->type == scsidp->type) {
660 res = scst_assign_dev_handler(dev, dt);
668 mutex_unlock(&scst_mutex);
671 scst_resume_activity();
675 PRINT_INFO("Attached SCSI target mid-level at "
676 "scsi%d, channel %d, id %d, lun %d, type %d",
677 scsidp->host->host_no, scsidp->channel, scsidp->id,
678 scsidp->lun, scsidp->type);
680 PRINT_ERROR("Failed to attach SCSI target mid-level "
681 "at scsi%d, channel %d, id %d, lun %d, type %d",
682 scsidp->host->host_no, scsidp->channel, scsidp->id,
683 scsidp->lun, scsidp->type);
/* Error unwinding (labels elided): unlink, drop disk, free device. */
690 list_del(&dev->dev_list_entry);
691 put_disk(dev->rq_disk);
694 scst_free_device(dev);
/*
 * Detach SCST from a departing scsi_device: find the matching scst_device,
 * unlink it, remove it from every access group, reassign the null dev
 * handler (runs the old handler's detach paths), then release the gendisk
 * and free the device — all with activity suspended and scst_mutex held.
 */
698 static void scst_unregister_device(struct scsi_device *scsidp)
700 struct scst_device *d, *dev = NULL;
701 struct scst_acg_dev *acg_dev, *aa;
705 scst_suspend_activity(false);
706 mutex_lock(&scst_mutex);
708 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
709 if (d->scsi_dev == scsidp) {
711 TRACE_DBG("Target device %p found", dev);
716 PRINT_ERROR("%s", "Target device not found");
720 list_del(&dev->dev_list_entry);
722 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
723 dev_acg_dev_list_entry)
725 scst_acg_remove_dev(acg_dev->acg, dev);
728 scst_assign_dev_handler(dev, &scst_null_devtype);
730 put_disk(dev->rq_disk);
731 scst_free_device(dev);
733 PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
734 "id %d, lun %d, type %d", scsidp->host->host_no,
735 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
738 mutex_unlock(&scst_mutex);
739 scst_resume_activity();
/*
 * Validate a dev handler and derive its atomic-capability flags: parse()
 * is mandatory; a missing exec() marks exec_atomic per the SIRQ config
 * option; a missing dev_done() is treated as atomic-capable.
 */
745 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
749 if (dev_handler->parse == NULL) {
750 PRINT_ERROR("scst dev_type driver %s doesn't have a "
751 "parse() method.", dev_handler->name);
756 if (dev_handler->exec == NULL) {
757 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
758 dev_handler->exec_atomic = 1;
760 dev_handler->exec_atomic = 0;
764 if (dev_handler->dev_done == NULL)
765 dev_handler->dev_done_atomic = 1;
/*
 * Register a virtual (non-SCSI-backed) device under the given handler:
 * validate arguments and the handler's callbacks, then with activity
 * suspended and scst_mutex held allocate the scst_device, assign it the
 * next virt_id and attach the handler.  Returns the virt_id via the
 * (elided) success path or a negative error.
 */
772 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
773 const char *dev_name)
776 struct scst_device *dev = NULL;
780 if (dev_handler == NULL) {
781 PRINT_ERROR("%s: valid device handler must be supplied",
787 if (dev_name == NULL) {
788 PRINT_ERROR("%s: device name must be non-NULL", __func__);
793 res = scst_dev_handler_check(dev_handler);
797 res = scst_suspend_activity(true);
801 if (mutex_lock_interruptible(&scst_mutex) != 0) {
806 res = scst_alloc_device(GFP_KERNEL, &dev);
810 dev->type = dev_handler->type;
811 dev->scsi_dev = NULL;
/* NOTE(review): dev_name is referenced, not copied — caller must keep it
 * alive for the device's lifetime; confirm against callers. */
812 dev->virt_name = dev_name;
813 dev->virt_id = scst_virt_dev_last_id++;
815 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
819 rc = scst_assign_dev_handler(dev, dev_handler);
826 mutex_unlock(&scst_mutex);
829 scst_resume_activity();
833 PRINT_INFO("Attached SCSI target mid-level to virtual "
834 "device %s (id %d)", dev_name, dev->virt_id);
836 PRINT_INFO("Failed to attach SCSI target mid-level to "
837 "virtual device %s", dev_name);
/* Error unwinding (labels elided): unlink and free the device. */
844 list_del(&dev->dev_list_entry);
845 scst_free_device(dev);
848 EXPORT_SYMBOL(scst_register_virtual_device);
/*
 * Unregister a virtual device by its virt_id: locate it in scst_dev_list,
 * unlink it, detach it from all access groups, swap in the null handler
 * (running detach callbacks), then free it — with activity suspended and
 * scst_mutex held throughout.
 */
850 void scst_unregister_virtual_device(int id)
852 struct scst_device *d, *dev = NULL;
853 struct scst_acg_dev *acg_dev, *aa;
857 scst_suspend_activity(false);
858 mutex_lock(&scst_mutex);
860 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
861 if (d->virt_id == id) {
863 TRACE_DBG("Target device %p (id %d) found", dev, id);
868 PRINT_ERROR("Target virtual device (id %d) not found", id);
872 list_del(&dev->dev_list_entry);
874 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
875 dev_acg_dev_list_entry)
877 scst_acg_remove_dev(acg_dev->acg, dev);
880 scst_assign_dev_handler(dev, &scst_null_devtype);
882 PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
883 "(id %d)", dev->virt_name, dev->virt_id);
885 scst_free_device(dev);
888 mutex_unlock(&scst_mutex);
889 scst_resume_activity();
894 EXPORT_SYMBOL(scst_unregister_virtual_device);
/*
 * Register a (pass-through-capable) dev handler: check interface version
 * and callbacks, reject pass-through handlers when the required kernel
 * patch/config is absent, then with activity suspended and scst_mutex held
 * reject duplicates, build /proc entries, link the type into
 * scst_dev_type_list and attach it to every already-known SCSI device of
 * the matching type that still has the null handler.
 */
896 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
899 struct scst_dev_type *dt;
900 struct scst_device *dev;
906 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
907 PRINT_ERROR("Incorrect version of dev handler %s",
913 res = scst_dev_handler_check(dev_type);
917 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
918 !defined(CONFIG_SCST_STRICT_SERIALIZING)
919 if (dev_type->exec == NULL) {
920 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
921 "supported. Consider applying on your kernel patch "
922 "scst_exec_req_fifo-<kernel-version>.patch or define "
923 "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
929 res = scst_suspend_activity(true);
933 if (mutex_lock_interruptible(&scst_mutex) != 0) {
939 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
940 if (strcmp(dt->name, dev_type->name) == 0) {
941 PRINT_ERROR("Device type handler \"%s\" already "
950 res = scst_build_proc_dev_handler_dir_entries(dev_type);
954 list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
/* Adopt real SCSI devices of this type still held by the null handler. */
956 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
957 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
959 if (dev->scsi_dev->type == dev_type->type)
960 scst_assign_dev_handler(dev, dev_type);
963 mutex_unlock(&scst_mutex);
964 scst_resume_activity();
967 PRINT_INFO("Device handler \"%s\" for type %d registered "
968 "successfully", dev_type->name, dev_type->type);
/* Error unwinding (labels elided). */
976 mutex_unlock(&scst_mutex);
979 scst_resume_activity();
982 PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
983 dev_type->name, dev_type->type);
986 EXPORT_SYMBOL(__scst_register_dev_driver);
/*
 * Unregister a dev handler: verify it is on scst_dev_type_list, move every
 * device it serves back to the null handler, unlink the type and remove
 * its /proc entries — with activity suspended and scst_mutex held.
 */
988 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
990 struct scst_device *dev;
991 struct scst_dev_type *dt;
996 scst_suspend_activity(false);
997 mutex_lock(&scst_mutex);
999 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
1000 if (strcmp(dt->name, dev_type->name) == 0) {
1006 PRINT_ERROR("Dev handler \"%s\" isn't registered",
1011 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
1012 if (dev->handler == dev_type) {
1013 scst_assign_dev_handler(dev, &scst_null_devtype);
1014 TRACE_DBG("Dev handler removed from device %p", dev);
1018 list_del(&dev_type->dev_type_list_entry);
1020 mutex_unlock(&scst_mutex);
1021 scst_resume_activity();
1023 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1025 PRINT_INFO("Device handler \"%s\" for type %d unloaded",
1026 dev_type->name, dev_type->type);
/* Error path (label elided): unlock and resume without cleanup. */
1033 mutex_unlock(&scst_mutex);
1034 scst_resume_activity();
1037 EXPORT_SYMBOL(scst_unregister_dev_driver);
/*
 * Register a virtual dev handler: check interface version and callbacks,
 * optionally build /proc entries (skipped when no_proc is set).  Virtual
 * handlers are not linked into scst_dev_type_list here — devices bind to
 * them explicitly via scst_register_virtual_device().
 */
1039 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1040 const char *version)
1046 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1047 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1053 res = scst_dev_handler_check(dev_type);
1057 if (!dev_type->no_proc) {
1058 res = scst_build_proc_dev_handler_dir_entries(dev_type);
/* type == -1 means "no fixed SCSI device type" for this handler. */
1063 if (dev_type->type != -1) {
1064 PRINT_INFO("Virtual device handler %s for type %d "
1065 "registered successfully", dev_type->name,
1068 PRINT_INFO("Virtual device handler \"%s\" registered "
1069 "successfully", dev_type->name);
1073 TRACE_EXIT_RES(res);
1077 PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1081 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
/* Counterpart: tear down the handler's /proc entries (if any). */
1083 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1087 if (!dev_type->no_proc)
1088 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1090 PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1095 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1097 /* Called under scst_mutex */
/*
 * Spawn 'num' per-device command threads named "<handler>d<dev>_<n>".
 * Each thread is created stopped, linked into dev->threads_list, given the
 * device's shared io_context (see ToDo below), then woken.  On kthread
 * failure the already-created threads are torn down via
 * scst_del_dev_threads(dev, i) (error label elided).
 */
1098 int scst_add_dev_threads(struct scst_device *dev, int num)
1102 struct scst_cmd_thread_t *thr;
1107 list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1111 for (i = 0; i < num; i++) {
1112 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1115 PRINT_ERROR("Failed to allocate thr %d", res);
1118 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1119 nm[ARRAY_SIZE(nm)-1] = '\0';
1120 thr->cmd_thread = kthread_create(scst_cmd_thread,
1121 &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1122 if (IS_ERR(thr->cmd_thread)) {
1123 res = PTR_ERR(thr->cmd_thread);
1124 PRINT_ERROR("kthread_create() failed: %d", res);
1129 list_add(&thr->thread_list_entry, &dev->threads_list);
1132 * ToDo: better to use tgt_dev_io_context instead, but we
1133 * are not ready for that yet.
/* Replace the thread's own io_context with a ref to the device's. */
1135 __exit_io_context(thr->cmd_thread->io_context);
1136 thr->cmd_thread->io_context = ioc_task_link(dev->dev_io_ctx);
1137 TRACE_DBG("Setting dev io ctx %p on thr %d", dev->dev_io_ctx,
1138 thr->cmd_thread->pid);
1140 wake_up_process(thr->cmd_thread);
1144 TRACE_EXIT_RES(res);
1148 scst_del_dev_threads(dev, i);
1152 /* Called under scst_mutex and suspended activity */
/*
 * Set up a private command-list + thread pool for a device whose handler
 * requests dedicated threads (threads_num > 0): initialize the per-device
 * cmd_lists, spawn the threads, register the list on scst_cmd_lists_list
 * (under scst_suspend_mutex) and point p_cmd_lists at the private list.
 */
1153 static int scst_create_dev_threads(struct scst_device *dev)
1160 if (dev->handler->threads_num <= 0)
1163 threads_num = dev->handler->threads_num;
1165 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1166 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1167 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1169 res = scst_add_dev_threads(dev, threads_num);
1173 mutex_lock(&scst_suspend_mutex);
1174 list_add_tail(&dev->cmd_lists.lists_list_entry,
1175 &scst_cmd_lists_list);
1176 mutex_unlock(&scst_suspend_mutex);
1178 dev->p_cmd_lists = &dev->cmd_lists;
1181 TRACE_EXIT_RES(res);
1185 /* Called under scst_mutex */
/*
 * Stop and unlink device command threads, newest first.  For each thread,
 * per-thread data registered by any tgt_dev is dropped before the thread
 * is stopped.  num < 0 means "all threads"; otherwise at most num.
 */
1186 void scst_del_dev_threads(struct scst_device *dev, int num)
1188 struct scst_cmd_thread_t *ct, *tmp;
1196 list_for_each_entry_safe_reverse(ct, tmp, &dev->threads_list,
1197 thread_list_entry) {
1199 struct scst_tgt_dev *tgt_dev;
1201 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1202 dev_tgt_dev_list_entry) {
1203 struct scst_thr_data_hdr *td;
1204 td = __scst_find_thr_data(tgt_dev, ct->cmd_thread);
1206 scst_thr_data_put(td);
1211 rc = kthread_stop(ct->cmd_thread);
1213 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1215 list_del(&ct->thread_list_entry);
1218 if ((num > 0) && (++i >= num))
1227 /* Called under scst_mutex and suspended activity */
/*
 * Tear down a device's private thread pool: stop all threads, and if the
 * device was using its own cmd_lists, delist them from scst_cmd_lists_list
 * under scst_suspend_mutex.
 */
1228 static void scst_stop_dev_threads(struct scst_device *dev)
1232 if (list_empty(&dev->threads_list))
1235 scst_del_dev_threads(dev, -1);
1237 if (dev->p_cmd_lists == &dev->cmd_lists) {
1238 mutex_lock(&scst_suspend_mutex);
1239 list_del(&dev->cmd_lists.lists_list_entry);
1240 mutex_unlock(&scst_suspend_mutex);
1248 /* The activity supposed to be suspended and scst_mutex held */
/*
 * Switch a device to a new dev handler: run the old handler's
 * detach_tgt()/detach() callbacks, stop its threads, install the new
 * handler, create its threads, then run attach()/attach_tgt() for every
 * tgt_dev.  On attach_tgt() failure, already-attached tgt_devs (tracked on
 * the local attached_tgt_devs list) are detached again and the device
 * falls back to scst_null_devtype.
 */
1249 int scst_assign_dev_handler(struct scst_device *dev,
1250 struct scst_dev_type *handler)
1253 struct scst_tgt_dev *tgt_dev;
1254 LIST_HEAD(attached_tgt_devs);
1258 sBUG_ON(handler == NULL);
1260 if (dev->handler == handler)
1263 if (dev->handler && dev->handler->detach_tgt) {
1264 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1265 dev_tgt_dev_list_entry) {
1266 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1268 dev->handler->detach_tgt(tgt_dev);
1269 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1273 if (dev->handler && dev->handler->detach) {
1274 TRACE_DBG("%s", "Calling dev handler's detach()");
1275 dev->handler->detach(dev);
1276 TRACE_DBG("%s", "Old handler's detach() returned");
1279 scst_stop_dev_threads(dev);
1281 dev->handler = handler;
1284 res = scst_create_dev_threads(dev);
1289 if (handler && handler->attach) {
1290 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1291 res = handler->attach(dev);
1292 TRACE_DBG("New dev handler's attach() returned %d", res);
1294 PRINT_ERROR("New device handler's %s attach() "
1295 "failed: %d", handler->name, res);
1300 if (handler && handler->attach_tgt) {
1301 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1302 dev_tgt_dev_list_entry) {
1303 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1305 res = handler->attach_tgt(tgt_dev);
1306 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1308 PRINT_ERROR("Device handler's %s attach_tgt() "
1309 "failed: %d", handler->name, res);
1310 goto out_err_detach_tgt;
1312 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1313 &attached_tgt_devs);
/* Failure fallback (labels elided): stop threads, install null handler. */
1319 scst_stop_dev_threads(dev);
1323 dev->handler = &scst_null_devtype;
1326 TRACE_EXIT_RES(res);
/* out_err_detach_tgt: undo attach_tgt() for tgt_devs attached so far. */
1330 if (handler && handler->detach_tgt) {
1331 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1332 extra_tgt_dev_list_entry)
1334 TRACE_DBG("Calling handler's detach_tgt(%p)",
1336 handler->detach_tgt(tgt_dev);
1337 TRACE_DBG("%s", "Handler's detach_tgt() returned");
1340 if (handler && handler->detach) {
1341 TRACE_DBG("%s", "Calling handler's detach()");
1342 handler->detach(dev);
1343 TRACE_DBG("%s", "Handler's detach() returned");
/* Snapshot of the global thread count, read under the mutex only to
 * narrow the race window for readers of a value that may change. */
1348 int scst_global_threads_count(void)
1353 * Just to lower the race window, when user can get just changed value
1355 mutex_lock(&scst_global_threads_mutex);
1356 i = scst_nr_global_threads;
1357 mutex_unlock(&scst_global_threads_mutex);
/* One-time init of the global thread pool bookkeeping. */
1361 static void scst_threads_info_init(void)
1363 mutex_init(&scst_global_threads_mutex);
1364 INIT_LIST_HEAD(&scst_global_threads_list);
1367 /* scst_global_threads_mutex supposed to be held */
/* Stop up to 'num' global command threads (num < 0: stop them all,
 * per the elided loop-exit condition), unlinking each as it stops. */
1368 void __scst_del_global_threads(int num)
1370 struct scst_cmd_thread_t *ct, *tmp;
1377 list_for_each_entry_safe(ct, tmp, &scst_global_threads_list,
1378 thread_list_entry) {
1381 res = kthread_stop(ct->cmd_thread);
1383 TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1384 list_del(&ct->thread_list_entry);
1386 scst_nr_global_threads--;
1397 /* scst_global_threads_mutex supposed to be held */
/* Spawn 'num' global command threads ("scsi_tgt%d") serving
 * scst_main_cmd_lists; on failure the ones created so far are stopped. */
1398 int __scst_add_global_threads(int num)
1401 static int scst_thread_num;
1405 for (i = 0; i < num; i++) {
1406 struct scst_cmd_thread_t *thr;
1408 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1411 PRINT_ERROR("fail to allocate thr %d", res);
1414 thr->cmd_thread = kthread_create(scst_cmd_thread,
1415 &scst_main_cmd_lists, "scsi_tgt%d",
1417 if (IS_ERR(thr->cmd_thread)) {
1418 res = PTR_ERR(thr->cmd_thread);
1419 PRINT_ERROR("kthread_create() failed: %d", res);
1424 list_add(&thr->thread_list_entry, &scst_global_threads_list);
1425 scst_nr_global_threads++;
1427 wake_up_process(thr->cmd_thread);
1432 TRACE_EXIT_RES(res);
1436 __scst_del_global_threads(i);
/* Public locked wrappers around the __-prefixed helpers above. */
1440 int scst_add_global_threads(int num)
1446 mutex_lock(&scst_global_threads_mutex);
1447 res = __scst_add_global_threads(num);
1448 mutex_unlock(&scst_global_threads_mutex);
1450 TRACE_EXIT_RES(res);
1453 EXPORT_SYMBOL(scst_add_global_threads);
1455 void scst_del_global_threads(int num)
1459 mutex_lock(&scst_global_threads_mutex);
1460 __scst_del_global_threads(num);
1461 mutex_unlock(&scst_global_threads_mutex);
1466 EXPORT_SYMBOL(scst_del_global_threads);
/* Stop every SCST thread: the global pool plus the three singleton
 * service threads (init / TM / mgmt), guarding each against NULL. */
1468 static void scst_stop_all_threads(void)
1472 mutex_lock(&scst_global_threads_mutex);
1473 __scst_del_global_threads(-1);
1474 if (scst_mgmt_cmd_thread)
1475 kthread_stop(scst_mgmt_cmd_thread)
1476 if (scst_mgmt_thread)
1477 kthread_stop(scst_mgmt_thread);
1478 if (scst_init_cmd_thread)
1479 kthread_stop(scst_init_cmd_thread);
1480 mutex_unlock(&scst_global_threads_mutex);
/*
 * Start 'num' global command threads plus the three singleton service
 * threads.  Each singleton pointer is reset to NULL on kthread_run()
 * failure so scst_stop_all_threads() can safely skip it.
 */
1486 static int scst_start_all_threads(int num)
1492 mutex_lock(&scst_global_threads_mutex);
1493 res = __scst_add_global_threads(num);
1497 scst_init_cmd_thread = kthread_run(scst_init_thread,
1498 NULL, "scsi_tgt_init");
1499 if (IS_ERR(scst_init_cmd_thread)) {
1500 res = PTR_ERR(scst_init_cmd_thread);
1501 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1502 scst_init_cmd_thread = NULL;
1506 scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
1508 if (IS_ERR(scst_mgmt_cmd_thread)) {
1509 res = PTR_ERR(scst_mgmt_cmd_thread);
1510 PRINT_ERROR("kthread_create() for TM failed: %d", res);
1511 scst_mgmt_cmd_thread = NULL;
1515 scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
1516 NULL, "scsi_tgt_mgmt");
1517 if (IS_ERR(scst_mgmt_thread)) {
1518 res = PTR_ERR(scst_mgmt_thread);
1519 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1520 scst_mgmt_thread = NULL;
1525 mutex_unlock(&scst_global_threads_mutex);
1526 TRACE_EXIT_RES(res);
1534 EXPORT_SYMBOL(scst_get);
1540 EXPORT_SYMBOL(scst_put);
/*
 * Class-interface hooks: called by the SCSI core when a scsi_device
 * appears/disappears.  The prototype and the way to reach the scsi_device
 * differ before/after kernel 2.6.26 (class_device vs device).  Devices
 * belonging to the scst_local driver are skipped to avoid a loop.
 */
1542 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1543 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1545 static int scst_add(struct device *cdev, struct class_interface *intf)
1548 struct scsi_device *scsidp;
1553 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1554 scsidp = to_scsi_device(cdev->dev);
1556 scsidp = to_scsi_device(cdev->parent);
1559 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1560 res = scst_register_device(scsidp);
1566 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1567 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1569 static void scst_remove(struct device *cdev, struct class_interface *intf)
1572 struct scsi_device *scsidp;
1576 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1577 scsidp = to_scsi_device(cdev->dev);
1579 scsidp = to_scsi_device(cdev->parent);
1582 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1583 scst_unregister_device(scsidp);
/* Interface registration table; field names differ across kernels. */
1589 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1590 static struct class_interface scst_interface = {
1592 .remove = scst_remove,
1595 static struct class_interface scst_interface = {
1596 .add_dev = scst_add,
1597 .remove_dev = scst_remove,
/*
 * Print the compile-time SCST feature set as a single
 * "Enabled features: ..." INFO line.  'i' tracks the current end of the
 * string in 'buf'; 'j' appears to snapshot the length of the prefix so
 * that the first appended feature omits the leading ", "
 * ((j == i) ? "" : ", ") -- the 'j = i' assignment lines are elided in
 * this extract, TODO confirm.
 *
 * NOTE(review): the i += snprintf(&buf[i], sizeof(buf) - i, ...) pattern
 * is unsafe on truncation: snprintf returns the length that WOULD have
 * been written, so after a truncated call 'i' can exceed sizeof(buf) and
 * sizeof(buf) - i underflows.  Harmless only if 'buf' is provably large
 * enough for all features combined -- verify buf's size (declaration is
 * elided here).
 */
1601 static void __init scst_print_config(void)
1606 i = snprintf(buf, sizeof(buf), "Enabled features: ");
/* One append per compile-time option, comma-separated after the first */
1609 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1610 i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1613 #ifdef CONFIG_SCST_EXTRACHECKS
1614 i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1615 (j == i) ? "" : ", ");
1618 #ifdef CONFIG_SCST_TRACING
1619 i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1620 (j == i) ? "" : ", ");
1623 #ifdef CONFIG_SCST_DEBUG
1624 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1625 (j == i) ? "" : ", ");
1628 #ifdef CONFIG_SCST_DEBUG_TM
1629 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1630 (j == i) ? "" : ", ");
1633 #ifdef CONFIG_SCST_DEBUG_RETRY
1634 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1635 (j == i) ? "" : ", ");
1638 #ifdef CONFIG_SCST_DEBUG_OOM
1639 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1640 (j == i) ? "" : ", ");
1643 #ifdef CONFIG_SCST_DEBUG_SN
1644 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1645 (j == i) ? "" : ", ");
1648 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1649 i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1650 (j == i) ? "" : ", ");
1653 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1654 i += snprintf(&buf[i], sizeof(buf) - i,
1655 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1656 (j == i) ? "" : ", ");
1659 #ifdef CONFIG_SCST_STRICT_SECURITY
1660 i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1661 (j == i) ? "" : ", ");
/* Emit the assembled line (presumably only when any feature was added) */
1665 PRINT_INFO("%s", buf);
/*
 * Module init: build-time sanity checks, global state initialization,
 * slab caches, mempools, SGV pools, the default ACG, SCSI mid-layer
 * registration, tasklets, worker threads, and /proc entries -- in that
 * order.  On any failure, the goto ladder at the bottom unwinds exactly
 * the resources acquired so far, in reverse order.
 * NOTE(review): many lines are elided in this extract (braces, #else/
 * #endif, some error-path statements), so the fragment is not a complete
 * function body.
 */
1668 static int __init init_scst(void)
/* Compile-time checks that SCST's sense buffer and SN types match the
 * kernel's, so no size mismatch can slip through at runtime */
1675 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1677 struct scsi_request *req;
1678 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1679 sizeof(req->sr_sense_buffer));
1683 struct scsi_sense_hdr *shdr;
1684 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1688 struct scst_tgt_dev *t;
1690 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1691 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
/* Warn (non-fatal) when the optional io_context performance patch is absent */
1694 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1695 #if !defined(SCST_IO_CONTEXT)
1696 PRINT_WARNING("%s", "Patch io_context was not applied on "
1697 "your kernel. SCST will be working with not the best "
/* Initialize all global locks, lists and wait queues before anything can
 * run concurrently */
1702 mutex_init(&scst_mutex);
1703 INIT_LIST_HEAD(&scst_template_list);
1704 INIT_LIST_HEAD(&scst_dev_list);
1705 INIT_LIST_HEAD(&scst_dev_type_list);
1706 spin_lock_init(&scst_main_lock);
1707 INIT_LIST_HEAD(&scst_acg_list);
1708 spin_lock_init(&scst_init_lock);
1709 init_waitqueue_head(&scst_init_cmd_list_waitQ);
1710 INIT_LIST_HEAD(&scst_init_cmd_list);
1711 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1712 scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1714 atomic_set(&scst_cmd_count, 0);
1715 spin_lock_init(&scst_mcmd_lock);
1716 INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1717 INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1718 init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1719 init_waitqueue_head(&scst_mgmt_waitQ);
1720 spin_lock_init(&scst_mgmt_lock);
1721 INIT_LIST_HEAD(&scst_sess_init_list);
1722 INIT_LIST_HEAD(&scst_sess_shut_list);
1723 init_waitqueue_head(&scst_dev_cmd_waitQ);
1724 mutex_init(&scst_suspend_mutex);
1725 INIT_LIST_HEAD(&scst_cmd_lists_list);
1726 scst_virt_dev_last_id = 1;
/* The main (default) command-lists entry joins the global list first */
1727 spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1728 INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1729 init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1730 list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1731 &scst_cmd_lists_list);
1733 scst_num_cpus = num_online_cpus();
1735 /* ToDo: register_cpu_notifier() */
/* Default thread count: one per online CPU; reject nonsensical values
 * from the module parameter */
1737 if (scst_threads == 0)
1738 scst_threads = scst_num_cpus;
1740 if (scst_threads < 1) {
1741 PRINT_ERROR("%s", "scst_threads can not be less than 1");
1742 scst_threads = scst_num_cpus;
1745 scst_threads_info_init();
/* Create one slab cache per object type; 'o' is the unwind label to jump
 * to on allocation failure (failure check is in the elided part of the
 * macro) */
1747 #define INIT_CACHEP(p, s, o) do { \
1748 p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
1749 TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
1750 sizeof(struct s)); \
1757 INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1758 INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1759 out_destroy_mgmt_cache);
1760 INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1761 out_destroy_mgmt_stub_cache);
/* Local struct gives the sense-buffer cache a named, fixed-size type */
1763 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1764 INIT_CACHEP(scst_sense_cachep, scst_sense,
1765 out_destroy_ua_cache);
1767 INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
1768 INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
1769 INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1770 INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1771 INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
/* Mempools guarantee forward progress under memory pressure */
1773 scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1774 mempool_free_slab, scst_mgmt_cachep);
1775 if (scst_mgmt_mempool == NULL) {
1777 goto out_destroy_acg_cache;
1781 * All mgmt stubs, UAs and sense buffers are bursty and losing them
1782 * may have fatal consequences, so let's have big pools for them.
1785 scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1786 mempool_free_slab, scst_mgmt_stub_cachep);
1787 if (scst_mgmt_stub_mempool == NULL) {
1789 goto out_destroy_mgmt_mempool;
1792 scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
1793 mempool_free_slab, scst_ua_cachep);
1794 if (scst_ua_mempool == NULL) {
1796 goto out_destroy_mgmt_stub_mempool;
1799 scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
1800 mempool_free_slab, scst_sense_cachep);
1801 if (scst_sense_mempool == NULL) {
1803 goto out_destroy_ua_mempool;
1806 scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
1807 mempool_free_slab, scst_aen_cachep);
1808 if (scst_aen_mempool == NULL) {
1810 goto out_destroy_sense_mempool;
/* Auto-size the command memory limit from lowmem when the module
 * parameter is 0; on 32-bit, also cap it (the min() arm caps at 1GB) */
1813 if (scst_max_cmd_mem == 0) {
1816 #if BITS_PER_LONG == 32
1817 scst_max_cmd_mem = min(
1818 (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
1819 >> 20) >> 2, (uint64_t)1 << 30);
1821 scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
/* Per-device limit must not exceed the global one; default is 2/5 of it */
1826 if (scst_max_dev_cmd_mem != 0) {
1827 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1828 PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1829 "scst_max_cmd_mem (%d)",
1830 scst_max_dev_cmd_mem,
1832 scst_max_dev_cmd_mem = scst_max_cmd_mem;
1835 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
/* MB -> pages conversion for the SGV pool limit */
1837 res = scst_sgv_pools_init(
1838 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1840 goto out_destroy_aen_mempool;
1842 scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1843 if (scst_default_acg == NULL) {
1845 goto out_destroy_sgv_pool;
/* From here the SCSI mid-layer may start calling scst_add()/scst_remove() */
1848 res = scsi_register_interface(&scst_interface);
1852 scst_scsi_op_list_init();
/* Per-CPU tasklets for soft-IRQ command processing */
1854 for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1855 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1856 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1857 tasklet_init(&scst_tasklets[i].tasklet,
1858 (void *)scst_cmd_tasklet,
1859 (unsigned long)&scst_tasklets[i]);
1862 TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1865 res = scst_start_all_threads(scst_threads);
1867 goto out_thread_free;
1869 res = scst_proc_init_module();
1871 goto out_thread_free;
1874 PRINT_INFO("SCST version %s loaded successfully (max mem for "
1875 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1876 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1878 scst_print_config();
1881 TRACE_EXIT_RES(res);
/* Error unwind ladder: each label releases exactly what was acquired
 * before the failing step, in reverse acquisition order */
1885 scst_stop_all_threads();
1887 scsi_unregister_interface(&scst_interface);
1890 scst_destroy_acg(scst_default_acg);
1892 out_destroy_sgv_pool:
1893 scst_sgv_pools_deinit();
1895 out_destroy_aen_mempool:
1896 mempool_destroy(scst_aen_mempool);
1898 out_destroy_sense_mempool:
1899 mempool_destroy(scst_sense_mempool);
1901 out_destroy_ua_mempool:
1902 mempool_destroy(scst_ua_mempool);
1904 out_destroy_mgmt_stub_mempool:
1905 mempool_destroy(scst_mgmt_stub_mempool);
1907 out_destroy_mgmt_mempool:
1908 mempool_destroy(scst_mgmt_mempool);
1910 out_destroy_acg_cache:
1911 kmem_cache_destroy(scst_acgd_cachep);
1913 out_destroy_tgt_cache:
1914 kmem_cache_destroy(scst_tgtd_cachep);
1916 out_destroy_sess_cache:
1917 kmem_cache_destroy(scst_sess_cachep);
1919 out_destroy_cmd_cache:
1920 kmem_cache_destroy(scst_cmd_cachep);
1922 out_destroy_aen_cache:
1923 kmem_cache_destroy(scst_aen_cachep);
1925 out_destroy_sense_cache:
1926 kmem_cache_destroy(scst_sense_cachep);
1928 out_destroy_ua_cache:
1929 kmem_cache_destroy(scst_ua_cachep);
1931 out_destroy_mgmt_stub_cache:
1932 kmem_cache_destroy(scst_mgmt_stub_cachep);
1934 out_destroy_mgmt_cache:
1935 kmem_cache_destroy(scst_mgmt_cachep);
/*
 * Module unload: tear everything down in the reverse order of init_scst()
 * -- /proc entries, threads, SCSI mid-layer interface, default ACG, SGV
 * pools, then mempools, then slab caches.
 * NOTE(review): this extract elides lines (braces, the DEINIT_CACHEP
 * macro's "} while (0)" tail, TRACE_ENTRY/EXIT).
 */
1939 static void __exit exit_scst(void)
1943 /* ToDo: unregister_cpu_notifier() */
1945 scst_proc_cleanup_module();
1947 scst_stop_all_threads();
1949 scsi_unregister_interface(&scst_interface);
1950 scst_destroy_acg(scst_default_acg);
1952 scst_sgv_pools_deinit();
/* Macro body is partially elided; presumably also logs/NULLs the pointer */
1954 #define DEINIT_CACHEP(p) do { \
1955 kmem_cache_destroy(p); \
/* Mempools must be destroyed before the slab caches backing them */
1959 mempool_destroy(scst_mgmt_mempool);
1960 mempool_destroy(scst_mgmt_stub_mempool);
1961 mempool_destroy(scst_ua_mempool);
1962 mempool_destroy(scst_sense_mempool);
1963 mempool_destroy(scst_aen_mempool);
1965 DEINIT_CACHEP(scst_mgmt_cachep);
1966 DEINIT_CACHEP(scst_mgmt_stub_cachep);
1967 DEINIT_CACHEP(scst_ua_cachep);
1968 DEINIT_CACHEP(scst_sense_cachep);
1969 DEINIT_CACHEP(scst_aen_cachep);
1970 DEINIT_CACHEP(scst_cmd_cachep);
1971 DEINIT_CACHEP(scst_sess_cachep);
1972 DEINIT_CACHEP(scst_tgtd_cachep);
1973 DEINIT_CACHEP(scst_acgd_cachep);
1975 PRINT_INFO("%s", "SCST unloaded");
/* Module entry/exit points and standard modinfo metadata */
1982 module_init(init_scst);
1983 module_exit(exit_scst);
1985 MODULE_AUTHOR("Vladislav Bolkhovitin");
1986 MODULE_LICENSE("GPL");
1987 MODULE_DESCRIPTION("SCSI target core");
1988 MODULE_VERSION(SCST_VERSION_STRING);