4 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2008 CMS Distribution Limited
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
33 #include "scst_priv.h"
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not \
38 recommended for performance reasons. Consider change VMSPLIT \
39 option or use 64-bit configuration instead. See README file for \
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44 !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied on \
46 your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined. \
47 Pass-through dev handlers will not be supported."
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
51 #if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
52 #warning "Patch export_alloc_io_context-<kernel-version>.patch was not applied \
53 on your kernel. SCST will be working with not the best performance."
58 ** SCST global variables. They are all uninitialized to have their layout in
59 ** memory be exactly as specified. Otherwise compiler puts zero-initialized
60 ** variable separately from nonzero-initialized ones.
64 * All targets, devices and dev_types management is done under this mutex.
66 * It must NOT be used in any works (schedule_work(), etc.), because
67 * otherwise a deadlock (double lock, actually) is possible, e.g., with
68 * scst_user detach_tgt(), which is called under scst_mutex and calls
69 * flush_scheduled_work().
71 struct mutex scst_mutex;
/* Global registries walked under scst_mutex: target templates, devices,
 * and device-type (dev handler) descriptors. */
73 struct list_head scst_template_list;
74 struct list_head scst_dev_list;
75 struct list_head scst_dev_type_list;
77 spinlock_t scst_main_lock;
/* Slab caches and their mempools for mgmt commands, UAs and sense buffers.
 * The *_cachep ones are file-private; the mempools are used elsewhere too. */
79 static struct kmem_cache *scst_mgmt_cachep;
80 mempool_t *scst_mgmt_mempool;
81 static struct kmem_cache *scst_mgmt_stub_cachep;
82 mempool_t *scst_mgmt_stub_mempool;
83 static struct kmem_cache *scst_ua_cachep;
84 mempool_t *scst_ua_mempool;
85 static struct kmem_cache *scst_sense_cachep;
86 mempool_t *scst_sense_mempool;
87 struct kmem_cache *scst_tgtd_cachep;
88 struct kmem_cache *scst_sess_cachep;
89 struct kmem_cache *scst_acgd_cachep;
/* Access control groups; scst_default_acg is the fallback group. */
91 struct list_head scst_acg_list;
92 struct scst_acg *scst_default_acg;
/* State for the init-cmd processing thread (see scst_init_cmd_thread). */
94 spinlock_t scst_init_lock;
95 wait_queue_head_t scst_init_cmd_list_waitQ;
96 struct list_head scst_init_cmd_list;
97 unsigned int scst_init_poll_cnt;
99 struct kmem_cache *scst_cmd_cachep;
101 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
102 unsigned long scst_trace_flag;
105 unsigned long scst_flags;
/* Count of commands currently in flight; suspend logic waits for it to
 * reach zero (see scst_suspend_activity()/scst_susp_wait()). */
106 atomic_t scst_cmd_count;
108 struct scst_cmd_lists scst_main_cmd_lists;
110 struct scst_tasklet scst_tasklets[NR_CPUS];
/* Management-command queues, protected by scst_mcmd_lock. */
112 spinlock_t scst_mcmd_lock;
113 struct list_head scst_active_mgmt_cmd_list;
114 struct list_head scst_delayed_mgmt_cmd_list;
115 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
117 wait_queue_head_t scst_mgmt_waitQ;
118 spinlock_t scst_mgmt_lock;
119 struct list_head scst_sess_init_list;
120 struct list_head scst_sess_shut_list;
122 wait_queue_head_t scst_dev_cmd_waitQ;
/* scst_suspend_mutex serializes suspend/resume; scst_cmd_lists_list holds
 * all per-device cmd-lists so resume can wake every waiter. */
124 struct mutex scst_suspend_mutex;
125 struct list_head scst_cmd_lists_list;
127 static int scst_threads;
128 struct scst_threads_info_t scst_threads_info;
/* Suspension nesting depth; protected by scst_suspend_mutex. */
130 static int suspend_count;
132 static int scst_virt_dev_last_id; /* protected by scst_mutex */
135 * This buffer and lock are intended to avoid memory allocation, which
136 * could fail in improper places.
138 spinlock_t scst_temp_UA_lock;
139 uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
/* Shared io_context for all SCST command threads; see the comment in
 * scst_add_dev_threads() for why one context is shared. */
141 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
142 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
143 static struct io_context *scst_ioc;
/* Command-memory limits in MB; settable as module parameters below. */
147 unsigned int scst_max_cmd_mem;
148 unsigned int scst_max_dev_cmd_mem;
150 module_param_named(scst_threads, scst_threads, int, 0);
151 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
153 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
154 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
155 "all SCSI commands of all devices at any given time in MB");
157 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
158 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
159 "by all SCSI commands of a device at any given time in MB");
/* "Null" handler assigned to devices that have no real handler attached.
 * NOTE(review): initializer body is elided in this view. */
161 struct scst_dev_type scst_null_devtype = {
165 static void __scst_resume_activity(void);
/*
 * __scst_register_target_template() - register a target driver template.
 *
 * Validates the template (interface version string, mandatory detect(),
 * release() and xmit_response() callbacks, non-negative threads_num),
 * optionally creates its /proc entries, rejects duplicate names, calls the
 * driver's detect() and, on success, links the template on
 * scst_template_list. Returns 0 on success, negative error otherwise.
 *
 * NOTE(review): error labels / early-exit lines are elided in this view;
 * commentary below covers only the visible statements.
 */
167 int __scst_register_target_template(struct scst_tgt_template *vtt,
171 struct scst_tgt_template *t;
/* Local static mutex: serializes concurrent registrations of templates
 * without holding scst_mutex across the (possibly slow) detect() call. */
172 static DEFINE_MUTEX(m);
176 INIT_LIST_HEAD(&vtt->tgt_list);
178 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
179 PRINT_ERROR("Incorrect version of target %s", vtt->name);
185 PRINT_ERROR("Target driver %s doesn't have a "
186 "detect() method.", vtt->name);
192 PRINT_ERROR("Target driver %s doesn't have a "
193 "release() method.", vtt->name);
198 if (!vtt->xmit_response) {
199 PRINT_ERROR("Target driver %s doesn't have a "
200 "xmit_response() method.", vtt->name);
205 if (vtt->threads_num < 0) {
206 PRINT_ERROR("Wrong threads_num value %d for "
207 "target \"%s\"", vtt->threads_num,
213 if (!vtt->no_proc_entry) {
214 res = scst_build_proc_target_dir_entries(vtt);
/* No rdy_to_xfer callback means the stage is a no-op, hence "atomic". */
219 if (vtt->rdy_to_xfer == NULL)
220 vtt->rdy_to_xfer_atomic = 1;
222 if (mutex_lock_interruptible(&m) != 0)
/* scst_mutex is taken only for the duplicate-name scan and for the final
 * list insertion, not across detect(). */
225 if (mutex_lock_interruptible(&scst_mutex) != 0)
227 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
228 if (strcmp(t->name, vtt->name) == 0) {
229 PRINT_ERROR("Target driver %s already registered",
231 mutex_unlock(&scst_mutex);
235 mutex_unlock(&scst_mutex);
237 TRACE_DBG("%s", "Calling target driver's detect()");
238 res = vtt->detect(vtt);
239 TRACE_DBG("Target driver's detect() returned %d", res);
241 PRINT_ERROR("%s", "The detect() routine failed");
246 mutex_lock(&scst_mutex);
247 list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
248 mutex_unlock(&scst_mutex);
252 PRINT_INFO("Target template %s registered successfully", vtt->name);
/* Error path: tear down the /proc entries created above. */
261 scst_cleanup_proc_target_dir_entries(vtt);
267 PRINT_ERROR("Failed to register target template %s", vtt->name);
270 EXPORT_SYMBOL(__scst_register_target_template);
/*
 * scst_unregister_target_template() - undo __scst_register_target_template().
 *
 * Verifies the template is registered, unregisters every target still on
 * its tgt_list, then unlinks the template and removes its /proc entries.
 */
272 void scst_unregister_target_template(struct scst_tgt_template *vtt)
274 struct scst_tgt *tgt;
275 struct scst_tgt_template *t;
280 mutex_lock(&scst_mutex);
282 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
283 if (strcmp(t->name, vtt->name) == 0) {
289 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
/* scst_unregister() needs scst_mutex itself, so drop it around the call;
 * the loop restarts implicitly from the (modified) list head each pass.
 * NOTE(review): iterating a list while dropping the lock relies on
 * scst_unregister() removing the entry — lines are elided here, confirm. */
294 list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
295 mutex_unlock(&scst_mutex);
296 scst_unregister(tgt);
297 mutex_lock(&scst_mutex);
300 list_del(&vtt->scst_template_list_entry);
302 PRINT_INFO("Target template %s unregistered successfully", vtt->name);
305 mutex_unlock(&scst_mutex);
307 scst_cleanup_proc_target_dir_entries(vtt);
312 EXPORT_SYMBOL(scst_unregister_target_template);
/*
 * scst_register() - create and register one target for a template.
 *
 * Allocates a struct scst_tgt, initializes its session list, retry state
 * and retry timer, then — with activity suspended and scst_mutex held —
 * builds its default ACG name ("<SCST_DEFAULT_ACG_NAME>_<target_name>"),
 * creates its /proc entries and links it on the template's tgt_list.
 * Returns the new target, or (per the elided error paths) NULL on failure.
 */
314 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
315 const char *target_name)
317 struct scst_tgt *tgt;
322 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
324 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
329 INIT_LIST_HEAD(&tgt->sess_list);
330 init_waitqueue_head(&tgt->unreg_waitQ);
332 tgt->sg_tablesize = vtt->sg_tablesize;
333 spin_lock_init(&tgt->tgt_lock);
334 INIT_LIST_HEAD(&tgt->retry_cmd_list);
335 atomic_set(&tgt->finished_cmds, 0);
/* Pre-2.6.15-style timer setup: fn gets the tgt back via ->data. */
336 init_timer(&tgt->retry_timer);
337 tgt->retry_timer.data = (unsigned long)tgt;
338 tgt->retry_timer.function = scst_tgt_retry_timer_fn;
/* Quiesce all command activity before touching global device state. */
340 rc = scst_suspend_activity(true);
342 goto out_free_tgt_err;
344 if (mutex_lock_interruptible(&scst_mutex) != 0) {
346 goto out_resume_free;
349 if (target_name != NULL) {
/* +1 for the '_' separator, +1 for the trailing NUL. */
350 int len = strlen(target_name) + 1 +
351 strlen(SCST_DEFAULT_ACG_NAME) + 1;
353 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
354 if (tgt->default_group_name == NULL) {
355 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
356 "group name failed");
358 goto out_unlock_resume;
/* sprintf is bounded by the length computed above. */
360 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
364 rc = scst_build_proc_target_entries(tgt);
368 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
370 mutex_unlock(&scst_mutex);
371 scst_resume_activity();
373 PRINT_INFO("Target %s (%p) for template %s registered successfully",
374 target_name, tgt, vtt->name);
/* Error unwind (labels elided): free name, unlock, resume, report. */
381 kfree(tgt->default_group_name);
384 mutex_unlock(&scst_mutex);
387 scst_resume_activity();
394 PRINT_ERROR("Failed to register target %s for template %s (error %d)",
395 target_name, vtt->name, rc);
398 EXPORT_SYMBOL(scst_register);
/*
 * test_sess_list() - return nonzero when the target's session list is empty.
 * Used as the wait_event() condition in scst_unregister(); takes scst_mutex
 * so the emptiness check is consistent with list updates.
 */
400 static inline int test_sess_list(struct scst_tgt *tgt)
403 mutex_lock(&scst_mutex);
404 res = list_empty(&tgt->sess_list);
405 mutex_unlock(&scst_mutex);
/*
 * scst_unregister() - tear down one target registered via scst_register().
 *
 * Calls the driver's release() (which is expected to start shutting down
 * sessions), waits until the target's session list drains, then — with
 * activity suspended and scst_mutex held — unlinks the target, removes its
 * /proc entries and frees its default group name. The retry timer is
 * stopped only after resume, via del_timer_sync().
 */
409 void scst_unregister(struct scst_tgt *tgt)
411 struct scst_session *sess;
412 struct scst_tgt_template *vtt = tgt->tgtt;
416 TRACE_DBG("%s", "Calling target driver's release()");
417 tgt->tgtt->release(tgt);
418 TRACE_DBG("%s", "Target driver's release() returned");
420 mutex_lock(&scst_mutex);
/* Sanity check: after release(), no session may still be fully alive. */
421 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
422 sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
424 mutex_unlock(&scst_mutex);
426 TRACE_DBG("%s", "Waiting for sessions shutdown");
/* Woken via tgt->unreg_waitQ when the last session is removed. */
427 wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
428 TRACE_DBG("%s", "wait_event() returned");
430 scst_suspend_activity(false);
431 mutex_lock(&scst_mutex);
433 list_del(&tgt->tgt_list_entry);
435 scst_cleanup_proc_target_entries(tgt);
437 kfree(tgt->default_group_name);
439 mutex_unlock(&scst_mutex);
440 scst_resume_activity();
/* Synchronously stop any in-flight retry timer before the tgt is freed. */
442 del_timer_sync(&tgt->retry_timer);
444 PRINT_INFO("Target %p for template %s unregistered successfully",
452 EXPORT_SYMBOL(scst_unregister);
/*
 * scst_susp_wait() - block until scst_cmd_count drops to zero.
 *
 * In interruptible mode, waits with SCST_SUSPENDING_TIMEOUT and (per the
 * elided branch) undoes the suspension via __scst_resume_activity() when
 * interrupted/timed out; otherwise waits unconditionally. Returns 0 or a
 * negative error (exact codes are in elided lines).
 */
454 static int scst_susp_wait(bool interruptible)
461 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
462 (atomic_read(&scst_cmd_count) == 0),
463 SCST_SUSPENDING_TIMEOUT);
/* Failure path: roll the suspension back before returning. */
465 __scst_resume_activity();
/* Non-interruptible variant: wait forever for the count to drain. */
471 wait_event(scst_dev_cmd_waitQ,
472 atomic_read(&scst_cmd_count) == 0);
474 TRACE_MGMT_DBG("wait_event() returned %d", res);
/*
 * scst_suspend_activity() - globally quiesce SCST command processing.
 *
 * Nested calls are counted in suspend_count (only the first caller does
 * the real work; the elided increment sits between the visible lines).
 * Sets SCST_FLAG_SUSPENDING/SUSPENDED, then waits in two phases for
 * scst_cmd_count to reach zero: a first wait while still "suspending",
 * then — after clearing SUSPENDING — a final wait. @interruptible selects
 * whether the waits (and the mutex acquisition) may be interrupted.
 * Returns 0 on success, negative error on interruption (elided paths).
 */
480 int scst_suspend_activity(bool interruptible)
488 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
493 mutex_lock(&scst_suspend_mutex);
495 TRACE_MGMT_DBG("suspend_count %d", suspend_count);
/* Already suspended by someone else: nothing more to do. */
497 if (suspend_count > 1)
500 set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
501 set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
/* Make the flag updates visible before sampling scst_cmd_count. */
502 smp_mb__after_set_bit();
505 * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
506 * information about scst_user behavior.
508 * ToDo: make the global suspending unneeded (Switch to per-device
509 * reference counting? That would mean to switch off from lockless
510 * implementation of scst_translate_lun().. )
513 if (atomic_read(&scst_cmd_count) != 0) {
514 PRINT_INFO("Waiting for %d active commands to complete... This "
515 "might take few minutes for disks or few hours for "
516 "tapes, if you use long executed commands, like "
517 "REWIND or FORMAT. In case, if you have a hung user "
518 "space device (i.e. made using scst_user module) not "
519 "responding to any commands, if might take virtually "
520 "forever until the corresponding user space "
521 "program recovers and starts responding or gets "
522 "killed.", atomic_read(&scst_cmd_count));
/* Phase 1: wait while SUSPENDING is still set. */
526 res = scst_susp_wait(interruptible);
530 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
531 smp_mb__after_clear_bit();
533 TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
534 atomic_read(&scst_cmd_count));
/* Phase 2: final drain after SUSPENDING has been cleared. */
536 res = scst_susp_wait(interruptible);
541 PRINT_INFO("%s", "All active commands completed");
544 mutex_unlock(&scst_suspend_mutex);
/* Error unwind (label elided): clear SUSPENDING before bailing out. */
551 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
552 smp_mb__after_clear_bit();
555 EXPORT_SYMBOL(scst_suspend_activity);
/*
 * __scst_resume_activity() - drop one suspension level; on the last one,
 * clear SCST_FLAG_SUSPENDED and wake everything that was parked: every
 * per-device cmd list, the init-cmd thread, and the mgmt-cmd thread (after
 * re-activating the first delayed management command, if any).
 * Caller must hold scst_suspend_mutex (the decrement line is elided).
 */
557 static void __scst_resume_activity(void)
559 struct scst_cmd_lists *l;
564 TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
/* Still suspended by other callers: keep flags set, just return. */
565 if (suspend_count > 0)
568 clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
569 smp_mb__after_clear_bit();
571 list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
572 wake_up_all(&l->cmd_list_waitQ);
574 wake_up_all(&scst_init_cmd_list_waitQ);
576 spin_lock_irq(&scst_mcmd_lock);
577 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
578 struct scst_mgmt_cmd *m;
/* Promote only the head entry; the rest follow as it completes. */
579 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
580 mgmt_cmd_list_entry);
581 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
583 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
585 spin_unlock_irq(&scst_mcmd_lock);
586 wake_up_all(&scst_mgmt_cmd_list_waitQ);
/*
 * scst_resume_activity() - public counterpart of scst_suspend_activity();
 * just wraps __scst_resume_activity() with scst_suspend_mutex.
 */
593 void scst_resume_activity(void)
597 mutex_lock(&scst_suspend_mutex);
598 __scst_resume_activity();
599 mutex_unlock(&scst_suspend_mutex);
604 EXPORT_SYMBOL(scst_resume_activity);
/*
 * scst_register_device() - attach SCST to a newly appeared scsi_device.
 *
 * With activity suspended and scst_mutex held: allocates a struct
 * scst_device, gives it a gendisk (rq_disk), links it on scst_dev_list,
 * and assigns the first registered dev handler whose type matches the
 * SCSI device type. Returns 0 on success, negative error otherwise.
 */
606 static int scst_register_device(struct scsi_device *scsidp)
609 struct scst_device *dev;
610 struct scst_dev_type *dt;
614 res = scst_suspend_activity(true);
618 if (mutex_lock_interruptible(&scst_mutex) != 0) {
623 res = scst_alloc_device(GFP_KERNEL, &dev);
627 dev->type = scsidp->type;
629 dev->rq_disk = alloc_disk(1);
630 if (dev->rq_disk == NULL) {
634 dev->rq_disk->major = SCST_MAJOR;
636 dev->scsi_dev = scsidp;
638 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
/* First matching handler type wins (break is in an elided line —
 * NOTE(review): confirm against full source). */
640 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
641 if (dt->type == scsidp->type) {
642 res = scst_assign_dev_handler(dev, dt);
650 mutex_unlock(&scst_mutex);
653 scst_resume_activity();
657 PRINT_INFO("Attached SCSI target mid-level at "
658 "scsi%d, channel %d, id %d, lun %d, type %d",
659 scsidp->host->host_no, scsidp->channel, scsidp->id,
660 scsidp->lun, scsidp->type);
662 PRINT_ERROR("Failed to attach SCSI target mid-level "
663 "at scsi%d, channel %d, id %d, lun %d, type %d",
664 scsidp->host->host_no, scsidp->channel, scsidp->id,
665 scsidp->lun, scsidp->type);
/* Error unwind (labels elided): unlink, drop the disk, free the device. */
672 list_del(&dev->dev_list_entry);
673 put_disk(dev->rq_disk);
676 scst_free_device(dev);
/*
 * scst_unregister_device() - detach SCST from a disappearing scsi_device.
 *
 * Finds the scst_device wrapping @scsidp, unlinks it, removes it from all
 * ACGs it belongs to, swaps in scst_null_devtype to detach the real
 * handler, then releases the gendisk and frees the device. All under
 * suspended activity + scst_mutex.
 */
680 static void scst_unregister_device(struct scsi_device *scsidp)
682 struct scst_device *d, *dev = NULL;
683 struct scst_acg_dev *acg_dev, *aa;
687 scst_suspend_activity(false);
688 mutex_lock(&scst_mutex);
690 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
691 if (d->scsi_dev == scsidp) {
693 TRACE_DBG("Target device %p found", dev)
698 PRINT_ERROR("%s", "Target device not found");
702 list_del(&dev->dev_list_entry);
/* _safe variant: scst_acg_remove_dev() deletes entries while we walk. */
704 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
705 dev_acg_dev_list_entry)
707 scst_acg_remove_dev(acg_dev->acg, dev);
/* Null handler == detach the current handler cleanly. */
710 scst_assign_dev_handler(dev, &scst_null_devtype);
712 put_disk(dev->rq_disk);
713 scst_free_device(dev);
715 PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
716 "id %d, lun %d, type %d", scsidp->host->host_no,
717 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
720 mutex_unlock(&scst_mutex);
721 scst_resume_activity();
/*
 * scst_dev_handler_check() - sanity-check a dev handler before use.
 *
 * parse() is mandatory; missing exec()/dev_done() just force the
 * corresponding *_atomic flags (the stage becomes a no-op or pass-through,
 * so it is safe in atomic context). Returns 0 or negative error (elided).
 */
727 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
731 if (dev_handler->parse == NULL) {
732 PRINT_ERROR("scst dev_type driver %s doesn't have a "
733 "parse() method.", dev_handler->name);
738 if (dev_handler->exec == NULL) {
/* Pass-through IO may be submitted from soft-IRQ only when the kernel
 * config explicitly allows it. */
739 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
740 dev_handler->exec_atomic = 1;
742 dev_handler->exec_atomic = 0;
746 if (dev_handler->dev_done == NULL)
747 dev_handler->dev_done_atomic = 1;
/*
 * scst_register_virtual_device() - create a virtual (non-scsi_dev-backed)
 * device and attach @dev_handler to it.
 *
 * Validates arguments and the handler, then — with activity suspended and
 * scst_mutex held — allocates the device, assigns it the next virtual id
 * (scst_virt_dev_last_id, protected by scst_mutex), links it on
 * scst_dev_list and attaches the handler. Returns the virtual id (>0) on
 * success or a negative error (return lines elided).
 */
754 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
755 const char *dev_name)
758 struct scst_device *dev = NULL;
762 if (dev_handler == NULL) {
763 PRINT_ERROR("%s: valid device handler must be supplied",
769 if (dev_name == NULL) {
770 PRINT_ERROR("%s: device name must be non-NULL", __func__);
775 res = scst_dev_handler_check(dev_handler);
779 res = scst_suspend_activity(true);
783 if (mutex_lock_interruptible(&scst_mutex) != 0) {
788 res = scst_alloc_device(GFP_KERNEL, &dev);
792 dev->type = dev_handler->type;
793 dev->scsi_dev = NULL;
/* NOTE(review): dev_name pointer is stored, not copied — caller must keep
 * the string alive for the device's lifetime. */
794 dev->virt_name = dev_name;
795 dev->virt_id = scst_virt_dev_last_id++;
797 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
801 rc = scst_assign_dev_handler(dev, dev_handler);
808 mutex_unlock(&scst_mutex);
811 scst_resume_activity();
815 PRINT_INFO("Attached SCSI target mid-level to virtual "
816 "device %s (id %d)", dev_name, dev->virt_id);
818 PRINT_INFO("Failed to attach SCSI target mid-level to "
819 "virtual device %s", dev_name);
/* Error unwind (labels elided): unlink and free the half-built device. */
826 list_del(&dev->dev_list_entry);
827 scst_free_device(dev);
830 EXPORT_SYMBOL(scst_register_virtual_device);
/*
 * scst_unregister_virtual_device() - destroy the virtual device with the
 * given id (as returned by scst_register_virtual_device()). Mirrors
 * scst_unregister_device(): unlink, remove from ACGs, detach handler via
 * scst_null_devtype, free. All under suspended activity + scst_mutex.
 */
832 void scst_unregister_virtual_device(int id)
834 struct scst_device *d, *dev = NULL;
835 struct scst_acg_dev *acg_dev, *aa;
839 scst_suspend_activity(false);
840 mutex_lock(&scst_mutex);
842 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
843 if (d->virt_id == id) {
845 TRACE_DBG("Target device %p (id %d) found", dev, id);
850 PRINT_ERROR("Target virtual device (id %d) not found", id);
854 list_del(&dev->dev_list_entry);
856 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
857 dev_acg_dev_list_entry)
859 scst_acg_remove_dev(acg_dev->acg, dev);
862 scst_assign_dev_handler(dev, &scst_null_devtype);
/* Log before freeing: the message reads dev->virt_name/virt_id. */
864 PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
865 "(id %d)", dev->virt_name, dev->virt_id);
867 scst_free_device(dev);
870 mutex_unlock(&scst_mutex);
871 scst_resume_activity();
876 EXPORT_SYMBOL(scst_unregister_virtual_device);
/*
 * __scst_register_dev_driver() - register a dev handler for real (pass-
 * through) SCSI devices.
 *
 * Checks interface version and handler sanity; without the exec-req-fifo
 * kernel patch or strict serializing, pass-through handlers (exec == NULL)
 * are rejected. Then, with activity suspended and scst_mutex held: rejects
 * duplicates, builds /proc entries, links the handler, and attaches it to
 * every already-known device of the matching SCSI type that still has the
 * null handler. Returns 0 or negative error (elided paths).
 */
878 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
881 struct scst_dev_type *dt;
882 struct scst_device *dev;
888 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
889 PRINT_ERROR("Incorrect version of dev handler %s",
895 res = scst_dev_handler_check(dev_type);
899 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
900 !defined(CONFIG_SCST_STRICT_SERIALIZING)
901 if (dev_type->exec == NULL) {
902 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
903 "supported. Consider applying on your kernel patch "
904 "scst_exec_req_fifo-<kernel-version>.patch or define "
905 "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
911 res = scst_suspend_activity(true);
915 if (mutex_lock_interruptible(&scst_mutex) != 0) {
921 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
922 if (strcmp(dt->name, dev_type->name) == 0) {
923 PRINT_ERROR("Device type handler \"%s\" already "
932 res = scst_build_proc_dev_handler_dir_entries(dev_type);
936 list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
/* Adopt existing devices: only real (scsi_dev-backed) devices that are
 * still on the null handler and whose SCSI type matches. */
938 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
939 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
941 if (dev->scsi_dev->type == dev_type->type)
942 scst_assign_dev_handler(dev, dev_type);
945 mutex_unlock(&scst_mutex);
946 scst_resume_activity();
949 PRINT_INFO("Device handler \"%s\" for type %d registered "
950 "successfully", dev_type->name, dev_type->type);
/* Error unwind (labels elided). */
958 mutex_unlock(&scst_mutex);
961 scst_resume_activity();
964 PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
965 dev_type->name, dev_type->type);
968 EXPORT_SYMBOL(__scst_register_dev_driver);
/*
 * scst_unregister_dev_driver() - undo __scst_register_dev_driver().
 *
 * Verifies the handler is registered, reverts every device using it to
 * scst_null_devtype, unlinks it and removes its /proc entries. All under
 * suspended activity + scst_mutex.
 */
970 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
972 struct scst_device *dev;
973 struct scst_dev_type *dt;
978 scst_suspend_activity(false);
979 mutex_lock(&scst_mutex);
981 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
982 if (strcmp(dt->name, dev_type->name) == 0) {
988 PRINT_ERROR("Dev handler \"%s\" isn't registered",
993 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
994 if (dev->handler == dev_type) {
995 scst_assign_dev_handler(dev, &scst_null_devtype);
996 TRACE_DBG("Dev handler removed from device %p", dev);
1000 list_del(&dev_type->dev_type_list_entry);
1002 mutex_unlock(&scst_mutex);
1003 scst_resume_activity();
/* /proc cleanup is done outside the lock/suspension. */
1005 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1007 PRINT_INFO("Device handler \"%s\" for type %d unloaded",
1008 dev_type->name, dev_type->type);
/* Not-registered unwind (label elided). */
1015 mutex_unlock(&scst_mutex);
1016 scst_resume_activity();
1019 EXPORT_SYMBOL(scst_unregister_dev_driver);
/*
 * __scst_register_virtual_dev_driver() - register a handler for virtual
 * devices. Lighter than the real-device variant: only version check,
 * handler sanity check and optional /proc entries — virtual handlers are
 * not kept on scst_dev_type_list here (no visible list_add in this view).
 * Returns 0 or negative error (elided paths).
 */
1021 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1022 const char *version)
1028 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1029 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1035 res = scst_dev_handler_check(dev_type);
1039 if (!dev_type->no_proc) {
1040 res = scst_build_proc_dev_handler_dir_entries(dev_type);
/* type == -1 means "no fixed SCSI type" for this handler. */
1045 if (dev_type->type != -1) {
1046 PRINT_INFO("Virtual device handler %s for type %d "
1047 "registered successfully", dev_type->name,
1050 PRINT_INFO("Virtual device handler \"%s\" registered "
1051 "successfully", dev_type->name);
1055 TRACE_EXIT_RES(res);
1059 PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1063 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
/*
 * scst_unregister_virtual_dev_driver() - remove a virtual dev handler's
 * /proc entries (if it had any) and log the unload.
 */
1065 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1069 if (!dev_type->no_proc)
1070 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1072 PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1077 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1079 /* Called under scst_mutex */
/*
 * scst_add_dev_threads() - spawn @num additional per-device command
 * threads for @dev, named "<handler>d<dev_num>_<n>", link them on
 * dev->threads_list, share one io_context among them (see comment below),
 * and start them. Returns 0 or negative error; on kthread_create failure
 * the freshly-allocated thr is freed on an elided error path.
 */
1080 int scst_add_dev_threads(struct scst_device *dev, int num)
1084 struct scst_cmd_thread_t *thr;
1085 struct io_context *ioc = NULL;
/* First pass (body elided) apparently counts existing threads into n. */
1090 list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1094 for (i = 0; i < num; i++) {
1095 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1098 PRINT_ERROR("Failed to allocate thr %d", res);
/* Bounded copy + explicit NUL: handler name may exceed nm's size. */
1101 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1102 nm[ARRAY_SIZE(nm)-1] = '\0';
1103 thr->cmd_thread = kthread_create(scst_cmd_thread,
1104 &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1105 if (IS_ERR(thr->cmd_thread)) {
1106 res = PTR_ERR(thr->cmd_thread);
1107 PRINT_ERROR("kthread_create() failed: %d", res);
1112 list_add(&thr->thread_list_entry, &dev->threads_list);
1114 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1115 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1117 * It would be better to keep io_context in tgt_dev and
1118 * dynamically assign it to the current thread on the IO
1119 * submission time to let each initiator have own
1120 * io_context. But, unfortunately, CFQ doesn't
1121 * support if a task has dynamically switched
1122 * io_context, it oopses on BUG_ON(!cic->dead_key) in
1123 * cic_free_func(). So, we have to have the same io_context
1124 * for all initiators.
/* Allocate once on the first iteration, then share via ioc_task_link(). */
1127 ioc = alloc_io_context(GFP_KERNEL, -1);
1128 TRACE_DBG("ioc %p (thr %d)", ioc, thr->cmd_thread->pid);
/* Replace the thread's default io_context with the shared one. */
1131 put_io_context(thr->cmd_thread->io_context);
1132 thr->cmd_thread->io_context = ioc_task_link(ioc);
1133 TRACE_DBG("Setting ioc %p on thr %d", ioc,
1134 thr->cmd_thread->pid);
1137 wake_up_process(thr->cmd_thread);
/* Drop our own reference; each thread holds its own link. */
1141 put_io_context(ioc);
1143 TRACE_EXIT_RES(res);
1147 /* Called under scst_mutex and suspended activity */
/*
 * scst_create_dev_threads() - give @dev its own private command-list and
 * thread pool when its handler requests dedicated threads
 * (handler->threads_num > 0); otherwise the device keeps using the shared
 * main lists (early return is on an elided line). On success the device's
 * cmd_lists is registered on scst_cmd_lists_list so suspend/resume can
 * wake its waiters, and p_cmd_lists is pointed at the private lists.
 */
1148 static int scst_create_dev_threads(struct scst_device *dev)
1155 if (dev->handler->threads_num <= 0)
1158 threads_num = dev->handler->threads_num;
1160 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1161 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1162 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1164 res = scst_add_dev_threads(dev, threads_num);
/* Registration under scst_suspend_mutex, as resume iterates this list. */
1168 mutex_lock(&scst_suspend_mutex);
1169 list_add_tail(&dev->cmd_lists.lists_list_entry,
1170 &scst_cmd_lists_list);
1171 mutex_unlock(&scst_suspend_mutex);
1173 dev->p_cmd_lists = &dev->cmd_lists;
1176 TRACE_EXIT_RES(res);
1180 /* Called under scst_mutex */
/*
 * scst_del_dev_threads() - stop and unlink up to @num of @dev's command
 * threads; @num < 0 means all of them (the loop then never hits the count
 * limit). kthread_stop() failures are only logged.
 */
1181 void scst_del_dev_threads(struct scst_device *dev, int num)
1183 struct scst_cmd_thread_t *ct, *tmp;
1188 list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1189 thread_list_entry) {
1190 int rc = kthread_stop(ct->cmd_thread);
1192 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1193 list_del(&ct->thread_list_entry);
/* Stop early once the requested number of threads is gone. */
1195 if ((num > 0) && (++i >= num))
1203 /* Called under scst_mutex and suspended activity */
/*
 * scst_stop_dev_threads() - inverse of scst_create_dev_threads(): stop all
 * per-device threads and, if the device was using its private cmd_lists,
 * unregister them from scst_cmd_lists_list.
 */
1204 static void scst_stop_dev_threads(struct scst_device *dev)
1208 if (list_empty(&dev->threads_list))
1211 scst_del_dev_threads(dev, -1);
1213 if (dev->p_cmd_lists == &dev->cmd_lists) {
1214 mutex_lock(&scst_suspend_mutex);
1215 list_del(&dev->cmd_lists.lists_list_entry);
1216 mutex_unlock(&scst_suspend_mutex);
1224 /* The activity supposed to be suspended and scst_mutex held */
/*
 * scst_assign_dev_handler() - switch @dev from its current handler to
 * @handler.
 *
 * Sequence: detach_tgt() for every tgt_dev, detach() the old handler,
 * stop old per-device threads, install @handler, create its threads,
 * attach() it, then attach_tgt() for every tgt_dev (tracking successes on
 * a local list so a mid-loop failure can be rolled back). On failure the
 * device is left on scst_null_devtype. Returns 0 or negative error.
 */
1225 int scst_assign_dev_handler(struct scst_device *dev,
1226 struct scst_dev_type *handler)
1229 struct scst_tgt_dev *tgt_dev;
/* Successfully attach_tgt()'ed tgt_devs, for rollback on later failure. */
1230 LIST_HEAD(attached_tgt_devs);
1234 sBUG_ON(handler == NULL);
/* No-op when the handler is unchanged. */
1236 if (dev->handler == handler)
1239 if (dev->handler && dev->handler->detach_tgt) {
1240 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1241 dev_tgt_dev_list_entry) {
1242 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1244 dev->handler->detach_tgt(tgt_dev);
1245 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1249 if (dev->handler && dev->handler->detach) {
1250 TRACE_DBG("%s", "Calling dev handler's detach()");
1251 dev->handler->detach(dev);
1252 TRACE_DBG("%s", "Old handler's detach() returned");
1255 scst_stop_dev_threads(dev);
1257 dev->handler = handler;
1260 res = scst_create_dev_threads(dev);
1265 if (handler && handler->attach) {
1266 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1267 res = handler->attach(dev);
1268 TRACE_DBG("New dev handler's attach() returned %d", res);
1270 PRINT_ERROR("New device handler's %s attach() "
1271 "failed: %d", handler->name, res);
1276 if (handler && handler->attach_tgt) {
1277 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1278 dev_tgt_dev_list_entry) {
1279 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1281 res = handler->attach_tgt(tgt_dev);
1282 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1284 PRINT_ERROR("Device handler's %s attach_tgt() "
1285 "failed: %d", handler->name, res);
1286 goto out_err_detach_tgt;
1288 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1289 &attached_tgt_devs);
/* Failure cleanup (labels elided): stop threads, fall back to null. */
1295 scst_stop_dev_threads(dev);
1299 dev->handler = &scst_null_devtype;
1302 TRACE_EXIT_RES(res);
/* out_err_detach_tgt: roll back only the tgt_devs attached above. */
1306 if (handler && handler->detach_tgt) {
1307 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1308 extra_tgt_dev_list_entry)
1310 TRACE_DBG("Calling handler's detach_tgt(%p)",
1312 handler->detach_tgt(tgt_dev);
1313 TRACE_DBG("%s", "Handler's detach_tgt() returned");
1316 if (handler && handler->detach) {
1317 TRACE_DBG("%s", "Calling handler's detach()");
1318 handler->detach(dev);
1319 TRACE_DBG("%s", "Handler's detach() returned");
/*
 * scst_cmd_threads_count() - return the current number of global command
 * threads, sampled under cmd_threads_mutex to narrow the race with
 * concurrent add/del (the value can still change right after return).
 */
1324 int scst_cmd_threads_count(void)
1329 * Just to lower the race window, when user can get just changed value
1331 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1332 i = scst_threads_info.nr_cmd_threads;
1333 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
/*
 * scst_threads_info_init() - zero scst_threads_info and initialize its
 * mutex and thread list. Must run before any thread add/del.
 */
1337 static void scst_threads_info_init(void)
1339 memset(&scst_threads_info, 0, sizeof(scst_threads_info));
1340 mutex_init(&scst_threads_info.cmd_threads_mutex);
1341 INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
1344 /* scst_threads_info.cmd_threads_mutex supposed to be held */
/*
 * __scst_del_cmd_threads() - stop and unlink @num global command threads.
 * Rejects num <= 0 and num greater than the current thread count.
 * kthread_stop() failures are only logged; the count is decremented per
 * stopped thread (loop-termination lines are elided in this view).
 */
1345 void __scst_del_cmd_threads(int num)
1347 struct scst_cmd_thread_t *ct, *tmp;
1352 i = scst_threads_info.nr_cmd_threads;
1353 if (num <= 0 || num > i) {
1354 PRINT_ERROR("can not del %d cmd threads from %d", num, i);
1358 list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1359 thread_list_entry) {
1362 res = kthread_stop(ct->cmd_thread);
1364 TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1365 list_del(&ct->thread_list_entry);
1367 scst_threads_info.nr_cmd_threads--;
1377 /* scst_threads_info.cmd_threads_mutex supposed to be held */
/*
 * __scst_add_cmd_threads() - spawn @num global command threads
 * ("scsi_tgt<N>") serving scst_main_cmd_lists, sharing one io_context
 * (scst_ioc) for the CFQ reason documented in scst_add_dev_threads().
 * On mid-loop failure, already-created threads are torn down via the
 * elided error path calling __scst_del_cmd_threads(i - 1).
 */
1378 int __scst_add_cmd_threads(int num)
/* Monotonic suffix for thread names across all add calls. */
1381 static int scst_thread_num;
1385 for (i = 0; i < num; i++) {
1386 struct scst_cmd_thread_t *thr;
1388 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1391 PRINT_ERROR("fail to allocate thr %d", res);
1394 thr->cmd_thread = kthread_create(scst_cmd_thread,
1395 &scst_main_cmd_lists, "scsi_tgt%d",
1397 if (IS_ERR(thr->cmd_thread)) {
1398 res = PTR_ERR(thr->cmd_thread);
1399 PRINT_ERROR("kthread_create() failed: %d", res);
1404 list_add(&thr->thread_list_entry,
1405 &scst_threads_info.cmd_threads_list);
1406 scst_threads_info.nr_cmd_threads++;
1408 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1409 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1410 /* See comment in scst_add_dev_threads() */
/* Lazily create the module-wide shared io_context on first use. */
1411 if (scst_ioc == NULL) {
1412 scst_ioc = alloc_io_context(GFP_KERNEL, -1);
1413 TRACE_DBG("scst_ioc %p (thr %d)", scst_ioc,
1414 thr->cmd_thread->pid);
1417 put_io_context(thr->cmd_thread->io_context);
1418 thr->cmd_thread->io_context = ioc_task_link(scst_ioc);
1419 TRACE_DBG("Setting scst_ioc %p on thr %d",
1420 scst_ioc, thr->cmd_thread->pid);
1423 wake_up_process(thr->cmd_thread);
1428 TRACE_EXIT_RES(res);
/* Error path: kthread_create failed at iteration i, so i - 1 threads
 * were fully created and must be stopped.
 * NOTE(review): off-by-one here depends on elided control flow (whether
 * the kmalloc-failure path also reaches this call) — confirm against the
 * full source. */
1433 __scst_del_cmd_threads(i - 1);
/*
 * scst_add_cmd_threads() - locked public wrapper around
 * __scst_add_cmd_threads(). Returns its result.
 */
1437 int scst_add_cmd_threads(int num)
1443 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1444 res = __scst_add_cmd_threads(num);
1445 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1447 TRACE_EXIT_RES(res);
1450 EXPORT_SYMBOL(scst_add_cmd_threads);
/*
 * scst_del_cmd_threads() - locked public wrapper around
 * __scst_del_cmd_threads().
 */
1452 void scst_del_cmd_threads(int num)
1456 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1457 __scst_del_cmd_threads(num);
1458 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1463 EXPORT_SYMBOL(scst_del_cmd_threads);
/*
 * scst_stop_all_threads() - module-shutdown helper: stop every global
 * command thread plus the mgmt-cmd, mgmt and init-cmd service threads
 * (each guarded against never having been started).
 */
1465 static void scst_stop_all_threads(void)
1469 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1470 __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1471 if (scst_threads_info.mgmt_cmd_thread)
1472 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1473 if (scst_threads_info.mgmt_thread)
1474 kthread_stop(scst_threads_info.mgmt_thread);
1475 if (scst_threads_info.init_cmd_thread)
1476 kthread_stop(scst_threads_info.init_cmd_thread);
1477 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
/*
 * scst_start_all_threads() - module-init helper: start @num global command
 * threads, then the init-cmd ("scsi_tgt_init"), mgmt-cmd ("scsi_tgt_mc")
 * and mgmt ("scsi_tgt_mgmt") service threads. On kthread_run() failure the
 * corresponding pointer is reset to NULL (so scst_stop_all_threads() skips
 * it) and the function bails via elided goto paths. Returns 0 or a
 * negative error.
 */
1483 static int scst_start_all_threads(int num)
1489 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1490 res = __scst_add_cmd_threads(num);
1494 scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
1495 NULL, "scsi_tgt_init");
1496 if (IS_ERR(scst_threads_info.init_cmd_thread)) {
1497 res = PTR_ERR(scst_threads_info.init_cmd_thread);
1498 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1499 scst_threads_info.init_cmd_thread = NULL;
1503 scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
1504 NULL, "scsi_tgt_mc");
1505 if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
1506 res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
1507 PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
1508 scst_threads_info.mgmt_cmd_thread = NULL;
1512 scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
1513 NULL, "scsi_tgt_mgmt");
1514 if (IS_ERR(scst_threads_info.mgmt_thread)) {
1515 res = PTR_ERR(scst_threads_info.mgmt_thread);
1516 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1517 scst_threads_info.mgmt_thread = NULL;
1522 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1523 TRACE_EXIT_RES(res);
1531 EXPORT_SYMBOL(scst_get);
1537 EXPORT_SYMBOL(scst_put);
/*
 * scst_add() - class_interface add callback, invoked when the SCSI layer
 * adds a device. Signature differs across kernel versions (class_device
 * before 2.6.26, device after). Skips devices from SCST's own local SCSI
 * host (SCST_LOCAL_NAME) to avoid re-importing exported devices; others
 * are registered via scst_register_device().
 */
1539 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1540 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1542 static int scst_add(struct device *cdev, struct class_interface *intf)
1545 struct scsi_device *scsidp;
1550 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1551 scsidp = to_scsi_device(cdev->dev);
1553 scsidp = to_scsi_device(cdev->parent);
1556 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1557 res = scst_register_device(scsidp);
/*
 * scst_remove() - class_interface "device going away" callback.
 *
 * Mirror of scst_add(): recovers the scsi_device and unregisters it
 * from SCST, again skipping devices owned by SCST's local target host
 * (SCST_LOCAL_NAME).  Same pre/post-2.6.26 argument-type split.
 *
 * NOTE(review): #else/#endif and brace lines are elided in this excerpt.
 */
1563 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1564 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1566 static void scst_remove(struct device *cdev, struct class_interface *intf)
1569 	struct scsi_device *scsidp;
1573 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1574 	scsidp = to_scsi_device(cdev->dev);
1576 	scsidp = to_scsi_device(cdev->parent);
/* Skip devices exported by SCST's own local target driver. */
1579 	if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1580 		scst_unregister_device(scsidp);
/*
 * Hook SCST into the SCSI device class so it learns about device
 * hotplug: pre-2.6.26 class_interface uses .add/.remove; 2.6.26+
 * renamed the fields to .add_dev/.remove_dev.
 * NOTE(review): the ".add = scst_add," line of the old variant and the
 * #else/#endif lines are elided in this excerpt.
 */
1586 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1587 static struct class_interface scst_interface = {
1589 	.remove = scst_remove,
1592 static struct class_interface scst_interface = {
1593 	.add_dev = scst_add,
1594 	.remove_dev = scst_remove,
/*
 * scst_print_config() - log which compile-time CONFIG_SCST_* features
 * this build has enabled, as a single comma-separated info line.
 *
 * 'i' tracks the current end of 'buf'; 'j' appears to snapshot 'i'
 * after the prefix so that (j == i) selects "" (no comma) for the
 * first feature and ", " for the rest.
 * NOTE(review): the "j = i;" assignment, the buf declaration and the
 * #endif lines are elided in this excerpt — confirm against the full
 * file.  snprintf's bounded (sizeof(buf) - i) keeps writes in-buffer
 * assuming i never exceeds sizeof(buf).
 */
1598 static void __init scst_print_config(void)
1603 	i = snprintf(buf, sizeof(buf), "Enabled features: ");
1606 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1607 	i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1610 #ifdef CONFIG_SCST_EXTRACHECKS
1611 	i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1612 		(j == i) ? "" : ", ");
1615 #ifdef CONFIG_SCST_TRACING
1616 	i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1617 		(j == i) ? "" : ", ");
1620 #ifdef CONFIG_SCST_DEBUG
1621 	i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1622 		(j == i) ? "" : ", ");
1625 #ifdef CONFIG_SCST_DEBUG_TM
1626 	i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1627 		(j == i) ? "" : ", ");
1630 #ifdef CONFIG_SCST_DEBUG_RETRY
1631 	i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1632 		(j == i) ? "" : ", ");
1635 #ifdef CONFIG_SCST_DEBUG_OOM
1636 	i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1637 		(j == i) ? "" : ", ");
1640 #ifdef CONFIG_SCST_DEBUG_SN
1641 	i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1642 		(j == i) ? "" : ", ");
1645 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1646 	i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1647 		(j == i) ? "" : ", ");
1650 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1651 	i += snprintf(&buf[i], sizeof(buf) - i,
1652 		"%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1653 		(j == i) ? "" : ", ");
1656 #ifdef CONFIG_SCST_STRICT_SECURITY
1657 	i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1658 		(j == i) ? "" : ", ");
1662 	PRINT_INFO("%s", buf);
/*
 * init_scst() - module entry point: bring the whole SCST core up.
 *
 * Ordering: compile-time sanity checks, global lists/locks, thread
 * count, slab caches, mempools, memory limits, SGV pools, default
 * access-control group, SCSI class interface, per-CPU tasklets, kernel
 * threads, /proc entries.  Failures unwind in strict reverse order via
 * the goto-label ladder at the bottom.
 *
 * NOTE(review): many lines (braces, "goto out_*" statements after
 * failed checks, #else/#endif, some declarations) are elided in this
 * excerpt; comments annotate only the visible statements.
 */
1665 static int __init init_scst(void)
/*
 * Compile-time checks: the private sense buffer must match the
 * kernel's, and the serial-number fields compared across structs must
 * have identical widths.
 */
1672 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1674 		struct scsi_request *req;
1675 		BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1676 			sizeof(req->sr_sense_buffer));
1680 		struct scsi_sense_hdr *shdr;
1681 		BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1685 		struct scst_tgt_dev *t;
1687 		BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1688 		BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
/* SCST data directions must map 1:1 onto the DMA API's constants. */
1691 	BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
1692 	BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
1693 	BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
1694 	BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
/* Initialize all global locks, lists and wait queues. */
1696 	mutex_init(&scst_mutex);
1697 	INIT_LIST_HEAD(&scst_template_list);
1698 	INIT_LIST_HEAD(&scst_dev_list);
1699 	INIT_LIST_HEAD(&scst_dev_type_list);
1700 	spin_lock_init(&scst_main_lock);
1701 	INIT_LIST_HEAD(&scst_acg_list);
1702 	spin_lock_init(&scst_init_lock);
1703 	init_waitqueue_head(&scst_init_cmd_list_waitQ);
1704 	INIT_LIST_HEAD(&scst_init_cmd_list);
1705 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1706 	scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1708 	atomic_set(&scst_cmd_count, 0);
1709 	spin_lock_init(&scst_mcmd_lock);
1710 	INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1711 	INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1712 	init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1713 	init_waitqueue_head(&scst_mgmt_waitQ);
1714 	spin_lock_init(&scst_mgmt_lock);
1715 	INIT_LIST_HEAD(&scst_sess_init_list);
1716 	INIT_LIST_HEAD(&scst_sess_shut_list);
1717 	init_waitqueue_head(&scst_dev_cmd_waitQ);
1718 	mutex_init(&scst_suspend_mutex);
1719 	INIT_LIST_HEAD(&scst_cmd_lists_list);
1720 	scst_virt_dev_last_id = 1;
1721 	spin_lock_init(&scst_temp_UA_lock);
/* The default/main command list is a permanent member of the list of
 * command lists. */
1723 	spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1724 	INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1725 	init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1726 	list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1727 		&scst_cmd_lists_list);
1729 	scst_num_cpus = num_online_cpus();
1731 	/* ToDo: register_cpu_notifier() */
/* Default and sanity-check the module parameter: one thread per CPU. */
1733 	if (scst_threads == 0)
1734 		scst_threads = scst_num_cpus;
1736 	if (scst_threads < 1) {
1737 		PRINT_ERROR("%s", "scst_threads can not be less than 1");
1738 		scst_threads = scst_num_cpus;
1741 	scst_threads_info_init();
/*
 * Create slab cache 'p' for struct 's'; NOTE(review): the NULL-check /
 * "goto o" part of this macro is elided from this excerpt.
 */
1743 #define INIT_CACHEP(p, s, o) do {					\
1744 		p = KMEM_CACHE(s, SCST_SLAB_FLAGS);			\
1745 		TRACE_MEM("Slab create: %s at %p size %zd", #s, p,	\
1746 			  sizeof(struct s));				\
1753 	INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1754 	INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1755 			out_destroy_mgmt_cache);
1756 	INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1757 			out_destroy_mgmt_stub_cache);
/* Wrapper type so KMEM_CACHE() can size a cache for raw sense data. */
1759 		struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1760 		INIT_CACHEP(scst_sense_cachep, scst_sense,
1761 			    out_destroy_ua_cache);
1763 	INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
1764 	INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1765 	INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1766 	INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
/* Mempools guarantee forward progress under memory pressure. */
1768 	scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1769 		mempool_free_slab, scst_mgmt_cachep);
1770 	if (scst_mgmt_mempool == NULL) {
1772 		goto out_destroy_acg_cache;
1775 	scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1776 		mempool_free_slab, scst_mgmt_stub_cachep);
1777 	if (scst_mgmt_stub_mempool == NULL) {
1779 		goto out_destroy_mgmt_mempool;
1782 	scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
1783 		mempool_free_slab, scst_ua_cachep);
1784 	if (scst_ua_mempool == NULL) {
1786 		goto out_destroy_mgmt_stub_mempool;
/* "Loosing sense may have fatal consequences" — hence the larger pool. */
1792 	scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
1793 		mempool_free_slab, scst_sense_cachep);
1794 	if (scst_sense_mempool == NULL) {
1796 		goto out_destroy_ua_mempool;
/*
 * Default command-memory cap: roughly a quarter of total RAM in MB;
 * the 32-bit path clamps via min() (second operand elided here).
 */
1799 	if (scst_max_cmd_mem == 0) {
1802 #if BITS_PER_LONG == 32
1803 		scst_max_cmd_mem = min(
1804 			(((uint64_t)si.totalram << PAGE_SHIFT) >> 20) >> 2,
1807 		scst_max_cmd_mem = ((si.totalram << PAGE_SHIFT) >> 20) >> 2;
/* Per-device cap must not exceed the global cap; default is 40% of it. */
1811 	if (scst_max_dev_cmd_mem != 0) {
1812 		if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1813 			PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1814 				"scst_max_cmd_mem (%d)",
1815 				scst_max_dev_cmd_mem,
1817 			scst_max_dev_cmd_mem = scst_max_cmd_mem;
1820 		scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
/* Convert the MB cap to pages for the SGV pool subsystem. */
1822 	res = scst_sgv_pools_init(
1823 		((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1825 		goto out_destroy_sense_mempool;
1827 	scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1828 	if (scst_default_acg == NULL) {
1830 		goto out_destroy_sgv_pool;
/* From here on SCST receives add/remove callbacks for SCSI devices. */
1833 	res = scsi_register_interface(&scst_interface);
1837 	scst_scsi_op_list_init();
/* One command tasklet per CPU for soft-IRQ context processing. */
1839 	for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1840 		spin_lock_init(&scst_tasklets[i].tasklet_lock);
1841 		INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1842 		tasklet_init(&scst_tasklets[i].tasklet,
1843 			     (void *)scst_cmd_tasklet,
1844 			     (unsigned long)&scst_tasklets[i]);
1847 	TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1850 	res = scst_start_all_threads(scst_threads);
1852 		goto out_thread_free;
1854 	res = scst_proc_init_module();
1856 		goto out_thread_free;
1859 	PRINT_INFO("SCST version %s loaded successfully (max mem for "
1860 		"commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1861 		scst_max_cmd_mem, scst_max_dev_cmd_mem);
1863 	scst_print_config();
1866 	TRACE_EXIT_RES(res);
/*
 * Error unwind: each label tears down exactly what was built before
 * its corresponding failure point, in reverse order of construction.
 */
1870 	scst_stop_all_threads();
1872 	scsi_unregister_interface(&scst_interface);
1875 	scst_destroy_acg(scst_default_acg);
1877 out_destroy_sgv_pool:
1878 	scst_sgv_pools_deinit();
1880 out_destroy_sense_mempool:
1881 	mempool_destroy(scst_sense_mempool);
1883 out_destroy_ua_mempool:
1884 	mempool_destroy(scst_ua_mempool);
1886 out_destroy_mgmt_stub_mempool:
1887 	mempool_destroy(scst_mgmt_stub_mempool);
1889 out_destroy_mgmt_mempool:
1890 	mempool_destroy(scst_mgmt_mempool);
1892 out_destroy_acg_cache:
1893 	kmem_cache_destroy(scst_acgd_cachep);
1895 out_destroy_tgt_cache:
1896 	kmem_cache_destroy(scst_tgtd_cachep);
1898 out_destroy_sess_cache:
1899 	kmem_cache_destroy(scst_sess_cachep);
1901 out_destroy_cmd_cache:
1902 	kmem_cache_destroy(scst_cmd_cachep);
1904 out_destroy_sense_cache:
1905 	kmem_cache_destroy(scst_sense_cachep);
1907 out_destroy_ua_cache:
1908 	kmem_cache_destroy(scst_ua_cachep);
1910 out_destroy_mgmt_stub_cache:
1911 	kmem_cache_destroy(scst_mgmt_stub_cachep);
1913 out_destroy_mgmt_cache:
1914 	kmem_cache_destroy(scst_mgmt_cachep);
/*
 * exit_scst() - module exit: tear everything down in reverse order of
 * init_scst(): /proc, threads, SCSI class interface, default ACG, SGV
 * pools, mempools, then slab caches.
 *
 * NOTE(review): braces and some lines are elided in this excerpt.
 */
1918 static void __exit exit_scst(void)
1922 	/* ToDo: unregister_cpu_notifier() */
1924 	scst_proc_cleanup_module();
1926 	scst_stop_all_threads();
1928 	scsi_unregister_interface(&scst_interface);
1929 	scst_destroy_acg(scst_default_acg);
1931 	scst_sgv_pools_deinit();
/* Destroy slab cache 'p' (trace line, if any, elided from excerpt). */
1933 #define DEINIT_CACHEP(p) do {		\
1934 		kmem_cache_destroy(p);	\
/* Mempools must be destroyed before the caches backing them. */
1938 	mempool_destroy(scst_mgmt_mempool);
1939 	mempool_destroy(scst_mgmt_stub_mempool);
1940 	mempool_destroy(scst_ua_mempool);
1941 	mempool_destroy(scst_sense_mempool);
1943 	DEINIT_CACHEP(scst_mgmt_cachep);
1944 	DEINIT_CACHEP(scst_mgmt_stub_cachep);
1945 	DEINIT_CACHEP(scst_ua_cachep);
1946 	DEINIT_CACHEP(scst_sense_cachep);
1947 	DEINIT_CACHEP(scst_cmd_cachep);
1948 	DEINIT_CACHEP(scst_sess_cachep);
1949 	DEINIT_CACHEP(scst_tgtd_cachep);
1950 	DEINIT_CACHEP(scst_acgd_cachep);
/* Drop the shared IO context if the export patch made one available. */
1952 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1953 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1954 	put_io_context(scst_ioc);
1958 	PRINT_INFO("%s", "SCST unloaded");
/* Module entry/exit registration and metadata. */
1965 module_init(init_scst);
1966 module_exit(exit_scst);
1968 MODULE_AUTHOR("Vladislav Bolkhovitin");
1969 MODULE_LICENSE("GPL");
1970 MODULE_DESCRIPTION("SCSI target core");
1971 MODULE_VERSION(SCST_VERSION_STRING);