4 * Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2004 - 2005 Leonid Stoljar
6 * Copyright (C) 2007 - 2008 CMS Distribution Limited
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
33 #include "scst_priv.h"
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not \
38 recommended for performance reasons. Consider changing VMSPLIT \
39 option or use a 64-bit configuration instead. See README file for \
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44 !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied on \
46 your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined. \
47 Pass-through dev handlers will not be supported."
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
51 #if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
52 #warning "Patch export_alloc_io_context-<kernel-version>.patch was not applied \
53 on your kernel. SCST will be working with not the best performance."
/*
 * NOTE(review): this file is a numbered dump; the leading integers are the
 * original source line numbers and gaps in the numbering indicate elided
 * lines. Comments below describe only what the visible lines establish.
 */
58 ** SCST global variables. They are all uninitialized to have their layout in
59 ** memory be exactly as specified. Otherwise compiler puts zero-initialized
60 ** variable separately from nonzero-initialized ones.
64 * All targets, devices and dev_types management is done under this mutex.
66 * It must NOT be used in any works (schedule_work(), etc.), because
67 * otherwise a deadlock (double lock, actually) is possible, e.g., with
68 * scst_user detach_tgt(), which is called under scst_mutex and calls
69 * flush_scheduled_work().
71 struct mutex scst_mutex;
73 /* All 3 protected by scst_mutex */
74 static struct list_head scst_template_list;
75 struct list_head scst_dev_list;
76 struct list_head scst_dev_type_list;
78 spinlock_t scst_main_lock;
/* kmem caches and their mempools for mgmt cmds, UAs and sense buffers */
80 static struct kmem_cache *scst_mgmt_cachep;
81 mempool_t *scst_mgmt_mempool;
82 static struct kmem_cache *scst_mgmt_stub_cachep;
83 mempool_t *scst_mgmt_stub_mempool;
84 static struct kmem_cache *scst_ua_cachep;
85 mempool_t *scst_ua_mempool;
86 static struct kmem_cache *scst_sense_cachep;
87 mempool_t *scst_sense_mempool;
88 struct kmem_cache *scst_tgtd_cachep;
89 struct kmem_cache *scst_sess_cachep;
90 struct kmem_cache *scst_acgd_cachep;
/* Access-control groups; scst_default_acg is the fallback group */
92 struct list_head scst_acg_list;
93 struct scst_acg *scst_default_acg;
/* State for the init-cmd processing thread */
95 spinlock_t scst_init_lock;
96 wait_queue_head_t scst_init_cmd_list_waitQ;
97 struct list_head scst_init_cmd_list;
98 unsigned int scst_init_poll_cnt;
100 struct kmem_cache *scst_cmd_cachep;
102 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
103 unsigned long scst_trace_flag;
106 unsigned long scst_flags;
/* Count of outstanding commands; suspend waits for it to reach 0 */
107 atomic_t scst_cmd_count;
109 struct scst_cmd_lists scst_main_cmd_lists;
111 struct scst_tasklet scst_tasklets[NR_CPUS];
/* Management (task mgmt) command queues, protected by scst_mcmd_lock */
113 spinlock_t scst_mcmd_lock;
114 struct list_head scst_active_mgmt_cmd_list;
115 struct list_head scst_delayed_mgmt_cmd_list;
116 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
118 wait_queue_head_t scst_mgmt_waitQ;
119 spinlock_t scst_mgmt_lock;
120 struct list_head scst_sess_init_list;
121 struct list_head scst_sess_shut_list;
/* Woken when scst_cmd_count drops; see scst_susp_wait() */
123 wait_queue_head_t scst_dev_cmd_waitQ;
125 static struct mutex scst_suspend_mutex;
126 /* protected by scst_suspend_mutex */
127 static struct list_head scst_cmd_lists_list;
129 static int scst_threads;
130 struct scst_threads_info_t scst_threads_info;
/* Suspend nesting depth; protected by scst_suspend_mutex */
132 static int suspend_count;
134 static int scst_virt_dev_last_id; /* protected by scst_mutex */
137 * This buffer and lock are intended to avoid memory allocation, which
138 * could fail in improper places.
140 spinlock_t scst_temp_UA_lock;
141 uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
143 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
144 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
/* Shared io_context for all cmd threads; see comment in scst_add_dev_threads() */
145 static struct io_context *scst_ioc;
/* Memory limits in MB; exposed as module parameters below */
149 static unsigned int scst_max_cmd_mem;
150 unsigned int scst_max_dev_cmd_mem;
152 module_param_named(scst_threads, scst_threads, int, 0);
153 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
155 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
156 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
157 "all SCSI commands of all devices at any given time in MB");
159 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
160 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
161 "by all SCSI commands of a device at any given time in MB");
/* Placeholder handler assigned to devices that have no real handler */
163 struct scst_dev_type scst_null_devtype = {
167 static void __scst_resume_activity(void);
/*
 * __scst_register_target_template() - register a target driver template.
 *
 * Validates the interface version and mandatory callbacks (detect(),
 * release(), xmit_response()), creates the template's /proc entries,
 * calls the driver's detect() and, on success, links the template onto
 * scst_template_list under scst_mutex. Error paths (elided in this dump)
 * unwind via the proc-cleanup label below.
 */
169 int __scst_register_target_template(struct scst_tgt_template *vtt,
173 struct scst_tgt_template *t;
174 static DEFINE_MUTEX(m);
178 INIT_LIST_HEAD(&vtt->tgt_list);
/* Reject mismatched SCST core/driver interface versions up front */
180 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
181 PRINT_ERROR("Incorrect version of target %s", vtt->name);
187 PRINT_ERROR("Target driver %s doesn't have a "
188 "detect() method.", vtt->name);
194 PRINT_ERROR("Target driver %s doesn't have a "
195 "release() method.", vtt->name);
200 if (!vtt->xmit_response) {
201 PRINT_ERROR("Target driver %s doesn't have a "
202 "xmit_response() method.", vtt->name);
207 if (vtt->threads_num < 0) {
208 PRINT_ERROR("Wrong threads_num value %d for "
209 "target \"%s\"", vtt->threads_num,
215 if (!vtt->no_proc_entry) {
216 res = scst_build_proc_target_dir_entries(vtt);
/* No rdy_to_xfer() means the (no-op) call is trivially atomic-safe */
221 if (vtt->rdy_to_xfer == NULL)
222 vtt->rdy_to_xfer_atomic = 1;
/* Local mutex 'm' serializes concurrent registrations of templates */
224 if (mutex_lock_interruptible(&m) != 0)
227 if (mutex_lock_interruptible(&scst_mutex) != 0)
/* Duplicate-name check under scst_mutex */
229 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
230 if (strcmp(t->name, vtt->name) == 0) {
231 PRINT_ERROR("Target driver %s already registered",
233 mutex_unlock(&scst_mutex);
237 mutex_unlock(&scst_mutex);
/* detect() is called without scst_mutex held */
239 TRACE_DBG("%s", "Calling target driver's detect()");
240 res = vtt->detect(vtt);
241 TRACE_DBG("Target driver's detect() returned %d", res);
243 PRINT_ERROR("%s", "The detect() routine failed");
248 mutex_lock(&scst_mutex);
249 list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
250 mutex_unlock(&scst_mutex);
254 PRINT_INFO("Target template %s registered successfully", vtt->name);
/* Error unwind: drop the /proc entries created above */
263 scst_cleanup_proc_target_dir_entries(vtt);
269 PRINT_ERROR("Failed to register target template %s", vtt->name);
272 EXPORT_SYMBOL(__scst_register_target_template);
/*
 * scst_unregister_target_template() - unregister a target driver template.
 *
 * Verifies the template is actually registered, unregisters every target
 * still attached to it (dropping/retaking scst_mutex around each
 * scst_unregister() call — the elided lines presumably restart the list
 * walk after relocking; confirm against full source), then unlinks the
 * template and removes its /proc entries.
 */
274 void scst_unregister_target_template(struct scst_tgt_template *vtt)
276 struct scst_tgt *tgt;
277 struct scst_tgt_template *t;
282 mutex_lock(&scst_mutex);
284 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
285 if (strcmp(t->name, vtt->name) == 0) {
291 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
296 list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
/* scst_unregister() may sleep; can't hold scst_mutex across it */
297 mutex_unlock(&scst_mutex);
298 scst_unregister(tgt);
299 mutex_lock(&scst_mutex);
302 list_del(&vtt->scst_template_list_entry);
304 PRINT_INFO("Target template %s unregistered successfully", vtt->name);
307 mutex_unlock(&scst_mutex);
309 scst_cleanup_proc_target_dir_entries(vtt);
314 EXPORT_SYMBOL(scst_unregister_target_template);
/*
 * scst_register() - register one target instance for a template.
 *
 * Allocates and initializes a struct scst_tgt (session list, retry
 * machinery, retry timer), suspends SCST activity, builds the target's
 * default ACG name and /proc entries, and links the target onto the
 * template's tgt_list under scst_mutex. Returns the new target, or an
 * error result on the (elided) failure paths.
 */
316 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
317 const char *target_name)
319 struct scst_tgt *tgt;
324 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
326 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
331 INIT_LIST_HEAD(&tgt->sess_list);
332 init_waitqueue_head(&tgt->unreg_waitQ);
334 tgt->sg_tablesize = vtt->sg_tablesize;
335 spin_lock_init(&tgt->tgt_lock);
336 INIT_LIST_HEAD(&tgt->retry_cmd_list);
337 atomic_set(&tgt->finished_cmds, 0);
/* Old-style timer API: handler gets the tgt pointer via .data */
338 init_timer(&tgt->retry_timer);
339 tgt->retry_timer.data = (unsigned long)tgt;
340 tgt->retry_timer.function = scst_tgt_retry_timer_fn;
/* Quiesce all outstanding commands before touching global state */
342 rc = scst_suspend_activity(true);
344 goto out_free_tgt_err;
346 if (mutex_lock_interruptible(&scst_mutex) != 0) {
348 goto out_resume_free;
351 if (target_name != NULL) {
/* "<ACG name>_<target name>" plus separator and NUL */
352 int len = strlen(target_name) + 1 +
353 strlen(SCST_DEFAULT_ACG_NAME) + 1;
355 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
356 if (tgt->default_group_name == NULL) {
357 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
358 "group name failed");
360 goto out_unlock_resume;
362 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
366 rc = scst_build_proc_target_entries(tgt);
370 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
372 mutex_unlock(&scst_mutex);
373 scst_resume_activity();
375 PRINT_INFO("Target %s (%p) for template %s registered successfully",
376 target_name, tgt, vtt->name);
/* Error unwind path (labels elided in this dump) */
383 kfree(tgt->default_group_name);
386 mutex_unlock(&scst_mutex);
389 scst_resume_activity();
396 PRINT_ERROR("Failed to register target %s for template %s (error %d)",
397 target_name, vtt->name, rc);
400 EXPORT_SYMBOL(scst_register);
/*
 * test_sess_list() - return nonzero when the target's session list is
 * empty; used as the wait_event() condition in scst_unregister().
 * Takes scst_mutex to read the list consistently.
 */
402 static inline int test_sess_list(struct scst_tgt *tgt)
405 mutex_lock(&scst_mutex);
406 res = list_empty(&tgt->sess_list);
407 mutex_unlock(&scst_mutex);
/*
 * scst_unregister() - tear down one target instance.
 *
 * Calls the driver's release(), sanity-checks that no session is still
 * in READY shutdown phase, waits for all sessions to shut down, then
 * (with activity suspended and scst_mutex held) unlinks the target,
 * removes its /proc entries and frees its resources. The retry timer is
 * stopped only after activity is resumed.
 */
411 void scst_unregister(struct scst_tgt *tgt)
413 struct scst_session *sess;
414 struct scst_tgt_template *vtt = tgt->tgtt;
418 TRACE_DBG("%s", "Calling target driver's release()");
419 tgt->tgtt->release(tgt);
420 TRACE_DBG("%s", "Target driver's release() returned");
422 mutex_lock(&scst_mutex);
423 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
/* release() must have initiated shutdown of every session */
424 sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
426 mutex_unlock(&scst_mutex);
428 TRACE_DBG("%s", "Waiting for sessions shutdown");
429 wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
430 TRACE_DBG("%s", "wait_event() returned");
/* Non-interruptible suspend: unregister must not fail here */
432 scst_suspend_activity(false);
433 mutex_lock(&scst_mutex);
435 list_del(&tgt->tgt_list_entry);
437 scst_cleanup_proc_target_entries(tgt);
439 kfree(tgt->default_group_name);
441 mutex_unlock(&scst_mutex);
442 scst_resume_activity();
444 del_timer_sync(&tgt->retry_timer);
446 PRINT_INFO("Target %p for template %s unregistered successfully",
454 EXPORT_SYMBOL(scst_unregister);
/*
 * scst_susp_wait() - wait until scst_cmd_count drops to zero.
 *
 * Interruptible mode waits with a timeout (SCST_SUSPENDING_TIMEOUT) and
 * resumes activity on failure/timeout; the non-interruptible branch
 * waits unconditionally. The branch selection lines are elided here.
 */
456 static int scst_susp_wait(bool interruptible)
463 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
464 (atomic_read(&scst_cmd_count) == 0),
465 SCST_SUSPENDING_TIMEOUT);
/* Interrupted or timed out: undo the suspend before returning */
467 __scst_resume_activity();
473 wait_event(scst_dev_cmd_waitQ,
474 atomic_read(&scst_cmd_count) == 0);
476 TRACE_MGMT_DBG("wait_event() returned %d", res);
/*
 * scst_suspend_activity() - globally quiesce SCST command processing.
 *
 * Nests via suspend_count (only the first caller actually suspends).
 * Sets SUSPENDING then SUSPENDED flags, waits for already-queued
 * commands to drain in two phases, and clears SUSPENDING once the
 * first wait succeeds. Protected by scst_suspend_mutex; interruptible
 * callers can abort on signal.
 */
482 int scst_suspend_activity(bool interruptible)
490 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
495 mutex_lock(&scst_suspend_mutex);
497 TRACE_MGMT_DBG("suspend_count %d", suspend_count);
/* Nested suspend: someone already did the real work */
499 if (suspend_count > 1)
502 set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
503 set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
/* Make flag updates visible before reading scst_cmd_count */
504 smp_mb__after_set_bit();
507 * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
508 * information about scst_user behavior.
510 * ToDo: make the global suspending unneeded (Switch to per-device
511 * reference counting? That would mean to switch off from lockless
512 * implementation of scst_translate_lun().. )
515 if (atomic_read(&scst_cmd_count) != 0) {
516 PRINT_INFO("Waiting for %d active commands to complete... This "
517 "might take few minutes for disks or few hours for "
518 "tapes, if you use long executed commands, like "
519 "REWIND or FORMAT. In case, if you have a hung user "
520 "space device (i.e. made using scst_user module) not "
521 "responding to any commands, if might take virtually "
522 "forever until the corresponding user space "
523 "program recovers and starts responding or gets "
524 "killed.", atomic_read(&scst_cmd_count));
528 res = scst_susp_wait(interruptible);
/* First drain done: new cmds now blocked, stop advertising SUSPENDING */
532 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
533 smp_mb__after_clear_bit();
535 TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
536 atomic_read(&scst_cmd_count));
/* Second drain catches commands that raced the flag transition */
538 res = scst_susp_wait(interruptible);
543 PRINT_INFO("%s", "All active commands completed");
546 mutex_unlock(&scst_suspend_mutex);
/* Error unwind (label elided): clear SUSPENDING before returning */
553 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
554 smp_mb__after_clear_bit();
557 EXPORT_SYMBOL(scst_suspend_activity);
/*
 * __scst_resume_activity() - undo one level of suspend.
 *
 * Caller holds scst_suspend_mutex. Decrements suspend_count (decrement
 * line elided); only the outermost resume clears SUSPENDED and wakes
 * all per-device and global command-list waiters, the init-cmd thread,
 * and re-activates the first delayed management command, if any.
 */
559 static void __scst_resume_activity(void)
561 struct scst_cmd_lists *l;
566 TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
/* Still suspended by an outer caller: nothing more to do */
567 if (suspend_count > 0)
570 clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
571 smp_mb__after_clear_bit();
/* Wake every registered per-device cmd-list waiter */
573 list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
574 wake_up_all(&l->cmd_list_waitQ);
576 wake_up_all(&scst_init_cmd_list_waitQ);
578 spin_lock_irq(&scst_mcmd_lock);
579 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
580 struct scst_mgmt_cmd *m;
581 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
582 mgmt_cmd_list_entry);
583 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
585 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
587 spin_unlock_irq(&scst_mcmd_lock);
588 wake_up_all(&scst_mgmt_cmd_list_waitQ);
/*
 * scst_resume_activity() - public wrapper around __scst_resume_activity()
 * that takes scst_suspend_mutex. Pairs with scst_suspend_activity().
 */
595 void scst_resume_activity(void)
599 mutex_lock(&scst_suspend_mutex);
600 __scst_resume_activity();
601 mutex_unlock(&scst_suspend_mutex);
606 EXPORT_SYMBOL(scst_resume_activity);
/*
 * scst_register_device() - attach SCST to a newly-appeared scsi_device.
 *
 * With activity suspended and scst_mutex held: allocates a struct
 * scst_device, binds a gendisk to it, links it onto scst_dev_list and
 * assigns the first registered dev handler whose type matches the SCSI
 * device type. Error unwind (labels elided) unlinks/frees in reverse.
 */
608 static int scst_register_device(struct scsi_device *scsidp)
611 struct scst_device *dev;
612 struct scst_dev_type *dt;
616 res = scst_suspend_activity(true);
620 if (mutex_lock_interruptible(&scst_mutex) != 0) {
625 res = scst_alloc_device(GFP_KERNEL, &dev);
629 dev->type = scsidp->type;
/* Single-minor gendisk used as an anchor for block-layer plumbing */
631 dev->rq_disk = alloc_disk(1);
632 if (dev->rq_disk == NULL) {
636 dev->rq_disk->major = SCST_MAJOR;
638 dev->scsi_dev = scsidp;
640 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
/* First type-matching handler wins */
642 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
643 if (dt->type == scsidp->type) {
644 res = scst_assign_dev_handler(dev, dt);
652 mutex_unlock(&scst_mutex);
655 scst_resume_activity();
659 PRINT_INFO("Attached SCSI target mid-level at "
660 "scsi%d, channel %d, id %d, lun %d, type %d",
661 scsidp->host->host_no, scsidp->channel, scsidp->id,
662 scsidp->lun, scsidp->type);
664 PRINT_ERROR("Failed to attach SCSI target mid-level "
665 "at scsi%d, channel %d, id %d, lun %d, type %d",
666 scsidp->host->host_no, scsidp->channel, scsidp->id,
667 scsidp->lun, scsidp->type);
/* Error unwind: unlink, drop disk ref, free the device */
674 list_del(&dev->dev_list_entry);
675 put_disk(dev->rq_disk);
678 scst_free_device(dev);
/*
 * scst_unregister_device() - detach SCST from a departing scsi_device.
 *
 * Looks the device up by its scsi_dev pointer, removes it from all ACGs,
 * hands it to the null handler (detaching the real one), then drops the
 * gendisk reference and frees it. Runs with activity suspended and
 * scst_mutex held.
 */
682 static void scst_unregister_device(struct scsi_device *scsidp)
684 struct scst_device *d, *dev = NULL;
685 struct scst_acg_dev *acg_dev, *aa;
689 scst_suspend_activity(false);
690 mutex_lock(&scst_mutex);
692 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
693 if (d->scsi_dev == scsidp) {
695 TRACE_DBG("Target device %p found", dev);
700 PRINT_ERROR("%s", "Target device not found");
704 list_del(&dev->dev_list_entry);
/* _safe variant: scst_acg_remove_dev() deletes entries as we walk */
706 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
707 dev_acg_dev_list_entry)
709 scst_acg_remove_dev(acg_dev->acg, dev);
/* Swap in the null handler so the real handler's detach runs */
712 scst_assign_dev_handler(dev, &scst_null_devtype);
714 put_disk(dev->rq_disk);
715 scst_free_device(dev);
717 PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
718 "id %d, lun %d, type %d", scsidp->host->host_no,
719 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
722 mutex_unlock(&scst_mutex);
723 scst_resume_activity();
/*
 * scst_dev_handler_check() - validate a dev handler before registration.
 *
 * parse() is mandatory. A missing exec() means commands go through the
 * pass-through path, whose atomicity depends on the SIRQ config option;
 * a missing dev_done() is a no-op and therefore atomic-safe.
 */
729 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
733 if (dev_handler->parse == NULL) {
734 PRINT_ERROR("scst dev_type driver %s doesn't have a "
735 "parse() method.", dev_handler->name);
740 if (dev_handler->exec == NULL) {
741 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
742 dev_handler->exec_atomic = 1;
744 dev_handler->exec_atomic = 0;
748 if (dev_handler->dev_done == NULL)
749 dev_handler->dev_done_atomic = 1;
/*
 * scst_register_virtual_device() - create a virtual (non-SCSI-backed)
 * SCST device for the given handler.
 *
 * Validates arguments and the handler's callbacks, then (with activity
 * suspended and scst_mutex held) allocates the device, assigns it the
 * next virt_id, links it onto scst_dev_list and binds the handler.
 * Returns the virt_id on success (return lines elided); negative on error.
 */
756 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
757 const char *dev_name)
760 struct scst_device *dev = NULL;
764 if (dev_handler == NULL) {
765 PRINT_ERROR("%s: valid device handler must be supplied",
771 if (dev_name == NULL) {
772 PRINT_ERROR("%s: device name must be non-NULL", __func__);
777 res = scst_dev_handler_check(dev_handler);
781 res = scst_suspend_activity(true);
785 if (mutex_lock_interruptible(&scst_mutex) != 0) {
790 res = scst_alloc_device(GFP_KERNEL, &dev);
794 dev->type = dev_handler->type;
795 dev->scsi_dev = NULL;
/* NOTE(review): stores caller's string without copying — caller must
 * keep dev_name alive for the device's lifetime */
796 dev->virt_name = dev_name;
797 dev->virt_id = scst_virt_dev_last_id++;
799 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
803 rc = scst_assign_dev_handler(dev, dev_handler);
810 mutex_unlock(&scst_mutex);
813 scst_resume_activity();
817 PRINT_INFO("Attached SCSI target mid-level to virtual "
818 "device %s (id %d)", dev_name, dev->virt_id);
820 PRINT_INFO("Failed to attach SCSI target mid-level to "
821 "virtual device %s", dev_name);
/* Error unwind: unlink and free the half-built device */
828 list_del(&dev->dev_list_entry);
829 scst_free_device(dev);
832 EXPORT_SYMBOL(scst_register_virtual_device);
/*
 * scst_unregister_virtual_device() - destroy a virtual device by its
 * virt_id (the value returned from scst_register_virtual_device()).
 *
 * Mirrors scst_unregister_device(): remove from ACGs, detach the
 * handler via the null devtype, free. No gendisk is involved for
 * virtual devices.
 */
834 void scst_unregister_virtual_device(int id)
836 struct scst_device *d, *dev = NULL;
837 struct scst_acg_dev *acg_dev, *aa;
841 scst_suspend_activity(false);
842 mutex_lock(&scst_mutex);
844 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
845 if (d->virt_id == id) {
847 TRACE_DBG("Target device %p (id %d) found", dev, id);
852 PRINT_ERROR("Target virtual device (id %d) not found", id);
856 list_del(&dev->dev_list_entry);
858 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
859 dev_acg_dev_list_entry)
861 scst_acg_remove_dev(acg_dev->acg, dev);
864 scst_assign_dev_handler(dev, &scst_null_devtype);
/* Log before freeing — dev fields are still valid here */
866 PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
867 "(id %d)", dev->virt_name, dev->virt_id);
869 scst_free_device(dev);
872 mutex_unlock(&scst_mutex);
873 scst_resume_activity();
878 EXPORT_SYMBOL(scst_unregister_virtual_device);
/*
 * __scst_register_dev_driver() - register a device (dev_type) handler.
 *
 * Checks interface version and callbacks, rejects pass-through handlers
 * when the kernel lacks the exec-req-FIFO patch and strict serializing
 * is off, creates /proc entries, links the handler onto
 * scst_dev_type_list, and re-binds any existing SCSI device of the
 * matching type that currently has only the null handler.
 */
880 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
883 struct scst_dev_type *dt;
884 struct scst_device *dev;
890 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
891 PRINT_ERROR("Incorrect version of dev handler %s",
897 res = scst_dev_handler_check(dev_type);
901 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
902 !defined(CONFIG_SCST_STRICT_SERIALIZING)
903 if (dev_type->exec == NULL) {
904 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
905 "supported. Consider applying on your kernel patch "
906 "scst_exec_req_fifo-<kernel-version>.patch or define "
907 "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
913 res = scst_suspend_activity(true);
917 if (mutex_lock_interruptible(&scst_mutex) != 0) {
/* Duplicate-name check under scst_mutex */
923 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
924 if (strcmp(dt->name, dev_type->name) == 0) {
925 PRINT_ERROR("Device type handler \"%s\" already "
934 res = scst_build_proc_dev_handler_dir_entries(dev_type);
938 list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
/* Adopt devices of this type that are still on the null handler */
940 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
941 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
943 if (dev->scsi_dev->type == dev_type->type)
944 scst_assign_dev_handler(dev, dev_type);
947 mutex_unlock(&scst_mutex);
948 scst_resume_activity();
951 PRINT_INFO("Device handler \"%s\" for type %d registered "
952 "successfully", dev_type->name, dev_type->type);
/* Error unwind (labels elided) */
960 mutex_unlock(&scst_mutex);
963 scst_resume_activity();
966 PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
967 dev_type->name, dev_type->type);
970 EXPORT_SYMBOL(__scst_register_dev_driver);
/*
 * scst_unregister_dev_driver() - unregister a device handler.
 *
 * Verifies the handler is registered, moves every device bound to it
 * onto the null handler (running its detach path), unlinks the handler
 * and removes its /proc entries.
 */
972 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
974 struct scst_device *dev;
975 struct scst_dev_type *dt;
980 scst_suspend_activity(false);
981 mutex_lock(&scst_mutex);
983 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
984 if (strcmp(dt->name, dev_type->name) == 0) {
990 PRINT_ERROR("Dev handler \"%s\" isn't registered",
/* Re-point every device using this handler to the null devtype */
995 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
996 if (dev->handler == dev_type) {
997 scst_assign_dev_handler(dev, &scst_null_devtype);
998 TRACE_DBG("Dev handler removed from device %p", dev);
1002 list_del(&dev_type->dev_type_list_entry);
1004 mutex_unlock(&scst_mutex);
1005 scst_resume_activity();
1007 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1009 PRINT_INFO("Device handler \"%s\" for type %d unloaded",
1010 dev_type->name, dev_type->type);
/* Error-exit path (label elided) */
1017 mutex_unlock(&scst_mutex);
1018 scst_resume_activity();
1021 EXPORT_SYMBOL(scst_unregister_dev_driver);
/*
 * __scst_register_virtual_dev_driver() - register a handler for virtual
 * devices. Unlike real dev handlers it is not linked onto
 * scst_dev_type_list here (visible code only builds /proc entries);
 * type may be -1 for handlers serving multiple device types.
 */
1023 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1024 const char *version)
1030 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1031 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1037 res = scst_dev_handler_check(dev_type);
1041 if (!dev_type->no_proc) {
1042 res = scst_build_proc_dev_handler_dir_entries(dev_type);
1047 if (dev_type->type != -1) {
1048 PRINT_INFO("Virtual device handler %s for type %d "
1049 "registered successfully", dev_type->name,
1052 PRINT_INFO("Virtual device handler \"%s\" registered "
1053 "successfully", dev_type->name);
1057 TRACE_EXIT_RES(res);
1061 PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1065 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
/*
 * scst_unregister_virtual_dev_driver() - counterpart of
 * __scst_register_virtual_dev_driver(): removes the handler's /proc
 * entries (if it had any).
 */
1067 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1071 if (!dev_type->no_proc)
1072 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1074 PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1079 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1081 /* Called under scst_mutex */
/*
 * scst_add_dev_threads() - spawn 'num' per-device command threads.
 *
 * Threads are created suspended with kthread_create(), named
 * "<handler>d<devnum>_<n>", linked onto dev->threads_list, given a
 * shared io_context (to keep CFQ happy — see comment below), then
 * woken. On kthread_create() failure, unwinding is handled by the
 * (elided) error path.
 */
1082 int scst_add_dev_threads(struct scst_device *dev, int num)
1086 struct scst_cmd_thread_t *thr;
1087 struct io_context *ioc = NULL;
/* Elided loop body presumably counts existing threads — confirm */
1092 list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1096 for (i = 0; i < num; i++) {
1097 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1100 PRINT_ERROR("Failed to allocate thr %d", res);
/* Bounded copy of handler name; explicit NUL since strncpy may not add one */
1103 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1104 nm[ARRAY_SIZE(nm)-1] = '\0';
1105 thr->cmd_thread = kthread_create(scst_cmd_thread,
1106 &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1107 if (IS_ERR(thr->cmd_thread)) {
1108 res = PTR_ERR(thr->cmd_thread);
1109 PRINT_ERROR("kthread_create() failed: %d", res);
1114 list_add(&thr->thread_list_entry, &dev->threads_list);
1116 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1117 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1119 * It would be better to keep io_context in tgt_dev and
1120 * dynamically assign it to the current thread on the IO
1121 * submission time to let each initiator have own
1122 * io_context. But, unfortunately, CFQ doesn't
1123 * support if a task has dynamically switched
1124 * io_context, it oopses on BUG_ON(!cic->dead_key) in
1125 * cic_free_func(). So, we have to have the same io_context
1126 * for all initiators.
1129 ioc = alloc_io_context(GFP_KERNEL, -1);
1130 TRACE_DBG("ioc %p (thr %d)", ioc, thr->cmd_thread->pid);
/* Replace the thread's default io_context with the shared one */
1133 put_io_context(thr->cmd_thread->io_context);
1134 thr->cmd_thread->io_context = ioc_task_link(ioc);
1135 TRACE_DBG("Setting ioc %p on thr %d", ioc,
1136 thr->cmd_thread->pid);
1139 wake_up_process(thr->cmd_thread);
/* Drop our local reference; each thread holds its own */
1143 put_io_context(ioc);
1145 TRACE_EXIT_RES(res);
1149 /* Called under scst_mutex and suspended activity */
/*
 * scst_create_dev_threads() - give a device its own thread pool when its
 * handler requests one (threads_num > 0).
 *
 * Initializes the device's private cmd_lists, spawns the threads,
 * registers the list on scst_cmd_lists_list (so resume can wake it) and
 * points p_cmd_lists at the private list instead of the global one.
 */
1150 static int scst_create_dev_threads(struct scst_device *dev)
/* Handler doesn't want per-device threads: keep the global pool */
1157 if (dev->handler->threads_num <= 0)
1160 threads_num = dev->handler->threads_num;
1162 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1163 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1164 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1166 res = scst_add_dev_threads(dev, threads_num);
/* Make this list visible to __scst_resume_activity()'s wakeups */
1170 mutex_lock(&scst_suspend_mutex);
1171 list_add_tail(&dev->cmd_lists.lists_list_entry,
1172 &scst_cmd_lists_list);
1173 mutex_unlock(&scst_suspend_mutex);
1175 dev->p_cmd_lists = &dev->cmd_lists;
1178 TRACE_EXIT_RES(res);
1182 /* Called under scst_mutex */
/*
 * scst_del_dev_threads() - stop up to 'num' of a device's threads;
 * num < 0 means stop them all. kthread_stop() failures are only logged.
 */
1183 void scst_del_dev_threads(struct scst_device *dev, int num)
1185 struct scst_cmd_thread_t *ct, *tmp;
1190 list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1191 thread_list_entry) {
1192 int rc = kthread_stop(ct->cmd_thread);
1194 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1195 list_del(&ct->thread_list_entry);
/* Stop early once 'num' threads have been removed (num > 0 only) */
1197 if ((num > 0) && (++i >= num))
1205 /* Called under scst_mutex and suspended activity */
/*
 * scst_stop_dev_threads() - inverse of scst_create_dev_threads(): stop
 * all per-device threads and, if the device owned a private cmd_lists,
 * unregister it from scst_cmd_lists_list.
 */
1206 static void scst_stop_dev_threads(struct scst_device *dev)
1210 if (list_empty(&dev->threads_list))
1213 scst_del_dev_threads(dev, -1);
1215 if (dev->p_cmd_lists == &dev->cmd_lists) {
1216 mutex_lock(&scst_suspend_mutex);
1217 list_del(&dev->cmd_lists.lists_list_entry);
1218 mutex_unlock(&scst_suspend_mutex);
1226 /* The activity supposed to be suspended and scst_mutex held */
/*
 * scst_assign_dev_handler() - switch a device to a new dev handler.
 *
 * Sequence: detach_tgt() for every tgt_dev and detach() on the old
 * handler, stop old per-device threads, install the new handler, start
 * its threads, attach() the device and attach_tgt() each tgt_dev. On
 * any attach failure the already-attached tgt_devs (tracked on
 * attached_tgt_devs) are detached again and the device falls back to
 * scst_null_devtype.
 */
1227 int scst_assign_dev_handler(struct scst_device *dev,
1228 struct scst_dev_type *handler)
1231 struct scst_tgt_dev *tgt_dev;
1232 LIST_HEAD(attached_tgt_devs);
1236 sBUG_ON(handler == NULL);
/* No-op when the handler is unchanged */
1238 if (dev->handler == handler)
1241 if (dev->handler && dev->handler->detach_tgt) {
1242 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1243 dev_tgt_dev_list_entry) {
1244 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1246 dev->handler->detach_tgt(tgt_dev);
1247 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1251 if (dev->handler && dev->handler->detach) {
1252 TRACE_DBG("%s", "Calling dev handler's detach()");
1253 dev->handler->detach(dev);
1254 TRACE_DBG("%s", "Old handler's detach() returned");
1257 scst_stop_dev_threads(dev);
1259 dev->handler = handler;
1262 res = scst_create_dev_threads(dev);
1267 if (handler && handler->attach) {
1268 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1269 res = handler->attach(dev);
1270 TRACE_DBG("New dev handler's attach() returned %d", res);
1272 PRINT_ERROR("New device handler's %s attach() "
1273 "failed: %d", handler->name, res);
1278 if (handler && handler->attach_tgt) {
1279 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1280 dev_tgt_dev_list_entry) {
1281 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1283 res = handler->attach_tgt(tgt_dev);
1284 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1286 PRINT_ERROR("Device handler's %s attach_tgt() "
1287 "failed: %d", handler->name, res);
1288 goto out_err_detach_tgt;
/* Remember what attached so partial failure can be unwound */
1290 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1291 &attached_tgt_devs);
/* Error path: stop threads and fall back to the null handler */
1297 scst_stop_dev_threads(dev);
1301 dev->handler = &scst_null_devtype;
1304 TRACE_EXIT_RES(res);
/* out_err_detach_tgt: undo the attach_tgt() calls done above */
1308 if (handler && handler->detach_tgt) {
1309 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1310 extra_tgt_dev_list_entry)
1312 TRACE_DBG("Calling handler's detach_tgt(%p)",
1314 handler->detach_tgt(tgt_dev);
1315 TRACE_DBG("%s", "Handler's detach_tgt() returned");
1318 if (handler && handler->detach) {
1319 TRACE_DBG("%s", "Calling handler's detach()");
1320 handler->detach(dev);
1321 TRACE_DBG("%s", "Handler's detach() returned");
/*
 * scst_cmd_threads_count() - snapshot of the global cmd-thread count.
 * Taken under cmd_threads_mutex only to narrow (not eliminate) the race
 * with concurrent add/del, per the original comment.
 */
1326 int scst_cmd_threads_count(void)
1331 * Just to lower the race window, when user can get just changed value
1333 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1334 i = scst_threads_info.nr_cmd_threads;
1335 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
/*
 * scst_threads_info_init() - zero the global thread bookkeeping and set
 * up its mutex and thread list. Called once at module init.
 */
1339 static void scst_threads_info_init(void)
1341 memset(&scst_threads_info, 0, sizeof(scst_threads_info));
1342 mutex_init(&scst_threads_info.cmd_threads_mutex);
1343 INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
1346 /* scst_threads_info.cmd_threads_mutex supposed to be held */
/*
 * __scst_del_cmd_threads() - stop 'num' global command threads.
 * Rejects num outside (0, nr_cmd_threads]; kthread_stop() failures are
 * only logged. The loop-termination condition is elided in this dump.
 */
1347 void __scst_del_cmd_threads(int num)
1349 struct scst_cmd_thread_t *ct, *tmp;
1354 i = scst_threads_info.nr_cmd_threads;
1355 if (num <= 0 || num > i) {
1356 PRINT_ERROR("can not del %d cmd threads from %d", num, i);
1360 list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1361 thread_list_entry) {
1364 res = kthread_stop(ct->cmd_thread);
1366 TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1367 list_del(&ct->thread_list_entry);
1369 scst_threads_info.nr_cmd_threads--;
1379 /* scst_threads_info.cmd_threads_mutex supposed to be held */
/*
 * __scst_add_cmd_threads() - spawn 'num' global command threads
 * ("scsi_tgt%d") serving scst_main_cmd_lists.
 *
 * All threads share one io_context (scst_ioc) for the same CFQ reason
 * documented in scst_add_dev_threads(). On failure, already-created
 * threads are torn down via __scst_del_cmd_threads(i - 1) — note the
 * current iteration's thread was not linked, hence the "- 1".
 */
1380 int __scst_add_cmd_threads(int num)
/* Monotonic suffix for thread names across all calls */
1383 static int scst_thread_num;
1387 for (i = 0; i < num; i++) {
1388 struct scst_cmd_thread_t *thr;
1390 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1393 PRINT_ERROR("fail to allocate thr %d", res);
1396 thr->cmd_thread = kthread_create(scst_cmd_thread,
1397 &scst_main_cmd_lists, "scsi_tgt%d",
1399 if (IS_ERR(thr->cmd_thread)) {
1400 res = PTR_ERR(thr->cmd_thread);
1401 PRINT_ERROR("kthread_create() failed: %d", res);
1406 list_add(&thr->thread_list_entry,
1407 &scst_threads_info.cmd_threads_list);
1408 scst_threads_info.nr_cmd_threads++;
1410 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1411 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1412 /* See comment in scst_add_dev_threads() */
/* Lazily allocate the single shared io_context */
1413 if (scst_ioc == NULL) {
1414 scst_ioc = alloc_io_context(GFP_KERNEL, -1);
1415 TRACE_DBG("scst_ioc %p (thr %d)", scst_ioc,
1416 thr->cmd_thread->pid);
1419 put_io_context(thr->cmd_thread->io_context);
1420 thr->cmd_thread->io_context = ioc_task_link(scst_ioc);
1421 TRACE_DBG("Setting scst_ioc %p on thr %d",
1422 scst_ioc, thr->cmd_thread->pid);
1425 wake_up_process(thr->cmd_thread);
1430 TRACE_EXIT_RES(res);
/* Error unwind: remove the threads created so far */
1435 __scst_del_cmd_threads(i - 1);
/*
 * scst_add_cmd_threads() - public, locking wrapper around
 * __scst_add_cmd_threads().
 */
1439 int scst_add_cmd_threads(int num)
1445 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1446 res = __scst_add_cmd_threads(num);
1447 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1449 TRACE_EXIT_RES(res);
1452 EXPORT_SYMBOL(scst_add_cmd_threads);
/*
 * scst_del_cmd_threads() - public, locking wrapper around
 * __scst_del_cmd_threads().
 */
1454 void scst_del_cmd_threads(int num)
1458 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1459 __scst_del_cmd_threads(num);
1460 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1465 EXPORT_SYMBOL(scst_del_cmd_threads);
/*
 * scst_stop_all_threads() - module-teardown helper: stop every global
 * cmd thread plus the mgmt-cmd, mgmt and init-cmd service threads
 * (each guarded by a NULL check in case startup failed part-way).
 */
1467 static void scst_stop_all_threads(void)
1471 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1472 __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1473 if (scst_threads_info.mgmt_cmd_thread)
1474 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1475 if (scst_threads_info.mgmt_thread)
1476 kthread_stop(scst_threads_info.mgmt_thread);
1477 if (scst_threads_info.init_cmd_thread)
1478 kthread_stop(scst_threads_info.init_cmd_thread);
1479 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
/*
 * scst_start_all_threads() - module-startup helper: spawn 'num' global
 * cmd threads, then the init-cmd, mgmt-cmd and mgmt service threads
 * via kthread_run(). Each pointer is reset to NULL on failure so
 * scst_stop_all_threads() can safely skip it.
 */
1485 static int scst_start_all_threads(int num)
1491 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1492 res = __scst_add_cmd_threads(num);
1496 scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
1497 NULL, "scsi_tgt_init");
1498 if (IS_ERR(scst_threads_info.init_cmd_thread)) {
1499 res = PTR_ERR(scst_threads_info.init_cmd_thread);
1500 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1501 scst_threads_info.init_cmd_thread = NULL;
1505 scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
1506 NULL, "scsi_tgt_mc");
1507 if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
1508 res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
1509 PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
1510 scst_threads_info.mgmt_cmd_thread = NULL;
1514 scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
1515 NULL, "scsi_tgt_mgmt");
1516 if (IS_ERR(scst_threads_info.mgmt_thread)) {
1517 res = PTR_ERR(scst_threads_info.mgmt_thread);
1518 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1519 scst_threads_info.mgmt_thread = NULL;
1524 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1525 TRACE_EXIT_RES(res);
1533 EXPORT_SYMBOL(scst_get);
1539 EXPORT_SYMBOL(scst_put);
/*
 * scst_add() - class_interface add callback: a SCSI device appeared.
 * Pre-2.6.26 kernels pass a class_device, later ones a device — hence
 * the dual prototypes and the differing container lookups below.
 * Devices exported by scst_local are skipped to avoid a loop.
 */
1541 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1542 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1544 static int scst_add(struct device *cdev, struct class_interface *intf)
1547 struct scsi_device *scsidp;
1552 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1553 scsidp = to_scsi_device(cdev->dev);
1555 scsidp = to_scsi_device(cdev->parent);
1558 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1559 res = scst_register_device(scsidp);
/*
 * class_interface "remove" callback — the mirror of scst_add(): when the
 * SCSI midlayer drops a device, unregister it from SCST, again skipping
 * devices whose host template is SCST_LOCAL_NAME. Same pre-/post-2.6.26
 * prototype split as scst_add().
 *
 * NOTE(review): #else/#endif lines and braces are missing from this
 * mangled listing; code kept verbatim.
 */
1565 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1566 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1568 static void scst_remove(struct device *cdev, struct class_interface *intf)
1571 struct scsi_device *scsidp;
1575 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1576 scsidp = to_scsi_device(cdev->dev);
1578 scsidp = to_scsi_device(cdev->parent);
1581 if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1582 scst_unregister_device(scsidp);
/*
 * SCSI class interface hooking scst_add()/scst_remove() into the
 * midlayer's device add/remove notifications. Field names changed in
 * 2.6.26 from .add/.remove to .add_dev/.remove_dev.
 *
 * NOTE(review): the `.add = scst_add,` line of the pre-2.6.26 branch and
 * the #else/#endif lines were lost in this mangled listing.
 */
1588 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1589 static struct class_interface scst_interface = {
1591 .remove = scst_remove,
1594 static struct class_interface scst_interface = {
1595 .add_dev = scst_add,
1596 .remove_dev = scst_remove,
/*
 * Log the compile-time feature set at module load. Builds an
 * "Enabled features: ..." line in a local buffer, appending one token
 * per enabled CONFIG_SCST_* option; the `(j == i) ? "" : ", "` idiom
 * prepends a comma-separator only after the first appended feature
 * (j presumably snapshots i right after the prefix snprintf — that
 * assignment line is missing from this mangled listing).
 */
1600 static void __init scst_print_config(void)
1605 i = snprintf(buf, sizeof(buf), "Enabled features: ");
1608 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1609 i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1612 #ifdef CONFIG_SCST_EXTRACHECKS
1613 i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1614 (j == i) ? "" : ", ");
1617 #ifdef CONFIG_SCST_TRACING
1618 i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1619 (j == i) ? "" : ", ");
1622 #ifdef CONFIG_SCST_DEBUG
1623 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1624 (j == i) ? "" : ", ");
1627 #ifdef CONFIG_SCST_DEBUG_TM
1628 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1629 (j == i) ? "" : ", ");
1632 #ifdef CONFIG_SCST_DEBUG_RETRY
1633 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1634 (j == i) ? "" : ", ");
1637 #ifdef CONFIG_SCST_DEBUG_OOM
1638 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1639 (j == i) ? "" : ", ");
1642 #ifdef CONFIG_SCST_DEBUG_SN
1643 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1644 (j == i) ? "" : ", ");
1647 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1648 i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1649 (j == i) ? "" : ", ");
1652 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1653 i += snprintf(&buf[i], sizeof(buf) - i,
1654 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1655 (j == i) ? "" : ", ");
1658 #ifdef CONFIG_SCST_STRICT_SECURITY
1659 i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1660 (j == i) ? "" : ", ");
/* Print the assembled line (the guard that skips printing when no
 * feature was appended is presumably on a missing line). */
1664 PRINT_INFO("%s", buf);
/*
 * Module init: build-time sanity checks, global state setup, slab caches
 * and mempools, SGV pool init, default access-control group, SCSI class
 * interface registration, per-CPU tasklets, kernel threads, and /proc
 * entries. Unwinds in reverse order through the goto-label chain at the
 * bottom on any failure.
 *
 * NOTE(review): braces, blank lines, several `if (res != 0) goto ...`
 * guards and #else/#endif lines are missing from this mangled listing;
 * code kept verbatim.
 */
1667 static int __init init_scst(void)
/* Compile-time checks: SCST's sense buffer and SN fields must match the
 * kernel's SCSI/DMA definitions so values can be passed through 1:1. */
1674 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1676 struct scsi_request *req;
1677 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1678 sizeof(req->sr_sense_buffer));
1682 struct scsi_sense_hdr *shdr;
1683 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1687 struct scst_tgt_dev *t;
1689 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1690 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
/* SCST data directions must alias the kernel's DMA direction values. */
1693 BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
1694 BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
1695 BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
1696 BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
1698 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1699 #if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1700 PRINT_WARNING("%s", "Patch export_alloc_io_context was not applied on "
1701 "your kernel. SCST will be working with not the best "
/* Initialize all global locks, lists and waitqueues. */
1706 mutex_init(&scst_mutex);
1707 INIT_LIST_HEAD(&scst_template_list);
1708 INIT_LIST_HEAD(&scst_dev_list);
1709 INIT_LIST_HEAD(&scst_dev_type_list);
1710 spin_lock_init(&scst_main_lock);
1711 INIT_LIST_HEAD(&scst_acg_list);
1712 spin_lock_init(&scst_init_lock);
1713 init_waitqueue_head(&scst_init_cmd_list_waitQ);
1714 INIT_LIST_HEAD(&scst_init_cmd_list);
1715 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1716 scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1718 atomic_set(&scst_cmd_count, 0);
1719 spin_lock_init(&scst_mcmd_lock);
1720 INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1721 INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1722 init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1723 init_waitqueue_head(&scst_mgmt_waitQ);
1724 spin_lock_init(&scst_mgmt_lock);
1725 INIT_LIST_HEAD(&scst_sess_init_list);
1726 INIT_LIST_HEAD(&scst_sess_shut_list);
1727 init_waitqueue_head(&scst_dev_cmd_waitQ);
1728 mutex_init(&scst_suspend_mutex);
1729 INIT_LIST_HEAD(&scst_cmd_lists_list);
1730 scst_virt_dev_last_id = 1;
1731 spin_lock_init(&scst_temp_UA_lock);
/* The main (default) command-lists instance goes first on the list. */
1733 spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1734 INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1735 init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1736 list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1737 &scst_cmd_lists_list);
1739 scst_num_cpus = num_online_cpus();
1741 /* ToDo: register_cpu_notifier() */
/* Thread count: module parameter, defaulting (and clamping) to one
 * thread per online CPU. */
1743 if (scst_threads == 0)
1744 scst_threads = scst_num_cpus;
1746 if (scst_threads < 1) {
1747 PRINT_ERROR("%s", "scst_threads can not be less than 1");
1748 scst_threads = scst_num_cpus;
1751 scst_threads_info_init();
/* Helper: create a slab cache for struct `s`, storing it in `p`, jumping
 * to label `o` on failure (the failure check line is not visible here). */
1753 #define INIT_CACHEP(p, s, o) do { \
1754 p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
1755 TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
1756 sizeof(struct s)); \
1763 INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1764 INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1765 out_destroy_mgmt_cache);
1766 INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1767 out_destroy_mgmt_stub_cache);
1769 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1770 INIT_CACHEP(scst_sense_cachep, scst_sense,
1771 out_destroy_ua_cache);
1773 INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
1774 INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1775 INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1776 INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
/* Mempools guarantee forward progress for mgmt commands and sense data
 * even under memory pressure. */
1778 scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1779 mempool_free_slab, scst_mgmt_cachep);
1780 if (scst_mgmt_mempool == NULL) {
1782 goto out_destroy_acg_cache;
1785 scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1786 mempool_free_slab, scst_mgmt_stub_cachep);
1787 if (scst_mgmt_stub_mempool == NULL) {
1789 goto out_destroy_mgmt_mempool;
1792 scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
1793 mempool_free_slab, scst_ua_cachep);
1794 if (scst_ua_mempool == NULL) {
1796 goto out_destroy_mgmt_stub_mempool;
1800 * Loosing sense may have fatal consequences, so let's have a big pool
1802 scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
1803 mempool_free_slab, scst_sense_cachep);
1804 if (scst_sense_mempool == NULL) {
1806 goto out_destroy_ua_mempool;
/* Default command-memory limit: a quarter of total RAM in MB; the 32-bit
 * build additionally clamps it (the min() second argument is on a
 * missing line). */
1809 if (scst_max_cmd_mem == 0) {
1812 #if BITS_PER_LONG == 32
1813 scst_max_cmd_mem = min(
1814 (((uint64_t)si.totalram << PAGE_SHIFT) >> 20) >> 2,
1817 scst_max_cmd_mem = ((si.totalram << PAGE_SHIFT) >> 20) >> 2;
/* Per-device limit must not exceed the global one; default is 2/5 of it. */
1821 if (scst_max_dev_cmd_mem != 0) {
1822 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1823 PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1824 "scst_max_cmd_mem (%d)",
1825 scst_max_dev_cmd_mem,
1827 scst_max_dev_cmd_mem = scst_max_cmd_mem;
1830 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
/* SGV pools sized in pages: MB -> KB -> pages. */
1832 res = scst_sgv_pools_init(
1833 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1835 goto out_destroy_sense_mempool;
1837 scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1838 if (scst_default_acg == NULL) {
1840 goto out_destroy_sgv_pool;
/* Hook into the SCSI midlayer's device add/remove notifications. */
1843 res = scsi_register_interface(&scst_interface);
1847 scst_scsi_op_list_init();
/* One tasklet (with its own lock and cmd list) per entry, each bound to
 * its scst_tasklets[] slot via the tasklet data argument. */
1849 for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1850 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1851 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1852 tasklet_init(&scst_tasklets[i].tasklet,
1853 (void *)scst_cmd_tasklet,
1854 (unsigned long)&scst_tasklets[i]);
1857 TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1860 res = scst_start_all_threads(scst_threads);
1862 goto out_thread_free;
1864 res = scst_proc_init_module();
1866 goto out_thread_free;
1869 PRINT_INFO("SCST version %s loaded successfully (max mem for "
1870 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1871 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1873 scst_print_config();
1876 TRACE_EXIT_RES(res);
/* Error unwind: each label undoes exactly the steps completed before its
 * corresponding failure point, in reverse order of initialization. */
1880 scst_stop_all_threads();
1882 scsi_unregister_interface(&scst_interface);
1885 scst_destroy_acg(scst_default_acg);
1887 out_destroy_sgv_pool:
1888 scst_sgv_pools_deinit();
1890 out_destroy_sense_mempool:
1891 mempool_destroy(scst_sense_mempool);
1893 out_destroy_ua_mempool:
1894 mempool_destroy(scst_ua_mempool);
1896 out_destroy_mgmt_stub_mempool:
1897 mempool_destroy(scst_mgmt_stub_mempool);
1899 out_destroy_mgmt_mempool:
1900 mempool_destroy(scst_mgmt_mempool);
1902 out_destroy_acg_cache:
1903 kmem_cache_destroy(scst_acgd_cachep);
1905 out_destroy_tgt_cache:
1906 kmem_cache_destroy(scst_tgtd_cachep);
1908 out_destroy_sess_cache:
1909 kmem_cache_destroy(scst_sess_cachep);
1911 out_destroy_cmd_cache:
1912 kmem_cache_destroy(scst_cmd_cachep);
1914 out_destroy_sense_cache:
1915 kmem_cache_destroy(scst_sense_cachep);
1917 out_destroy_ua_cache:
1918 kmem_cache_destroy(scst_ua_cachep);
1920 out_destroy_mgmt_stub_cache:
1921 kmem_cache_destroy(scst_mgmt_stub_cachep);
1923 out_destroy_mgmt_cache:
1924 kmem_cache_destroy(scst_mgmt_cachep);
/*
 * Module exit: tear everything down in reverse order of init_scst() —
 * /proc entries, threads, SCSI interface, default ACG, SGV pools,
 * mempools, slab caches, and (on >= 2.6.25 with the io_context patch)
 * the global io_context reference.
 *
 * NOTE(review): braces and blank lines are missing from this mangled
 * listing; code kept verbatim.
 */
1928 static void __exit exit_scst(void)
1932 /* ToDo: unregister_cpu_notifier() */
1934 scst_proc_cleanup_module();
1936 scst_stop_all_threads();
1938 scsi_unregister_interface(&scst_interface);
1939 scst_destroy_acg(scst_default_acg);
1941 scst_sgv_pools_deinit();
/* Destroy a slab cache (presumably with trace logging on a missing
 * line, matching INIT_CACHEP in init_scst()). */
1943 #define DEINIT_CACHEP(p) do { \
1944 kmem_cache_destroy(p); \
1948 mempool_destroy(scst_mgmt_mempool);
1949 mempool_destroy(scst_mgmt_stub_mempool);
1950 mempool_destroy(scst_ua_mempool);
1951 mempool_destroy(scst_sense_mempool);
1953 DEINIT_CACHEP(scst_mgmt_cachep);
1954 DEINIT_CACHEP(scst_mgmt_stub_cachep);
1955 DEINIT_CACHEP(scst_ua_cachep);
1956 DEINIT_CACHEP(scst_sense_cachep);
1957 DEINIT_CACHEP(scst_cmd_cachep);
1958 DEINIT_CACHEP(scst_sess_cachep);
1959 DEINIT_CACHEP(scst_tgtd_cachep);
1960 DEINIT_CACHEP(scst_acgd_cachep);
1962 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1963 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1964 put_io_context(scst_ioc);
1968 PRINT_INFO("%s", "SCST unloaded");
/* Module entry/exit registration and metadata. */
1975 module_init(init_scst);
1976 module_exit(exit_scst);
1978 MODULE_AUTHOR("Vladislav Bolkhovitin");
1979 MODULE_LICENSE("GPL");
1980 MODULE_DESCRIPTION("SCSI target core");
1981 MODULE_VERSION(SCST_VERSION_STRING);