4 * Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, version 2
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/list.h>
24 #include <linux/spinlock.h>
25 #include <linux/slab.h>
26 #include <linux/sched.h>
27 #include <linux/unistd.h>
28 #include <linux/string.h>
29 #include <linux/kthread.h>
32 #include "scst_priv.h"
35 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
36 #warning "HIGHMEM kernel configurations are fully supported, but not \
37 recommended for performance reasons. Consider change VMSPLIT \
38 option or use 64-bit configuration instead. See README file for \
43 #error "SCST_HIGHMEM configuration isn't supported and broken, because there \
44 is no real point to support it, at least it definitely doesn't worth \
45 the effort. Better use no-HIGHMEM kernel with VMSPLIT option \
46 or in 64-bit configuration instead. See README file for details."
49 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && !defined(STRICT_SERIALIZING)
50 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied on \
51 your kernel and STRICT_SERIALIZING isn't defined. Pass-through dev \
52 handlers will not be supported."
56 ** SCST global variables. They are all uninitialized to have their layout in
57 ** memory be exactly as specified. Otherwise compiler puts zero-initialized
58 ** variable separately from nonzero-initialized ones.
62 * All targets, devices and dev_types management is done under this mutex.
64 * It must NOT be used in any works (schedule_work(), etc.), because
65 * otherwise a deadlock (double lock, actually) is possible, e.g., with
66 * scst_user detach_tgt(), which is called under scst_mutex and calls
67 * flush_scheduled_work().
69 struct mutex scst_mutex;
/* Registries of target templates, devices and device-type handlers;
 * all three lists are walked/modified under scst_mutex (see users below). */
71 struct list_head scst_template_list;
72 struct list_head scst_dev_list;
73 struct list_head scst_dev_type_list;
75 spinlock_t scst_main_lock;
/* Slab caches and their mempools for SCST objects; created in init_scst()
 * (see the INIT_CACHEP()/mempool_create() calls there). */
77 struct kmem_cache *scst_mgmt_cachep;
78 mempool_t *scst_mgmt_mempool;
79 struct kmem_cache *scst_mgmt_stub_cachep;
80 mempool_t *scst_mgmt_stub_mempool;
81 struct kmem_cache *scst_ua_cachep;
82 mempool_t *scst_ua_mempool;
83 struct kmem_cache *scst_sense_cachep;
84 mempool_t *scst_sense_mempool;
85 struct kmem_cache *scst_tgtd_cachep;
86 struct kmem_cache *scst_sess_cachep;
87 struct kmem_cache *scst_acgd_cachep;
/* Access-control groups; scst_default_acg is allocated in init_scst(). */
89 struct list_head scst_acg_list;
90 struct scst_acg *scst_default_acg;
/* State for the init-cmd thread: its lock, wait queue and work list. */
92 spinlock_t scst_init_lock;
93 wait_queue_head_t scst_init_cmd_list_waitQ;
94 struct list_head scst_init_cmd_list;
95 unsigned int scst_init_poll_cnt;
97 struct kmem_cache *scst_cmd_cachep;
99 #if defined(DEBUG) || defined(TRACING)
100 unsigned long scst_trace_flag;
/* scst_cmd_count is the number of in-flight commands; suspend waits for
 * it to drop to zero (see scst_suspend_activity()). */
103 unsigned long scst_flags;
104 atomic_t scst_cmd_count;
/* Accounting of memory consumed by commands; the limit scst_max_cmd_mem
 * is settable as a module parameter (in MB, converted in init_scst()). */
106 spinlock_t scst_cmd_mem_lock;
107 unsigned long scst_cur_cmd_mem, scst_cur_max_cmd_mem;
108 unsigned long scst_max_cmd_mem;
110 struct scst_cmd_lists scst_main_cmd_lists;
112 struct scst_tasklet scst_tasklets[NR_CPUS];
/* Management-command queues: active and delayed lists under scst_mcmd_lock. */
114 spinlock_t scst_mcmd_lock;
115 struct list_head scst_active_mgmt_cmd_list;
116 struct list_head scst_delayed_mgmt_cmd_list;
117 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
/* Session init/shutdown work for the mgmt thread, under scst_mgmt_lock. */
119 wait_queue_head_t scst_mgmt_waitQ;
120 spinlock_t scst_mgmt_lock;
121 struct list_head scst_sess_init_list;
122 struct list_head scst_sess_shut_list;
124 wait_queue_head_t scst_dev_cmd_waitQ;
/* scst_suspend_mutex serializes suspend/resume and protects
 * scst_cmd_lists_list (see scst_suspend_activity()/scst_resume_activity()). */
126 struct mutex scst_suspend_mutex;
127 struct list_head scst_cmd_lists_list;
/* Number of command threads (module parameter; 0 = one per online CPU,
 * see init_scst()). */
129 static int scst_threads;
130 struct scst_threads_info_t scst_threads_info;
/* Nesting depth of scst_suspend_activity() calls, under scst_suspend_mutex. */
132 static int suspend_count;
134 static int scst_virt_dev_last_id; /* protected by scst_mutex */
137 * This buffer and lock are intended to avoid memory allocation, which
138 * could fail in improper places.
140 spinlock_t scst_temp_UA_lock;
141 uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
143 module_param_named(scst_threads, scst_threads, int, 0);
144 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
146 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, long, 0);
147 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
148 "the SCST commands at any given time in MB");
/* Placeholder handler assigned to devices that have no real handler
 * (used by scst_assign_dev_handler() and the unregister paths). */
150 struct scst_dev_type scst_null_devtype = {
/*
 * Register a target driver template: validates the interface version and
 * the mandatory callbacks (detect(), release(), xmit_response()), creates
 * the /proc directory entries, checks name uniqueness against
 * scst_template_list under scst_mutex, calls the driver's detect() and
 * finally links the template into scst_template_list.
 * NOTE(review): some lines of this function are elided in this listing
 * (error labels, returns) — the failure paths are not fully visible here.
 */
154 int __scst_register_target_template(struct scst_tgt_template *vtt,
158 struct scst_tgt_template *t;
/* Local static mutex serializes concurrent registrations of templates. */
159 static DEFINE_MUTEX(m);
163 INIT_LIST_HEAD(&vtt->tgt_list);
165 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
166 PRINT_ERROR("Incorrect version of target %s", vtt->name);
172 PRINT_ERROR("Target driver %s doesn't have a "
173 "detect() method.", vtt->name);
179 PRINT_ERROR("Target driver %s doesn't have a "
180 "release() method.", vtt->name);
185 if (!vtt->xmit_response) {
186 PRINT_ERROR("Target driver %s doesn't have a "
187 "xmit_response() method.", vtt->name);
192 if (vtt->threads_num < 0) {
193 PRINT_ERROR("Wrong threads_num value %d for "
194 "target \"%s\"", vtt->threads_num,
200 if (!vtt->no_proc_entry) {
201 res = scst_build_proc_target_dir_entries(vtt);
/* Both locks are taken interruptibly so a signal can abort registration. */
206 if (mutex_lock_interruptible(&m) != 0)
209 if (mutex_lock_interruptible(&scst_mutex) != 0)
/* Reject duplicate template names. */
211 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
212 if (strcmp(t->name, vtt->name) == 0) {
213 PRINT_ERROR("Target driver %s already registered",
215 mutex_unlock(&scst_mutex);
219 mutex_unlock(&scst_mutex);
/* detect() is invoked outside scst_mutex. */
221 TRACE_DBG("%s", "Calling target driver's detect()");
222 res = vtt->detect(vtt);
223 TRACE_DBG("Target driver's detect() returned %d", res);
225 PRINT_ERROR("%s", "The detect() routine failed");
230 mutex_lock(&scst_mutex);
231 list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
232 mutex_unlock(&scst_mutex);
236 PRINT_INFO("Target template %s registered successfully", vtt->name);
/* Error path: undo the /proc entries created above. */
245 scst_cleanup_proc_target_dir_entries(vtt);
251 PRINT_ERROR("Failed to register target template %s", vtt->name);
/*
 * Unregister a target template: looks it up by name in scst_template_list,
 * unregisters every target still attached to it, removes it from the list
 * and cleans its /proc entries.
 */
255 void scst_unregister_target_template(struct scst_tgt_template *vtt)
257 struct scst_tgt *tgt;
258 struct scst_tgt_template *t;
263 mutex_lock(&scst_mutex);
265 list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
266 if (strcmp(t->name, vtt->name) == 0) {
272 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
/* scst_mutex is dropped around scst_unregister(), which takes it itself;
 * the list is re-examined after each iteration's re-lock. */
277 list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
278 mutex_unlock(&scst_mutex);
279 scst_unregister(tgt);
280 mutex_lock(&scst_mutex);
283 list_del(&vtt->scst_template_list_entry);
285 PRINT_INFO("Target template %s unregistered successfully", vtt->name);
288 mutex_unlock(&scst_mutex);
290 scst_cleanup_proc_target_dir_entries(vtt);
/*
 * Register a target instance for template vtt. Allocates and initializes
 * the scst_tgt (session list, retry list/timer, lock), optionally builds
 * the default ACG group name "<SCST_DEFAULT_ACG_NAME>_<target_name>",
 * creates its /proc entries and links it into vtt->tgt_list — all with
 * activity suspended and scst_mutex held.
 * Returns the new tgt; error-path lines are partially elided here.
 */
296 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
297 const char *target_name)
299 struct scst_tgt *tgt;
303 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
305 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
309 INIT_LIST_HEAD(&tgt->sess_list);
310 init_waitqueue_head(&tgt->unreg_waitQ);
312 tgt->sg_tablesize = vtt->sg_tablesize;
313 spin_lock_init(&tgt->tgt_lock);
314 INIT_LIST_HEAD(&tgt->retry_cmd_list);
315 atomic_set(&tgt->finished_cmds, 0);
316 init_timer(&tgt->retry_timer);
317 tgt->retry_timer.data = (unsigned long)tgt;
318 tgt->retry_timer.function = scst_tgt_retry_timer_fn;
320 scst_suspend_activity();
321 mutex_lock(&scst_mutex);
323 if (target_name != NULL) {
/* len: name + '_' separator + default ACG name + NUL terminator. */
324 int len = strlen(target_name) + 1 +
325 strlen(SCST_DEFAULT_ACG_NAME) + 1;
327 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
328 if (tgt->default_group_name == NULL) {
329 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
330 "group name failed");
333 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
337 if (scst_build_proc_target_entries(tgt) < 0)
340 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
342 mutex_unlock(&scst_mutex);
343 scst_resume_activity();
345 PRINT_INFO("Target %s (%p) for template %s registered successfully",
346 target_name, tgt, vtt->name);
/* Error path: free the group name (if any), release locks, resume. */
353 if (tgt->default_group_name)
354 kfree(tgt->default_group_name);
357 mutex_unlock(&scst_mutex);
358 scst_resume_activity();
364 PRINT_ERROR("Failed to register target %s for template %s",
365 target_name, vtt->name);
/*
 * Condition helper for wait_event() in scst_unregister(): returns nonzero
 * when tgt->sess_list is empty, sampled under scst_mutex.
 */
369 static inline int test_sess_list(struct scst_tgt *tgt)
372 mutex_lock(&scst_mutex);
373 res = list_empty(&tgt->sess_list);
374 mutex_unlock(&scst_mutex);
/*
 * Unregister a target: calls the driver's release() (which is expected to
 * start shutting sessions down), waits until all sessions are gone, then
 * removes the target from its template's list, cleans /proc, frees the
 * default group name and stops the retry timer.
 */
378 void scst_unregister(struct scst_tgt *tgt)
380 struct scst_session *sess;
381 struct scst_tgt_template *vtt = tgt->tgtt;
385 TRACE_DBG("%s", "Calling target driver's release()");
386 tgt->tgtt->release(tgt);
387 TRACE_DBG("%s", "Target driver's release() returned");
/* After release(), no session may still be in the READY shutdown phase. */
389 mutex_lock(&scst_mutex);
390 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
391 sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
393 mutex_unlock(&scst_mutex);
/* Block until every session has been torn down (see test_sess_list()). */
395 TRACE_DBG("%s", "Waiting for sessions shutdown");
396 wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
397 TRACE_DBG("%s", "wait_event() returned");
399 scst_suspend_activity();
400 mutex_lock(&scst_mutex);
402 list_del(&tgt->tgt_list_entry);
404 scst_cleanup_proc_target_entries(tgt);
406 if (tgt->default_group_name)
407 kfree(tgt->default_group_name);
409 mutex_unlock(&scst_mutex);
410 scst_resume_activity();
/* The retry timer may still be pending; synchronously cancel it. */
412 del_timer_sync(&tgt->retry_timer);
414 PRINT_INFO("Target %p for template %s unregistered successfully",
/*
 * Suspend all command processing. Nestable: suspend_count tracks depth and
 * only the outermost call (count going to 1, per the early-exit when
 * suspend_count > 1) performs the actual suspend. Sets SUSPENDING then
 * SUSPENDED, and waits twice for scst_cmd_count to reach zero — once while
 * SUSPENDING is set and once after clearing it.
 */
423 void scst_suspend_activity(void)
427 mutex_lock(&scst_suspend_mutex);
429 TRACE_MGMT_DBG("suspend_count %d", suspend_count);
/* Already suspended by an outer caller — nothing more to do. */
431 if (suspend_count > 1)
434 set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
435 set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
/* Make the flag updates visible before sampling scst_cmd_count. */
436 smp_mb__after_set_bit();
438 TRACE_MGMT_DBG("Waiting for %d active commands to complete",
439 atomic_read(&scst_cmd_count));
440 wait_event(scst_dev_cmd_waitQ, atomic_read(&scst_cmd_count) == 0);
441 TRACE_MGMT_DBG("%s", "wait_event() returned");
443 clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
444 smp_mb__after_clear_bit();
/* Second wait catches commands that slipped in between the two phases. */
446 TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
447 atomic_read(&scst_cmd_count));
448 wait_event(scst_dev_cmd_waitQ, atomic_read(&scst_cmd_count) == 0);
449 TRACE_MGMT_DBG("%s", "wait_event() returned");
452 mutex_unlock(&scst_suspend_mutex);
/*
 * Resume command processing, balancing scst_suspend_activity(). Only the
 * outermost resume (suspend_count reaching 0, per the early-exit when
 * suspend_count > 0) clears SUSPENDED and wakes all command, init and
 * management waiters; one delayed mgmt command is promoted to the active
 * list.
 */
458 void scst_resume_activity(void)
460 struct scst_cmd_lists *l;
464 mutex_lock(&scst_suspend_mutex);
467 TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
468 if (suspend_count > 0)
471 clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
472 smp_mb__after_clear_bit();
/* Wake every per-device/global command list plus the init-cmd thread. */
474 list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
475 wake_up_all(&l->cmd_list_waitQ);
477 wake_up_all(&scst_init_cmd_list_waitQ);
479 spin_lock_irq(&scst_mcmd_lock);
480 if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
481 struct scst_mgmt_cmd *m;
/* Move the first delayed mgmt cmd to the head of the active list. */
482 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
483 mgmt_cmd_list_entry);
484 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
486 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
488 spin_unlock_irq(&scst_mcmd_lock);
489 wake_up_all(&scst_mgmt_cmd_list_waitQ);
492 mutex_unlock(&scst_suspend_mutex);
/*
 * Attach SCST to a newly discovered SCSI device (class-interface add
 * callback path): allocates an scst_device, a 1-minor gendisk for it,
 * links it into scst_dev_list and assigns the first registered dev-type
 * handler whose type matches the device's SCSI type.
 */
498 static int scst_register_device(struct scsi_device *scsidp)
501 struct scst_device *dev;
502 struct scst_dev_type *dt;
506 scst_suspend_activity();
507 mutex_lock(&scst_mutex);
509 res = scst_alloc_device(GFP_KERNEL, &dev);
513 dev->type = scsidp->type;
515 dev->rq_disk = alloc_disk(1);
516 if (dev->rq_disk == NULL) {
520 dev->rq_disk->major = SCST_MAJOR;
522 dev->scsi_dev = scsidp;
524 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
/* Pick a matching registered handler, if any, for this SCSI type. */
526 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
527 if (dt->type == scsidp->type) {
528 res = scst_assign_dev_handler(dev, dt);
536 mutex_unlock(&scst_mutex);
537 scst_resume_activity();
540 PRINT_INFO("Attached SCSI target mid-level at "
541 "scsi%d, channel %d, id %d, lun %d, type %d",
542 scsidp->host->host_no, scsidp->channel, scsidp->id,
543 scsidp->lun, scsidp->type);
545 PRINT_ERROR("Failed to attach SCSI target mid-level "
546 "at scsi%d, channel %d, id %d, lun %d, type %d",
547 scsidp->host->host_no, scsidp->channel, scsidp->id,
548 scsidp->lun, scsidp->type);
/* Error path: unlink and free everything allocated above. */
555 list_del(&dev->dev_list_entry);
556 put_disk(dev->rq_disk);
559 scst_free_device(dev);
/*
 * Detach SCST from a departing SCSI device (class-interface remove
 * callback path): finds the matching scst_device, removes it from every
 * ACG, downgrades its handler to scst_null_devtype and frees it.
 */
563 static void scst_unregister_device(struct scsi_device *scsidp)
565 struct scst_device *d, *dev = NULL;
566 struct scst_acg_dev *acg_dev, *aa;
570 scst_suspend_activity();
571 mutex_lock(&scst_mutex);
573 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
574 if (d->scsi_dev == scsidp) {
576 TRACE_DBG("Target device %p found", dev);
581 PRINT_ERROR("%s", "Target device not found");
585 list_del(&dev->dev_list_entry);
/* Remove the device from every access-control group referencing it
 * (safe iteration: scst_acg_remove_dev() unlinks acg_dev entries). */
587 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
588 dev_acg_dev_list_entry)
590 scst_acg_remove_dev(acg_dev->acg, dev);
/* Detach the real handler by assigning the null handler. */
593 scst_assign_dev_handler(dev, &scst_null_devtype);
595 put_disk(dev->rq_disk);
596 scst_free_device(dev);
598 PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
599 "id %d, lun %d, type %d", scsidp->host->host_no,
600 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
603 mutex_unlock(&scst_mutex);
604 scst_resume_activity();
/*
 * Validate a device handler before registration: parse() is mandatory.
 * For pass-through handlers (exec == NULL), exec_atomic is forced per the
 * ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ build option; a missing dev_done()
 * defaults dev_done_atomic to 1.
 */
610 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
614 if (dev_handler->parse == NULL) {
615 PRINT_ERROR("scst dev_type driver %s doesn't have a "
616 "parse() method.", dev_handler->name)
621 if (dev_handler->exec == NULL) {
622 #ifdef ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
623 dev_handler->exec_atomic = 1;
625 dev_handler->exec_atomic = 0;
629 if (dev_handler->dev_done == NULL)
630 dev_handler->dev_done_atomic = 1;
/*
 * Register a virtual (non-SCSI-backed) device under the given handler:
 * validates arguments and the handler, allocates an scst_device with
 * scsi_dev == NULL, assigns it the next virt_id (scst_virt_dev_last_id,
 * protected by scst_mutex), links it into scst_dev_list and attaches the
 * handler. Returns the virt_id on success (per the log message); error
 * labels are partially elided in this listing.
 */
637 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
638 const char *dev_name)
641 struct scst_device *dev = NULL;
645 if (dev_handler == NULL) {
646 PRINT_ERROR("%s: valid device handler must be supplied",
652 if (dev_name == NULL) {
653 PRINT_ERROR("%s: device name must be non-NULL", __func__);
658 res = scst_dev_handler_check(dev_handler);
662 scst_suspend_activity();
663 if (mutex_lock_interruptible(&scst_mutex) != 0) {
668 res = scst_alloc_device(GFP_KERNEL, &dev);
672 dev->type = dev_handler->type;
673 dev->scsi_dev = NULL;
/* NOTE(review): dev_name is stored by reference, not copied — the caller
 * must keep the string alive for the device's lifetime. */
674 dev->virt_name = dev_name;
675 dev->virt_id = scst_virt_dev_last_id++;
677 list_add_tail(&dev->dev_list_entry, &scst_dev_list);
681 rc = scst_assign_dev_handler(dev, dev_handler);
688 mutex_unlock(&scst_mutex);
691 scst_resume_activity();
695 PRINT_INFO("Attached SCSI target mid-level to virtual "
696 "device %s (id %d)", dev_name, dev->virt_id);
698 PRINT_INFO("Failed to attach SCSI target mid-level to "
699 "virtual device %s", dev_name);
/* Error path: unlink and free the half-constructed device. */
706 list_del(&dev->dev_list_entry);
707 scst_free_device(dev);
/*
 * Unregister a virtual device by its virt_id: finds it in scst_dev_list,
 * removes it from all ACGs, swaps its handler for scst_null_devtype and
 * frees it.
 */
711 void scst_unregister_virtual_device(int id)
713 struct scst_device *d, *dev = NULL;
714 struct scst_acg_dev *acg_dev, *aa;
718 scst_suspend_activity();
719 mutex_lock(&scst_mutex);
721 list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
722 if (d->virt_id == id) {
724 TRACE_DBG("Target device %p (id %d) found", dev, id);
729 PRINT_ERROR("Target virtual device (id %d) not found", id);
733 list_del(&dev->dev_list_entry);
/* Detach from every ACG that references this device. */
735 list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
736 dev_acg_dev_list_entry)
738 scst_acg_remove_dev(acg_dev->acg, dev);
741 scst_assign_dev_handler(dev, &scst_null_devtype);
/* Log before freeing — the message reads dev->virt_name/virt_id. */
743 PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
744 "(id %d)", dev->virt_name, dev->virt_id);
746 scst_free_device(dev);
749 mutex_unlock(&scst_mutex);
750 scst_resume_activity();
/*
 * Register a device-type handler: validates the interface version and the
 * handler itself, rejects pass-through handlers when the required kernel
 * patch/option is absent, checks name uniqueness, builds /proc entries,
 * links the handler into scst_dev_type_list, and attaches it to every
 * already-known SCSI device of the matching type that currently has the
 * null handler.
 */
756 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
759 struct scst_dev_type *dt;
760 struct scst_device *dev;
766 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
767 PRINT_ERROR("Incorrect version of dev handler %s",
773 res = scst_dev_handler_check(dev_type);
/* Pass-through (exec == NULL) needs kernel support; see top-of-file
 * #warning for SCSI_EXEC_REQ_FIFO_DEFINED / STRICT_SERIALIZING. */
777 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && !defined(STRICT_SERIALIZING)
778 if (dev_type->exec == NULL) {
779 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
780 "supported. Consider applying on your kernel patch "
781 "scst_exec_req_fifo-<kernel-version>.patch or define "
782 "STRICT_SERIALIZING", dev_type->name);
788 scst_suspend_activity();
789 if (mutex_lock_interruptible(&scst_mutex) != 0) {
795 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
796 if (strcmp(dt->name, dev_type->name) == 0) {
797 PRINT_ERROR("Device type handler \"%s\" already "
806 res = scst_build_proc_dev_handler_dir_entries(dev_type);
810 list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
/* Attach to matching real SCSI devices that are still unhandled. */
812 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
813 if ((dev->scsi_dev == NULL) || (dev->handler != &scst_null_devtype))
815 if (dev->scsi_dev->type == dev_type->type)
816 scst_assign_dev_handler(dev, dev_type);
819 mutex_unlock(&scst_mutex);
820 scst_resume_activity();
823 PRINT_INFO("Device handler \"%s\" for type %d registered "
824 "successfully", dev_type->name, dev_type->type);
832 mutex_unlock(&scst_mutex);
835 scst_resume_activity();
838 PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
839 dev_type->name, dev_type->type);
/*
 * Unregister a device-type handler: verifies it is registered, switches
 * every device currently using it to scst_null_devtype, removes it from
 * scst_dev_type_list and cleans its /proc entries.
 */
843 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
845 struct scst_device *dev;
846 struct scst_dev_type *dt;
851 scst_suspend_activity();
852 mutex_lock(&scst_mutex);
854 list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
855 if (strcmp(dt->name, dev_type->name) == 0) {
861 PRINT_ERROR("Dev handler \"%s\" isn't registered",
/* Detach this handler from every device still using it. */
866 list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
867 if (dev->handler == dev_type) {
868 scst_assign_dev_handler(dev, &scst_null_devtype);
869 TRACE_DBG("Dev handler removed from device %p", dev);
873 list_del(&dev_type->dev_type_list_entry);
875 mutex_unlock(&scst_mutex);
876 scst_resume_activity();
878 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
880 PRINT_INFO("Device handler \"%s\" for type %d unloaded",
881 dev_type->name, dev_type->type);
/* Error path: just release and resume. */
888 mutex_unlock(&scst_mutex);
889 scst_resume_activity();
/*
 * Register a virtual-device handler: version check, handler validation,
 * and optional /proc entries (skipped when no_proc is set). Unlike
 * __scst_register_dev_driver(), it does not touch scst_dev_type_list or
 * existing devices in the lines visible here.
 */
893 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
900 if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
901 PRINT_ERROR("Incorrect version of virtual dev handler %s",
907 res = scst_dev_handler_check(dev_type);
911 if (!dev_type->no_proc) {
912 res = scst_build_proc_dev_handler_dir_entries(dev_type);
/* type == -1 means "no fixed SCSI type" — logged without a type number. */
917 if (dev_type->type != -1) {
918 PRINT_INFO("Virtual device handler %s for type %d "
919 "registered successfully", dev_type->name,
922 PRINT_INFO("Virtual device handler \"%s\" registered "
923 "successfully", dev_type->name);
931 PRINT_ERROR("Failed to register virtual device handler \"%s\"",
/*
 * Unregister a virtual-device handler: only tears down the /proc entries
 * created at registration (when no_proc is not set).
 */
936 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
940 if (!dev_type->no_proc)
941 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
943 PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
949 /* Called under scst_mutex */
/*
 * Spawn `num` additional per-device command threads for `dev`, naming them
 * "<handler>d<dev_num>_<n>" and linking each into dev->threads_list.
 * Error/cleanup labels are elided in this listing.
 */
950 int scst_add_dev_threads(struct scst_device *dev, int num)
954 struct scst_cmd_thread_t *thr;
/* Count existing threads first (loop body elided) so naming continues
 * from the current count — TODO confirm against full source. */
959 list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
963 for (i = 0; i < num; i++) {
964 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
967 PRINT_ERROR("Failed to allocate thr %d", res);
/* Bounded copy of the handler name; explicit NUL-termination because
 * strncpy() does not guarantee it. */
970 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
971 nm[ARRAY_SIZE(nm)-1] = '\0';
972 thr->cmd_thread = kthread_run(scst_cmd_thread,
973 &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
974 if (IS_ERR(thr->cmd_thread)) {
975 res = PTR_ERR(thr->cmd_thread);
976 PRINT_ERROR("kthread_create() failed: %d", res);
980 list_add(&thr->thread_list_entry, &dev->threads_list);
988 /* Called under scst_mutex and suspended activity */
/*
 * Create the private command-list and worker threads for a device whose
 * handler requests dedicated threads (threads_num > 0); registers the
 * per-device cmd_lists on scst_cmd_lists_list (under scst_suspend_mutex)
 * and points p_cmd_lists at it.
 */
989 static int scst_create_dev_threads(struct scst_device *dev)
/* Handlers with threads_num <= 0 use the shared global threads. */
996 if (dev->handler->threads_num <= 0)
999 threads_num = dev->handler->threads_num;
1001 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1002 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1003 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1005 res = scst_add_dev_threads(dev, threads_num);
/* Publish the list so scst_resume_activity() can wake these threads. */
1009 mutex_lock(&scst_suspend_mutex);
1010 list_add_tail(&dev->cmd_lists.lists_list_entry,
1011 &scst_cmd_lists_list);
1012 mutex_unlock(&scst_suspend_mutex);
1014 dev->p_cmd_lists = &dev->cmd_lists;
1017 TRACE_EXIT_RES(res);
1021 /* Called under scst_mutex */
/*
 * Stop up to `num` per-device threads (num < 0 appears to mean "all" —
 * see the caller scst_stop_dev_threads(); confirm against full source),
 * unlinking each from dev->threads_list after kthread_stop().
 */
1022 void scst_del_dev_threads(struct scst_device *dev, int num)
1024 struct scst_cmd_thread_t *ct, *tmp;
1029 list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1030 thread_list_entry) {
1031 int rc = kthread_stop(ct->cmd_thread);
1033 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1034 list_del(&ct->thread_list_entry);
/* Stop once `num` threads have been removed (never when num < 0). */
1036 if ((num > 0) && (++i >= num))
1044 /* Called under scst_mutex and suspended activity */
/*
 * Tear down all dedicated threads of a device (reverse of
 * scst_create_dev_threads()) and unpublish its cmd_lists from
 * scst_cmd_lists_list if it was using its own.
 */
1045 static void scst_stop_dev_threads(struct scst_device *dev)
1049 if (list_empty(&dev->threads_list))
/* -1 = delete all threads (see scst_del_dev_threads()). */
1052 scst_del_dev_threads(dev, -1);
1054 if (dev->p_cmd_lists == &dev->cmd_lists) {
1055 mutex_lock(&scst_suspend_mutex);
1056 list_del(&dev->cmd_lists.lists_list_entry);
1057 mutex_unlock(&scst_suspend_mutex);
1065 /* The activity supposed to be suspended and scst_mutex held */
/*
 * Switch a device to a new handler. Sequence: detach_tgt() for every
 * tgt_dev, detach() the old handler, stop its dedicated threads, install
 * the new handler, create its threads, then attach() and attach_tgt().
 * On attach_tgt() failure the tgt_devs attached so far (tracked on the
 * local attached_tgt_devs list) are detached again; on attach() failure
 * the device falls back to scst_null_devtype. Some error-label lines are
 * elided in this listing.
 */
1066 int scst_assign_dev_handler(struct scst_device *dev,
1067 struct scst_dev_type *handler)
1070 struct scst_tgt_dev *tgt_dev;
1071 LIST_HEAD(attached_tgt_devs);
1075 sBUG_ON(handler == NULL);
/* No-op if the handler is unchanged. */
1077 if (dev->handler == handler)
/* Phase 1: detach the old handler from every tgt_dev, then the device. */
1080 if (dev->handler && dev->handler->detach_tgt) {
1081 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1082 dev_tgt_dev_list_entry) {
1083 TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1085 dev->handler->detach_tgt(tgt_dev);
1086 TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1090 if (dev->handler && dev->handler->detach) {
1091 TRACE_DBG("%s", "Calling dev handler's detach()");
1092 dev->handler->detach(dev);
1093 TRACE_DBG("%s", "Old handler's detach() returned");
1096 scst_stop_dev_threads(dev);
/* Phase 2: install the new handler and its threads. */
1098 dev->handler = handler;
1101 res = scst_create_dev_threads(dev);
1106 if (handler && handler->attach) {
1107 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1108 res = handler->attach(dev);
1109 TRACE_DBG("New dev handler's attach() returned %d", res);
1111 PRINT_ERROR("New device handler's %s attach() "
1112 "failed: %d", handler->name, res);
/* Phase 3: attach_tgt() per tgt_dev, remembering successes so they
 * can be rolled back if a later one fails. */
1117 if (handler && handler->attach_tgt) {
1118 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1119 dev_tgt_dev_list_entry) {
1120 TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1122 res = handler->attach_tgt(tgt_dev);
1123 TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1125 PRINT_ERROR("Device handler's %s attach_tgt() "
1126 "failed: %d", handler->name, res);
1127 goto out_err_detach_tgt;
1129 list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1130 &attached_tgt_devs);
/* Failure fallback: stop threads and park the device on the null
 * handler so it is never left with a half-attached handler. */
1136 scst_stop_dev_threads(dev);
1140 dev->handler = &scst_null_devtype;
1143 TRACE_EXIT_RES(res);
/* Rollback for partial attach_tgt(): detach only what was attached. */
1147 if (handler && handler->detach_tgt) {
1148 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1149 extra_tgt_dev_list_entry)
1151 TRACE_DBG("Calling handler's detach_tgt(%p)",
1153 handler->detach_tgt(tgt_dev);
1154 TRACE_DBG("%s", "Handler's detach_tgt() returned");
1157 if (handler && handler->detach) {
1158 TRACE_DBG("%s", "Calling handler's detach()");
1159 handler->detach(dev);
1160 TRACE_DBG("%s", "Handler's detach() returned");
/*
 * Return the current number of global command threads, sampled under
 * cmd_threads_mutex (the value may change right after return).
 */
1165 int scst_cmd_threads_count(void)
1169 /* Just to lower the race window, when user can get just changed value */
1170 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1171 i = scst_threads_info.nr_cmd_threads;
1172 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
/*
 * Zero scst_threads_info and initialize its mutex and thread list;
 * called once from init_scst() before any threads are started.
 */
1176 static void scst_threads_info_init(void)
1178 memset(&scst_threads_info, 0, sizeof(scst_threads_info));
1179 mutex_init(&scst_threads_info.cmd_threads_mutex);
1180 INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
1183 /* scst_threads_info.cmd_threads_mutex supposed to be held */
/*
 * Stop `num` global command threads (must be 1..nr_cmd_threads), removing
 * each from cmd_threads_list and decrementing nr_cmd_threads.
 */
1184 void __scst_del_cmd_threads(int num)
1186 struct scst_cmd_thread_t *ct, *tmp;
1191 i = scst_threads_info.nr_cmd_threads;
1192 if (num <= 0 || num > i) {
1193 PRINT_ERROR("can not del %d cmd threads from %d", num, i);
1197 list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1198 thread_list_entry) {
1201 res = kthread_stop(ct->cmd_thread);
1203 TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1204 list_del(&ct->thread_list_entry);
1206 scst_threads_info.nr_cmd_threads--;
1216 /* scst_threads_info.cmd_threads_mutex supposed to be held */
/*
 * Spawn `num` global command threads ("scsi_tgt%d") serving
 * scst_main_cmd_lists, adding each to cmd_threads_list and bumping
 * nr_cmd_threads.
 */
1217 int __scst_add_cmd_threads(int num)
/* Monotonic counter for unique thread names across add/del cycles. */
1220 static int scst_thread_num;
1224 for (i = 0; i < num; i++) {
1225 struct scst_cmd_thread_t *thr;
1227 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1230 PRINT_ERROR("fail to allocate thr %d", res);
1233 thr->cmd_thread = kthread_run(scst_cmd_thread,
1234 &scst_main_cmd_lists, "scsi_tgt%d",
1236 if (IS_ERR(thr->cmd_thread)) {
1237 res = PTR_ERR(thr->cmd_thread);
1238 PRINT_ERROR("kthread_create() failed: %d", res);
1242 list_add(&thr->thread_list_entry,
1243 &scst_threads_info.cmd_threads_list);
1244 scst_threads_info.nr_cmd_threads++;
1249 TRACE_EXIT_RES(res);
/* NOTE(review): rollback deletes i-1 threads although i iterations may
 * have completed before the failure — possible off-by-one leaving one
 * thread behind; the elided lines between 1249 and 1254 may adjust i,
 * so verify against the full source before changing. */
1254 __scst_del_cmd_threads(i - 1);
/*
 * Public wrapper: add `num` global command threads under
 * cmd_threads_mutex (see __scst_add_cmd_threads()).
 */
1258 int scst_add_cmd_threads(int num)
1264 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1265 res = __scst_add_cmd_threads(num);
1266 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1268 TRACE_EXIT_RES(res);
/*
 * Public wrapper: remove `num` global command threads under
 * cmd_threads_mutex (see __scst_del_cmd_threads()).
 */
1272 void scst_del_cmd_threads(int num)
1276 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1277 __scst_del_cmd_threads(num);
1278 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
/*
 * Module-shutdown helper: stop every global command thread plus the
 * mgmt-cmd, mgmt and init-cmd service threads (each guarded by a NULL
 * check since scst_start_all_threads() may have failed partway).
 */
1284 static void scst_stop_all_threads(void)
1288 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1289 __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1290 if (scst_threads_info.mgmt_cmd_thread)
1291 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1292 if (scst_threads_info.mgmt_thread)
1293 kthread_stop(scst_threads_info.mgmt_thread);
1294 if (scst_threads_info.init_cmd_thread)
1295 kthread_stop(scst_threads_info.init_cmd_thread);
1296 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
/*
 * Module-startup helper: spawn `num` global command threads, then the
 * init-cmd ("scsi_tgt_init"), mgmt-cmd ("scsi_tgt_mc") and mgmt
 * ("scsi_tgt_mgmt") service threads. Each pointer is reset to NULL on
 * kthread_run() failure so scst_stop_all_threads() can skip it.
 */
1302 static int scst_start_all_threads(int num)
1308 mutex_lock(&scst_threads_info.cmd_threads_mutex);
1309 res = __scst_add_cmd_threads(num);
1313 scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
1314 NULL, "scsi_tgt_init");
1315 if (IS_ERR(scst_threads_info.init_cmd_thread)) {
1316 res = PTR_ERR(scst_threads_info.init_cmd_thread);
1317 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1318 scst_threads_info.init_cmd_thread = NULL;
1322 scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
1323 NULL, "scsi_tgt_mc");
1324 if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
1325 res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
1326 PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
1327 scst_threads_info.mgmt_cmd_thread = NULL;
1331 scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
1332 NULL, "scsi_tgt_mgmt");
1333 if (IS_ERR(scst_threads_info.mgmt_thread)) {
1334 res = PTR_ERR(scst_threads_info.mgmt_thread);
1335 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1336 scst_threads_info.mgmt_thread = NULL;
1341 mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1342 TRACE_EXIT_RES(res);
/*
 * class_interface "add" callback: signature differs before/after kernel
 * 2.6.15; resolves the class_device to its scsi_device and registers it
 * with SCST.
 */
1356 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 15)
1357 static int scst_add(struct class_device *cdev)
1359 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1362 struct scsi_device *scsidp;
1367 scsidp = to_scsi_device(cdev->dev);
1368 res = scst_register_device(scsidp);
/*
 * class_interface "remove" callback (pre/post-2.6.15 signatures):
 * resolves the class_device to its scsi_device and detaches it from SCST.
 */
1374 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 15)
1375 static void scst_remove(struct class_device *cdev)
1377 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1380 struct scsi_device *scsidp;
1384 scsidp = to_scsi_device(cdev->dev);
1385 scst_unregister_device(scsidp);
/* SCSI class interface hooking device hot-add/remove into SCST
 * (registered via scsi_register_interface() in init_scst()); the .add
 * initializer line is elided in this listing. */
1391 static struct class_interface scst_interface = {
1393 .remove = scst_remove,
/*
 * Log the compile-time feature set as a single comma-separated line.
 * `i` is the write position in buf; `j` snapshots it after the prefix so
 * each entry can decide whether it needs a ", " separator. snprintf() is
 * bounded by the remaining space on every append.
 */
1396 static void __init scst_print_config(void)
1401 i = snprintf(buf, sizeof(buf), "Enabled features: ");
1404 #ifdef STRICT_SERIALIZING
1405 i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1409 i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1410 (j == i) ? "" : ", ");
1414 i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1415 (j == i) ? "" : ", ");
1419 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1420 (j == i) ? "" : ", ");
1424 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1425 (j == i) ? "" : ", ");
1429 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1430 (j == i) ? "" : ", ");
1434 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1435 (j == i) ? "" : ", ");
1439 i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1440 (j == i) ? "" : ", ");
1443 #ifdef USE_EXPECTED_VALUES
1444 i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1445 (j == i) ? "" : ", ");
1448 #ifdef ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1449 i += snprintf(&buf[i], sizeof(buf) - i, "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1450 (j == i) ? "" : ", ");
1453 #ifdef SCST_STRICT_SECURITY
1454 i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1455 (j == i) ? "" : ", ");
1459 i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_HIGHMEM",
1460 (j == i) ? "" : ", ");
/* Emit only if at least one feature was appended (condition elided). */
1464 PRINT_INFO("%s", buf);
1467 static int __init init_scst(void)
1474 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1476 struct scsi_request *req;
1477 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1478 sizeof(req->sr_sense_buffer));
1482 struct scsi_sense_hdr *shdr;
1483 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1487 struct scst_tgt_dev *t;
1489 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1490 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
1493 BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
1494 BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
1495 BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
1496 BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
1498 mutex_init(&scst_mutex);
1499 INIT_LIST_HEAD(&scst_template_list);
1500 INIT_LIST_HEAD(&scst_dev_list);
1501 INIT_LIST_HEAD(&scst_dev_type_list);
1502 spin_lock_init(&scst_main_lock);
1503 INIT_LIST_HEAD(&scst_acg_list);
1504 spin_lock_init(&scst_init_lock);
1505 init_waitqueue_head(&scst_init_cmd_list_waitQ);
1506 INIT_LIST_HEAD(&scst_init_cmd_list);
1507 #if defined(DEBUG) || defined(TRACING)
1508 scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1510 atomic_set(&scst_cmd_count, 0);
1511 spin_lock_init(&scst_cmd_mem_lock);
1512 spin_lock_init(&scst_mcmd_lock);
1513 INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1514 INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1515 init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1516 init_waitqueue_head(&scst_mgmt_waitQ);
1517 spin_lock_init(&scst_mgmt_lock);
1518 INIT_LIST_HEAD(&scst_sess_init_list);
1519 INIT_LIST_HEAD(&scst_sess_shut_list);
1520 init_waitqueue_head(&scst_dev_cmd_waitQ);
1521 mutex_init(&scst_suspend_mutex);
1522 INIT_LIST_HEAD(&scst_cmd_lists_list);
1523 scst_virt_dev_last_id = 1;
1524 spin_lock_init(&scst_temp_UA_lock);
1526 spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1527 INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1528 init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1529 list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1530 &scst_cmd_lists_list);
1532 scst_num_cpus = num_online_cpus();
1534 /* ToDo: register_cpu_notifier() */
1536 if (scst_threads == 0)
1537 scst_threads = scst_num_cpus;
1539 if (scst_threads < 1) {
1540 PRINT_ERROR("%s", "scst_threads can not be less than 1");
1541 scst_threads = scst_num_cpus;
1544 scst_threads_info_init();
1546 #define INIT_CACHEP(p, s, o) do { \
1547 p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
1548 TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
1549 sizeof(struct s)); \
1556 INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1557 INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1558 out_destroy_mgmt_cache);
1559 INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1560 out_destroy_mgmt_stub_cache);
1562 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1563 INIT_CACHEP(scst_sense_cachep, scst_sense, out_destroy_ua_cache);
1565 INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
1566 INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1567 INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1568 INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
1570 scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1571 mempool_free_slab, scst_mgmt_cachep);
1572 if (scst_mgmt_mempool == NULL) {
1574 goto out_destroy_acg_cache;
1577 scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1578 mempool_free_slab, scst_mgmt_stub_cachep);
1579 if (scst_mgmt_stub_mempool == NULL) {
1581 goto out_destroy_mgmt_mempool;
1584 scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
1585 mempool_free_slab, scst_ua_cachep);
1586 if (scst_ua_mempool == NULL) {
1588 goto out_destroy_mgmt_stub_mempool;
1591 /* Losing sense may have fatal consequences, so let's have a big pool */
1592 scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
1593 mempool_free_slab, scst_sense_cachep);
1594 if (scst_sense_mempool == NULL) {
1596 goto out_destroy_ua_mempool;
1599 if (scst_max_cmd_mem == 0) {
1602 #if BITS_PER_LONG == 32
1603 scst_max_cmd_mem = min(((uint64_t)si.totalram << PAGE_SHIFT) >> 2,
1606 scst_max_cmd_mem = (si.totalram << PAGE_SHIFT) >> 2;
1609 scst_max_cmd_mem <<= 20;
1611 res = scst_sgv_pools_init(scst_max_cmd_mem, 0);
1613 goto out_destroy_sense_mempool;
1615 scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1616 if (scst_default_acg == NULL) {
1618 goto out_destroy_sgv_pool;
1621 res = scsi_register_interface(&scst_interface);
1625 scst_scsi_op_list_init();
1627 for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1628 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1629 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1630 tasklet_init(&scst_tasklets[i].tasklet, (void *)scst_cmd_tasklet,
1631 (unsigned long)&scst_tasklets[i]);
1634 TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1637 res = scst_start_all_threads(scst_threads);
1639 goto out_thread_free;
1641 res = scst_proc_init_module();
1643 goto out_thread_free;
1646 PRINT_INFO("SCST version %s loaded successfully (max mem for "
1647 "commands %ld Mb)", SCST_VERSION_STRING, scst_max_cmd_mem >> 20);
1649 scst_print_config();
1652 TRACE_EXIT_RES(res);
1656 scst_stop_all_threads();
1658 scsi_unregister_interface(&scst_interface);
1661 scst_destroy_acg(scst_default_acg);
1663 out_destroy_sgv_pool:
1664 scst_sgv_pools_deinit();
1666 out_destroy_sense_mempool:
1667 mempool_destroy(scst_sense_mempool);
1669 out_destroy_ua_mempool:
1670 mempool_destroy(scst_ua_mempool);
1672 out_destroy_mgmt_stub_mempool:
1673 mempool_destroy(scst_mgmt_stub_mempool);
1675 out_destroy_mgmt_mempool:
1676 mempool_destroy(scst_mgmt_mempool);
1678 out_destroy_acg_cache:
1679 kmem_cache_destroy(scst_acgd_cachep);
1681 out_destroy_tgt_cache:
1682 kmem_cache_destroy(scst_tgtd_cachep);
1684 out_destroy_sess_cache:
1685 kmem_cache_destroy(scst_sess_cachep);
1687 out_destroy_cmd_cache:
1688 kmem_cache_destroy(scst_cmd_cachep);
1690 out_destroy_sense_cache:
1691 kmem_cache_destroy(scst_sense_cachep);
1693 out_destroy_ua_cache:
1694 kmem_cache_destroy(scst_ua_cachep);
1696 out_destroy_mgmt_stub_cache:
1697 kmem_cache_destroy(scst_mgmt_stub_cachep);
1699 out_destroy_mgmt_cache:
1700 kmem_cache_destroy(scst_mgmt_cachep);
/*
 * exit_scst() - module unload handler.
 *
 * Tears down the resources created in init_scst() in reverse order of
 * their creation: /proc entries, worker threads, the SCSI interface
 * registration, the default ACG, the SGV pools, then the mempools and
 * finally the slab caches that back them.
 */
1704 static void __exit exit_scst(void)
1706 #ifdef CONFIG_LOCKDEP
1707 	static /* To hide the lockdep's warning about non-static key */
1709 	DECLARE_MUTEX_LOCKED(shm);
1713 	/* ToDo: unregister_cpu_notifier() */
1715 	scst_proc_cleanup_module();
1717 	scst_stop_all_threads();
1719 	scsi_unregister_interface(&scst_interface);
1720 	scst_destroy_acg(scst_default_acg);
1722 	scst_sgv_pools_deinit();
	/* Local helper macro: destroy one slab cache (expansion continues below) */
1724 #define DEINIT_CACHEP(p) do { \
1725 		kmem_cache_destroy(p); \
	/* Mempools must be destroyed before the slab caches that back them */
1729 	mempool_destroy(scst_mgmt_mempool);
1730 	mempool_destroy(scst_mgmt_stub_mempool);
1731 	mempool_destroy(scst_ua_mempool);
1732 	mempool_destroy(scst_sense_mempool);
1734 	DEINIT_CACHEP(scst_mgmt_cachep);
1735 	DEINIT_CACHEP(scst_mgmt_stub_cachep);
1736 	DEINIT_CACHEP(scst_ua_cachep);
1737 	DEINIT_CACHEP(scst_sense_cachep);
1738 	DEINIT_CACHEP(scst_cmd_cachep);
1739 	DEINIT_CACHEP(scst_sess_cachep);
1740 	DEINIT_CACHEP(scst_tgtd_cachep);
1741 	DEINIT_CACHEP(scst_acgd_cachep);
1743 	PRINT_INFO("%s", "SCST unloaded");
/*
 * Public API exported to dependent modules. Grouped by the side of the
 * SCST core a symbol serves: device handlers below, target drivers
 * further down, then shared utility/SGV-pool/parse helpers.
 */
1750 * Device Handler Side (i.e. scst_vdisk)
1752 EXPORT_SYMBOL(__scst_register_dev_driver);
1753 EXPORT_SYMBOL(scst_unregister_dev_driver);
1754 EXPORT_SYMBOL(scst_register);
1755 EXPORT_SYMBOL(scst_unregister);
1757 EXPORT_SYMBOL(scst_register_virtual_device);
1758 EXPORT_SYMBOL(scst_unregister_virtual_device);
1759 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
1760 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1762 EXPORT_SYMBOL(scst_set_busy);
1763 EXPORT_SYMBOL(scst_set_cmd_error_status);
1764 EXPORT_SYMBOL(scst_set_cmd_error);
1765 EXPORT_SYMBOL(scst_set_resp_data_len);
1766 EXPORT_SYMBOL(scst_alloc_sense);
1767 EXPORT_SYMBOL(scst_alloc_set_sense);
1768 EXPORT_SYMBOL(scst_set_sense);
1769 EXPORT_SYMBOL(scst_set_cmd_error_sense);
1771 EXPORT_SYMBOL(scst_process_active_cmd);
1774 * Target Driver Side (i.e. HBA)
1776 EXPORT_SYMBOL(scst_register_session);
1777 EXPORT_SYMBOL(scst_unregister_session_ex);
1779 EXPORT_SYMBOL(__scst_register_target_template);
1780 EXPORT_SYMBOL(scst_unregister_target_template);
1782 EXPORT_SYMBOL(scst_cmd_init_done);
1783 EXPORT_SYMBOL(scst_tgt_cmd_done);
1784 EXPORT_SYMBOL(scst_restart_cmd);
1785 EXPORT_SYMBOL(scst_rx_cmd);
1786 EXPORT_SYMBOL(scst_rx_data);
1787 EXPORT_SYMBOL(scst_rx_mgmt_fn);
1789 EXPORT_SYMBOL(scst_find_cmd);
1790 EXPORT_SYMBOL(scst_find_cmd_by_tag);
1795 EXPORT_SYMBOL(scst_suspend_activity);
1796 EXPORT_SYMBOL(scst_resume_activity);
1798 EXPORT_SYMBOL(scst_add_cmd_threads);
1799 EXPORT_SYMBOL(scst_del_cmd_threads);
/* Trace-log /proc helpers are only built in DEBUG/TRACING configurations */
1801 #if defined(DEBUG) || defined(TRACING)
1802 EXPORT_SYMBOL(scst_proc_log_entry_read);
1803 EXPORT_SYMBOL(scst_proc_log_entry_write);
1806 EXPORT_SYMBOL(scst_create_proc_entry);
1807 EXPORT_SYMBOL(scst_single_seq_open);
1809 EXPORT_SYMBOL(scst_get);
1810 EXPORT_SYMBOL(scst_put);
1812 EXPORT_SYMBOL(scst_cmd_get);
1813 EXPORT_SYMBOL(scst_cmd_put);
1815 EXPORT_SYMBOL(scst_alloc);
1816 EXPORT_SYMBOL(scst_free);
1818 EXPORT_SYMBOL(scst_check_local_events);
1820 /* Tgt_dev's threads local storage */
1821 EXPORT_SYMBOL(scst_add_thr_data);
1822 EXPORT_SYMBOL(scst_del_all_thr_data);
1823 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
1824 EXPORT_SYMBOL(scst_find_thr_data);
1826 /* SGV pool routines */
1827 EXPORT_SYMBOL(sgv_pool_create);
1828 EXPORT_SYMBOL(sgv_pool_destroy);
1829 EXPORT_SYMBOL(sgv_pool_set_allocator);
1830 EXPORT_SYMBOL(sgv_pool_alloc);
1831 EXPORT_SYMBOL(sgv_pool_free);
1832 EXPORT_SYMBOL(sgv_get_priv);
1834 /* Generic parse() routines */
1835 EXPORT_SYMBOL(scst_calc_block_shift);
1836 EXPORT_SYMBOL(scst_sbc_generic_parse);
1837 EXPORT_SYMBOL(scst_cdrom_generic_parse);
1838 EXPORT_SYMBOL(scst_modisk_generic_parse);
1839 EXPORT_SYMBOL(scst_tape_generic_parse);
1840 EXPORT_SYMBOL(scst_changer_generic_parse);
1841 EXPORT_SYMBOL(scst_processor_generic_parse);
1842 EXPORT_SYMBOL(scst_raid_generic_parse);
1844 /* Generic dev_done() routines */
1845 EXPORT_SYMBOL(scst_block_generic_dev_done);
1846 EXPORT_SYMBOL(scst_tape_generic_dev_done);
1851 EXPORT_SYMBOL(scst_get_cdb_info);
1852 EXPORT_SYMBOL(scst_cmd_get_tgt_priv_lock);
1853 EXPORT_SYMBOL(scst_cmd_set_tgt_priv_lock);
1854 EXPORT_SYMBOL(scst_obtain_device_parameters);
1857 EXPORT_SYMBOL(scst_random);
/* Module entry and exit points */
1860 module_init(init_scst);
1861 module_exit(exit_scst);
/* Module metadata shown by modinfo(8) */
1863 MODULE_AUTHOR("Vladislav Bolkhovitin");
1864 MODULE_LICENSE("GPL");
1865 MODULE_DESCRIPTION("SCST target core");
1866 MODULE_VERSION(SCST_VERSION_STRING);