/* Lists of commands with lock, if dedicated threads are used */
struct scst_cmd_lists cmd_lists;
+ /* Per-device dedicated IO context */
+ struct io_context *dev_io_ctx;
+
/* How many cmds alive on this dev */
atomic_t dev_cmd_count;
spinlock_t thr_data_lock;
struct list_head thr_data_list;
+ /* Per-(device, session) dedicated IO context */
+ struct io_context *tgt_dev_io_ctx;
+
spinlock_t tgt_dev_lock; /* per-session device lock */
/* List of UA's for this device, protected by tgt_dev_lock */
},
/*
- * Adds and deletes (stops) num SCST's threads. Returns 0 on success,
- * error code otherwise.
+ * Adds and deletes (stops) num global SCST threads. Returns 0 on
+ * success, error code otherwise.
*/
-int scst_add_cmd_threads(int num);
-void scst_del_cmd_threads(int num);
+int scst_add_global_threads(int num);
+void scst_del_global_threads(int num);
int scst_alloc_sense(struct scst_cmd *cmd, int atomic);
int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
+++ /dev/null
-diff -upkr linux-2.6.27.2/block/blk-ioc.c linux-2.6.27.2/block/blk-ioc.c
---- linux-2.6.27.2/block/blk-ioc.c 2008-10-10 02:13:53.000000000 +0400
-+++ linux-2.6.27.2/block/blk-ioc.c 2008-11-25 21:27:01.000000000 +0300
-@@ -105,6 +105,7 @@ struct io_context *alloc_io_context(gfp_
-
- return ret;
- }
-+EXPORT_SYMBOL(alloc_io_context);
-
- /*
- * If the current task has no IO context then create one and initialise it.
-diff -upkr linux-2.6.27.2/include/linux/iocontext.h linux-2.6.27.2/include/linux/iocontext.h
---- linux-2.6.27.2/include/linux/iocontext.h 2008-10-10 02:13:53.000000000 +0400
-+++ linux-2.6.27.2/include/linux/iocontext.h 2008-11-26 13:23:03.000000000 +0300
-@@ -103,6 +103,7 @@ static inline struct io_context *ioc_tas
- int put_io_context(struct io_context *ioc);
- void exit_io_context(void);
- struct io_context *get_io_context(gfp_t gfp_flags, int node);
-+#define SCST_ALLOC_IO_CONTEXT_EXPORTED
- struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
- void copy_io_context(struct io_context **pdst, struct io_context **psrc);
- #else
+++ /dev/null
-diff -upkr linux-2.6.28/block/blk-ioc.c linux-2.6.28/block/blk-ioc.c
---- linux-2.6.28/block/blk-ioc.c 2008-10-10 02:13:53.000000000 +0400
-+++ linux-2.6.28/block/blk-ioc.c 2008-11-25 21:27:01.000000000 +0300
-@@ -105,6 +105,7 @@ struct io_context *alloc_io_context(gfp_
-
- return ret;
- }
-+EXPORT_SYMBOL(alloc_io_context);
-
- /*
- * If the current task has no IO context then create one and initialise it.
-diff -upkr linux-2.6.28/include/linux/iocontext.h linux-2.6.28/include/linux/iocontext.h
---- linux-2.6.28/include/linux/iocontext.h 2008-10-10 02:13:53.000000000 +0400
-+++ linux-2.6.28/include/linux/iocontext.h 2008-11-26 13:23:03.000000000 +0300
-@@ -103,6 +103,7 @@ static inline struct io_context *ioc_tas
- int put_io_context(struct io_context *ioc);
- void exit_io_context(void);
- struct io_context *get_io_context(gfp_t gfp_flags, int node);
-+#define SCST_ALLOC_IO_CONTEXT_EXPORTED
- struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
- void copy_io_context(struct io_context **pdst, struct io_context **psrc);
- #else
--- /dev/null
+diff -upkr linux-2.6.27.2/block/blk-ioc.c linux-2.6.27.2/block/blk-ioc.c
+--- linux-2.6.27.2/block/blk-ioc.c 2008-10-10 02:13:53.000000000 +0400
++++ linux-2.6.27.2/block/blk-ioc.c 2009-03-23 21:32:37.000000000 +0300
+@@ -65,6 +65,21 @@ static void cfq_exit(struct io_context *
+ rcu_read_unlock();
+ }
+
++void __exit_io_context(struct io_context *ioc)
++{
++ if (ioc == NULL)
++ return;
++
++ if (atomic_dec_and_test(&ioc->nr_tasks)) {
++ if (ioc->aic && ioc->aic->exit)
++ ioc->aic->exit(ioc->aic);
++ cfq_exit(ioc);
++
++ put_io_context(ioc);
++ }
++}
++EXPORT_SYMBOL(__exit_io_context);
++
+ /* Called by the exitting task */
+ void exit_io_context(void)
+ {
+@@ -75,13 +90,7 @@ void exit_io_context(void)
+ current->io_context = NULL;
+ task_unlock(current);
+
+- if (atomic_dec_and_test(&ioc->nr_tasks)) {
+- if (ioc->aic && ioc->aic->exit)
+- ioc->aic->exit(ioc->aic);
+- cfq_exit(ioc);
+-
+- put_io_context(ioc);
+- }
++ __exit_io_context(ioc);
+ }
+
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+@@ -105,6 +114,7 @@ struct io_context *alloc_io_context(gfp_
+
+ return ret;
+ }
++EXPORT_SYMBOL(alloc_io_context);
+
+ /*
+ * If the current task has no IO context then create one and initialise it.
+diff -upkr linux-2.6.27.2/include/linux/iocontext.h linux-2.6.27.2/include/linux/iocontext.h
+--- linux-2.6.27.2/include/linux/iocontext.h 2008-10-10 02:13:53.000000000 +0400
++++ linux-2.6.27.2/include/linux/iocontext.h 2009-03-23 21:32:37.000000000 +0300
+@@ -103,7 +103,9 @@ static inline struct io_context *ioc_tas
+ int put_io_context(struct io_context *ioc);
+ void exit_io_context(void);
+ struct io_context *get_io_context(gfp_t gfp_flags, int node);
++#define SCST_IO_CONTEXT
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
++void __exit_io_context(struct io_context *ioc);
+ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
+ #else
+ static inline void exit_io_context(void)
--- /dev/null
+diff -upkr linux-2.6.28/block/blk-ioc.c linux-2.6.28/block/blk-ioc.c
+--- linux-2.6.28/block/blk-ioc.c 2008-12-25 02:26:37.000000000 +0300
++++ linux-2.6.28/block/blk-ioc.c 2009-03-23 14:28:48.000000000 +0300
+@@ -65,6 +65,21 @@ static void cfq_exit(struct io_context *
+ rcu_read_unlock();
+ }
+
++void __exit_io_context(struct io_context *ioc)
++{
++ if (ioc == NULL)
++ return;
++
++ if (atomic_dec_and_test(&ioc->nr_tasks)) {
++ if (ioc->aic && ioc->aic->exit)
++ ioc->aic->exit(ioc->aic);
++ cfq_exit(ioc);
++
++ put_io_context(ioc);
++ }
++}
++EXPORT_SYMBOL(__exit_io_context);
++
+ /* Called by the exitting task */
+ void exit_io_context(void)
+ {
+@@ -75,13 +90,7 @@ void exit_io_context(void)
+ current->io_context = NULL;
+ task_unlock(current);
+
+- if (atomic_dec_and_test(&ioc->nr_tasks)) {
+- if (ioc->aic && ioc->aic->exit)
+- ioc->aic->exit(ioc->aic);
+- cfq_exit(ioc);
+-
+- put_io_context(ioc);
+- }
++ __exit_io_context(ioc);
+ }
+
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+@@ -105,6 +114,7 @@ struct io_context *alloc_io_context(gfp_
+
+ return ret;
+ }
++EXPORT_SYMBOL(alloc_io_context);
+
+ /*
+ * If the current task has no IO context then create one and initialise it.
+diff -upkr linux-2.6.28/include/linux/iocontext.h linux-2.6.28/include/linux/iocontext.h
+--- linux-2.6.28/include/linux/iocontext.h 2008-12-25 02:26:37.000000000 +0300
++++ linux-2.6.28/include/linux/iocontext.h 2009-03-23 14:05:01.000000000 +0300
+@@ -103,7 +103,9 @@ static inline struct io_context *ioc_tas
+ int put_io_context(struct io_context *ioc);
+ void exit_io_context(void);
+ struct io_context *get_io_context(gfp_t gfp_flags, int node);
++#define SCST_IO_CONTEXT
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
++void __exit_io_context(struct io_context *ioc);
+ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
+ #else
+ static inline void exit_io_context(void)
dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
dev->dev_num = dev_num++;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
+ dev->dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
+ if (dev->dev_io_ctx == NULL) {
+ TRACE(TRACE_OUT_OF_MEM, "%s", "Failed to alloc dev IO context");
+ res = -ENOMEM;
+ kfree(dev);
+ goto out;
+ }
+#endif
+#endif
+
*out_dev = dev;
out:
}
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
+ __exit_io_context(dev->dev_io_ctx);
+#endif
+#endif
+
kfree(dev);
TRACE_EXIT();
tm_dbg_init_tgt_dev(tgt_dev, acg_dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
+ tgt_dev->tgt_dev_io_ctx = alloc_io_context(GFP_KERNEL, -1);
+ if (tgt_dev->tgt_dev_io_ctx == NULL) {
+ TRACE(TRACE_OUT_OF_MEM, "Failed to alloc tgt_dev IO context "
+ "for dev %s (initiator %s)", dev->virt_name,
+ sess->initiator_name);
+ goto out_free;
+ }
+#endif
+#endif
+
if (vtt->threads_num > 0) {
rc = 0;
if (dev->handler->threads_num > 0)
rc = scst_add_dev_threads(dev, vtt->threads_num);
else if (dev->handler->threads_num == 0)
- rc = scst_add_cmd_threads(vtt->threads_num);
+ rc = scst_add_global_threads(vtt->threads_num);
if (rc != 0)
goto out_free;
}
if (dev->handler->threads_num > 0)
scst_del_dev_threads(dev, vtt->threads_num);
else if (dev->handler->threads_num == 0)
- scst_del_cmd_threads(vtt->threads_num);
+ scst_del_global_threads(vtt->threads_num);
}
out_free:
+ __exit_io_context(tgt_dev->tgt_dev_io_ctx);
+
kmem_cache_free(scst_tgtd_cachep, tgt_dev);
tgt_dev = NULL;
goto out;
if (dev->handler->threads_num > 0)
scst_del_dev_threads(dev, vtt->threads_num);
else if (dev->handler->threads_num == 0)
- scst_del_cmd_threads(vtt->threads_num);
+ scst_del_global_threads(vtt->threads_num);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
+ __exit_io_context(tgt_dev->tgt_dev_io_ctx);
+#endif
+#endif
+
kmem_cache_free(scst_tgtd_cachep, tgt_dev);
TRACE_EXIT();
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
-#if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
-#warning "Patch export_alloc_io_context-<kernel-version>.patch was not applied \
+#if !defined(SCST_IO_CONTEXT)
+#warning "Patch io_context-<kernel-version>.patch was not applied \
on your kernel. SCST will be working with not the best performance."
#endif
#endif
static struct list_head scst_cmd_lists_list;
static int scst_threads;
-struct scst_threads_info_t scst_threads_info;
+struct mutex scst_global_threads_mutex;
+u32 scst_nr_global_threads;
+static struct list_head scst_global_threads_list;
+static struct task_struct *scst_init_cmd_thread;
+static struct task_struct *scst_mgmt_thread;
+static struct task_struct *scst_mgmt_cmd_thread;
static int suspend_count;
static int scst_virt_dev_last_id; /* protected by scst_mutex */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
-#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
-static struct io_context *scst_ioc;
-#endif
-#endif
-
static unsigned int scst_max_cmd_mem;
unsigned int scst_max_dev_cmd_mem;
int i, res = 0;
int n = 0;
struct scst_cmd_thread_t *thr;
- struct io_context *ioc = NULL;
char nm[12];
TRACE_ENTRY();
list_add(&thr->thread_list_entry, &dev->threads_list);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
-#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
+#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
/*
- * It would be better to keep io_context in tgt_dev and
- * dynamically assign it to the current thread on the IO
- * submission time to let each initiator have own
- * io_context. But, unfortunately, CFQ doesn't
- * support if a task has dynamically switched
- * io_context, it oopses on BUG_ON(!cic->dead_key) in
- * cic_free_func(). So, we have to have the same io_context
- * for all initiators.
+	 * ToDo: better to use tgt_dev_io_ctx instead, but we
+	 * are not ready for that yet.
*/
- if (ioc == NULL) {
- ioc = alloc_io_context(GFP_KERNEL, -1);
- TRACE_DBG("ioc %p (thr %d)", ioc, thr->cmd_thread->pid);
- }
-
- put_io_context(thr->cmd_thread->io_context);
- thr->cmd_thread->io_context = ioc_task_link(ioc);
- TRACE_DBG("Setting ioc %p on thr %d", ioc,
+ __exit_io_context(thr->cmd_thread->io_context);
+ thr->cmd_thread->io_context = ioc_task_link(dev->dev_io_ctx);
+ TRACE_DBG("Setting dev io ctx %p on thr %d", dev->dev_io_ctx,
thr->cmd_thread->pid);
#endif
#endif
}
out:
- put_io_context(ioc);
-
TRACE_EXIT_RES(res);
return res;
}
goto out_null;
}
-int scst_cmd_threads_count(void)
+int scst_global_threads_count(void)
{
int i;
/*
* Just to lower the race window, when user can get just changed value
*/
- mutex_lock(&scst_threads_info.cmd_threads_mutex);
- i = scst_threads_info.nr_cmd_threads;
- mutex_unlock(&scst_threads_info.cmd_threads_mutex);
+ mutex_lock(&scst_global_threads_mutex);
+ i = scst_nr_global_threads;
+ mutex_unlock(&scst_global_threads_mutex);
return i;
}
static void scst_threads_info_init(void)
{
- memset(&scst_threads_info, 0, sizeof(scst_threads_info));
- mutex_init(&scst_threads_info.cmd_threads_mutex);
- INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
+ mutex_init(&scst_global_threads_mutex);
+ INIT_LIST_HEAD(&scst_global_threads_list);
}
-/* scst_threads_info.cmd_threads_mutex supposed to be held */
-void __scst_del_cmd_threads(int num)
+/* scst_global_threads_mutex supposed to be held */
+void __scst_del_global_threads(int num)
{
struct scst_cmd_thread_t *ct, *tmp;
int i;
TRACE_ENTRY();
- i = scst_threads_info.nr_cmd_threads;
+ i = scst_nr_global_threads;
if (num <= 0 || num > i) {
PRINT_ERROR("can not del %d cmd threads from %d", num, i);
return;
}
- list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
+ list_for_each_entry_safe(ct, tmp, &scst_global_threads_list,
thread_list_entry) {
int res;
TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
list_del(&ct->thread_list_entry);
kfree(ct);
- scst_threads_info.nr_cmd_threads--;
+ scst_nr_global_threads--;
--num;
if (num == 0)
break;
return;
}
-/* scst_threads_info.cmd_threads_mutex supposed to be held */
-int __scst_add_cmd_threads(int num)
+/* scst_global_threads_mutex supposed to be held */
+int __scst_add_global_threads(int num)
{
int res = 0, i;
static int scst_thread_num;
goto out_error;
}
- list_add(&thr->thread_list_entry,
- &scst_threads_info.cmd_threads_list);
- scst_threads_info.nr_cmd_threads++;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
-#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
- /* See comment in scst_add_dev_threads() */
- if (scst_ioc == NULL) {
- scst_ioc = alloc_io_context(GFP_KERNEL, -1);
- TRACE_DBG("scst_ioc %p (thr %d)", scst_ioc,
- thr->cmd_thread->pid);
- }
+ list_add(&thr->thread_list_entry, &scst_global_threads_list);
+ scst_nr_global_threads++;
- put_io_context(thr->cmd_thread->io_context);
- thr->cmd_thread->io_context = ioc_task_link(scst_ioc);
- TRACE_DBG("Setting scst_ioc %p on thr %d",
- scst_ioc, thr->cmd_thread->pid);
-#endif
-#endif
wake_up_process(thr->cmd_thread);
}
res = 0;
out_error:
if (i > 0)
- __scst_del_cmd_threads(i - 1);
+ __scst_del_global_threads(i - 1);
goto out;
}
-int scst_add_cmd_threads(int num)
+int scst_add_global_threads(int num)
{
int res;
TRACE_ENTRY();
- mutex_lock(&scst_threads_info.cmd_threads_mutex);
- res = __scst_add_cmd_threads(num);
- mutex_unlock(&scst_threads_info.cmd_threads_mutex);
+ mutex_lock(&scst_global_threads_mutex);
+ res = __scst_add_global_threads(num);
+ mutex_unlock(&scst_global_threads_mutex);
TRACE_EXIT_RES(res);
return res;
}
-EXPORT_SYMBOL(scst_add_cmd_threads);
+EXPORT_SYMBOL(scst_add_global_threads);
-void scst_del_cmd_threads(int num)
+void scst_del_global_threads(int num)
{
TRACE_ENTRY();
- mutex_lock(&scst_threads_info.cmd_threads_mutex);
- __scst_del_cmd_threads(num);
- mutex_unlock(&scst_threads_info.cmd_threads_mutex);
+ mutex_lock(&scst_global_threads_mutex);
+ __scst_del_global_threads(num);
+ mutex_unlock(&scst_global_threads_mutex);
TRACE_EXIT();
return;
}
-EXPORT_SYMBOL(scst_del_cmd_threads);
+EXPORT_SYMBOL(scst_del_global_threads);
static void scst_stop_all_threads(void)
{
TRACE_ENTRY();
- mutex_lock(&scst_threads_info.cmd_threads_mutex);
- __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
- if (scst_threads_info.mgmt_cmd_thread)
- kthread_stop(scst_threads_info.mgmt_cmd_thread);
- if (scst_threads_info.mgmt_thread)
- kthread_stop(scst_threads_info.mgmt_thread);
- if (scst_threads_info.init_cmd_thread)
- kthread_stop(scst_threads_info.init_cmd_thread);
- mutex_unlock(&scst_threads_info.cmd_threads_mutex);
+ mutex_lock(&scst_global_threads_mutex);
+ __scst_del_global_threads(scst_nr_global_threads);
+ if (scst_mgmt_cmd_thread)
+ kthread_stop(scst_mgmt_cmd_thread);
+ if (scst_mgmt_thread)
+ kthread_stop(scst_mgmt_thread);
+ if (scst_init_cmd_thread)
+ kthread_stop(scst_init_cmd_thread);
+ mutex_unlock(&scst_global_threads_mutex);
TRACE_EXIT();
return;
TRACE_ENTRY();
- mutex_lock(&scst_threads_info.cmd_threads_mutex);
- res = __scst_add_cmd_threads(num);
+ mutex_lock(&scst_global_threads_mutex);
+ res = __scst_add_global_threads(num);
if (res < 0)
goto out;
- scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
+ scst_init_cmd_thread = kthread_run(scst_init_thread,
NULL, "scsi_tgt_init");
- if (IS_ERR(scst_threads_info.init_cmd_thread)) {
- res = PTR_ERR(scst_threads_info.init_cmd_thread);
+ if (IS_ERR(scst_init_cmd_thread)) {
+ res = PTR_ERR(scst_init_cmd_thread);
PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
- scst_threads_info.init_cmd_thread = NULL;
+ scst_init_cmd_thread = NULL;
goto out;
}
- scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
- NULL, "scsi_tgt_mc");
- if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
- res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
- PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
- scst_threads_info.mgmt_cmd_thread = NULL;
+ scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
+ NULL, "scsi_tm");
+ if (IS_ERR(scst_mgmt_cmd_thread)) {
+ res = PTR_ERR(scst_mgmt_cmd_thread);
+ PRINT_ERROR("kthread_create() for TM failed: %d", res);
+ scst_mgmt_cmd_thread = NULL;
goto out;
}
- scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
+ scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
NULL, "scsi_tgt_mgmt");
- if (IS_ERR(scst_threads_info.mgmt_thread)) {
- res = PTR_ERR(scst_threads_info.mgmt_thread);
+ if (IS_ERR(scst_mgmt_thread)) {
+ res = PTR_ERR(scst_mgmt_thread);
PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
- scst_threads_info.mgmt_thread = NULL;
+ scst_mgmt_thread = NULL;
goto out;
}
out:
- mutex_unlock(&scst_threads_info.cmd_threads_mutex);
+ mutex_unlock(&scst_global_threads_mutex);
TRACE_EXIT_RES(res);
return res;
}
BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
-#if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
- PRINT_WARNING("%s", "Patch export_alloc_io_context was not applied on "
+#if !defined(SCST_IO_CONTEXT)
+ PRINT_WARNING("%s", "Patch io_context was not applied on "
"your kernel. SCST will be working with not the best "
"performance.");
#endif
DEINIT_CACHEP(scst_tgtd_cachep);
DEINIT_CACHEP(scst_acgd_cachep);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
-#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
- put_io_context(scst_ioc);
-#endif
-#endif
-
PRINT_INFO("%s", "SCST unloaded");
TRACE_EXIT();
struct list_head thread_list_entry;
};
-struct scst_threads_info_t {
- struct mutex cmd_threads_mutex;
- u32 nr_cmd_threads;
- struct list_head cmd_threads_list;
- struct task_struct *init_cmd_thread;
- struct task_struct *mgmt_thread;
- struct task_struct *mgmt_cmd_thread;
-};
+static inline void scst_set_io_context(struct scst_tgt_dev *tgt_dev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
+ if (tgt_dev->dev->p_cmd_lists == &scst_main_cmd_lists) {
+ EXTRACHECKS_BUG_ON(current->io_context);
+ /*
+ * No need to call ioc_task_link(), because io_context
+ * supposed to be cleared in the end of the caller function.
+ */
+ current->io_context = tgt_dev->tgt_dev_io_ctx;
+ TRACE_DBG("io_context %p", tgt_dev->tgt_dev_io_ctx);
+ }
+#endif
+#endif
+}
+
+static inline void scst_reset_io_context(struct scst_tgt_dev *tgt_dev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+#if defined(CONFIG_BLOCK) && defined(SCST_IO_CONTEXT)
+ if (current->io_context == tgt_dev->tgt_dev_io_ctx) {
+ current->io_context = NULL;
+ TRACE_DBG("io_context %p reset", tgt_dev->tgt_dev_io_ctx);
+ }
+#endif
+#endif
+}
+
+extern struct mutex scst_global_threads_mutex;
+extern u32 scst_nr_global_threads;
-extern struct scst_threads_info_t scst_threads_info;
-extern int scst_cmd_threads_count(void);
-extern int __scst_add_cmd_threads(int num);
-extern void __scst_del_cmd_threads(int num);
+extern int scst_global_threads_count(void);
+extern int __scst_add_global_threads(int num);
+extern void __scst_del_global_threads(int num);
extern struct scst_dev_type scst_null_devtype;
int scst_cmd_thread(void *arg);
void scst_cmd_tasklet(long p);
-int scst_init_cmd_thread(void *arg);
-int scst_mgmt_cmd_thread(void *arg);
-int scst_mgmt_thread(void *arg);
+int scst_init_thread(void *arg);
+int scst_tm_thread(void *arg);
+int scst_global_mgmt_thread(void *arg);
int scst_add_dev_threads(struct scst_device *dev, int num);
void scst_del_dev_threads(struct scst_device *dev, int num);
goto out_free;
}
- mutex_lock(&scst_threads_info.cmd_threads_mutex);
+ mutex_lock(&scst_global_threads_mutex);
- oldtn = scst_threads_info.nr_cmd_threads;
+ oldtn = scst_nr_global_threads;
newtn = simple_strtoul(buffer, NULL, 0);
if (newtn <= 0) {
PRINT_ERROR("Illegal threads num value %d", newtn);
}
delta = newtn - oldtn;
if (delta < 0)
- __scst_del_cmd_threads(-delta);
+ __scst_del_global_threads(-delta);
else
- __scst_add_cmd_threads(delta);
+ __scst_add_global_threads(delta);
PRINT_INFO("Changed cmd threads num: old %d, new %d", oldtn, newtn);
out_up_thr_free:
- mutex_unlock(&scst_threads_info.cmd_threads_mutex);
+ mutex_unlock(&scst_global_threads_mutex);
mutex_unlock(&scst_proc_mutex);
{
TRACE_ENTRY();
- seq_printf(seq, "%d\n", scst_cmd_threads_count());
+ seq_printf(seq, "%d\n", scst_global_threads_count());
TRACE_EXIT();
return 0;
TRACE_ENTRY();
+ scst_set_io_context(cmd->tgt_dev);
+
cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
if (handler->exec) {
out_complete:
res = SCST_EXEC_COMPLETED;
-out:
+out_reset:
+ scst_reset_io_context(cmd->tgt_dev);
+
TRACE_EXIT();
return res;
out_restore:
/* Restore the state */
cmd->state = SCST_CMD_STATE_REAL_EXEC;
- goto out;
+ goto out_reset;
out_error:
scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
return res;
}
-int scst_init_cmd_thread(void *arg)
+int scst_init_thread(void *arg)
{
TRACE_ENTRY();
* process the commands lists.
*/
if (p_cmd_lists == &scst_main_cmd_lists) {
- sBUG_ON((scst_threads_info.nr_cmd_threads == 1) &&
+ sBUG_ON((scst_nr_global_threads == 1) &&
!list_empty(&scst_main_cmd_lists.active_cmd_list));
}
#endif
return res;
}
-int scst_mgmt_cmd_thread(void *arg)
+int scst_tm_thread(void *arg)
{
TRACE_ENTRY();
return res;
}
-int scst_mgmt_thread(void *arg)
+int scst_global_mgmt_thread(void *arg)
{
struct scst_session *sess;