/*
 * gitweb extract: mirror/scst.git — scst/src/scst_main.c
 * Commit: memory barriers cleanup; comments for them improved.
 */
1 /*
2  *  scst_main.c
3  *
4  *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2007 - 2008 CMS Distribution Limited
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/module.h>
20
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
31
32 #include "scst.h"
33 #include "scst_priv.h"
34 #include "scst_mem.h"
35
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not \
38         recommended for performance reasons. Consider changing VMSPLIT \
39         option or use a 64-bit configuration instead. See README file for \
40         details."
41 #endif
42
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44     !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied on \
46         your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined. \
47         Pass-through dev handlers will not be supported."
48 #endif
49
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
51 #if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
52 #warning "Patch export_alloc_io_context-<kernel-version>.patch was not applied \
53         on your kernel. SCST will be working with not the best performance."
54 #endif
55 #endif
56
57 /**
58  ** SCST global variables. They are all uninitialized to have their layout in
59  ** memory be exactly as specified. Otherwise compiler puts zero-initialized
60  ** variable separately from nonzero-initialized ones.
61  **/
62
63 /*
64  * All targets, devices and dev_types management is done under this mutex.
65  *
66  * It must NOT be used in any works (schedule_work(), etc.), because
67  * otherwise a deadlock (double lock, actually) is possible, e.g., with
68  * scst_user detach_tgt(), which is called under scst_mutex and calls
69  * flush_scheduled_work().
70  */
71 struct mutex scst_mutex;
72
73  /* All 3 protected by scst_mutex */
74 static struct list_head scst_template_list;
75 struct list_head scst_dev_list;
76 struct list_head scst_dev_type_list;
77
78 spinlock_t scst_main_lock;
79
80 static struct kmem_cache *scst_mgmt_cachep;
81 mempool_t *scst_mgmt_mempool;
82 static struct kmem_cache *scst_mgmt_stub_cachep;
83 mempool_t *scst_mgmt_stub_mempool;
84 static struct kmem_cache *scst_ua_cachep;
85 mempool_t *scst_ua_mempool;
86 static struct kmem_cache *scst_sense_cachep;
87 mempool_t *scst_sense_mempool;
88 struct kmem_cache *scst_tgtd_cachep;
89 struct kmem_cache *scst_sess_cachep;
90 struct kmem_cache *scst_acgd_cachep;
91
92 struct list_head scst_acg_list;
93 struct scst_acg *scst_default_acg;
94
95 spinlock_t scst_init_lock;
96 wait_queue_head_t scst_init_cmd_list_waitQ;
97 struct list_head scst_init_cmd_list;
98 unsigned int scst_init_poll_cnt;
99
100 struct kmem_cache *scst_cmd_cachep;
101
102 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
103 unsigned long scst_trace_flag;
104 #endif
105
106 unsigned long scst_flags;
107 atomic_t scst_cmd_count;
108
109 struct scst_cmd_lists scst_main_cmd_lists;
110
111 struct scst_tasklet scst_tasklets[NR_CPUS];
112
113 spinlock_t scst_mcmd_lock;
114 struct list_head scst_active_mgmt_cmd_list;
115 struct list_head scst_delayed_mgmt_cmd_list;
116 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
117
118 wait_queue_head_t scst_mgmt_waitQ;
119 spinlock_t scst_mgmt_lock;
120 struct list_head scst_sess_init_list;
121 struct list_head scst_sess_shut_list;
122
123 wait_queue_head_t scst_dev_cmd_waitQ;
124
125 static struct mutex scst_suspend_mutex;
126 /* protected by scst_suspend_mutex */
127 static struct list_head scst_cmd_lists_list;
128
129 static int scst_threads;
130 struct scst_threads_info_t scst_threads_info;
131
132 static int suspend_count;
133
134 static int scst_virt_dev_last_id; /* protected by scst_mutex */
135
136 /*
137  * This buffer and lock are intended to avoid memory allocation, which
138  * could fail in improper places.
139  */
140 spinlock_t scst_temp_UA_lock;
141 uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
142
143 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
144 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
145 static struct io_context *scst_ioc;
146 #endif
147 #endif
148
149 static unsigned int scst_max_cmd_mem;
150 unsigned int scst_max_dev_cmd_mem;
151
152 module_param_named(scst_threads, scst_threads, int, 0);
153 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
154
155 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
156 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
157         "all SCSI commands of all devices at any given time in MB");
158
159 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
160 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
161         "by all SCSI commands of a device at any given time in MB");
162
163 struct scst_dev_type scst_null_devtype = {
164         .name = "none",
165 };
166
167 static void __scst_resume_activity(void);
168
169 int __scst_register_target_template(struct scst_tgt_template *vtt,
170         const char *version)
171 {
172         int res = 0;
173         struct scst_tgt_template *t;
174         static DEFINE_MUTEX(m);
175
176         TRACE_ENTRY();
177
178         INIT_LIST_HEAD(&vtt->tgt_list);
179
180         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
181                 PRINT_ERROR("Incorrect version of target %s", vtt->name);
182                 res = -EINVAL;
183                 goto out_err;
184         }
185
186         if (!vtt->detect) {
187                 PRINT_ERROR("Target driver %s doesn't have a "
188                         "detect() method.", vtt->name);
189                 res = -EINVAL;
190                 goto out_err;
191         }
192
193         if (!vtt->release) {
194                 PRINT_ERROR("Target driver %s doesn't have a "
195                         "release() method.", vtt->name);
196                 res = -EINVAL;
197                 goto out_err;
198         }
199
200         if (!vtt->xmit_response) {
201                 PRINT_ERROR("Target driver %s doesn't have a "
202                         "xmit_response() method.", vtt->name);
203                 res = -EINVAL;
204                 goto out_err;
205         }
206
207         if (vtt->threads_num < 0) {
208                 PRINT_ERROR("Wrong threads_num value %d for "
209                         "target \"%s\"", vtt->threads_num,
210                         vtt->name);
211                 res = -EINVAL;
212                 goto out_err;
213         }
214
215         if (!vtt->no_proc_entry) {
216                 res = scst_build_proc_target_dir_entries(vtt);
217                 if (res < 0)
218                         goto out_err;
219         }
220
221         if (vtt->rdy_to_xfer == NULL)
222                 vtt->rdy_to_xfer_atomic = 1;
223
224         if (mutex_lock_interruptible(&m) != 0)
225                 goto out_err;
226
227         if (mutex_lock_interruptible(&scst_mutex) != 0)
228                 goto out_m_up;
229         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
230                 if (strcmp(t->name, vtt->name) == 0) {
231                         PRINT_ERROR("Target driver %s already registered",
232                                 vtt->name);
233                         mutex_unlock(&scst_mutex);
234                         goto out_cleanup;
235                 }
236         }
237         mutex_unlock(&scst_mutex);
238
239         TRACE_DBG("%s", "Calling target driver's detect()");
240         res = vtt->detect(vtt);
241         TRACE_DBG("Target driver's detect() returned %d", res);
242         if (res < 0) {
243                 PRINT_ERROR("%s", "The detect() routine failed");
244                 res = -EINVAL;
245                 goto out_cleanup;
246         }
247
248         mutex_lock(&scst_mutex);
249         list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
250         mutex_unlock(&scst_mutex);
251
252         res = 0;
253
254         PRINT_INFO("Target template %s registered successfully", vtt->name);
255
256         mutex_unlock(&m);
257
258 out:
259         TRACE_EXIT_RES(res);
260         return res;
261
262 out_cleanup:
263         scst_cleanup_proc_target_dir_entries(vtt);
264
265 out_m_up:
266         mutex_unlock(&m);
267
268 out_err:
269         PRINT_ERROR("Failed to register target template %s", vtt->name);
270         goto out;
271 }
272 EXPORT_SYMBOL(__scst_register_target_template);
273
274 void scst_unregister_target_template(struct scst_tgt_template *vtt)
275 {
276         struct scst_tgt *tgt;
277         struct scst_tgt_template *t;
278         int found = 0;
279
280         TRACE_ENTRY();
281
282         mutex_lock(&scst_mutex);
283
284         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
285                 if (strcmp(t->name, vtt->name) == 0) {
286                         found = 1;
287                         break;
288                 }
289         }
290         if (!found) {
291                 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
292                 goto out_up;
293         }
294
295 restart:
296         list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
297                 mutex_unlock(&scst_mutex);
298                 scst_unregister(tgt);
299                 mutex_lock(&scst_mutex);
300                 goto restart;
301         }
302         list_del(&vtt->scst_template_list_entry);
303
304         PRINT_INFO("Target template %s unregistered successfully", vtt->name);
305
306 out_up:
307         mutex_unlock(&scst_mutex);
308
309         scst_cleanup_proc_target_dir_entries(vtt);
310
311         TRACE_EXIT();
312         return;
313 }
314 EXPORT_SYMBOL(scst_unregister_target_template);
315
316 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
317         const char *target_name)
318 {
319         struct scst_tgt *tgt;
320         int rc = 0;
321
322         TRACE_ENTRY();
323
324         tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
325         if (tgt == NULL) {
326                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
327                 rc = -ENOMEM;
328                 goto out_err;
329         }
330
331         INIT_LIST_HEAD(&tgt->sess_list);
332         init_waitqueue_head(&tgt->unreg_waitQ);
333         tgt->tgtt = vtt;
334         tgt->sg_tablesize = vtt->sg_tablesize;
335         spin_lock_init(&tgt->tgt_lock);
336         INIT_LIST_HEAD(&tgt->retry_cmd_list);
337         atomic_set(&tgt->finished_cmds, 0);
338         init_timer(&tgt->retry_timer);
339         tgt->retry_timer.data = (unsigned long)tgt;
340         tgt->retry_timer.function = scst_tgt_retry_timer_fn;
341
342         rc = scst_suspend_activity(true);
343         if (rc != 0)
344                 goto out_free_tgt_err;
345
346         if (mutex_lock_interruptible(&scst_mutex) != 0) {
347                 rc = -EINTR;
348                 goto out_resume_free;
349         }
350
351         if (target_name != NULL) {
352                 int len = strlen(target_name) + 1 +
353                         strlen(SCST_DEFAULT_ACG_NAME) + 1;
354
355                 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
356                 if (tgt->default_group_name == NULL) {
357                         TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
358                                 "group name failed");
359                         rc = -ENOMEM;
360                         goto out_unlock_resume;
361                 }
362                 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
363                         target_name);
364         }
365
366         rc = scst_build_proc_target_entries(tgt);
367         if (rc < 0)
368                 goto out_free_name;
369         else
370                 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
371
372         mutex_unlock(&scst_mutex);
373         scst_resume_activity();
374
375         PRINT_INFO("Target %s (%p) for template %s registered successfully",
376                 target_name, tgt, vtt->name);
377
378 out:
379         TRACE_EXIT();
380         return tgt;
381
382 out_free_name:
383         kfree(tgt->default_group_name);
384
385 out_unlock_resume:
386         mutex_unlock(&scst_mutex);
387
388 out_resume_free:
389         scst_resume_activity();
390
391 out_free_tgt_err:
392         kfree(tgt);
393         tgt = NULL;
394
395 out_err:
396         PRINT_ERROR("Failed to register target %s for template %s (error %d)",
397                 target_name, vtt->name, rc);
398         goto out;
399 }
400 EXPORT_SYMBOL(scst_register);
401
402 static inline int test_sess_list(struct scst_tgt *tgt)
403 {
404         int res;
405         mutex_lock(&scst_mutex);
406         res = list_empty(&tgt->sess_list);
407         mutex_unlock(&scst_mutex);
408         return res;
409 }
410
411 void scst_unregister(struct scst_tgt *tgt)
412 {
413         struct scst_session *sess;
414         struct scst_tgt_template *vtt = tgt->tgtt;
415
416         TRACE_ENTRY();
417
418         TRACE_DBG("%s", "Calling target driver's release()");
419         tgt->tgtt->release(tgt);
420         TRACE_DBG("%s", "Target driver's release() returned");
421
422         mutex_lock(&scst_mutex);
423         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
424                 sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
425         }
426         mutex_unlock(&scst_mutex);
427
428         TRACE_DBG("%s", "Waiting for sessions shutdown");
429         wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
430         TRACE_DBG("%s", "wait_event() returned");
431
432         scst_suspend_activity(false);
433         mutex_lock(&scst_mutex);
434
435         list_del(&tgt->tgt_list_entry);
436
437         scst_cleanup_proc_target_entries(tgt);
438
439         kfree(tgt->default_group_name);
440
441         mutex_unlock(&scst_mutex);
442         scst_resume_activity();
443
444         del_timer_sync(&tgt->retry_timer);
445
446         PRINT_INFO("Target %p for template %s unregistered successfully",
447                 tgt, vtt->name);
448
449         kfree(tgt);
450
451         TRACE_EXIT();
452         return;
453 }
454 EXPORT_SYMBOL(scst_unregister);
455
456 static int scst_susp_wait(bool interruptible)
457 {
458         int res = 0;
459
460         TRACE_ENTRY();
461
462         if (interruptible) {
463                 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
464                         (atomic_read(&scst_cmd_count) == 0),
465                         SCST_SUSPENDING_TIMEOUT);
466                 if (res <= 0) {
467                         __scst_resume_activity();
468                         if (res == 0)
469                                 res = -EBUSY;
470                 } else
471                         res = 0;
472         } else
473                 wait_event(scst_dev_cmd_waitQ,
474                            atomic_read(&scst_cmd_count) == 0);
475
476         TRACE_MGMT_DBG("wait_event() returned %d", res);
477
478         TRACE_EXIT_RES(res);
479         return res;
480 }
481
/**
 * scst_suspend_activity() - globally suspend SCST command processing
 * @interruptible: if true, locking/waiting may be interrupted by a signal
 *	and the function can fail (-EINTR, -EBUSY on timeout); if false,
 *	all waits are uninterruptible and the suspend always succeeds.
 *
 * Suspends nest: only the first caller performs the actual suspend, later
 * callers just increment suspend_count. Every successful call must be
 * paired with scst_resume_activity(). Returns 0 or a negative error code.
 */
int scst_suspend_activity(bool interruptible)
{
	int res = 0;
	bool rep = false;	/* whether the "waiting..." notice was printed */

	TRACE_ENTRY();

	if (interruptible) {
		if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
			res = -EINTR;
			goto out;
		}
	} else
		mutex_lock(&scst_suspend_mutex);

	TRACE_MGMT_DBG("suspend_count %d", suspend_count);
	suspend_count++;
	if (suspend_count > 1)
		goto out_up;	/* already suspended by an earlier caller */

	set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	/*
	 * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
	 * ordered with scst_cmd_count. Otherwise lockless logic in
	 * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
	 */
	smp_mb__after_set_bit();

	/*
	 * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
	 * information about scst_user behavior.
	 *
	 * ToDo: make the global suspending unneeded (switch to per-device
	 * reference counting? That would mean to switch off from lockless
	 * implementation of scst_translate_lun().. )
	 */

	if (atomic_read(&scst_cmd_count) != 0) {
		PRINT_INFO("Waiting for %d active commands to complete... This "
			"might take few minutes for disks or few hours for "
			"tapes, if you use long executed commands, like "
			"REWIND or FORMAT. In case, if you have a hung user "
			"space device (i.e. made using scst_user module) not "
			"responding to any commands, if might take virtually "
			"forever until the corresponding user space "
			"program recovers and starts responding or gets "
			"killed.", atomic_read(&scst_cmd_count));
		rep = true;
	}

	/* First drain: commands that started before SUSPENDING was visible */
	res = scst_susp_wait(interruptible);
	if (res != 0)
		goto out_clear;

	clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	/* See comment about smp_mb() above */
	smp_mb__after_clear_bit();

	TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
		atomic_read(&scst_cmd_count));

	/* Second drain: commands that raced with the first wait */
	res = scst_susp_wait(interruptible);
	if (res != 0)
		goto out_clear;

	if (rep)
		PRINT_INFO("%s", "All active commands completed");

out_up:
	mutex_unlock(&scst_suspend_mutex);

out:
	TRACE_EXIT_RES(res);
	return res;

out_clear:
	/* Wait failed: scst_susp_wait() already rolled suspend_count back */
	clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	/* See comment about smp_mb() above */
	smp_mb__after_clear_bit();
	goto out_up;
}
EXPORT_SYMBOL(scst_suspend_activity);
565
566 static void __scst_resume_activity(void)
567 {
568         struct scst_cmd_lists *l;
569
570         TRACE_ENTRY();
571
572         suspend_count--;
573         TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
574         if (suspend_count > 0)
575                 goto out;
576
577         clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
578         /*
579          * The barrier is needed to make sure all woken up threads see the
580          * cleared flag. Not sure if it's really needed, but let's be safe.
581          */
582         smp_mb__after_clear_bit();
583
584         list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
585                 wake_up_all(&l->cmd_list_waitQ);
586         }
587         wake_up_all(&scst_init_cmd_list_waitQ);
588
589         spin_lock_irq(&scst_mcmd_lock);
590         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
591                 struct scst_mgmt_cmd *m;
592                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
593                                 mgmt_cmd_list_entry);
594                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
595                         "mgmt cmd list", m);
596                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
597         }
598         spin_unlock_irq(&scst_mcmd_lock);
599         wake_up_all(&scst_mgmt_cmd_list_waitQ);
600
601 out:
602         TRACE_EXIT();
603         return;
604 }
605
606 void scst_resume_activity(void)
607 {
608         TRACE_ENTRY();
609
610         mutex_lock(&scst_suspend_mutex);
611         __scst_resume_activity();
612         mutex_unlock(&scst_suspend_mutex);
613
614         TRACE_EXIT();
615         return;
616 }
617 EXPORT_SYMBOL(scst_resume_activity);
618
/*
 * Attach the SCST mid-level to a newly discovered SCSI device @scsidp and,
 * if a dev handler of matching SCSI type is already registered, assign it.
 * Runs with activity suspended and scst_mutex held to keep the device and
 * handler lists stable. Returns 0 on success or a negative error code.
 */
static int scst_register_device(struct scsi_device *scsidp)
{
	int res = 0;
	struct scst_device *dev;
	struct scst_dev_type *dt;

	TRACE_ENTRY();

	res = scst_suspend_activity(true);
	if (res != 0)
		goto out_err;

	if (mutex_lock_interruptible(&scst_mutex) != 0) {
		res = -EINTR;
		goto out_resume;
	}

	res = scst_alloc_device(GFP_KERNEL, &dev);
	if (res != 0)
		goto out_up;

	dev->type = scsidp->type;

	/* presumably reserves SCST_MAJOR for this device — TODO confirm */
	dev->rq_disk = alloc_disk(1);
	if (dev->rq_disk == NULL) {
		res = -ENOMEM;
		goto out_free_dev;
	}
	dev->rq_disk->major = SCST_MAJOR;

	dev->scsi_dev = scsidp;

	list_add_tail(&dev->dev_list_entry, &scst_dev_list);

	/* Assign the first registered handler matching the SCSI type, if any */
	list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
		if (dt->type == scsidp->type) {
			res = scst_assign_dev_handler(dev, dt);
			if (res != 0)
				goto out_free;
			break;
		}
	}

out_up:
	mutex_unlock(&scst_mutex);

out_resume:
	scst_resume_activity();

out_err:
	/* NOTE: reached on success too; res selects which message to print */
	if (res == 0) {
		PRINT_INFO("Attached SCSI target mid-level at "
		    "scsi%d, channel %d, id %d, lun %d, type %d",
		    scsidp->host->host_no, scsidp->channel, scsidp->id,
		    scsidp->lun, scsidp->type);
	} else {
		PRINT_ERROR("Failed to attach SCSI target mid-level "
		    "at scsi%d, channel %d, id %d, lun %d, type %d",
		    scsidp->host->host_no, scsidp->channel, scsidp->id,
		    scsidp->lun, scsidp->type);
	}

	TRACE_EXIT_RES(res);
	return res;

out_free:
	list_del(&dev->dev_list_entry);
	put_disk(dev->rq_disk);

out_free_dev:
	scst_free_device(dev);
	goto out_up;
}
692
693 static void scst_unregister_device(struct scsi_device *scsidp)
694 {
695         struct scst_device *d, *dev = NULL;
696         struct scst_acg_dev *acg_dev, *aa;
697
698         TRACE_ENTRY();
699
700         scst_suspend_activity(false);
701         mutex_lock(&scst_mutex);
702
703         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
704                 if (d->scsi_dev == scsidp) {
705                         dev = d;
706                         TRACE_DBG("Target device %p found", dev);
707                         break;
708                 }
709         }
710         if (dev == NULL) {
711                 PRINT_ERROR("%s", "Target device not found");
712                 goto out_unblock;
713         }
714
715         list_del(&dev->dev_list_entry);
716
717         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
718                                  dev_acg_dev_list_entry)
719         {
720                 scst_acg_remove_dev(acg_dev->acg, dev);
721         }
722
723         scst_assign_dev_handler(dev, &scst_null_devtype);
724
725         put_disk(dev->rq_disk);
726         scst_free_device(dev);
727
728         PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
729                 "id %d, lun %d, type %d", scsidp->host->host_no,
730                 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
731
732 out_unblock:
733         mutex_unlock(&scst_mutex);
734         scst_resume_activity();
735
736         TRACE_EXIT();
737         return;
738 }
739
740 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
741 {
742         int res = 0;
743
744         if (dev_handler->parse == NULL) {
745                 PRINT_ERROR("scst dev_type driver %s doesn't have a "
746                         "parse() method.", dev_handler->name);
747                 res = -EINVAL;
748                 goto out;
749         }
750
751         if (dev_handler->exec == NULL) {
752 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
753                 dev_handler->exec_atomic = 1;
754 #else
755                 dev_handler->exec_atomic = 0;
756 #endif
757         }
758
759         if (dev_handler->dev_done == NULL)
760                 dev_handler->dev_done_atomic = 1;
761
762 out:
763         TRACE_EXIT_RES(res);
764         return res;
765 }
766
/**
 * scst_register_virtual_device() - register a virtual (non-SCSI) device
 * @dev_handler: dev handler the new device is attached to; must be non-NULL
 * @dev_name: device name; must be non-NULL, not copied — the caller must
 *	keep it valid for the device's lifetime
 *
 * Returns the assigned virtual device ID on success (success is detected
 * as res > 0 below, so scst_virt_dev_last_id is assumed to start above 0 —
 * TODO confirm against module init) or a negative error code.
 */
int scst_register_virtual_device(struct scst_dev_type *dev_handler,
	const char *dev_name)
{
	int res, rc;
	struct scst_device *dev = NULL;

	TRACE_ENTRY();

	if (dev_handler == NULL) {
		PRINT_ERROR("%s: valid device handler must be supplied",
			    __func__);
		res = -EINVAL;
		goto out;
	}

	if (dev_name == NULL) {
		PRINT_ERROR("%s: device name must be non-NULL", __func__);
		res = -EINVAL;
		goto out;
	}

	res = scst_dev_handler_check(dev_handler);
	if (res != 0)
		goto out;

	res = scst_suspend_activity(true);
	if (res != 0)
		goto out;

	if (mutex_lock_interruptible(&scst_mutex) != 0) {
		res = -EINTR;
		goto out_resume;
	}

	res = scst_alloc_device(GFP_KERNEL, &dev);
	if (res != 0)
		goto out_up;

	dev->type = dev_handler->type;
	dev->scsi_dev = NULL;
	dev->virt_name = dev_name;
	dev->virt_id = scst_virt_dev_last_id++;	/* protected by scst_mutex */

	list_add_tail(&dev->dev_list_entry, &scst_dev_list);

	/* Tentative success value; overwritten if handler assignment fails */
	res = dev->virt_id;

	rc = scst_assign_dev_handler(dev, dev_handler);
	if (rc != 0) {
		res = rc;
		goto out_free_del;
	}

out_up:
	mutex_unlock(&scst_mutex);

out_resume:
	scst_resume_activity();

out:
	if (res > 0) {
		PRINT_INFO("Attached SCSI target mid-level to virtual "
		    "device %s (id %d)", dev_name, dev->virt_id);
	} else {
		PRINT_INFO("Failed to attach SCSI target mid-level to "
		    "virtual device %s", dev_name);
	}

	TRACE_EXIT_RES(res);
	return res;

out_free_del:
	list_del(&dev->dev_list_entry);
	scst_free_device(dev);
	goto out_up;
}
EXPORT_SYMBOL(scst_register_virtual_device);
844
845 void scst_unregister_virtual_device(int id)
846 {
847         struct scst_device *d, *dev = NULL;
848         struct scst_acg_dev *acg_dev, *aa;
849
850         TRACE_ENTRY();
851
852         scst_suspend_activity(false);
853         mutex_lock(&scst_mutex);
854
855         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
856                 if (d->virt_id == id) {
857                         dev = d;
858                         TRACE_DBG("Target device %p (id %d) found", dev, id);
859                         break;
860                 }
861         }
862         if (dev == NULL) {
863                 PRINT_ERROR("Target virtual device (id %d) not found", id);
864                 goto out_unblock;
865         }
866
867         list_del(&dev->dev_list_entry);
868
869         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
870                                  dev_acg_dev_list_entry)
871         {
872                 scst_acg_remove_dev(acg_dev->acg, dev);
873         }
874
875         scst_assign_dev_handler(dev, &scst_null_devtype);
876
877         PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
878                 "(id %d)", dev->virt_name, dev->virt_id);
879
880         scst_free_device(dev);
881
882 out_unblock:
883         mutex_unlock(&scst_mutex);
884         scst_resume_activity();
885
886         TRACE_EXIT();
887         return;
888 }
889 EXPORT_SYMBOL(scst_unregister_virtual_device);
890
891 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
892         const char *version)
893 {
894         struct scst_dev_type *dt;
895         struct scst_device *dev;
896         int res;
897         int exist;
898
899         TRACE_ENTRY();
900
901         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
902                 PRINT_ERROR("Incorrect version of dev handler %s",
903                         dev_type->name);
904                 res = -EINVAL;
905                 goto out_error;
906         }
907
908         res = scst_dev_handler_check(dev_type);
909         if (res != 0)
910                 goto out_error;
911
912 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
913     !defined(CONFIG_SCST_STRICT_SERIALIZING)
914         if (dev_type->exec == NULL) {
915                 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
916                         "supported. Consider applying on your kernel patch "
917                         "scst_exec_req_fifo-<kernel-version>.patch or define "
918                         "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
919                 res = -EINVAL;
920                 goto out;
921         }
922 #endif
923
924         res = scst_suspend_activity(true);
925         if (res != 0)
926                 goto out_error;
927
928         if (mutex_lock_interruptible(&scst_mutex) != 0) {
929                 res = -EINTR;
930                 goto out_err_res;
931         }
932
933         exist = 0;
934         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
935                 if (strcmp(dt->name, dev_type->name) == 0) {
936                         PRINT_ERROR("Device type handler \"%s\" already "
937                                 "exist", dt->name);
938                         exist = 1;
939                         break;
940                 }
941         }
942         if (exist)
943                 goto out_up;
944
945         res = scst_build_proc_dev_handler_dir_entries(dev_type);
946         if (res < 0)
947                 goto out_up;
948
949         list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
950
951         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
952                 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
953                         continue;
954                 if (dev->scsi_dev->type == dev_type->type)
955                         scst_assign_dev_handler(dev, dev_type);
956         }
957
958         mutex_unlock(&scst_mutex);
959         scst_resume_activity();
960
961         if (res == 0) {
962                 PRINT_INFO("Device handler \"%s\" for type %d registered "
963                         "successfully", dev_type->name, dev_type->type);
964         }
965
966 out:
967         TRACE_EXIT_RES(res);
968         return res;
969
970 out_up:
971         mutex_unlock(&scst_mutex);
972
973 out_err_res:
974         scst_resume_activity();
975
976 out_error:
977         PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
978                 dev_type->name, dev_type->type);
979         goto out;
980 }
981 EXPORT_SYMBOL(__scst_register_dev_driver);
982
/*
 * scst_unregister_dev_driver() - unregister a pass-through dev handler.
 *
 * Suspends SCST activity and, under scst_mutex, looks the handler up by
 * name in scst_dev_type_list.  If found, every device currently bound to
 * it is reassigned to scst_null_devtype, then the handler is removed from
 * the list and its /proc entries are cleaned up.
 *
 * Note: the scst_suspend_activity(false) return value is ignored;
 * with "interruptible" == false the call is not expected to fail here.
 */
void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
{
	struct scst_device *dev;
	struct scst_dev_type *dt;
	int found = 0;

	TRACE_ENTRY();

	scst_suspend_activity(false);
	mutex_lock(&scst_mutex);

	/* The handler must have been registered under this name */
	list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
		if (strcmp(dt->name, dev_type->name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found) {
		PRINT_ERROR("Dev handler \"%s\" isn't registered",
			dev_type->name);
		goto out_up;
	}

	/* Detach the handler from every device still using it */
	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		if (dev->handler == dev_type) {
			scst_assign_dev_handler(dev, &scst_null_devtype);
			TRACE_DBG("Dev handler removed from device %p", dev);
		}
	}

	list_del(&dev_type->dev_type_list_entry);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	/* /proc cleanup is done outside scst_mutex */
	scst_cleanup_proc_dev_handler_dir_entries(dev_type);

	PRINT_INFO("Device handler \"%s\" for type %d unloaded",
		   dev_type->name, dev_type->type);

out:
	TRACE_EXIT();
	return;

out_up:
	mutex_unlock(&scst_mutex);
	scst_resume_activity();
	goto out;
}
EXPORT_SYMBOL(scst_unregister_dev_driver);
1033
1034 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1035         const char *version)
1036 {
1037         int res;
1038
1039         TRACE_ENTRY();
1040
1041         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1042                 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1043                         dev_type->name);
1044                 res = -EINVAL;
1045                 goto out_err;
1046         }
1047
1048         res = scst_dev_handler_check(dev_type);
1049         if (res != 0)
1050                 goto out_err;
1051
1052         if (!dev_type->no_proc) {
1053                 res = scst_build_proc_dev_handler_dir_entries(dev_type);
1054                 if (res < 0)
1055                         goto out_err;
1056         }
1057
1058         if (dev_type->type != -1) {
1059                 PRINT_INFO("Virtual device handler %s for type %d "
1060                         "registered successfully", dev_type->name,
1061                         dev_type->type);
1062         } else {
1063                 PRINT_INFO("Virtual device handler \"%s\" registered "
1064                         "successfully", dev_type->name);
1065         }
1066
1067 out:
1068         TRACE_EXIT_RES(res);
1069         return res;
1070
1071 out_err:
1072         PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1073                 dev_type->name);
1074         goto out;
1075 }
1076 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
1077
1078 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1079 {
1080         TRACE_ENTRY();
1081
1082         if (!dev_type->no_proc)
1083                 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1084
1085         PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1086
1087         TRACE_EXIT();
1088         return;
1089 }
1090 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1091
/* Called under scst_mutex */
/*
 * scst_add_dev_threads() - create 'num' per-device command threads.
 *
 * New threads process dev->cmd_lists and are named
 * "<handler>d<dev_num>_<index>", where <index> continues from the number
 * of threads already on dev->threads_list.
 *
 * Returns 0 on success or a negative error code.  On failure, threads
 * created by earlier iterations of this call remain on
 * dev->threads_list; the caller is expected to tear them down.
 */
int scst_add_dev_threads(struct scst_device *dev, int num)
{
	int i, res = 0;
	int n = 0;	/* numeric suffix for the new threads' names */
	struct scst_cmd_thread_t *thr;
	struct io_context *ioc = NULL;
	char nm[12];	/* truncated copy of the handler name */

	TRACE_ENTRY();

	/* Count existing threads so naming continues where it left off */
	list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
		n++;
	}

	for (i = 0; i < num; i++) {
		thr = kmalloc(sizeof(*thr), GFP_KERNEL);
		if (!thr) {
			res = -ENOMEM;
			PRINT_ERROR("Failed to allocate thr %d", res);
			goto out;
		}
		strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
		nm[ARRAY_SIZE(nm)-1] = '\0';
		thr->cmd_thread = kthread_create(scst_cmd_thread,
			&dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
		if (IS_ERR(thr->cmd_thread)) {
			res = PTR_ERR(thr->cmd_thread);
			PRINT_ERROR("kthread_create() failed: %d", res);
			kfree(thr);
			goto out;
		}

		list_add(&thr->thread_list_entry, &dev->threads_list);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
		/*
		 * It would be better to keep io_context in tgt_dev and
		 * dynamically assign it to the current thread on the IO
		 * submission time to let each initiator have own
		 * io_context. But, unfortunately, CFQ doesn't
		 * support if a task has dynamically switched
		 * io_context, it oopses on BUG_ON(!cic->dead_key) in
		 * cic_free_func(). So, we have to have the same io_context
		 * for all initiators.
		 */
		if (ioc == NULL) {
			/*
			 * NOTE(review): alloc_io_context() can return NULL;
			 * that NULL is then passed to ioc_task_link() below —
			 * confirm this is safe on all targeted kernels.
			 */
			ioc = alloc_io_context(GFP_KERNEL, -1);
			TRACE_DBG("ioc %p (thr %d)", ioc, thr->cmd_thread->pid);
		}

		/* Drop the thread's default io_context, share ours instead */
		put_io_context(thr->cmd_thread->io_context);
		thr->cmd_thread->io_context = ioc_task_link(ioc);
		TRACE_DBG("Setting ioc %p on thr %d", ioc,
			thr->cmd_thread->pid);
#endif
#endif
		wake_up_process(thr->cmd_thread);
	}

out:
	/* Each thread holds its own reference; drop the local one */
	put_io_context(ioc);

	TRACE_EXIT_RES(res);
	return res;
}
1159
1160 /* Called under scst_mutex and suspended activity */
1161 static int scst_create_dev_threads(struct scst_device *dev)
1162 {
1163         int res = 0;
1164         int threads_num;
1165
1166         TRACE_ENTRY();
1167
1168         if (dev->handler->threads_num <= 0)
1169                 goto out;
1170
1171         threads_num = dev->handler->threads_num;
1172
1173         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1174         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1175         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1176
1177         res = scst_add_dev_threads(dev, threads_num);
1178         if (res != 0)
1179                 goto out;
1180
1181         mutex_lock(&scst_suspend_mutex);
1182         list_add_tail(&dev->cmd_lists.lists_list_entry,
1183                 &scst_cmd_lists_list);
1184         mutex_unlock(&scst_suspend_mutex);
1185
1186         dev->p_cmd_lists = &dev->cmd_lists;
1187
1188 out:
1189         TRACE_EXIT_RES(res);
1190         return res;
1191 }
1192
1193 /* Called under scst_mutex */
1194 void scst_del_dev_threads(struct scst_device *dev, int num)
1195 {
1196         struct scst_cmd_thread_t *ct, *tmp;
1197         int i = 0;
1198
1199         TRACE_ENTRY();
1200
1201         list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1202                                 thread_list_entry) {
1203                 int rc = kthread_stop(ct->cmd_thread);
1204                 if (rc < 0)
1205                         TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1206                 list_del(&ct->thread_list_entry);
1207                 kfree(ct);
1208                 if ((num > 0) && (++i >= num))
1209                         break;
1210         }
1211
1212         TRACE_EXIT();
1213         return;
1214 }
1215
1216 /* Called under scst_mutex and suspended activity */
1217 static void scst_stop_dev_threads(struct scst_device *dev)
1218 {
1219         TRACE_ENTRY();
1220
1221         if (list_empty(&dev->threads_list))
1222                 goto out;
1223
1224         scst_del_dev_threads(dev, -1);
1225
1226         if (dev->p_cmd_lists == &dev->cmd_lists) {
1227                 mutex_lock(&scst_suspend_mutex);
1228                 list_del(&dev->cmd_lists.lists_list_entry);
1229                 mutex_unlock(&scst_suspend_mutex);
1230         }
1231
1232 out:
1233         TRACE_EXIT();
1234         return;
1235 }
1236
1237 /* The activity supposed to be suspended and scst_mutex held */
1238 int scst_assign_dev_handler(struct scst_device *dev,
1239         struct scst_dev_type *handler)
1240 {
1241         int res = 0;
1242         struct scst_tgt_dev *tgt_dev;
1243         LIST_HEAD(attached_tgt_devs);
1244
1245         TRACE_ENTRY();
1246
1247         sBUG_ON(handler == NULL);
1248
1249         if (dev->handler == handler)
1250                 goto out;
1251
1252         if (dev->handler && dev->handler->detach_tgt) {
1253                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1254                                 dev_tgt_dev_list_entry) {
1255                         TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1256                                 tgt_dev);
1257                         dev->handler->detach_tgt(tgt_dev);
1258                         TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1259                 }
1260         }
1261
1262         if (dev->handler && dev->handler->detach) {
1263                 TRACE_DBG("%s", "Calling dev handler's detach()");
1264                 dev->handler->detach(dev);
1265                 TRACE_DBG("%s", "Old handler's detach() returned");
1266         }
1267
1268         scst_stop_dev_threads(dev);
1269
1270         dev->handler = handler;
1271
1272         if (handler) {
1273                 res = scst_create_dev_threads(dev);
1274                 if (res != 0)
1275                         goto out_null;
1276         }
1277
1278         if (handler && handler->attach) {
1279                 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1280                 res = handler->attach(dev);
1281                 TRACE_DBG("New dev handler's attach() returned %d", res);
1282                 if (res != 0) {
1283                         PRINT_ERROR("New device handler's %s attach() "
1284                                 "failed: %d", handler->name, res);
1285                 }
1286                 goto out_thr_null;
1287         }
1288
1289         if (handler && handler->attach_tgt) {
1290                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1291                                 dev_tgt_dev_list_entry) {
1292                         TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1293                                 tgt_dev);
1294                         res = handler->attach_tgt(tgt_dev);
1295                         TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1296                         if (res != 0) {
1297                                 PRINT_ERROR("Device handler's %s attach_tgt() "
1298                                     "failed: %d", handler->name, res);
1299                                 goto out_err_detach_tgt;
1300                         }
1301                         list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1302                                 &attached_tgt_devs);
1303                 }
1304         }
1305
1306 out_thr_null:
1307         if (res != 0)
1308                 scst_stop_dev_threads(dev);
1309
1310 out_null:
1311         if (res != 0)
1312                 dev->handler = &scst_null_devtype;
1313
1314 out:
1315         TRACE_EXIT_RES(res);
1316         return res;
1317
1318 out_err_detach_tgt:
1319         if (handler && handler->detach_tgt) {
1320                 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1321                                  extra_tgt_dev_list_entry)
1322                 {
1323                         TRACE_DBG("Calling handler's detach_tgt(%p)",
1324                                 tgt_dev);
1325                         handler->detach_tgt(tgt_dev);
1326                         TRACE_DBG("%s", "Handler's detach_tgt() returned");
1327                 }
1328         }
1329         if (handler && handler->detach) {
1330                 TRACE_DBG("%s", "Calling handler's detach()");
1331                 handler->detach(dev);
1332                 TRACE_DBG("%s", "Handler's detach() returned");
1333         }
1334         goto out_null;
1335 }
1336
1337 int scst_cmd_threads_count(void)
1338 {
1339         int i;
1340
1341         /*
1342          * Just to lower the race window, when user can get just changed value
1343          */
1344         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1345         i = scst_threads_info.nr_cmd_threads;
1346         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1347         return i;
1348 }
1349
/* One-time initialization of the global scst_threads_info state. */
static void scst_threads_info_init(void)
{
	/* Zero everything first, then set up the sync primitives */
	memset(&scst_threads_info, 0, sizeof(scst_threads_info));
	mutex_init(&scst_threads_info.cmd_threads_mutex);
	INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
}
1356
1357 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1358 void __scst_del_cmd_threads(int num)
1359 {
1360         struct scst_cmd_thread_t *ct, *tmp;
1361         int i;
1362
1363         TRACE_ENTRY();
1364
1365         i = scst_threads_info.nr_cmd_threads;
1366         if (num <= 0 || num > i) {
1367                 PRINT_ERROR("can not del %d cmd threads from %d", num, i);
1368                 return;
1369         }
1370
1371         list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1372                                 thread_list_entry) {
1373                 int res;
1374
1375                 res = kthread_stop(ct->cmd_thread);
1376                 if (res < 0)
1377                         TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1378                 list_del(&ct->thread_list_entry);
1379                 kfree(ct);
1380                 scst_threads_info.nr_cmd_threads--;
1381                 --num;
1382                 if (num == 0)
1383                         break;
1384         }
1385
1386         TRACE_EXIT();
1387         return;
1388 }
1389
1390 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1391 int __scst_add_cmd_threads(int num)
1392 {
1393         int res = 0, i;
1394         static int scst_thread_num;
1395
1396         TRACE_ENTRY();
1397
1398         for (i = 0; i < num; i++) {
1399                 struct scst_cmd_thread_t *thr;
1400
1401                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1402                 if (!thr) {
1403                         res = -ENOMEM;
1404                         PRINT_ERROR("fail to allocate thr %d", res);
1405                         goto out_error;
1406                 }
1407                 thr->cmd_thread = kthread_create(scst_cmd_thread,
1408                         &scst_main_cmd_lists, "scsi_tgt%d",
1409                         scst_thread_num++);
1410                 if (IS_ERR(thr->cmd_thread)) {
1411                         res = PTR_ERR(thr->cmd_thread);
1412                         PRINT_ERROR("kthread_create() failed: %d", res);
1413                         kfree(thr);
1414                         goto out_error;
1415                 }
1416
1417                 list_add(&thr->thread_list_entry,
1418                         &scst_threads_info.cmd_threads_list);
1419                 scst_threads_info.nr_cmd_threads++;
1420
1421 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1422 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1423                 /* See comment in scst_add_dev_threads() */
1424                 if (scst_ioc == NULL) {
1425                         scst_ioc = alloc_io_context(GFP_KERNEL, -1);
1426                         TRACE_DBG("scst_ioc %p (thr %d)", scst_ioc,
1427                                 thr->cmd_thread->pid);
1428                 }
1429
1430                 put_io_context(thr->cmd_thread->io_context);
1431                 thr->cmd_thread->io_context = ioc_task_link(scst_ioc);
1432                 TRACE_DBG("Setting scst_ioc %p on thr %d",
1433                         scst_ioc, thr->cmd_thread->pid);
1434 #endif
1435 #endif
1436                 wake_up_process(thr->cmd_thread);
1437         }
1438         res = 0;
1439
1440 out:
1441         TRACE_EXIT_RES(res);
1442         return res;
1443
1444 out_error:
1445         if (i > 0)
1446                 __scst_del_cmd_threads(i - 1);
1447         goto out;
1448 }
1449
1450 int scst_add_cmd_threads(int num)
1451 {
1452         int res;
1453
1454         TRACE_ENTRY();
1455
1456         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1457         res = __scst_add_cmd_threads(num);
1458         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1459
1460         TRACE_EXIT_RES(res);
1461         return res;
1462 }
1463 EXPORT_SYMBOL(scst_add_cmd_threads);
1464
1465 void scst_del_cmd_threads(int num)
1466 {
1467         TRACE_ENTRY();
1468
1469         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1470         __scst_del_cmd_threads(num);
1471         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1472
1473         TRACE_EXIT();
1474         return;
1475 }
1476 EXPORT_SYMBOL(scst_del_cmd_threads);
1477
1478 static void scst_stop_all_threads(void)
1479 {
1480         TRACE_ENTRY();
1481
1482         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1483         __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1484         if (scst_threads_info.mgmt_cmd_thread)
1485                 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1486         if (scst_threads_info.mgmt_thread)
1487                 kthread_stop(scst_threads_info.mgmt_thread);
1488         if (scst_threads_info.init_cmd_thread)
1489                 kthread_stop(scst_threads_info.init_cmd_thread);
1490         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1491
1492         TRACE_EXIT();
1493         return;
1494 }
1495
1496 static int scst_start_all_threads(int num)
1497 {
1498         int res;
1499
1500         TRACE_ENTRY();
1501
1502         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1503         res = __scst_add_cmd_threads(num);
1504         if (res < 0)
1505                 goto out;
1506
1507         scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
1508                 NULL, "scsi_tgt_init");
1509         if (IS_ERR(scst_threads_info.init_cmd_thread)) {
1510                 res = PTR_ERR(scst_threads_info.init_cmd_thread);
1511                 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1512                 scst_threads_info.init_cmd_thread = NULL;
1513                 goto out;
1514         }
1515
1516         scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
1517                 NULL, "scsi_tgt_mc");
1518         if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
1519                 res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
1520                 PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
1521                 scst_threads_info.mgmt_cmd_thread = NULL;
1522                 goto out;
1523         }
1524
1525         scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
1526                 NULL, "scsi_tgt_mgmt");
1527         if (IS_ERR(scst_threads_info.mgmt_thread)) {
1528                 res = PTR_ERR(scst_threads_info.mgmt_thread);
1529                 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1530                 scst_threads_info.mgmt_thread = NULL;
1531                 goto out;
1532         }
1533
1534 out:
1535         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1536         TRACE_EXIT_RES(res);
1537         return res;
1538 }
1539
/* Exported wrapper around __scst_get(0); pairs with scst_put(). */
void scst_get(void)
{
	__scst_get(0);
}
EXPORT_SYMBOL(scst_get);
1545
/* Exported wrapper around __scst_put(); pairs with scst_get(). */
void scst_put(void)
{
	__scst_put();
}
EXPORT_SYMBOL(scst_put);
1551
/*
 * scst_add() - class_interface "add" callback, wired into scst_interface
 * below; invoked when a SCSI device node appears.  Registers the device
 * with SCST unless it belongs to the SCST_LOCAL_NAME host (presumably to
 * avoid re-importing SCST's own exported devices — confirm).
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static int scst_add(struct class_device *cdev, struct class_interface *intf)
#else
static int scst_add(struct device *cdev, struct class_interface *intf)
#endif
{
	struct scsi_device *scsidp;
	int res = 0;

	TRACE_ENTRY();

	/* Recover the scsi_device backing this sysfs node */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	scsidp = to_scsi_device(cdev->dev);
#else
	scsidp = to_scsi_device(cdev->parent);
#endif

	if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
		res = scst_register_device(scsidp);

	TRACE_EXIT();
	return res;
}
1575
/*
 * scst_remove() - class_interface "remove" callback; unregisters the
 * departing SCSI device from SCST.  Mirrors the SCST_LOCAL_NAME filter
 * applied in scst_add().
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static void scst_remove(struct class_device *cdev, struct class_interface *intf)
#else
static void scst_remove(struct device *cdev, struct class_interface *intf)
#endif
{
	struct scsi_device *scsidp;

	TRACE_ENTRY();

	/* Recover the scsi_device backing this sysfs node */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	scsidp = to_scsi_device(cdev->dev);
#else
	scsidp = to_scsi_device(cdev->parent);
#endif

	if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
		scst_unregister_device(scsidp);

	TRACE_EXIT();
	return;
}
1598
/*
 * Hook SCST into SCSI device hot-plug notifications.  Kernels before
 * 2.6.26 use class_device callbacks (.add/.remove); later kernels use
 * plain struct device callbacks (.add_dev/.remove_dev).
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static struct class_interface scst_interface = {
	.add = scst_add,
	.remove = scst_remove,
};
#else
static struct class_interface scst_interface = {
	.add_dev = scst_add,
	.remove_dev = scst_remove,
};
#endif
1610
1611 static void __init scst_print_config(void)
1612 {
1613         char buf[128];
1614         int i, j;
1615
1616         i = snprintf(buf, sizeof(buf), "Enabled features: ");
1617         j = i;
1618
1619 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1620         i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1621 #endif
1622
1623 #ifdef CONFIG_SCST_EXTRACHECKS
1624         i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1625                 (j == i) ? "" : ", ");
1626 #endif
1627
1628 #ifdef CONFIG_SCST_TRACING
1629         i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1630                 (j == i) ? "" : ", ");
1631 #endif
1632
1633 #ifdef CONFIG_SCST_DEBUG
1634         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1635                 (j == i) ? "" : ", ");
1636 #endif
1637
1638 #ifdef CONFIG_SCST_DEBUG_TM
1639         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1640                 (j == i) ? "" : ", ");
1641 #endif
1642
1643 #ifdef CONFIG_SCST_DEBUG_RETRY
1644         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1645                 (j == i) ? "" : ", ");
1646 #endif
1647
1648 #ifdef CONFIG_SCST_DEBUG_OOM
1649         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1650                 (j == i) ? "" : ", ");
1651 #endif
1652
1653 #ifdef CONFIG_SCST_DEBUG_SN
1654         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1655                 (j == i) ? "" : ", ");
1656 #endif
1657
1658 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1659         i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1660                 (j == i) ? "" : ", ");
1661 #endif
1662
1663 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1664         i += snprintf(&buf[i], sizeof(buf) - i,
1665                 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1666                 (j == i) ? "" : ", ");
1667 #endif
1668
1669 #ifdef CONFIG_SCST_STRICT_SECURITY
1670         i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1671                 (j == i) ? "" : ", ");
1672 #endif
1673
1674         if (j != i)
1675                 PRINT_INFO("%s", buf);
1676 }
1677
1678 static int __init init_scst(void)
1679 {
1680         int res = 0, i;
1681         int scst_num_cpus;
1682
1683         TRACE_ENTRY();
1684
1685 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1686         {
1687                 struct scsi_request *req;
1688                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1689                         sizeof(req->sr_sense_buffer));
1690         }
1691 #else
1692         {
1693                 struct scsi_sense_hdr *shdr;
1694                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1695         }
1696 #endif
1697         {
1698                 struct scst_tgt_dev *t;
1699                 struct scst_cmd *c;
1700                 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1701                 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
1702         }
1703
1704         BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
1705         BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
1706         BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
1707         BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
1708
1709 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1710 #if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1711         PRINT_WARNING("%s", "Patch export_alloc_io_context was not applied on "
1712                 "your kernel. SCST will be working with not the best "
1713                 "performance.");
1714 #endif
1715 #endif
1716
1717         mutex_init(&scst_mutex);
1718         INIT_LIST_HEAD(&scst_template_list);
1719         INIT_LIST_HEAD(&scst_dev_list);
1720         INIT_LIST_HEAD(&scst_dev_type_list);
1721         spin_lock_init(&scst_main_lock);
1722         INIT_LIST_HEAD(&scst_acg_list);
1723         spin_lock_init(&scst_init_lock);
1724         init_waitqueue_head(&scst_init_cmd_list_waitQ);
1725         INIT_LIST_HEAD(&scst_init_cmd_list);
1726 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1727         scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1728 #endif
1729         atomic_set(&scst_cmd_count, 0);
1730         spin_lock_init(&scst_mcmd_lock);
1731         INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1732         INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1733         init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1734         init_waitqueue_head(&scst_mgmt_waitQ);
1735         spin_lock_init(&scst_mgmt_lock);
1736         INIT_LIST_HEAD(&scst_sess_init_list);
1737         INIT_LIST_HEAD(&scst_sess_shut_list);
1738         init_waitqueue_head(&scst_dev_cmd_waitQ);
1739         mutex_init(&scst_suspend_mutex);
1740         INIT_LIST_HEAD(&scst_cmd_lists_list);
1741         scst_virt_dev_last_id = 1;
1742         spin_lock_init(&scst_temp_UA_lock);
1743
1744         spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1745         INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1746         init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1747         list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1748                 &scst_cmd_lists_list);
1749
1750         scst_num_cpus = num_online_cpus();
1751
1752         /* ToDo: register_cpu_notifier() */
1753
1754         if (scst_threads == 0)
1755                 scst_threads = scst_num_cpus;
1756
1757         if (scst_threads < 1) {
1758                 PRINT_ERROR("%s", "scst_threads can not be less than 1");
1759                 scst_threads = scst_num_cpus;
1760         }
1761
1762         scst_threads_info_init();
1763
1764 #define INIT_CACHEP(p, s, o) do {                                       \
1765                 p = KMEM_CACHE(s, SCST_SLAB_FLAGS);                     \
1766                 TRACE_MEM("Slab create: %s at %p size %zd", #s, p,      \
1767                           sizeof(struct s));                            \
1768                 if (p == NULL) {                                        \
1769                         res = -ENOMEM;                                  \
1770                         goto o;                                         \
1771                 }                                                       \
1772         } while (0)
1773
1774         INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1775         INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1776                         out_destroy_mgmt_cache);
1777         INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1778                         out_destroy_mgmt_stub_cache);
1779         {
1780                 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1781                 INIT_CACHEP(scst_sense_cachep, scst_sense,
1782                             out_destroy_ua_cache);
1783         }
1784         INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
1785         INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1786         INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1787         INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
1788
1789         scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1790                 mempool_free_slab, scst_mgmt_cachep);
1791         if (scst_mgmt_mempool == NULL) {
1792                 res = -ENOMEM;
1793                 goto out_destroy_acg_cache;
1794         }
1795
1796         scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1797                 mempool_free_slab, scst_mgmt_stub_cachep);
1798         if (scst_mgmt_stub_mempool == NULL) {
1799                 res = -ENOMEM;
1800                 goto out_destroy_mgmt_mempool;
1801         }
1802
1803         scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
1804                 mempool_free_slab, scst_ua_cachep);
1805         if (scst_ua_mempool == NULL) {
1806                 res = -ENOMEM;
1807                 goto out_destroy_mgmt_stub_mempool;
1808         }
1809
1810         /*
1811          * Loosing sense may have fatal consequences, so let's have a big pool
1812          */
1813         scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
1814                 mempool_free_slab, scst_sense_cachep);
1815         if (scst_sense_mempool == NULL) {
1816                 res = -ENOMEM;
1817                 goto out_destroy_ua_mempool;
1818         }
1819
1820         if (scst_max_cmd_mem == 0) {
1821                 struct sysinfo si;
1822                 si_meminfo(&si);
1823 #if BITS_PER_LONG == 32
1824                 scst_max_cmd_mem = min(
1825                         (((uint64_t)si.totalram << PAGE_SHIFT) >> 20) >> 2,
1826                         (uint64_t)1 << 30);
1827 #else
1828                 scst_max_cmd_mem = ((si.totalram << PAGE_SHIFT) >> 20) >> 2;
1829 #endif
1830         }
1831
1832         if (scst_max_dev_cmd_mem != 0) {
1833                 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1834                         PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1835                                 "scst_max_cmd_mem (%d)",
1836                                 scst_max_dev_cmd_mem,
1837                                 scst_max_cmd_mem);
1838                         scst_max_dev_cmd_mem = scst_max_cmd_mem;
1839                 }
1840         } else
1841                 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
1842
1843         res = scst_sgv_pools_init(
1844                 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1845         if (res != 0)
1846                 goto out_destroy_sense_mempool;
1847
1848         scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1849         if (scst_default_acg == NULL) {
1850                 res = -ENOMEM;
1851                 goto out_destroy_sgv_pool;
1852         }
1853
1854         res = scsi_register_interface(&scst_interface);
1855         if (res != 0)
1856                 goto out_free_acg;
1857
1858         scst_scsi_op_list_init();
1859
1860         for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1861                 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1862                 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1863                 tasklet_init(&scst_tasklets[i].tasklet,
1864                              (void *)scst_cmd_tasklet,
1865                              (unsigned long)&scst_tasklets[i]);
1866         }
1867
1868         TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1869                 scst_threads);
1870
1871         res = scst_start_all_threads(scst_threads);
1872         if (res < 0)
1873                 goto out_thread_free;
1874
1875         res = scst_proc_init_module();
1876         if (res != 0)
1877                 goto out_thread_free;
1878
1879
1880         PRINT_INFO("SCST version %s loaded successfully (max mem for "
1881                 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1882                 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1883
1884         scst_print_config();
1885
1886 out:
1887         TRACE_EXIT_RES(res);
1888         return res;
1889
1890 out_thread_free:
1891         scst_stop_all_threads();
1892
1893         scsi_unregister_interface(&scst_interface);
1894
1895 out_free_acg:
1896         scst_destroy_acg(scst_default_acg);
1897
1898 out_destroy_sgv_pool:
1899         scst_sgv_pools_deinit();
1900
1901 out_destroy_sense_mempool:
1902         mempool_destroy(scst_sense_mempool);
1903
1904 out_destroy_ua_mempool:
1905         mempool_destroy(scst_ua_mempool);
1906
1907 out_destroy_mgmt_stub_mempool:
1908         mempool_destroy(scst_mgmt_stub_mempool);
1909
1910 out_destroy_mgmt_mempool:
1911         mempool_destroy(scst_mgmt_mempool);
1912
1913 out_destroy_acg_cache:
1914         kmem_cache_destroy(scst_acgd_cachep);
1915
1916 out_destroy_tgt_cache:
1917         kmem_cache_destroy(scst_tgtd_cachep);
1918
1919 out_destroy_sess_cache:
1920         kmem_cache_destroy(scst_sess_cachep);
1921
1922 out_destroy_cmd_cache:
1923         kmem_cache_destroy(scst_cmd_cachep);
1924
1925 out_destroy_sense_cache:
1926         kmem_cache_destroy(scst_sense_cachep);
1927
1928 out_destroy_ua_cache:
1929         kmem_cache_destroy(scst_ua_cachep);
1930
1931 out_destroy_mgmt_stub_cache:
1932         kmem_cache_destroy(scst_mgmt_stub_cachep);
1933
1934 out_destroy_mgmt_cache:
1935         kmem_cache_destroy(scst_mgmt_cachep);
1936         goto out;
1937 }
1938
1939 static void __exit exit_scst(void)
1940 {
1941         TRACE_ENTRY();
1942
1943         /* ToDo: unregister_cpu_notifier() */
1944
1945         scst_proc_cleanup_module();
1946
1947         scst_stop_all_threads();
1948
1949         scsi_unregister_interface(&scst_interface);
1950         scst_destroy_acg(scst_default_acg);
1951
1952         scst_sgv_pools_deinit();
1953
1954 #define DEINIT_CACHEP(p) do {           \
1955                 kmem_cache_destroy(p);  \
1956                 p = NULL;               \
1957         } while (0)
1958
1959         mempool_destroy(scst_mgmt_mempool);
1960         mempool_destroy(scst_mgmt_stub_mempool);
1961         mempool_destroy(scst_ua_mempool);
1962         mempool_destroy(scst_sense_mempool);
1963
1964         DEINIT_CACHEP(scst_mgmt_cachep);
1965         DEINIT_CACHEP(scst_mgmt_stub_cachep);
1966         DEINIT_CACHEP(scst_ua_cachep);
1967         DEINIT_CACHEP(scst_sense_cachep);
1968         DEINIT_CACHEP(scst_cmd_cachep);
1969         DEINIT_CACHEP(scst_sess_cachep);
1970         DEINIT_CACHEP(scst_tgtd_cachep);
1971         DEINIT_CACHEP(scst_acgd_cachep);
1972
1973 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1974 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1975         put_io_context(scst_ioc);
1976 #endif
1977 #endif
1978
1979         PRINT_INFO("%s", "SCST unloaded");
1980
1981         TRACE_EXIT();
1982         return;
1983 }
1984
1985
/* Module entry/exit points and modinfo metadata */
module_init(init_scst);
module_exit(exit_scst);

MODULE_AUTHOR("Vladislav Bolkhovitin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCSI target core");
MODULE_VERSION(SCST_VERSION_STRING);