Patch from Frank Zago <fzago@systemfabricworks.com> with some modifications. It expos...
1 /*
2  *  scst_main.c
3  *
4  *  Copyright (C) 2004 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2007 - 2009 ID7 Ltd.
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/module.h>
20
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
31
32 #include "scst.h"
33 #include "scst_priv.h"
34 #include "scst_mem.h"
35
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not\
38  recommended for performance reasons. Consider changing the VMSPLIT\
39  option or using a 64-bit configuration instead. See the README file for\
40  details."
41 #endif
42
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44     !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied to\
46  your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined.\
47  Pass-through dev handlers will not work."
48 #endif
49
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
51 #if !defined(SCST_IO_CONTEXT)
52 #warning "Patch io_context-<kernel-version>.patch was not applied to\
53  your kernel. SCST will work with suboptimal performance."
54 #endif
55 #endif
56
57 /**
58  ** SCST global variables. They are all left uninitialized so that their
59  ** layout in memory stays exactly as declared; otherwise the compiler would
60  ** place zero-initialized variables separately from the nonzero-initialized ones.
61  **/
62
63 /*
64  * All targets, devices and dev_types management is done under this mutex.
65  *
66  * It must NOT be taken from work items (schedule_work(), etc.), because
67  * otherwise a deadlock (a double lock, actually) is possible, e.g. with
68  * scst_user's detach_tgt(), which is called under scst_mutex and calls
69  * flush_scheduled_work().
70  */
71 struct mutex scst_mutex;
72
73  /* All 3 protected by scst_mutex */
74 static struct list_head scst_template_list;
75 struct list_head scst_dev_list;
76 struct list_head scst_dev_type_list;
77
78 spinlock_t scst_main_lock;
79
80 static struct kmem_cache *scst_mgmt_cachep;
81 mempool_t *scst_mgmt_mempool;
82 static struct kmem_cache *scst_mgmt_stub_cachep;
83 mempool_t *scst_mgmt_stub_mempool;
84 static struct kmem_cache *scst_ua_cachep;
85 mempool_t *scst_ua_mempool;
86 static struct kmem_cache *scst_sense_cachep;
87 mempool_t *scst_sense_mempool;
88 static struct kmem_cache *scst_aen_cachep;
89 mempool_t *scst_aen_mempool;
90 struct kmem_cache *scst_tgtd_cachep;
91 struct kmem_cache *scst_sess_cachep;
92 struct kmem_cache *scst_acgd_cachep;
93
94 struct list_head scst_acg_list;
95 struct scst_acg *scst_default_acg;
96
97 spinlock_t scst_init_lock;
98 wait_queue_head_t scst_init_cmd_list_waitQ;
99 struct list_head scst_init_cmd_list;
100 unsigned int scst_init_poll_cnt;
101
102 struct kmem_cache *scst_cmd_cachep;
103
104 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
105 unsigned long scst_trace_flag;
106 #endif
107
108 unsigned long scst_flags;
109 atomic_t scst_cmd_count;
110
111 struct scst_cmd_lists scst_main_cmd_lists;
112
113 struct scst_tasklet scst_tasklets[NR_CPUS];
114
115 spinlock_t scst_mcmd_lock;
116 struct list_head scst_active_mgmt_cmd_list;
117 struct list_head scst_delayed_mgmt_cmd_list;
118 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
119
120 wait_queue_head_t scst_mgmt_waitQ;
121 spinlock_t scst_mgmt_lock;
122 struct list_head scst_sess_init_list;
123 struct list_head scst_sess_shut_list;
124
125 wait_queue_head_t scst_dev_cmd_waitQ;
126
127 static struct mutex scst_suspend_mutex;
128 /* protected by scst_suspend_mutex */
129 static struct list_head scst_cmd_lists_list;
130
131 static int scst_threads;
132 struct mutex scst_global_threads_mutex;
133 u32 scst_nr_global_threads;
134 static struct list_head scst_global_threads_list;
135 static struct task_struct *scst_init_cmd_thread;
136 static struct task_struct *scst_mgmt_thread;
137 static struct task_struct *scst_mgmt_cmd_thread;
138
139 static int suspend_count;
140
141 static int scst_virt_dev_last_id; /* protected by scst_mutex */
142
143 static unsigned int scst_max_cmd_mem;
144 unsigned int scst_max_dev_cmd_mem;
145
146 module_param_named(scst_threads, scst_threads, int, 0);
147 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
148
149 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
150 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
151         "all SCSI commands of all devices at any given time in MB");
152
153 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
154 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
155         "by all SCSI commands of a device at any given time in MB");
156
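/*
 * Placeholder dev handler. It is assigned to a device whenever the device
 * has no real handler attached, e.g. after its handler is unregistered
 * (see the scst_assign_dev_handler() calls below).
 */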
157 struct scst_dev_type scst_null_devtype = {
158         .name = "none",
159 };
160
161 static void __scst_resume_activity(void);
162
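/*
 * Registers a target driver's template with the SCST core. The template
 * must provide detect(), release() and xmit_response() callbacks and a
 * non-negative threads_num; the version string passed in must match
 * SCST_INTERFACE_VERSION, which is why drivers normally go through the
 * scst_register_target_template() wrapper declared in scst.h.
 *
 * Illustrative sketch (hypothetical driver names, not part of this file):
 *
 *	static struct scst_tgt_template my_tgtt = {
 *		.name		= "my_tgt",
 *		.detect		= my_detect,
 *		.release	= my_release,
 *		.xmit_response	= my_xmit_response,
 *	};
 *
 *	res = scst_register_target_template(&my_tgtt);
 *	if (res != 0)
 *		return res;
 */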
163 int __scst_register_target_template(struct scst_tgt_template *vtt,
164         const char *version)
165 {
166         int res = 0;
167         struct scst_tgt_template *t;
168         static DEFINE_MUTEX(m);
169
170         TRACE_ENTRY();
171
172         INIT_LIST_HEAD(&vtt->tgt_list);
173
174         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
175                 PRINT_ERROR("Incorrect version of target %s", vtt->name);
176                 res = -EINVAL;
177                 goto out_err;
178         }
179
180         if (!vtt->detect) {
181                 PRINT_ERROR("Target driver %s doesn't have a "
182                         "detect() method.", vtt->name);
183                 res = -EINVAL;
184                 goto out_err;
185         }
186
187         if (!vtt->release) {
188                 PRINT_ERROR("Target driver %s doesn't have a "
189                         "release() method.", vtt->name);
190                 res = -EINVAL;
191                 goto out_err;
192         }
193
194         if (!vtt->xmit_response) {
195                 PRINT_ERROR("Target driver %s doesn't have an "
196                         "xmit_response() method.", vtt->name);
197                 res = -EINVAL;
198                 goto out_err;
199         }
200
201         if (vtt->threads_num < 0) {
202                 PRINT_ERROR("Wrong threads_num value %d for "
203                         "target \"%s\"", vtt->threads_num,
204                         vtt->name);
205                 res = -EINVAL;
206                 goto out_err;
207         }
208
209         if (!vtt->no_proc_entry) {
210                 res = scst_build_proc_target_dir_entries(vtt);
211                 if (res < 0)
212                         goto out_err;
213         }
214
215         if (vtt->rdy_to_xfer == NULL)
216                 vtt->rdy_to_xfer_atomic = 1;
217
218         if (mutex_lock_interruptible(&m) != 0)
219                 goto out_err;
220
221         if (mutex_lock_interruptible(&scst_mutex) != 0)
222                 goto out_m_up;
223         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
224                 if (strcmp(t->name, vtt->name) == 0) {
225                         PRINT_ERROR("Target driver %s already registered",
226                                 vtt->name);
227                         mutex_unlock(&scst_mutex);
228                         goto out_cleanup;
229                 }
230         }
231         mutex_unlock(&scst_mutex);
232
233         TRACE_DBG("%s", "Calling target driver's detect()");
234         res = vtt->detect(vtt);
235         TRACE_DBG("Target driver's detect() returned %d", res);
236         if (res < 0) {
237                 PRINT_ERROR("%s", "The detect() routine failed");
238                 res = -EINVAL;
239                 goto out_cleanup;
240         }
241
242         mutex_lock(&scst_mutex);
243         list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
244         mutex_unlock(&scst_mutex);
245
246         res = 0;
247
248         PRINT_INFO("Target template %s registered successfully", vtt->name);
249
250         mutex_unlock(&m);
251
252 out:
253         TRACE_EXIT_RES(res);
254         return res;
255
256 out_cleanup:
257         scst_cleanup_proc_target_dir_entries(vtt);
258
259 out_m_up:
260         mutex_unlock(&m);
261
262 out_err:
263         PRINT_ERROR("Failed to register target template %s", vtt->name);
264         goto out;
265 }
266 EXPORT_SYMBOL(__scst_register_target_template);
267
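/*
 * Unregisters a target template. Any targets still registered against the
 * template are torn down first via scst_unregister(); the template is then
 * removed from scst_template_list and its /proc entries are cleaned up.
 */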
268 void scst_unregister_target_template(struct scst_tgt_template *vtt)
269 {
270         struct scst_tgt *tgt;
271         struct scst_tgt_template *t;
272         int found = 0;
273
274         TRACE_ENTRY();
275
276         mutex_lock(&scst_mutex);
277
278         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
279                 if (strcmp(t->name, vtt->name) == 0) {
280                         found = 1;
281                         break;
282                 }
283         }
284         if (!found) {
285                 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
286                 goto out_up;
287         }
288
289 restart:
290         list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
291                 mutex_unlock(&scst_mutex);
292                 scst_unregister(tgt);
293                 mutex_lock(&scst_mutex);
294                 goto restart;
295         }
296         list_del(&vtt->scst_template_list_entry);
297
298         PRINT_INFO("Target template %s unregistered successfully", vtt->name);
299
300 out_up:
301         mutex_unlock(&scst_mutex);
302
303         scst_cleanup_proc_target_dir_entries(vtt);
304
305         TRACE_EXIT();
306         return;
307 }
308 EXPORT_SYMBOL(scst_unregister_target_template);
309
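/*
 * Allocates and registers a new target for the given template. Activity is
 * suspended while the target, its default access group name
 * ("<SCST_DEFAULT_ACG_NAME>_<target_name>") and its /proc entries are set
 * up. Returns the new scst_tgt, or NULL on failure.
 */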
310 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
311         const char *target_name)
312 {
313         struct scst_tgt *tgt;
314         int rc = 0;
315
316         TRACE_ENTRY();
317
318         tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
319         if (tgt == NULL) {
320                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
321                 rc = -ENOMEM;
322                 goto out_err;
323         }
324
325         INIT_LIST_HEAD(&tgt->sess_list);
326         init_waitqueue_head(&tgt->unreg_waitQ);
327         tgt->tgtt = vtt;
328         tgt->sg_tablesize = vtt->sg_tablesize;
329         spin_lock_init(&tgt->tgt_lock);
330         INIT_LIST_HEAD(&tgt->retry_cmd_list);
331         atomic_set(&tgt->finished_cmds, 0);
332         init_timer(&tgt->retry_timer);
333         tgt->retry_timer.data = (unsigned long)tgt;
334         tgt->retry_timer.function = scst_tgt_retry_timer_fn;
335
336         rc = scst_suspend_activity(true);
337         if (rc != 0)
338                 goto out_free_tgt_err;
339
340         if (mutex_lock_interruptible(&scst_mutex) != 0) {
341                 rc = -EINTR;
342                 goto out_resume_free;
343         }
344
345         if (target_name != NULL) {
346                 int len = strlen(target_name) + 1 +
347                         strlen(SCST_DEFAULT_ACG_NAME) + 1;
348
349                 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
350                 if (tgt->default_group_name == NULL) {
351                         TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
352                                 "group name failed");
353                         rc = -ENOMEM;
354                         goto out_unlock_resume;
355                 }
356                 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
357                         target_name);
358         }
359
360         rc = scst_build_proc_target_entries(tgt);
361         if (rc < 0)
362                 goto out_free_name;
363         else
364                 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
365
366         mutex_unlock(&scst_mutex);
367         scst_resume_activity();
368
369         PRINT_INFO("Target %s (%p) for template %s registered successfully",
370                 target_name, tgt, vtt->name);
371
372 out:
373         TRACE_EXIT();
374         return tgt;
375
376 out_free_name:
377         kfree(tgt->default_group_name);
378
379 out_unlock_resume:
380         mutex_unlock(&scst_mutex);
381
382 out_resume_free:
383         scst_resume_activity();
384
385 out_free_tgt_err:
386         kfree(tgt);
387         tgt = NULL;
388
389 out_err:
390         PRINT_ERROR("Failed to register target %s for template %s (error %d)",
391                 target_name, vtt->name, rc);
392         goto out;
393 }
394 EXPORT_SYMBOL(scst_register);
395
396 static inline int test_sess_list(struct scst_tgt *tgt)
397 {
398         int res;
399         mutex_lock(&scst_mutex);
400         res = list_empty(&tgt->sess_list);
401         mutex_unlock(&scst_mutex);
402         return res;
403 }
404
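/*
 * Tears a target down: calls the driver's release(), unregisters any
 * sessions still in SCST_SESS_SPH_READY, waits on unreg_waitQ until the
 * session list is empty, then unlinks the target and frees it with
 * activity suspended.
 */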
405 void scst_unregister(struct scst_tgt *tgt)
406 {
407         struct scst_session *sess;
408         struct scst_tgt_template *vtt = tgt->tgtt;
409
410         TRACE_ENTRY();
411
412         TRACE_DBG("%s", "Calling target driver's release()");
413         tgt->tgtt->release(tgt);
414         TRACE_DBG("%s", "Target driver's release() returned");
415
416         mutex_lock(&scst_mutex);
417 again:
418         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
419                 if (sess->shut_phase == SCST_SESS_SPH_READY) {
420                         /*
421                          * Sometimes it's hard for target driver to track all
422                          * its sessions (see scst_local, for example), so let's
423                          * help it.
424                          */
425                         mutex_unlock(&scst_mutex);
426                         scst_unregister_session(sess, 0, NULL);
427                         mutex_lock(&scst_mutex);
428                         goto again;
429                 }
430         }
431         mutex_unlock(&scst_mutex);
432
433         TRACE_DBG("%s", "Waiting for sessions shutdown");
434         wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
435         TRACE_DBG("%s", "wait_event() returned");
436
437         scst_suspend_activity(false);
438         mutex_lock(&scst_mutex);
439
440         list_del(&tgt->tgt_list_entry);
441
442         scst_cleanup_proc_target_entries(tgt);
443
444         kfree(tgt->default_group_name);
445
446         mutex_unlock(&scst_mutex);
447         scst_resume_activity();
448
449         del_timer_sync(&tgt->retry_timer);
450
451         PRINT_INFO("Target %p for template %s unregistered successfully",
452                 tgt, vtt->name);
453
454         kfree(tgt);
455
456         TRACE_EXIT();
457         return;
458 }
459 EXPORT_SYMBOL(scst_unregister);
460
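/*
 * Waits until scst_cmd_count drops to zero. In the interruptible case the
 * wait is bounded by SCST_SUSPENDING_TIMEOUT, and activity is resumed
 * again if the wait is interrupted or times out (-EBUSY).
 */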
461 static int scst_susp_wait(bool interruptible)
462 {
463         int res = 0;
464
465         TRACE_ENTRY();
466
467         if (interruptible) {
468                 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
469                         (atomic_read(&scst_cmd_count) == 0),
470                         SCST_SUSPENDING_TIMEOUT);
471                 if (res <= 0) {
472                         __scst_resume_activity();
473                         if (res == 0)
474                                 res = -EBUSY;
475                 } else
476                         res = 0;
477         } else
478                 wait_event(scst_dev_cmd_waitQ,
479                            atomic_read(&scst_cmd_count) == 0);
480
481         TRACE_MGMT_DBG("wait_event() returned %d", res);
482
483         TRACE_EXIT_RES(res);
484         return res;
485 }
486
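/*
 * Suspends command processing. suspend_count is incremented; on the first
 * suspension the SUSPENDING/SUSPENDED flags are set and the function waits
 * for all outstanding commands to finish. Must be paired with
 * scst_resume_activity(), e.g. (illustrative pairing only):
 *
 *	res = scst_suspend_activity(true);
 *	if (res != 0)
 *		return res;
 *	... reconfigure under scst_mutex ...
 *	scst_resume_activity();
 */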
487 int scst_suspend_activity(bool interruptible)
488 {
489         int res = 0;
490         bool rep = false;
491
492         TRACE_ENTRY();
493
494         if (interruptible) {
495                 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
496                         res = -EINTR;
497                         goto out;
498                 }
499         } else
500                 mutex_lock(&scst_suspend_mutex);
501
502         TRACE_MGMT_DBG("suspend_count %d", suspend_count);
503         suspend_count++;
504         if (suspend_count > 1)
505                 goto out_up;
506
507         set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
508         set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
509         /*
510          * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
511          * ordered with scst_cmd_count. Otherwise lockless logic in
512          * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
513          */
514         smp_mb__after_set_bit();
515
516         /*
517          * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
518          * information about scst_user behavior.
519          *
520          * ToDo: make the global suspending unneeded (switch to per-device
521          * reference counting? That would mean to switch off from lockless
522          * implementation of scst_translate_lun().. )
523          */
524
525         if (atomic_read(&scst_cmd_count) != 0) {
526                 PRINT_INFO("Waiting for %d active commands to complete... "
527                         "This might take a few minutes for disks or a few "
528                         "hours for tapes, if long-running commands like "
529                         "REWIND or FORMAT are used. If you have a hung user "
530                         "space device (i.e. one made using the scst_user "
531                         "module) that is not responding to any commands, it "
532                         "might take virtually forever until the corresponding "
533                         "user space program recovers and starts responding "
534                         "or gets killed.", atomic_read(&scst_cmd_count));
535                 rep = true;
536         }
537
538         res = scst_susp_wait(interruptible);
539         if (res != 0)
540                 goto out_clear;
541
542         clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
543         /* See comment about smp_mb() above */
544         smp_mb__after_clear_bit();
545
546         TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
547                 atomic_read(&scst_cmd_count));
548
549         res = scst_susp_wait(interruptible);
550         if (res != 0)
551                 goto out_clear;
552
553         if (rep)
554                 PRINT_INFO("%s", "All active commands completed");
555
556 out_up:
557         mutex_unlock(&scst_suspend_mutex);
558
559 out:
560         TRACE_EXIT_RES(res);
561         return res;
562
563 out_clear:
564         clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
565         /* See comment about smp_mb() above */
566         smp_mb__after_clear_bit();
567         goto out_up;
568 }
569 EXPORT_SYMBOL(scst_suspend_activity);
570
571 static void __scst_resume_activity(void)
572 {
573         struct scst_cmd_lists *l;
574
575         TRACE_ENTRY();
576
577         suspend_count--;
578         TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
579         if (suspend_count > 0)
580                 goto out;
581
582         clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
583         /*
584          * The barrier is needed to make sure all woken up threads see the
585          * cleared flag. Not sure if it's really needed, but let's be safe.
586          */
587         smp_mb__after_clear_bit();
588
589         list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
590                 wake_up_all(&l->cmd_list_waitQ);
591         }
592         wake_up_all(&scst_init_cmd_list_waitQ);
593
594         spin_lock_irq(&scst_mcmd_lock);
595         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
596                 struct scst_mgmt_cmd *m;
597                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
598                                 mgmt_cmd_list_entry);
599                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
600                         "mgmt cmd list", m);
601                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
602         }
603         spin_unlock_irq(&scst_mcmd_lock);
604         wake_up_all(&scst_mgmt_cmd_list_waitQ);
605
606 out:
607         TRACE_EXIT();
608         return;
609 }
610
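/*
 * Drops one suspend reference; when suspend_count reaches zero the
 * SUSPENDED flag is cleared and all command-list, init-cmd and mgmt-cmd
 * waiters are woken up.
 */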
611 void scst_resume_activity(void)
612 {
613         TRACE_ENTRY();
614
615         mutex_lock(&scst_suspend_mutex);
616         __scst_resume_activity();
617         mutex_unlock(&scst_suspend_mutex);
618
619         TRACE_EXIT();
620         return;
621 }
622 EXPORT_SYMBOL(scst_resume_activity);
623
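/*
 * Called via the SCSI device class interface (scst_add() below) when a new
 * SCSI device appears: allocates an scst_device for it and assigns a
 * matching dev handler, if one is already registered for its type.
 */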
624 static int scst_register_device(struct scsi_device *scsidp)
625 {
626         int res = 0;
627         struct scst_device *dev;
628         struct scst_dev_type *dt;
629
630         TRACE_ENTRY();
631
632         res = scst_suspend_activity(true);
633         if (res != 0)
634                 goto out_err;
635
636         if (mutex_lock_interruptible(&scst_mutex) != 0) {
637                 res = -EINTR;
638                 goto out_resume;
639         }
640
641         res = scst_alloc_device(GFP_KERNEL, &dev);
642         if (res != 0)
643                 goto out_up;
644
645         dev->type = scsidp->type;
646
647         dev->rq_disk = alloc_disk(1);
648         if (dev->rq_disk == NULL) {
649                 res = -ENOMEM;
650                 goto out_free_dev;
651         }
652         dev->rq_disk->major = SCST_MAJOR;
653
654         dev->scsi_dev = scsidp;
655
656         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
657
658         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
659                 if (dt->type == scsidp->type) {
660                         res = scst_assign_dev_handler(dev, dt);
661                         if (res != 0)
662                                 goto out_free;
663                         break;
664                 }
665         }
666
667 out_up:
668         mutex_unlock(&scst_mutex);
669
670 out_resume:
671         scst_resume_activity();
672
673 out_err:
674         if (res == 0) {
675                 PRINT_INFO("Attached SCSI target mid-level at "
676                     "scsi%d, channel %d, id %d, lun %d, type %d",
677                     scsidp->host->host_no, scsidp->channel, scsidp->id,
678                     scsidp->lun, scsidp->type);
679         } else {
680                 PRINT_ERROR("Failed to attach SCSI target mid-level "
681                     "at scsi%d, channel %d, id %d, lun %d, type %d",
682                     scsidp->host->host_no, scsidp->channel, scsidp->id,
683                     scsidp->lun, scsidp->type);
684         }
685
686         TRACE_EXIT_RES(res);
687         return res;
688
689 out_free:
690         list_del(&dev->dev_list_entry);
691         put_disk(dev->rq_disk);
692
693 out_free_dev:
694         scst_free_device(dev);
695         goto out_up;
696 }
697
698 static void scst_unregister_device(struct scsi_device *scsidp)
699 {
700         struct scst_device *d, *dev = NULL;
701         struct scst_acg_dev *acg_dev, *aa;
702
703         TRACE_ENTRY();
704
705         scst_suspend_activity(false);
706         mutex_lock(&scst_mutex);
707
708         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
709                 if (d->scsi_dev == scsidp) {
710                         dev = d;
711                         TRACE_DBG("Target device %p found", dev);
712                         break;
713                 }
714         }
715         if (dev == NULL) {
716                 PRINT_ERROR("%s", "Target device not found");
717                 goto out_unblock;
718         }
719
720         list_del(&dev->dev_list_entry);
721
722         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
723                                  dev_acg_dev_list_entry)
724         {
725                 scst_acg_remove_dev(acg_dev->acg, dev);
726         }
727
728         scst_assign_dev_handler(dev, &scst_null_devtype);
729
730         put_disk(dev->rq_disk);
731         scst_free_device(dev);
732
733         PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
734                 "id %d, lun %d, type %d", scsidp->host->host_no,
735                 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
736
737 out_unblock:
738         mutex_unlock(&scst_mutex);
739         scst_resume_activity();
740
741         TRACE_EXIT();
742         return;
743 }
744
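/*
 * Sanity-checks a dev handler: parse() is mandatory; if exec() or
 * dev_done() are not provided, the corresponding exec_atomic/
 * dev_done_atomic flags are set up for the default (pass-through) paths.
 */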
745 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
746 {
747         int res = 0;
748
749         if (dev_handler->parse == NULL) {
750                 PRINT_ERROR("scst dev_type driver %s doesn't have a "
751                         "parse() method.", dev_handler->name);
752                 res = -EINVAL;
753                 goto out;
754         }
755
756         if (dev_handler->exec == NULL) {
757 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
758                 dev_handler->exec_atomic = 1;
759 #else
760                 dev_handler->exec_atomic = 0;
761 #endif
762         }
763
764         if (dev_handler->dev_done == NULL)
765                 dev_handler->dev_done_atomic = 1;
766
767 out:
768         TRACE_EXIT_RES(res);
769         return res;
770 }
771
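/*
 * Registers a virtual (handler-backed, no struct scsi_device) device.
 * Returns the positive virtual device id on success or a negative error.
 *
 * Illustrative sketch (hypothetical handler and device names):
 *
 *	id = scst_register_virtual_device(&my_vdisk_devtype, "disk01");
 *	if (id < 0)
 *		return id;
 */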
772 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
773         const char *dev_name)
774 {
775         int res, rc;
776         struct scst_device *dev = NULL;
777
778         TRACE_ENTRY();
779
780         if (dev_handler == NULL) {
781                 PRINT_ERROR("%s: valid device handler must be supplied",
782                             __func__);
783                 res = -EINVAL;
784                 goto out;
785         }
786
787         if (dev_name == NULL) {
788                 PRINT_ERROR("%s: device name must be non-NULL", __func__);
789                 res = -EINVAL;
790                 goto out;
791         }
792
793         res = scst_dev_handler_check(dev_handler);
794         if (res != 0)
795                 goto out;
796
797         res = scst_suspend_activity(true);
798         if (res != 0)
799                 goto out;
800
801         if (mutex_lock_interruptible(&scst_mutex) != 0) {
802                 res = -EINTR;
803                 goto out_resume;
804         }
805
806         res = scst_alloc_device(GFP_KERNEL, &dev);
807         if (res != 0)
808                 goto out_up;
809
810         dev->type = dev_handler->type;
811         dev->scsi_dev = NULL;
812         dev->virt_name = dev_name;
813         dev->virt_id = scst_virt_dev_last_id++;
814
815         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
816
817         res = dev->virt_id;
818
819         rc = scst_assign_dev_handler(dev, dev_handler);
820         if (rc != 0) {
821                 res = rc;
822                 goto out_free_del;
823         }
824
825 out_up:
826         mutex_unlock(&scst_mutex);
827
828 out_resume:
829         scst_resume_activity();
830
831 out:
832         if (res > 0) {
833                 PRINT_INFO("Attached SCSI target mid-level to virtual "
834                     "device %s (id %d)", dev_name, dev->virt_id);
835         } else {
836                 PRINT_INFO("Failed to attach SCSI target mid-level to "
837                     "virtual device %s", dev_name);
838         }
839
840         TRACE_EXIT_RES(res);
841         return res;
842
843 out_free_del:
844         list_del(&dev->dev_list_entry);
845         scst_free_device(dev);
846         goto out_up;
847 }
848 EXPORT_SYMBOL(scst_register_virtual_device);
849
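/*
 * Unregisters the virtual device with the given id: unlinks it from
 * scst_dev_list, removes it from all access groups, switches it to
 * scst_null_devtype and frees it, all with activity suspended.
 */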
850 void scst_unregister_virtual_device(int id)
851 {
852         struct scst_device *d, *dev = NULL;
853         struct scst_acg_dev *acg_dev, *aa;
854
855         TRACE_ENTRY();
856
857         scst_suspend_activity(false);
858         mutex_lock(&scst_mutex);
859
860         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
861                 if (d->virt_id == id) {
862                         dev = d;
863                         TRACE_DBG("Target device %p (id %d) found", dev, id);
864                         break;
865                 }
866         }
867         if (dev == NULL) {
868                 PRINT_ERROR("Target virtual device (id %d) not found", id);
869                 goto out_unblock;
870         }
871
872         list_del(&dev->dev_list_entry);
873
874         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
875                                  dev_acg_dev_list_entry)
876         {
877                 scst_acg_remove_dev(acg_dev->acg, dev);
878         }
879
880         scst_assign_dev_handler(dev, &scst_null_devtype);
881
882         PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
883                 "(id %d)", dev->virt_name, dev->virt_id);
884
885         scst_free_device(dev);
886
887 out_unblock:
888         mutex_unlock(&scst_mutex);
889         scst_resume_activity();
890
891         TRACE_EXIT();
892         return;
893 }
894 EXPORT_SYMBOL(scst_unregister_virtual_device);
895
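/*
 * Registers a dev handler for real SCSI devices. After validation the
 * handler is added to scst_dev_type_list and immediately assigned to every
 * known SCSI device of the matching type that still has no real handler.
 */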
896 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
897         const char *version)
898 {
899         struct scst_dev_type *dt;
900         struct scst_device *dev;
901         int res;
902         int exist;
903
904         TRACE_ENTRY();
905
906         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
907                 PRINT_ERROR("Incorrect version of dev handler %s",
908                         dev_type->name);
909                 res = -EINVAL;
910                 goto out_error;
911         }
912
913         res = scst_dev_handler_check(dev_type);
914         if (res != 0)
915                 goto out_error;
916
917 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
918     !defined(CONFIG_SCST_STRICT_SERIALIZING)
919         if (dev_type->exec == NULL) {
920                 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") are not "
921                         "supported. Consider applying the patch "
922                         "scst_exec_req_fifo-<kernel-version>.patch to your kernel "
923                         "or defining CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
924                 res = -EINVAL;
925                 goto out;
926         }
927 #endif
928
929         res = scst_suspend_activity(true);
930         if (res != 0)
931                 goto out_error;
932
933         if (mutex_lock_interruptible(&scst_mutex) != 0) {
934                 res = -EINTR;
935                 goto out_err_res;
936         }
937
938         exist = 0;
939         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
940                 if (strcmp(dt->name, dev_type->name) == 0) {
941                         PRINT_ERROR("Device type handler \"%s\" already "
942                                 "exists", dt->name);
943                         exist = 1;
944                         break;
945                 }
946         }
947         if (exist)
948                 goto out_up;
949
950         res = scst_build_proc_dev_handler_dir_entries(dev_type);
951         if (res < 0)
952                 goto out_up;
953
954         list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
955
956         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
957                 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
958                         continue;
959                 if (dev->scsi_dev->type == dev_type->type)
960                         scst_assign_dev_handler(dev, dev_type);
961         }
962
963         mutex_unlock(&scst_mutex);
964         scst_resume_activity();
965
966         if (res == 0) {
967                 PRINT_INFO("Device handler \"%s\" for type %d registered "
968                         "successfully", dev_type->name, dev_type->type);
969         }
970
971 out:
972         TRACE_EXIT_RES(res);
973         return res;
974
975 out_up:
976         mutex_unlock(&scst_mutex);
977
978 out_err_res:
979         scst_resume_activity();
980
981 out_error:
982         PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
983                 dev_type->name, dev_type->type);
984         goto out;
985 }
986 EXPORT_SYMBOL(__scst_register_dev_driver);
987
988 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
989 {
990         struct scst_device *dev;
991         struct scst_dev_type *dt;
992         int found = 0;
993
994         TRACE_ENTRY();
995
996         scst_suspend_activity(false);
997         mutex_lock(&scst_mutex);
998
999         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
1000                 if (strcmp(dt->name, dev_type->name) == 0) {
1001                         found = 1;
1002                         break;
1003                 }
1004         }
1005         if (!found) {
1006                 PRINT_ERROR("Dev handler \"%s\" isn't registered",
1007                         dev_type->name);
1008                 goto out_up;
1009         }
1010
1011         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
1012                 if (dev->handler == dev_type) {
1013                         scst_assign_dev_handler(dev, &scst_null_devtype);
1014                         TRACE_DBG("Dev handler removed from device %p", dev);
1015                 }
1016         }
1017
1018         list_del(&dev_type->dev_type_list_entry);
1019
1020         mutex_unlock(&scst_mutex);
1021         scst_resume_activity();
1022
1023         scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1024
1025         PRINT_INFO("Device handler \"%s\" for type %d unloaded",
1026                    dev_type->name, dev_type->type);
1027
1028 out:
1029         TRACE_EXIT();
1030         return;
1031
1032 out_up:
1033         mutex_unlock(&scst_mutex);
1034         scst_resume_activity();
1035         goto out;
1036 }
1037 EXPORT_SYMBOL(scst_unregister_dev_driver);
1038
1039 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1040         const char *version)
1041 {
1042         int res;
1043
1044         TRACE_ENTRY();
1045
1046         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1047                 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1048                         dev_type->name);
1049                 res = -EINVAL;
1050                 goto out_err;
1051         }
1052
1053         res = scst_dev_handler_check(dev_type);
1054         if (res != 0)
1055                 goto out_err;
1056
1057         if (!dev_type->no_proc) {
1058                 res = scst_build_proc_dev_handler_dir_entries(dev_type);
1059                 if (res < 0)
1060                         goto out_err;
1061         }
1062
1063         if (dev_type->type != -1) {
1064                 PRINT_INFO("Virtual device handler %s for type %d "
1065                         "registered successfully", dev_type->name,
1066                         dev_type->type);
1067         } else {
1068                 PRINT_INFO("Virtual device handler \"%s\" registered "
1069                         "successfully", dev_type->name);
1070         }
1071
1072 out:
1073         TRACE_EXIT_RES(res);
1074         return res;
1075
1076 out_err:
1077         PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1078                 dev_type->name);
1079         goto out;
1080 }
1081 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
1082
1083 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1084 {
1085         TRACE_ENTRY();
1086
1087         if (!dev_type->no_proc)
1088                 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1089
1090         PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1091
1092         TRACE_EXIT();
1093         return;
1094 }
1095 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1096
1097 /* Called under scst_mutex */
1098 int scst_add_dev_threads(struct scst_device *dev, int num)
1099 {
1100         int i, res = 0;
1101         int n = 0;
1102         struct scst_cmd_thread_t *thr;
1103         char nm[12];
1104
1105         TRACE_ENTRY();
1106
1107         list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1108                 n++;
1109         }
1110
1111         for (i = 0; i < num; i++) {
1112                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1113                 if (!thr) {
1114                         res = -ENOMEM;
1115                         PRINT_ERROR("Failed to allocate thr %d", res);
1116                         goto out_del;
1117                 }
1118                 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1119                 nm[ARRAY_SIZE(nm)-1] = '\0';
1120                 thr->cmd_thread = kthread_create(scst_cmd_thread,
1121                         &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1122                 if (IS_ERR(thr->cmd_thread)) {
1123                         res = PTR_ERR(thr->cmd_thread);
1124                         PRINT_ERROR("kthread_create() failed: %d", res);
1125                         kfree(thr);
1126                         goto out_del;
1127                 }
1128
1129                 list_add(&thr->thread_list_entry, &dev->threads_list);
1130
1131                 /*
1132                  * ToDo: better to use tgt_dev_io_context instead, but we
1133                  * are not ready for that yet.
1134                  */
1135                 __exit_io_context(thr->cmd_thread->io_context);
1136                 thr->cmd_thread->io_context = ioc_task_link(dev->dev_io_ctx);
1137                 TRACE_DBG("Setting dev io ctx %p on thr %d", dev->dev_io_ctx,
1138                         thr->cmd_thread->pid);
1139
1140                 wake_up_process(thr->cmd_thread);
1141         }
1142
1143 out:
1144         TRACE_EXIT_RES(res);
1145         return res;
1146
1147 out_del:
1148         scst_del_dev_threads(dev, i);
1149         goto out;
1150 }
1151
1152 /* Called under scst_mutex and suspended activity */
1153 static int scst_create_dev_threads(struct scst_device *dev)
1154 {
1155         int res = 0;
1156         int threads_num;
1157
1158         TRACE_ENTRY();
1159
1160         if (dev->handler->threads_num <= 0)
1161                 goto out;
1162
1163         threads_num = dev->handler->threads_num;
1164
1165         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1166         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1167         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1168
1169         res = scst_add_dev_threads(dev, threads_num);
1170         if (res != 0)
1171                 goto out;
1172
1173         mutex_lock(&scst_suspend_mutex);
1174         list_add_tail(&dev->cmd_lists.lists_list_entry,
1175                 &scst_cmd_lists_list);
1176         mutex_unlock(&scst_suspend_mutex);
1177
1178         dev->p_cmd_lists = &dev->cmd_lists;
1179
1180 out:
1181         TRACE_EXIT_RES(res);
1182         return res;
1183 }
1184
1185 /* Called under scst_mutex */
1186 void scst_del_dev_threads(struct scst_device *dev, int num)
1187 {
1188         struct scst_cmd_thread_t *ct, *tmp;
1189         int i = 0;
1190
1191         TRACE_ENTRY();
1192
1193         if (num == 0)
1194                 goto out;
1195
1196         list_for_each_entry_safe_reverse(ct, tmp, &dev->threads_list,
1197                                 thread_list_entry) {
1198                 int rc;
1199                 struct scst_tgt_dev *tgt_dev;
1200
1201                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1202                                 dev_tgt_dev_list_entry) {
1203                         struct scst_thr_data_hdr *td;
1204                         td = __scst_find_thr_data(tgt_dev, ct->cmd_thread);
1205                         if (td != NULL) {
1206                                 scst_thr_data_put(td);
1207                                 break;
1208                         }
1209                 }
1210
1211                 rc = kthread_stop(ct->cmd_thread);
1212                 if (rc < 0)
1213                         TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1214
1215                 list_del(&ct->thread_list_entry);
1216                 kfree(ct);
1217
1218                 if ((num > 0) && (++i >= num))
1219                         break;
1220         }
1221
1222 out:
1223         TRACE_EXIT();
1224         return;
1225 }
1226
1227 /* Called under scst_mutex and suspended activity */
1228 static void scst_stop_dev_threads(struct scst_device *dev)
1229 {
1230         TRACE_ENTRY();
1231
1232         if (list_empty(&dev->threads_list))
1233                 goto out;
1234
1235         scst_del_dev_threads(dev, -1);
1236
1237         if (dev->p_cmd_lists == &dev->cmd_lists) {
1238                 mutex_lock(&scst_suspend_mutex);
1239                 list_del(&dev->cmd_lists.lists_list_entry);
1240                 mutex_unlock(&scst_suspend_mutex);
1241         }
1242
1243 out:
1244         TRACE_EXIT();
1245         return;
1246 }
1247
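/*
 * Switches a device to a new dev handler: the old handler's detach_tgt()
 * (per tgt_dev) and detach() are called and its threads stopped, then the
 * new handler's threads are created and its attach()/attach_tgt() are
 * called. On failure the device is left with scst_null_devtype.
 */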
1248 /* The activity is supposed to be suspended and scst_mutex held */
1249 int scst_assign_dev_handler(struct scst_device *dev,
1250         struct scst_dev_type *handler)
1251 {
1252         int res = 0;
1253         struct scst_tgt_dev *tgt_dev;
1254         LIST_HEAD(attached_tgt_devs);
1255
1256         TRACE_ENTRY();
1257
1258         sBUG_ON(handler == NULL);
1259
1260         if (dev->handler == handler)
1261                 goto out;
1262
1263         if (dev->handler && dev->handler->detach_tgt) {
1264                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1265                                 dev_tgt_dev_list_entry) {
1266                         TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1267                                 tgt_dev);
1268                         dev->handler->detach_tgt(tgt_dev);
1269                         TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1270                 }
1271         }
1272
1273         if (dev->handler && dev->handler->detach) {
1274                 TRACE_DBG("%s", "Calling dev handler's detach()");
1275                 dev->handler->detach(dev);
1276                 TRACE_DBG("%s", "Old handler's detach() returned");
1277         }
1278
1279         scst_stop_dev_threads(dev);
1280
1281         dev->handler = handler;
1282
1283         if (handler) {
1284                 res = scst_create_dev_threads(dev);
1285                 if (res != 0)
1286                         goto out_null;
1287         }
1288
1289         if (handler && handler->attach) {
1290                 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1291                 res = handler->attach(dev);
1292                 TRACE_DBG("New dev handler's attach() returned %d", res);
1293                 if (res != 0) {
1294                         PRINT_ERROR("New device handler's %s attach() "
1295                                 "failed: %d", handler->name, res);
1296                         goto out_thr_null;
1297                 }
1298         }
1299
1300         if (handler && handler->attach_tgt) {
1301                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1302                                 dev_tgt_dev_list_entry) {
1303                         TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1304                                 tgt_dev);
1305                         res = handler->attach_tgt(tgt_dev);
1306                         TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1307                         if (res != 0) {
1308                                 PRINT_ERROR("Device handler's %s attach_tgt() "
1309                                     "failed: %d", handler->name, res);
1310                                 goto out_err_detach_tgt;
1311                         }
1312                         list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1313                                 &attached_tgt_devs);
1314                 }
1315         }
1316
1317 out_thr_null:
1318         if (res != 0)
1319                 scst_stop_dev_threads(dev);
1320
1321 out_null:
1322         if (res != 0)
1323                 dev->handler = &scst_null_devtype;
1324
1325 out:
1326         TRACE_EXIT_RES(res);
1327         return res;
1328
1329 out_err_detach_tgt:
1330         if (handler && handler->detach_tgt) {
1331                 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1332                                  extra_tgt_dev_list_entry)
1333                 {
1334                         TRACE_DBG("Calling handler's detach_tgt(%p)",
1335                                 tgt_dev);
1336                         handler->detach_tgt(tgt_dev);
1337                         TRACE_DBG("%s", "Handler's detach_tgt() returned");
1338                 }
1339         }
1340         if (handler && handler->detach) {
1341                 TRACE_DBG("%s", "Calling handler's detach()");
1342                 handler->detach(dev);
1343                 TRACE_DBG("%s", "Handler's detach() returned");
1344         }
1345         goto out_null;
1346 }
1347
1348 int scst_global_threads_count(void)
1349 {
1350         int i;
1351
1352         /*
1353          * Taken only to narrow the race window in which the caller could read a just-changed value
1354          */
1355         mutex_lock(&scst_global_threads_mutex);
1356         i = scst_nr_global_threads;
1357         mutex_unlock(&scst_global_threads_mutex);
1358         return i;
1359 }
1360
1361 static void scst_threads_info_init(void)
1362 {
1363         mutex_init(&scst_global_threads_mutex);
1364         INIT_LIST_HEAD(&scst_global_threads_list);
1365 }
1366
1367 /* scst_global_threads_mutex supposed to be held */
1368 void __scst_del_global_threads(int num)
1369 {
1370         struct scst_cmd_thread_t *ct, *tmp;
1371
1372         TRACE_ENTRY();
1373
1374         if (num == 0)
1375                 goto out;
1376
1377         list_for_each_entry_safe(ct, tmp, &scst_global_threads_list,
1378                                 thread_list_entry) {
1379                 int res;
1380
1381                 res = kthread_stop(ct->cmd_thread);
1382                 if (res < 0)
1383                         TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1384                 list_del(&ct->thread_list_entry);
1385                 kfree(ct);
1386                 scst_nr_global_threads--;
1387                 --num;
1388                 if (num == 0)
1389                         break;
1390         }
1391
1392 out:
1393         TRACE_EXIT();
1394         return;
1395 }
1396
1397 /* scst_global_threads_mutex supposed to be held */
1398 int __scst_add_global_threads(int num)
1399 {
1400         int res = 0, i;
1401         static int scst_thread_num;
1402
1403         TRACE_ENTRY();
1404
1405         for (i = 0; i < num; i++) {
1406                 struct scst_cmd_thread_t *thr;
1407
1408                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1409                 if (!thr) {
1410                         res = -ENOMEM;
1411                         PRINT_ERROR("Failed to allocate thr %d", res);
1412                         goto out_error;
1413                 }
1414                 thr->cmd_thread = kthread_create(scst_cmd_thread,
1415                         &scst_main_cmd_lists, "scsi_tgt%d",
1416                         scst_thread_num++);
1417                 if (IS_ERR(thr->cmd_thread)) {
1418                         res = PTR_ERR(thr->cmd_thread);
1419                         PRINT_ERROR("kthread_create() failed: %d", res);
1420                         kfree(thr);
1421                         goto out_error;
1422                 }
1423
1424                 list_add(&thr->thread_list_entry, &scst_global_threads_list);
1425                 scst_nr_global_threads++;
1426
1427                 wake_up_process(thr->cmd_thread);
1428         }
1429         res = 0;
1430
1431 out:
1432         TRACE_EXIT_RES(res);
1433         return res;
1434
1435 out_error:
1436         __scst_del_global_threads(i);
1437         goto out;
1438 }
1439
1440 int scst_add_global_threads(int num)
1441 {
1442         int res;
1443
1444         TRACE_ENTRY();
1445
1446         mutex_lock(&scst_global_threads_mutex);
1447         res = __scst_add_global_threads(num);
1448         mutex_unlock(&scst_global_threads_mutex);
1449
1450         TRACE_EXIT_RES(res);
1451         return res;
1452 }
1453 EXPORT_SYMBOL(scst_add_global_threads);
1454
1455 void scst_del_global_threads(int num)
1456 {
1457         TRACE_ENTRY();
1458
1459         mutex_lock(&scst_global_threads_mutex);
1460         __scst_del_global_threads(num);
1461         mutex_unlock(&scst_global_threads_mutex);
1462
1463         TRACE_EXIT();
1464         return;
1465 }
1466 EXPORT_SYMBOL(scst_del_global_threads);
1467
1468 static void scst_stop_all_threads(void)
1469 {
1470         TRACE_ENTRY();
1471
1472         mutex_lock(&scst_global_threads_mutex);
1473         __scst_del_global_threads(-1);
1474         if (scst_mgmt_cmd_thread)
1475                 kthread_stop(scst_mgmt_cmd_thread);
1476         if (scst_mgmt_thread)
1477                 kthread_stop(scst_mgmt_thread);
1478         if (scst_init_cmd_thread)
1479                 kthread_stop(scst_init_cmd_thread);
1480         mutex_unlock(&scst_global_threads_mutex);
1481
1482         TRACE_EXIT();
1483         return;
1484 }
1485
1486 static int scst_start_all_threads(int num)
1487 {
1488         int res;
1489
1490         TRACE_ENTRY();
1491
1492         mutex_lock(&scst_global_threads_mutex);
1493         res = __scst_add_global_threads(num);
1494         if (res < 0)
1495                 goto out;
1496
1497         scst_init_cmd_thread = kthread_run(scst_init_thread,
1498                 NULL, "scsi_tgt_init");
1499         if (IS_ERR(scst_init_cmd_thread)) {
1500                 res = PTR_ERR(scst_init_cmd_thread);
1501                 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1502                 scst_init_cmd_thread = NULL;
1503                 goto out;
1504         }
1505
1506         scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
1507                 NULL, "scsi_tm");
1508         if (IS_ERR(scst_mgmt_cmd_thread)) {
1509                 res = PTR_ERR(scst_mgmt_cmd_thread);
1510                 PRINT_ERROR("kthread_create() for TM failed: %d", res);
1511                 scst_mgmt_cmd_thread = NULL;
1512                 goto out;
1513         }
1514
1515         scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
1516                 NULL, "scsi_tgt_mgmt");
1517         if (IS_ERR(scst_mgmt_thread)) {
1518                 res = PTR_ERR(scst_mgmt_thread);
1519                 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1520                 scst_mgmt_thread = NULL;
1521                 goto out;
1522         }
1523
1524 out:
1525         mutex_unlock(&scst_global_threads_mutex);
1526         TRACE_EXIT_RES(res);
1527         return res;
1528 }
1529
1530 void scst_get(void)
1531 {
1532         __scst_get(0);
1533 }
1534 EXPORT_SYMBOL(scst_get);
1535
1536 void scst_put(void)
1537 {
1538         __scst_put();
1539 }
1540 EXPORT_SYMBOL(scst_put);
1541
1542 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1543 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1544 #else
1545 static int scst_add(struct device *cdev, struct class_interface *intf)
1546 #endif
1547 {
1548         struct scsi_device *scsidp;
1549         int res = 0;
1550
1551         TRACE_ENTRY();
1552
1553 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1554         scsidp = to_scsi_device(cdev->dev);
1555 #else
1556         scsidp = to_scsi_device(cdev->parent);
1557 #endif
1558
1559         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1560                 res = scst_register_device(scsidp);
1561
1562         TRACE_EXIT();
1563         return res;
1564 }
1565
1566 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1567 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1568 #else
1569 static void scst_remove(struct device *cdev, struct class_interface *intf)
1570 #endif
1571 {
1572         struct scsi_device *scsidp;
1573
1574         TRACE_ENTRY();
1575
1576 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1577         scsidp = to_scsi_device(cdev->dev);
1578 #else
1579         scsidp = to_scsi_device(cdev->parent);
1580 #endif
1581
1582         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1583                 scst_unregister_device(scsidp);
1584
1585         TRACE_EXIT();
1586         return;
1587 }
1588
1589 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1590 static struct class_interface scst_interface = {
1591         .add = scst_add,
1592         .remove = scst_remove,
1593 };
1594 #else
1595 static struct class_interface scst_interface = {
1596         .add_dev = scst_add,
1597         .remove_dev = scst_remove,
1598 };
1599 #endif
1600
1601 static void __init scst_print_config(void)
1602 {
1603         char buf[128];
1604         int i, j;
1605
1606         i = snprintf(buf, sizeof(buf), "Enabled features: ");
1607         j = i;
1608
1609 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1610         i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1611 #endif
1612
1613 #ifdef CONFIG_SCST_EXTRACHECKS
1614         i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1615                 (j == i) ? "" : ", ");
1616 #endif
1617
1618 #ifdef CONFIG_SCST_TRACING
1619         i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1620                 (j == i) ? "" : ", ");
1621 #endif
1622
1623 #ifdef CONFIG_SCST_DEBUG
1624         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1625                 (j == i) ? "" : ", ");
1626 #endif
1627
1628 #ifdef CONFIG_SCST_DEBUG_TM
1629         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1630                 (j == i) ? "" : ", ");
1631 #endif
1632
1633 #ifdef CONFIG_SCST_DEBUG_RETRY
1634         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1635                 (j == i) ? "" : ", ");
1636 #endif
1637
1638 #ifdef CONFIG_SCST_DEBUG_OOM
1639         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1640                 (j == i) ? "" : ", ");
1641 #endif
1642
1643 #ifdef CONFIG_SCST_DEBUG_SN
1644         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1645                 (j == i) ? "" : ", ");
1646 #endif
1647
1648 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1649         i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1650                 (j == i) ? "" : ", ");
1651 #endif
1652
1653 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1654         i += snprintf(&buf[i], sizeof(buf) - i,
1655                 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1656                 (j == i) ? "" : ", ");
1657 #endif
1658
1659 #ifdef CONFIG_SCST_STRICT_SECURITY
1660         i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1661                 (j == i) ? "" : ", ");
1662 #endif
1663
1664         if (j != i)
1665                 PRINT_INFO("%s", buf);
1666 }
1667
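/*
 * Module init: checks build-time size assumptions, initializes the global
 * locks, lists and the main command-list structure, derives the default
 * thread count from the number of online CPUs, and creates the slab
 * caches (and, further down, the remaining per-object resources) used by
 * the core.
 */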
1668 static int __init init_scst(void)
1669 {
1670         int res = 0, i;
1671         int scst_num_cpus;
1672
1673         TRACE_ENTRY();
1674
1675 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1676         {
1677                 struct scsi_request *req;
1678                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1679                         sizeof(req->sr_sense_buffer));
1680         }
1681 #else
1682         {
1683                 struct scsi_sense_hdr *shdr;
1684                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1685         }
1686 #endif
1687         {
1688                 struct scst_tgt_dev *t;
1689                 struct scst_cmd *c;
1690                 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1691                 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
1692         }
1693
1694 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1695 #if !defined(SCST_IO_CONTEXT)
1696         PRINT_WARNING("%s", "Patch io_context was not applied to "
1697                 "your kernel. SCST will work with suboptimal "
1698                 "performance.");
1699 #endif
1700 #endif
1701
1702         mutex_init(&scst_mutex);
1703         INIT_LIST_HEAD(&scst_template_list);
1704         INIT_LIST_HEAD(&scst_dev_list);
1705         INIT_LIST_HEAD(&scst_dev_type_list);
1706         spin_lock_init(&scst_main_lock);
1707         INIT_LIST_HEAD(&scst_acg_list);
1708         spin_lock_init(&scst_init_lock);
1709         init_waitqueue_head(&scst_init_cmd_list_waitQ);
1710         INIT_LIST_HEAD(&scst_init_cmd_list);
1711 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1712         scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1713 #endif
1714         atomic_set(&scst_cmd_count, 0);
1715         spin_lock_init(&scst_mcmd_lock);
1716         INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1717         INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1718         init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1719         init_waitqueue_head(&scst_mgmt_waitQ);
1720         spin_lock_init(&scst_mgmt_lock);
1721         INIT_LIST_HEAD(&scst_sess_init_list);
1722         INIT_LIST_HEAD(&scst_sess_shut_list);
1723         init_waitqueue_head(&scst_dev_cmd_waitQ);
1724         mutex_init(&scst_suspend_mutex);
1725         INIT_LIST_HEAD(&scst_cmd_lists_list);
1726         scst_virt_dev_last_id = 1;
1727         spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1728         INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1729         init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1730         list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1731                 &scst_cmd_lists_list);
1732
1733         scst_num_cpus = num_online_cpus();
1734
1735         /* ToDo: register_cpu_notifier() */
1736
1737         if (scst_threads == 0)
1738                 scst_threads = scst_num_cpus;
1739
1740         if (scst_threads < 1) {
1741                 PRINT_ERROR("%s", "scst_threads cannot be less than 1");
1742                 scst_threads = scst_num_cpus;
1743         }
1744
1745         scst_threads_info_init();
1746
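        /*
         * Helper: create a slab cache for struct 's' and store it in 'p';
         * on failure set res = -ENOMEM and jump to the unwind label 'o'.
         * For example, INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out)
         * expands to roughly:
         *
         *      scst_mgmt_cachep = KMEM_CACHE(scst_mgmt_cmd, SCST_SLAB_FLAGS);
         *      if (scst_mgmt_cachep == NULL) {
         *              res = -ENOMEM;
         *              goto out;
         *      }
         */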
1747 #define INIT_CACHEP(p, s, o) do {                                       \
1748                 p = KMEM_CACHE(s, SCST_SLAB_FLAGS);                     \
1749                 TRACE_MEM("Slab create: %s at %p size %zd", #s, p,      \
1750                           sizeof(struct s));                            \
1751                 if (p == NULL) {                                        \
1752                         res = -ENOMEM;                                  \
1753                         goto o;                                         \
1754                 }                                                       \
1755         } while (0)
1756
1757         INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1758         INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1759                         out_destroy_mgmt_cache);
1760         INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1761                         out_destroy_mgmt_stub_cache);
1762         {
1763                 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1764                 INIT_CACHEP(scst_sense_cachep, scst_sense,
1765                             out_destroy_ua_cache);
1766         }
1767         INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
1768         INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
1769         INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1770         INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1771         INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
1772
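        /*
         * Back each of these caches with a mempool, so that a reserve of
         * pre-allocated objects (the first mempool_create() argument)
         * remains available even under memory pressure.
         */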
1773         scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1774                 mempool_free_slab, scst_mgmt_cachep);
1775         if (scst_mgmt_mempool == NULL) {
1776                 res = -ENOMEM;
1777                 goto out_destroy_acg_cache;
1778         }
1779
1780         /*
1781          * All mgmt stubs, UAs and sense buffers are bursty and losing them
1782          * may have fatal consequences, so let's have big pools for them.
1783          */
1784
1785         scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1786                 mempool_free_slab, scst_mgmt_stub_cachep);
1787         if (scst_mgmt_stub_mempool == NULL) {
1788                 res = -ENOMEM;
1789                 goto out_destroy_mgmt_mempool;
1790         }
1791
1792         scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
1793                 mempool_free_slab, scst_ua_cachep);
1794         if (scst_ua_mempool == NULL) {
1795                 res = -ENOMEM;
1796                 goto out_destroy_mgmt_stub_mempool;
1797         }
1798
1799         scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
1800                 mempool_free_slab, scst_sense_cachep);
1801         if (scst_sense_mempool == NULL) {
1802                 res = -ENOMEM;
1803                 goto out_destroy_ua_mempool;
1804         }
1805
1806         scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
1807                 mempool_free_slab, scst_aen_cachep);
1808         if (scst_aen_mempool == NULL) {
1809                 res = -ENOMEM;
1810                 goto out_destroy_sense_mempool;
1811         }
1812
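        /*
         * If the user did not set scst_max_cmd_mem, default it to one
         * quarter of the non-highmem RAM, expressed in megabytes.  On
         * 32-bit kernels the intermediate shift is done in 64-bit
         * arithmetic so it cannot overflow.
         */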
1813         if (scst_max_cmd_mem == 0) {
1814                 struct sysinfo si;
1815                 si_meminfo(&si);
1816 #if BITS_PER_LONG == 32
1817                 scst_max_cmd_mem = min(
1818                         (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
1819                                 >> 20) >> 2, (uint64_t)1 << 30);
1820 #else
1821                 scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
1822                                         >> 20) >> 2;
1823 #endif
1824         }
1825
1826         if (scst_max_dev_cmd_mem != 0) {
1827                 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1828                         PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1829                                 "scst_max_cmd_mem (%d)",
1830                                 scst_max_dev_cmd_mem,
1831                                 scst_max_cmd_mem);
1832                         scst_max_dev_cmd_mem = scst_max_cmd_mem;
1833                 }
1834         } else
1835                 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
1836
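        /*
         * Convert the command memory budget from megabytes to pages:
         * "<< 10" turns MB into KB and ">> (PAGE_SHIFT - 10)" divides by
         * the page size in KB.  E.g. with 4 KB pages, 256 MB becomes
         * 256 << 10 >> 2 = 65536 pages.
         */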
1837         res = scst_sgv_pools_init(
1838                 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1839         if (res != 0)
1840                 goto out_destroy_aen_mempool;
1841
1842         scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1843         if (scst_default_acg == NULL) {
1844                 res = -ENOMEM;
1845                 goto out_destroy_sgv_pool;
1846         }
1847
1848         res = scsi_register_interface(&scst_interface);
1849         if (res != 0)
1850                 goto out_free_acg;
1851
1852         scst_scsi_op_list_init();
1853
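        /*
         * Initialise every entry of scst_tasklets[]: its lock, its pending
         * command list and the tasklet itself, bound to scst_cmd_tasklet()
         * with the entry as its argument.
         */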
1854         for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1855                 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1856                 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1857                 tasklet_init(&scst_tasklets[i].tasklet,
1858                              (void *)scst_cmd_tasklet,
1859                              (unsigned long)&scst_tasklets[i]);
1860         }
1861
1862         TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1863                 scst_threads);
1864
1865         res = scst_start_all_threads(scst_threads);
1866         if (res < 0)
1867                 goto out_thread_free;
1868
1869         res = scst_proc_init_module();
1870         if (res != 0)
1871                 goto out_thread_free;
1872
1874         PRINT_INFO("SCST version %s loaded successfully (max mem for "
1875                 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1876                 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1877
1878         scst_print_config();
1879
1880 out:
1881         TRACE_EXIT_RES(res);
1882         return res;
1883
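        /*
         * Error unwind: each label below undoes everything that was set up
         * after the point it corresponds to, falling through in reverse
         * order of allocation, and finally returns res via the "goto out"
         * at the bottom.
         */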
1884 out_thread_free:
1885         scst_stop_all_threads();
1886
1887         scsi_unregister_interface(&scst_interface);
1888
1889 out_free_acg:
1890         scst_destroy_acg(scst_default_acg);
1891
1892 out_destroy_sgv_pool:
1893         scst_sgv_pools_deinit();
1894
1895 out_destroy_aen_mempool:
1896         mempool_destroy(scst_aen_mempool);
1897
1898 out_destroy_sense_mempool:
1899         mempool_destroy(scst_sense_mempool);
1900
1901 out_destroy_ua_mempool:
1902         mempool_destroy(scst_ua_mempool);
1903
1904 out_destroy_mgmt_stub_mempool:
1905         mempool_destroy(scst_mgmt_stub_mempool);
1906
1907 out_destroy_mgmt_mempool:
1908         mempool_destroy(scst_mgmt_mempool);
1909
1910 out_destroy_acg_cache:
1911         kmem_cache_destroy(scst_acgd_cachep);
1912
1913 out_destroy_tgt_cache:
1914         kmem_cache_destroy(scst_tgtd_cachep);
1915
1916 out_destroy_sess_cache:
1917         kmem_cache_destroy(scst_sess_cachep);
1918
1919 out_destroy_cmd_cache:
1920         kmem_cache_destroy(scst_cmd_cachep);
1921
1922 out_destroy_aen_cache:
1923         kmem_cache_destroy(scst_aen_cachep);
1924
1925 out_destroy_sense_cache:
1926         kmem_cache_destroy(scst_sense_cachep);
1927
1928 out_destroy_ua_cache:
1929         kmem_cache_destroy(scst_ua_cachep);
1930
1931 out_destroy_mgmt_stub_cache:
1932         kmem_cache_destroy(scst_mgmt_stub_cachep);
1933
1934 out_destroy_mgmt_cache:
1935         kmem_cache_destroy(scst_mgmt_cachep);
1936         goto out;
1937 }
1938
1939 static void __exit exit_scst(void)
1940 {
1941         TRACE_ENTRY();
1942
1943         /* ToDo: unregister_cpu_notifier() */
1944
1945         scst_proc_cleanup_module();
1946
1947         scst_stop_all_threads();
1948
1949         scsi_unregister_interface(&scst_interface);
1950         scst_destroy_acg(scst_default_acg);
1951
1952         scst_sgv_pools_deinit();
1953
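        /* Helper: destroy a slab cache and clear the stale pointer. */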
1954 #define DEINIT_CACHEP(p) do {           \
1955                 kmem_cache_destroy(p);  \
1956                 p = NULL;               \
1957         } while (0)
1958
1959         mempool_destroy(scst_mgmt_mempool);
1960         mempool_destroy(scst_mgmt_stub_mempool);
1961         mempool_destroy(scst_ua_mempool);
1962         mempool_destroy(scst_sense_mempool);
1963         mempool_destroy(scst_aen_mempool);
1964
1965         DEINIT_CACHEP(scst_mgmt_cachep);
1966         DEINIT_CACHEP(scst_mgmt_stub_cachep);
1967         DEINIT_CACHEP(scst_ua_cachep);
1968         DEINIT_CACHEP(scst_sense_cachep);
1969         DEINIT_CACHEP(scst_aen_cachep);
1970         DEINIT_CACHEP(scst_cmd_cachep);
1971         DEINIT_CACHEP(scst_sess_cachep);
1972         DEINIT_CACHEP(scst_tgtd_cachep);
1973         DEINIT_CACHEP(scst_acgd_cachep);
1974
1975         PRINT_INFO("%s", "SCST unloaded");
1976
1977         TRACE_EXIT();
1978         return;
1979 }
1980
1982 module_init(init_scst);
1983 module_exit(exit_scst);
1984
1985 MODULE_AUTHOR("Vladislav Bolkhovitin");
1986 MODULE_LICENSE("GPL");
1987 MODULE_DESCRIPTION("SCSI target core");
1988 MODULE_VERSION(SCST_VERSION_STRING);