scst/src/scst_main.c (commit 08aad2d1200349519234d3bb249b0c70e9c18b81)
1 /*
2  *  scst_main.c
3  *
4  *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2007 - 2008 CMS Distribution Limited
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/module.h>
20
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
31
32 #include "scst.h"
33 #include "scst_priv.h"
34 #include "scst_mem.h"
35
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not \
38         recommended for performance reasons. Consider changing the VMSPLIT \
39         option or using a 64-bit configuration instead. See the README file \
40         for details."
41 #endif
42
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44     !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied to \
46         your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined. \
47         Pass-through dev handlers will not be supported."
48 #endif
49
50 /**
51  ** SCST global variables. They are all left uninitialized so that their
52  ** layout in memory is exactly as declared. Otherwise the compiler would put
53  ** zero-initialized variables separately from nonzero-initialized ones.
54  **/
55
56 /*
57  * All management of targets, devices and dev_types is done under this mutex.
58  *
59  * It must NOT be taken from work items (schedule_work(), etc.), because
60  * otherwise a deadlock (a double lock, actually) is possible, e.g., with
61  * scst_user's detach_tgt(), which is called under scst_mutex and itself
62  * calls flush_scheduled_work().
63  */
64 struct mutex scst_mutex;
65
66 struct list_head scst_template_list;
67 struct list_head scst_dev_list;
68 struct list_head scst_dev_type_list;
69
70 spinlock_t scst_main_lock;
71
72 static struct kmem_cache *scst_mgmt_cachep;
73 mempool_t *scst_mgmt_mempool;
74 static struct kmem_cache *scst_mgmt_stub_cachep;
75 mempool_t *scst_mgmt_stub_mempool;
76 static struct kmem_cache *scst_ua_cachep;
77 mempool_t *scst_ua_mempool;
78 static struct kmem_cache *scst_sense_cachep;
79 mempool_t *scst_sense_mempool;
80 struct kmem_cache *scst_tgtd_cachep;
81 struct kmem_cache *scst_sess_cachep;
82 struct kmem_cache *scst_acgd_cachep;
83
84 struct list_head scst_acg_list;
85 struct scst_acg *scst_default_acg;
86
87 spinlock_t scst_init_lock;
88 wait_queue_head_t scst_init_cmd_list_waitQ;
89 struct list_head scst_init_cmd_list;
90 unsigned int scst_init_poll_cnt;
91
92 struct kmem_cache *scst_cmd_cachep;
93
94 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
95 unsigned long scst_trace_flag;
96 #endif
97
98 unsigned long scst_flags;
99 atomic_t scst_cmd_count;
100
101 struct scst_cmd_lists scst_main_cmd_lists;
102
103 struct scst_tasklet scst_tasklets[NR_CPUS];
104
105 spinlock_t scst_mcmd_lock;
106 struct list_head scst_active_mgmt_cmd_list;
107 struct list_head scst_delayed_mgmt_cmd_list;
108 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
109
110 wait_queue_head_t scst_mgmt_waitQ;
111 spinlock_t scst_mgmt_lock;
112 struct list_head scst_sess_init_list;
113 struct list_head scst_sess_shut_list;
114
115 wait_queue_head_t scst_dev_cmd_waitQ;
116
117 struct mutex scst_suspend_mutex;
118 struct list_head scst_cmd_lists_list;
119
120 static int scst_threads;
121 struct scst_threads_info_t scst_threads_info;
122
123 static int suspend_count;
124
125 static int scst_virt_dev_last_id; /* protected by scst_mutex */
126
127 /*
128  * This buffer and its lock are intended to avoid memory allocations, which
129  * could fail at points where failure cannot be handled.
130  */
131 spinlock_t scst_temp_UA_lock;
132 uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
133
134 unsigned int scst_max_cmd_mem;
135 unsigned int scst_max_dev_cmd_mem;
136
137 module_param_named(scst_threads, scst_threads, int, 0);
138 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
139
140 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
141 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
142         "all SCSI commands of all devices at any given time in MB");
143
144 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
145 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
146         "by all SCSI commands of a device at any given time in MB");
147
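/*
 * Load-time usage sketch (illustrative, not part of this file): the
 * parameters above can be set when the module is loaded. Assuming the
 * module is named "scst", something like
 *
 *   modprobe scst scst_threads=4 scst_max_cmd_mem=256 scst_max_dev_cmd_mem=128
 *
 * would start 4 command threads and cap command memory at 256/128 MB.
 * Leaving a parameter at 0 keeps the default computed in init_scst() below.
 */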
148 struct scst_dev_type scst_null_devtype = {
149         .name = "none",
150 };
151
152 static void __scst_resume_activity(void);
153
154 int __scst_register_target_template(struct scst_tgt_template *vtt,
155         const char *version)
156 {
157         int res = 0;
158         struct scst_tgt_template *t;
159         static DEFINE_MUTEX(m);
160
161         TRACE_ENTRY();
162
163         INIT_LIST_HEAD(&vtt->tgt_list);
164
165         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
166                 PRINT_ERROR("Incorrect version of target %s", vtt->name);
167                 res = -EINVAL;
168                 goto out_err;
169         }
170
171         if (!vtt->detect) {
172                 PRINT_ERROR("Target driver %s doesn't have a "
173                         "detect() method.", vtt->name);
174                 res = -EINVAL;
175                 goto out_err;
176         }
177
178         if (!vtt->release) {
179                 PRINT_ERROR("Target driver %s doesn't have a "
180                         "release() method.", vtt->name);
181                 res = -EINVAL;
182                 goto out_err;
183         }
184
185         if (!vtt->xmit_response) {
186                 PRINT_ERROR("Target driver %s doesn't have an "
187                         "xmit_response() method.", vtt->name);
188                 res = -EINVAL;
189                 goto out_err;
190         }
191
192         if (vtt->threads_num < 0) {
193                 PRINT_ERROR("Wrong threads_num value %d for "
194                         "target \"%s\"", vtt->threads_num,
195                         vtt->name);
196                 res = -EINVAL;
197                 goto out_err;
198         }
199
200         if (!vtt->no_proc_entry) {
201                 res = scst_build_proc_target_dir_entries(vtt);
202                 if (res < 0)
203                         goto out_err;
204         }
205
206         if (vtt->rdy_to_xfer == NULL)
207                 vtt->rdy_to_xfer_atomic = 1;
208
209         if (mutex_lock_interruptible(&m) != 0)
210                 goto out_err;
211
212         if (mutex_lock_interruptible(&scst_mutex) != 0)
213                 goto out_m_up;
214         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
215                 if (strcmp(t->name, vtt->name) == 0) {
216                         PRINT_ERROR("Target driver %s already registered",
217                                 vtt->name);
218                         mutex_unlock(&scst_mutex);
219                         goto out_cleanup;
220                 }
221         }
222         mutex_unlock(&scst_mutex);
223
224         TRACE_DBG("%s", "Calling target driver's detect()");
225         res = vtt->detect(vtt);
226         TRACE_DBG("Target driver's detect() returned %d", res);
227         if (res < 0) {
228                 PRINT_ERROR("%s", "The detect() routine failed");
229                 res = -EINVAL;
230                 goto out_cleanup;
231         }
232
233         mutex_lock(&scst_mutex);
234         list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
235         mutex_unlock(&scst_mutex);
236
237         res = 0;
238
239         PRINT_INFO("Target template %s registered successfully", vtt->name);
240
241         mutex_unlock(&m);
242
243 out:
244         TRACE_EXIT_RES(res);
245         return res;
246
247 out_cleanup:
248         scst_cleanup_proc_target_dir_entries(vtt);
249
250 out_m_up:
251         mutex_unlock(&m);
252
253 out_err:
254         PRINT_ERROR("Failed to register target template %s", vtt->name);
255         goto out;
256 }
257 EXPORT_SYMBOL(__scst_register_target_template);
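/*
 * Minimal registration sketch for a target driver (illustrative only; the
 * callback and wrapper names below are assumptions based on scst.h, not
 * taken from this file). A driver must supply at least detect(), release()
 * and xmit_response(), then register through the version-checking wrapper:
 *
 *   static struct scst_tgt_template my_tgt_template = {
 *           .name          = "my_tgt",
 *           .sg_tablesize  = 128,
 *           .detect        = my_detect,
 *           .release       = my_release,
 *           .xmit_response = my_xmit_response,
 *   };
 *
 *   static int __init my_init(void)
 *   {
 *           return scst_register_target_template(&my_tgt_template);
 *   }
 *
 * scst_register_target_template() is assumed to be the scst.h wrapper that
 * passes SCST_INTERFACE_VERSION to __scst_register_target_template().
 */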
258
259 void scst_unregister_target_template(struct scst_tgt_template *vtt)
260 {
261         struct scst_tgt *tgt;
262         struct scst_tgt_template *t;
263         int found = 0;
264
265         TRACE_ENTRY();
266
267         mutex_lock(&scst_mutex);
268
269         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
270                 if (strcmp(t->name, vtt->name) == 0) {
271                         found = 1;
272                         break;
273                 }
274         }
275         if (!found) {
276                 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
277                 goto out_up;
278         }
279
280 restart:
281         list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
282                 mutex_unlock(&scst_mutex);
283                 scst_unregister(tgt);
284                 mutex_lock(&scst_mutex);
285                 goto restart;
286         }
287         list_del(&vtt->scst_template_list_entry);
288
289         PRINT_INFO("Target template %s unregistered successfully", vtt->name);
290
291 out_up:
292         mutex_unlock(&scst_mutex);
293
294         scst_cleanup_proc_target_dir_entries(vtt);
295
296         TRACE_EXIT();
297         return;
298 }
299 EXPORT_SYMBOL(scst_unregister_target_template);
300
301 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
302         const char *target_name)
303 {
304         struct scst_tgt *tgt;
305         int rc = 0;
306
307         TRACE_ENTRY();
308
309         tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
310         if (tgt == NULL) {
311                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
312                 rc = -ENOMEM;
313                 goto out_err;
314         }
315
316         INIT_LIST_HEAD(&tgt->sess_list);
317         init_waitqueue_head(&tgt->unreg_waitQ);
318         tgt->tgtt = vtt;
319         tgt->sg_tablesize = vtt->sg_tablesize;
320         spin_lock_init(&tgt->tgt_lock);
321         INIT_LIST_HEAD(&tgt->retry_cmd_list);
322         atomic_set(&tgt->finished_cmds, 0);
323         init_timer(&tgt->retry_timer);
324         tgt->retry_timer.data = (unsigned long)tgt;
325         tgt->retry_timer.function = scst_tgt_retry_timer_fn;
326
327         rc = scst_suspend_activity(true);
328         if (rc != 0)
329                 goto out_free_tgt_err;
330
331         if (mutex_lock_interruptible(&scst_mutex) != 0) {
332                 rc = -EINTR;
333                 goto out_resume_free;
334         }
335
336         if (target_name != NULL) {
337                 int len = strlen(target_name) + 1 +
338                         strlen(SCST_DEFAULT_ACG_NAME) + 1;
339
340                 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
341                 if (tgt->default_group_name == NULL) {
342                         TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
343                                 "group name failed");
344                         rc = -ENOMEM;
345                         goto out_unlock_resume;
346                 }
347                 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
348                         target_name);
349         }
350
351         rc = scst_build_proc_target_entries(tgt);
352         if (rc < 0)
353                 goto out_free_name;
354         else
355                 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
356
357         mutex_unlock(&scst_mutex);
358         scst_resume_activity();
359
360         PRINT_INFO("Target %s (%p) for template %s registered successfully",
361                 target_name, tgt, vtt->name);
362
363 out:
364         TRACE_EXIT();
365         return tgt;
366
367 out_free_name:
368         kfree(tgt->default_group_name);
369
370 out_unlock_resume:
371         mutex_unlock(&scst_mutex);
372
373 out_resume_free:
374         scst_resume_activity();
375
376 out_free_tgt_err:
377         kfree(tgt);
378         tgt = NULL;
379
380 out_err:
381         PRINT_ERROR("Failed to register target %s for template %s (error %d)",
382                 target_name, vtt->name, rc);
383         goto out;
384 }
385 EXPORT_SYMBOL(scst_register);
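/*
 * Typical pairing sketch (illustrative; the port name and error handling
 * are assumptions): once its template is registered, a driver announces
 * each target port with scst_register() and tears it down with
 * scst_unregister(), usually from its detect() and release() paths:
 *
 *   struct scst_tgt *tgt = scst_register(&my_tgt_template, "my_tgt_port0");
 *   if (tgt == NULL)
 *           return -ENOMEM;
 *   ...
 *   scst_unregister(tgt);
 */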
386
387 static inline int test_sess_list(struct scst_tgt *tgt)
388 {
389         int res;
390         mutex_lock(&scst_mutex);
391         res = list_empty(&tgt->sess_list);
392         mutex_unlock(&scst_mutex);
393         return res;
394 }
395
396 void scst_unregister(struct scst_tgt *tgt)
397 {
398         struct scst_session *sess;
399         struct scst_tgt_template *vtt = tgt->tgtt;
400
401         TRACE_ENTRY();
402
403         TRACE_DBG("%s", "Calling target driver's release()");
404         tgt->tgtt->release(tgt);
405         TRACE_DBG("%s", "Target driver's release() returned");
406
407         mutex_lock(&scst_mutex);
408         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
409                 sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
410         }
411         mutex_unlock(&scst_mutex);
412
413         TRACE_DBG("%s", "Waiting for sessions shutdown");
414         wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
415         TRACE_DBG("%s", "wait_event() returned");
416
417         scst_suspend_activity(false);
418         mutex_lock(&scst_mutex);
419
420         list_del(&tgt->tgt_list_entry);
421
422         scst_cleanup_proc_target_entries(tgt);
423
424         kfree(tgt->default_group_name);
425
426         mutex_unlock(&scst_mutex);
427         scst_resume_activity();
428
429         del_timer_sync(&tgt->retry_timer);
430
431         PRINT_INFO("Target %p for template %s unregistered successfully",
432                 tgt, vtt->name);
433
434         kfree(tgt);
435
436         TRACE_EXIT();
437         return;
438 }
439 EXPORT_SYMBOL(scst_unregister);
440
441 static int scst_susp_wait(bool interruptible)
442 {
443         int res = 0;
444
445         TRACE_ENTRY();
446
447         if (interruptible) {
448                 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
449                         (atomic_read(&scst_cmd_count) == 0),
450                         SCST_SUSPENDING_TIMEOUT);
451                 if (res <= 0) {
452                         __scst_resume_activity();
453                         if (res == 0)
454                                 res = -EBUSY;
455                 } else
456                         res = 0;
457         } else
458                 wait_event(scst_dev_cmd_waitQ,
459                            atomic_read(&scst_cmd_count) == 0);
460
461         TRACE_MGMT_DBG("wait_event() returned %d", res);
462
463         TRACE_EXIT_RES(res);
464         return res;
465 }
466
467 int scst_suspend_activity(bool interruptible)
468 {
469         int res = 0;
470         bool rep = false;
471
472         TRACE_ENTRY();
473
474         if (interruptible) {
475                 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
476                         res = -EINTR;
477                         goto out;
478                 }
479         } else
480                 mutex_lock(&scst_suspend_mutex);
481
482         TRACE_MGMT_DBG("suspend_count %d", suspend_count);
483         suspend_count++;
484         if (suspend_count > 1)
485                 goto out_up;
486
487         set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
488         set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
489         smp_mb__after_set_bit();
490
491         /*
492          * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
493          * information about scst_user behavior.
494          *
495          * ToDo: make the global suspending unnecessary (switch to per-device
496          * reference counting? That would mean giving up the lockless
497          * implementation of scst_translate_lun().)
498          */
499
500         if (atomic_read(&scst_cmd_count) != 0) {
501                 PRINT_INFO("Waiting for %d active commands to complete... This "
502                         "might take a few minutes for disks or a few hours for "
503                         "tapes, if you use long-running commands, like "
504                         "REWIND or FORMAT. If you have a hung user "
505                         "space device (i.e. one made using the scst_user module) "
506                         "not responding to any commands, it might take virtually "
507                         "forever until the corresponding user space "
508                         "program recovers and starts responding or gets "
509                         "killed.", atomic_read(&scst_cmd_count));
510                 rep = true;
511         }
512
513         res = scst_susp_wait(interruptible);
514         if (res != 0)
515                 goto out_clear;
516
517         clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
518         smp_mb__after_clear_bit();
519
520         TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
521                 atomic_read(&scst_cmd_count));
522
523         res = scst_susp_wait(interruptible);
524         if (res != 0)
525                 goto out_clear;
526
527         if (rep)
528                 PRINT_INFO("%s", "All active commands completed");
529
530 out_up:
531         mutex_unlock(&scst_suspend_mutex);
532
533 out:
534         TRACE_EXIT_RES(res);
535         return res;
536
537 out_clear:
538         clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
539         smp_mb__after_clear_bit();
540         goto out_up;
541 }
542 EXPORT_SYMBOL(scst_suspend_activity);
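/*
 * Pairing sketch (illustrative): every successful scst_suspend_activity()
 * must be balanced by scst_resume_activity(), since suspend_count nests.
 * On failure nothing is held, so no resume is needed:
 *
 *   res = scst_suspend_activity(true);
 *   if (res != 0)
 *           return res;
 *   ... reconfigure, typically under scst_mutex ...
 *   scst_resume_activity();
 */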
543
544 static void __scst_resume_activity(void)
545 {
546         struct scst_cmd_lists *l;
547
548         TRACE_ENTRY();
549
550         suspend_count--;
551         TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
552         if (suspend_count > 0)
553                 goto out;
554
555         clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
556         smp_mb__after_clear_bit();
557
558         list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
559                 wake_up_all(&l->cmd_list_waitQ);
560         }
561         wake_up_all(&scst_init_cmd_list_waitQ);
562
563         spin_lock_irq(&scst_mcmd_lock);
564         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
565                 struct scst_mgmt_cmd *m;
566                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
567                                 mgmt_cmd_list_entry);
568                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
569                         "mgmt cmd list", m);
570                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
571         }
572         spin_unlock_irq(&scst_mcmd_lock);
573         wake_up_all(&scst_mgmt_cmd_list_waitQ);
574
575 out:
576         TRACE_EXIT();
577         return;
578 }
579
580 void scst_resume_activity(void)
581 {
582         TRACE_ENTRY();
583
584         mutex_lock(&scst_suspend_mutex);
585         __scst_resume_activity();
586         mutex_unlock(&scst_suspend_mutex);
587
588         TRACE_EXIT();
589         return;
590 }
591 EXPORT_SYMBOL(scst_resume_activity);
592
593 static int scst_register_device(struct scsi_device *scsidp)
594 {
595         int res = 0;
596         struct scst_device *dev;
597         struct scst_dev_type *dt;
598
599         TRACE_ENTRY();
600
601         res = scst_suspend_activity(true);
602         if (res != 0)
603                 goto out_err;
604
605         if (mutex_lock_interruptible(&scst_mutex) != 0) {
606                 res = -EINTR;
607                 goto out_resume;
608         }
609
610         res = scst_alloc_device(GFP_KERNEL, &dev);
611         if (res != 0)
612                 goto out_up;
613
614         dev->type = scsidp->type;
615
616         dev->rq_disk = alloc_disk(1);
617         if (dev->rq_disk == NULL) {
618                 res = -ENOMEM;
619                 goto out_free_dev;
620         }
621         dev->rq_disk->major = SCST_MAJOR;
622
623         dev->scsi_dev = scsidp;
624
625         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
626
627         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
628                 if (dt->type == scsidp->type) {
629                         res = scst_assign_dev_handler(dev, dt);
630                         if (res != 0)
631                                 goto out_free;
632                         break;
633                 }
634         }
635
636 out_up:
637         mutex_unlock(&scst_mutex);
638
639 out_resume:
640         scst_resume_activity();
641
642 out_err:
643         if (res == 0) {
644                 PRINT_INFO("Attached SCSI target mid-level at "
645                     "scsi%d, channel %d, id %d, lun %d, type %d",
646                     scsidp->host->host_no, scsidp->channel, scsidp->id,
647                     scsidp->lun, scsidp->type);
648         } else {
649                 PRINT_ERROR("Failed to attach SCSI target mid-level "
650                     "at scsi%d, channel %d, id %d, lun %d, type %d",
651                     scsidp->host->host_no, scsidp->channel, scsidp->id,
652                     scsidp->lun, scsidp->type);
653         }
654
655         TRACE_EXIT_RES(res);
656         return res;
657
658 out_free:
659         list_del(&dev->dev_list_entry);
660         put_disk(dev->rq_disk);
661
662 out_free_dev:
663         scst_free_device(dev);
664         goto out_up;
665 }
666
667 static void scst_unregister_device(struct scsi_device *scsidp)
668 {
669         struct scst_device *d, *dev = NULL;
670         struct scst_acg_dev *acg_dev, *aa;
671
672         TRACE_ENTRY();
673
674         scst_suspend_activity(false);
675         mutex_lock(&scst_mutex);
676
677         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
678                 if (d->scsi_dev == scsidp) {
679                         dev = d;
680                         TRACE_DBG("Target device %p found", dev);
681                         break;
682                 }
683         }
684         if (dev == NULL) {
685                 PRINT_ERROR("%s", "Target device not found");
686                 goto out_unblock;
687         }
688
689         list_del(&dev->dev_list_entry);
690
691         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
692                                  dev_acg_dev_list_entry)
693         {
694                 scst_acg_remove_dev(acg_dev->acg, dev);
695         }
696
697         scst_assign_dev_handler(dev, &scst_null_devtype);
698
699         put_disk(dev->rq_disk);
700         scst_free_device(dev);
701
702         PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
703                 "id %d, lun %d, type %d", scsidp->host->host_no,
704                 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
705
706 out_unblock:
707         mutex_unlock(&scst_mutex);
708         scst_resume_activity();
709
710         TRACE_EXIT();
711         return;
712 }
713
714 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
715 {
716         int res = 0;
717
718         if (dev_handler->parse == NULL) {
719                 PRINT_ERROR("scst dev_type driver %s doesn't have a "
720                         "parse() method.", dev_handler->name);
721                 res = -EINVAL;
722                 goto out;
723         }
724
725         if (dev_handler->exec == NULL) {
726 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
727                 dev_handler->exec_atomic = 1;
728 #else
729                 dev_handler->exec_atomic = 0;
730 #endif
731         }
732
733         if (dev_handler->dev_done == NULL)
734                 dev_handler->dev_done_atomic = 1;
735
736 out:
737         TRACE_EXIT_RES(res);
738         return res;
739 }
740
741 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
742         const char *dev_name)
743 {
744         int res, rc;
745         struct scst_device *dev = NULL;
746
747         TRACE_ENTRY();
748
749         if (dev_handler == NULL) {
750                 PRINT_ERROR("%s: a valid device handler must be supplied",
751                             __func__);
752                 res = -EINVAL;
753                 goto out;
754         }
755
756         if (dev_name == NULL) {
757                 PRINT_ERROR("%s: device name must be non-NULL", __func__);
758                 res = -EINVAL;
759                 goto out;
760         }
761
762         res = scst_dev_handler_check(dev_handler);
763         if (res != 0)
764                 goto out;
765
766         res = scst_suspend_activity(true);
767         if (res != 0)
768                 goto out;
769
770         if (mutex_lock_interruptible(&scst_mutex) != 0) {
771                 res = -EINTR;
772                 goto out_resume;
773         }
774
775         res = scst_alloc_device(GFP_KERNEL, &dev);
776         if (res != 0)
777                 goto out_up;
778
779         dev->type = dev_handler->type;
780         dev->scsi_dev = NULL;
781         dev->virt_name = dev_name;
782         dev->virt_id = scst_virt_dev_last_id++;
783
784         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
785
786         res = dev->virt_id;
787
788         rc = scst_assign_dev_handler(dev, dev_handler);
789         if (rc != 0) {
790                 res = rc;
791                 goto out_free_del;
792         }
793
794 out_up:
795         mutex_unlock(&scst_mutex);
796
797 out_resume:
798         scst_resume_activity();
799
800 out:
801         if (res > 0) {
802                 PRINT_INFO("Attached SCSI target mid-level to virtual "
803                     "device %s (id %d)", dev_name, dev->virt_id);
804         } else {
805                 PRINT_INFO("Failed to attach SCSI target mid-level to "
806                     "virtual device %s", dev_name);
807         }
808
809         TRACE_EXIT_RES(res);
810         return res;
811
812 out_free_del:
813         list_del(&dev->dev_list_entry);
814         scst_free_device(dev);
815         goto out_up;
816 }
817 EXPORT_SYMBOL(scst_register_virtual_device);
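/*
 * Usage sketch (illustrative; the handler and device names are assumptions):
 * a virtual dev handler registers each backing device by name and keeps the
 * returned positive id for later unregistration:
 *
 *   int id = scst_register_virtual_device(&my_vdisk_devtype, "disk01");
 *   if (id < 0)
 *           return id;
 *   ...
 *   scst_unregister_virtual_device(id);
 */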
818
819 void scst_unregister_virtual_device(int id)
820 {
821         struct scst_device *d, *dev = NULL;
822         struct scst_acg_dev *acg_dev, *aa;
823
824         TRACE_ENTRY();
825
826         scst_suspend_activity(false);
827         mutex_lock(&scst_mutex);
828
829         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
830                 if (d->virt_id == id) {
831                         dev = d;
832                         TRACE_DBG("Target device %p (id %d) found", dev, id);
833                         break;
834                 }
835         }
836         if (dev == NULL) {
837                 PRINT_ERROR("Target virtual device (id %d) not found", id);
838                 goto out_unblock;
839         }
840
841         list_del(&dev->dev_list_entry);
842
843         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
844                                  dev_acg_dev_list_entry)
845         {
846                 scst_acg_remove_dev(acg_dev->acg, dev);
847         }
848
849         scst_assign_dev_handler(dev, &scst_null_devtype);
850
851         PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
852                 "(id %d)", dev->virt_name, dev->virt_id);
853
854         scst_free_device(dev);
855
856 out_unblock:
857         mutex_unlock(&scst_mutex);
858         scst_resume_activity();
859
860         TRACE_EXIT();
861         return;
862 }
863 EXPORT_SYMBOL(scst_unregister_virtual_device);
864
865 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
866         const char *version)
867 {
868         struct scst_dev_type *dt;
869         struct scst_device *dev;
870         int res;
871         int exist;
872
873         TRACE_ENTRY();
874
875         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
876                 PRINT_ERROR("Incorrect version of dev handler %s",
877                         dev_type->name);
878                 res = -EINVAL;
879                 goto out_error;
880         }
881
882         res = scst_dev_handler_check(dev_type);
883         if (res != 0)
884                 goto out_error;
885
886 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
887     !defined(CONFIG_SCST_STRICT_SERIALIZING)
888         if (dev_type->exec == NULL) {
889                 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
890                         "supported. Consider applying patch "
891                         "scst_exec_req_fifo-<kernel-version>.patch to your "
892                         "kernel or defining CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
893                 res = -EINVAL;
894                 goto out;
895         }
896 #endif
897
898         res = scst_suspend_activity(true);
899         if (res != 0)
900                 goto out_error;
901
902         if (mutex_lock_interruptible(&scst_mutex) != 0) {
903                 res = -EINTR;
904                 goto out_err_res;
905         }
906
907         exist = 0;
908         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
909                 if (strcmp(dt->name, dev_type->name) == 0) {
910                         PRINT_ERROR("Device type handler \"%s\" already "
911                                 "exists", dt->name);
912                         exist = 1;
913                         break;
914                 }
915         }
916         if (exist)
917                 goto out_up;
918
919         res = scst_build_proc_dev_handler_dir_entries(dev_type);
920         if (res < 0)
921                 goto out_up;
922
923         list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
924
925         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
926                 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
927                         continue;
928                 if (dev->scsi_dev->type == dev_type->type)
929                         scst_assign_dev_handler(dev, dev_type);
930         }
931
932         mutex_unlock(&scst_mutex);
933         scst_resume_activity();
934
935         if (res == 0) {
936                 PRINT_INFO("Device handler \"%s\" for type %d registered "
937                         "successfully", dev_type->name, dev_type->type);
938         }
939
940 out:
941         TRACE_EXIT_RES(res);
942         return res;
943
944 out_up:
945         mutex_unlock(&scst_mutex);
946
947 out_err_res:
948         scst_resume_activity();
949
950 out_error:
951         PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
952                 dev_type->name, dev_type->type);
953         goto out;
954 }
955 EXPORT_SYMBOL(__scst_register_dev_driver);
956
957 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
958 {
959         struct scst_device *dev;
960         struct scst_dev_type *dt;
961         int found = 0;
962
963         TRACE_ENTRY();
964
965         scst_suspend_activity(false);
966         mutex_lock(&scst_mutex);
967
968         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
969                 if (strcmp(dt->name, dev_type->name) == 0) {
970                         found = 1;
971                         break;
972                 }
973         }
974         if (!found) {
975                 PRINT_ERROR("Dev handler \"%s\" isn't registered",
976                         dev_type->name);
977                 goto out_up;
978         }
979
980         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
981                 if (dev->handler == dev_type) {
982                         scst_assign_dev_handler(dev, &scst_null_devtype);
983                         TRACE_DBG("Dev handler removed from device %p", dev);
984                 }
985         }
986
987         list_del(&dev_type->dev_type_list_entry);
988
989         mutex_unlock(&scst_mutex);
990         scst_resume_activity();
991
992         scst_cleanup_proc_dev_handler_dir_entries(dev_type);
993
994         PRINT_INFO("Device handler \"%s\" for type %d unloaded",
995                    dev_type->name, dev_type->type);
996
997 out:
998         TRACE_EXIT();
999         return;
1000
1001 out_up:
1002         mutex_unlock(&scst_mutex);
1003         scst_resume_activity();
1004         goto out;
1005 }
1006 EXPORT_SYMBOL(scst_unregister_dev_driver);
1007
1008 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1009         const char *version)
1010 {
1011         int res;
1012
1013         TRACE_ENTRY();
1014
1015         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1016                 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1017                         dev_type->name);
1018                 res = -EINVAL;
1019                 goto out_err;
1020         }
1021
1022         res = scst_dev_handler_check(dev_type);
1023         if (res != 0)
1024                 goto out_err;
1025
1026         if (!dev_type->no_proc) {
1027                 res = scst_build_proc_dev_handler_dir_entries(dev_type);
1028                 if (res < 0)
1029                         goto out_err;
1030         }
1031
1032         if (dev_type->type != -1) {
1033                 PRINT_INFO("Virtual device handler %s for type %d "
1034                         "registered successfully", dev_type->name,
1035                         dev_type->type);
1036         } else {
1037                 PRINT_INFO("Virtual device handler \"%s\" registered "
1038                         "successfully", dev_type->name);
1039         }
1040
1041 out:
1042         TRACE_EXIT_RES(res);
1043         return res;
1044
1045 out_err:
1046         PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1047                 dev_type->name);
1048         goto out;
1049 }
1050 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
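/*
 * Registration sketch for a virtual dev handler (illustrative; the callback
 * and wrapper names are assumptions based on scst.h). parse() is mandatory
 * per scst_dev_handler_check(); a type of -1 means the handler is not tied
 * to one SCSI device type:
 *
 *   static struct scst_dev_type my_vdisk_devtype = {
 *           .name     = "my_vdisk",
 *           .type     = TYPE_DISK,
 *           .parse    = my_parse,
 *           .exec     = my_exec,
 *           .dev_done = my_dev_done,
 *   };
 *
 *   res = scst_register_virtual_dev_driver(&my_vdisk_devtype);
 *
 * scst_register_virtual_dev_driver() is assumed to be the scst.h wrapper
 * that supplies SCST_INTERFACE_VERSION to this function.
 */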
1051
1052 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1053 {
1054         TRACE_ENTRY();
1055
1056         if (!dev_type->no_proc)
1057                 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1058
1059         PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1060
1061         TRACE_EXIT();
1062         return;
1063 }
1064 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1065
1066 /* Called under scst_mutex */
1067 int scst_add_dev_threads(struct scst_device *dev, int num)
1068 {
1069         int i, res = 0;
1070         int n = 0;
1071         struct scst_cmd_thread_t *thr;
1072         char nm[12];
1073
1074         TRACE_ENTRY();
1075
1076         list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1077                 n++;
1078         }
1079
1080         for (i = 0; i < num; i++) {
1081                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1082                 if (!thr) {
1083                         res = -ENOMEM;
1084                         PRINT_ERROR("Failed to allocate thr %d", res);
1085                         goto out;
1086                 }
1087                 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1088                 nm[ARRAY_SIZE(nm)-1] = '\0';
1089                 thr->cmd_thread = kthread_run(scst_cmd_thread,
1090                         &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1091                 if (IS_ERR(thr->cmd_thread)) {
1092                         res = PTR_ERR(thr->cmd_thread);
1093                         PRINT_ERROR("kthread_create() failed: %d", res);
1094                         kfree(thr);
1095                         goto out;
1096                 }
1097                 list_add(&thr->thread_list_entry, &dev->threads_list);
1098         }
1099
1100 out:
1101         TRACE_EXIT_RES(res);
1102         return res;
1103 }
1104
1105 /* Called under scst_mutex and suspended activity */
1106 static int scst_create_dev_threads(struct scst_device *dev)
1107 {
1108         int res = 0;
1109         int threads_num;
1110
1111         TRACE_ENTRY();
1112
1113         if (dev->handler->threads_num <= 0)
1114                 goto out;
1115
1116         threads_num = dev->handler->threads_num;
1117
1118         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1119         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1120         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1121
1122         res = scst_add_dev_threads(dev, threads_num);
1123         if (res != 0)
1124                 goto out;
1125
1126         mutex_lock(&scst_suspend_mutex);
1127         list_add_tail(&dev->cmd_lists.lists_list_entry,
1128                 &scst_cmd_lists_list);
1129         mutex_unlock(&scst_suspend_mutex);
1130
1131         dev->p_cmd_lists = &dev->cmd_lists;
1132
1133 out:
1134         TRACE_EXIT_RES(res);
1135         return res;
1136 }
1137
1138 /* Called under scst_mutex */
1139 void scst_del_dev_threads(struct scst_device *dev, int num)
1140 {
1141         struct scst_cmd_thread_t *ct, *tmp;
1142         int i = 0;
1143
1144         TRACE_ENTRY();
1145
1146         list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1147                                 thread_list_entry) {
1148                 int rc = kthread_stop(ct->cmd_thread);
1149                 if (rc < 0)
1150                         TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1151                 list_del(&ct->thread_list_entry);
1152                 kfree(ct);
1153                 if ((num > 0) && (++i >= num))
1154                         break;
1155         }
1156
1157         TRACE_EXIT();
1158         return;
1159 }
1160
1161 /* Called under scst_mutex and suspended activity */
1162 static void scst_stop_dev_threads(struct scst_device *dev)
1163 {
1164         TRACE_ENTRY();
1165
1166         if (list_empty(&dev->threads_list))
1167                 goto out;
1168
1169         scst_del_dev_threads(dev, -1);
1170
1171         if (dev->p_cmd_lists == &dev->cmd_lists) {
1172                 mutex_lock(&scst_suspend_mutex);
1173                 list_del(&dev->cmd_lists.lists_list_entry);
1174                 mutex_unlock(&scst_suspend_mutex);
1175         }
1176
1177 out:
1178         TRACE_EXIT();
1179         return;
1180 }
1181
1182 /* The activity supposed to be suspended and scst_mutex held */
1183 int scst_assign_dev_handler(struct scst_device *dev,
1184         struct scst_dev_type *handler)
1185 {
1186         int res = 0;
1187         struct scst_tgt_dev *tgt_dev;
1188         LIST_HEAD(attached_tgt_devs);
1189
1190         TRACE_ENTRY();
1191
1192         sBUG_ON(handler == NULL);
1193
1194         if (dev->handler == handler)
1195                 goto out;
1196
1197         if (dev->handler && dev->handler->detach_tgt) {
1198                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1199                                 dev_tgt_dev_list_entry) {
1200                         TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1201                                 tgt_dev);
1202                         dev->handler->detach_tgt(tgt_dev);
1203                         TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1204                 }
1205         }
1206
1207         if (dev->handler && dev->handler->detach) {
1208                 TRACE_DBG("%s", "Calling dev handler's detach()");
1209                 dev->handler->detach(dev);
1210                 TRACE_DBG("%s", "Old handler's detach() returned");
1211         }
1212
1213         scst_stop_dev_threads(dev);
1214
1215         dev->handler = handler;
1216
1217         if (handler) {
1218                 res = scst_create_dev_threads(dev);
1219                 if (res != 0)
1220                         goto out_null;
1221         }
1222
1223         if (handler && handler->attach) {
1224                 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1225                 res = handler->attach(dev);
1226                 TRACE_DBG("New dev handler's attach() returned %d", res);
1227                 if (res != 0) {
1228                         PRINT_ERROR("New device handler's %s attach() "
1229                                 "failed: %d", handler->name, res);
1230                 }
1231                 goto out_thr_null;
1232         }
1233
1234         if (handler && handler->attach_tgt) {
1235                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1236                                 dev_tgt_dev_list_entry) {
1237                         TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1238                                 tgt_dev);
1239                         res = handler->attach_tgt(tgt_dev);
1240                         TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1241                         if (res != 0) {
1242                                 PRINT_ERROR("Device handler's %s attach_tgt() "
1243                                     "failed: %d", handler->name, res);
1244                                 goto out_err_detach_tgt;
1245                         }
1246                         list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1247                                 &attached_tgt_devs);
1248                 }
1249         }
1250
1251 out_thr_null:
1252         if (res != 0)
1253                 scst_stop_dev_threads(dev);
1254
1255 out_null:
1256         if (res != 0)
1257                 dev->handler = &scst_null_devtype;
1258
1259 out:
1260         TRACE_EXIT_RES(res);
1261         return res;
1262
1263 out_err_detach_tgt:
1264         if (handler && handler->detach_tgt) {
1265                 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1266                                  extra_tgt_dev_list_entry)
1267                 {
1268                         TRACE_DBG("Calling handler's detach_tgt(%p)",
1269                                 tgt_dev);
1270                         handler->detach_tgt(tgt_dev);
1271                         TRACE_DBG("%s", "Handler's detach_tgt() returned");
1272                 }
1273         }
1274         if (handler && handler->detach) {
1275                 TRACE_DBG("%s", "Calling handler's detach()");
1276                 handler->detach(dev);
1277                 TRACE_DBG("%s", "Handler's detach() returned");
1278         }
1279         goto out_null;
1280 }
1281
1282 int scst_cmd_threads_count(void)
1283 {
1284         int i;
1285
1286         /*
1287          * Just to lower the race window in which a user could read a just-changed value
1288          */
1289         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1290         i = scst_threads_info.nr_cmd_threads;
1291         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1292         return i;
1293 }
1294
1295 static void scst_threads_info_init(void)
1296 {
1297         memset(&scst_threads_info, 0, sizeof(scst_threads_info));
1298         mutex_init(&scst_threads_info.cmd_threads_mutex);
1299         INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
1300 }
1301
1302 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1303 void __scst_del_cmd_threads(int num)
1304 {
1305         struct scst_cmd_thread_t *ct, *tmp;
1306         int i;
1307
1308         TRACE_ENTRY();
1309
1310         i = scst_threads_info.nr_cmd_threads;
1311         if (num <= 0 || num > i) {
1312                 PRINT_ERROR("Cannot delete %d cmd threads out of %d", num, i);
1313                 return;
1314         }
1315
1316         list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1317                                 thread_list_entry) {
1318                 int res;
1319
1320                 res = kthread_stop(ct->cmd_thread);
1321                 if (res < 0)
1322                         TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1323                 list_del(&ct->thread_list_entry);
1324                 kfree(ct);
1325                 scst_threads_info.nr_cmd_threads--;
1326                 --num;
1327                 if (num == 0)
1328                         break;
1329         }
1330
1331         TRACE_EXIT();
1332         return;
1333 }
1334
1335 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1336 int __scst_add_cmd_threads(int num)
1337 {
1338         int res = 0, i;
1339         static int scst_thread_num;
1340
1341         TRACE_ENTRY();
1342
1343         for (i = 0; i < num; i++) {
1344                 struct scst_cmd_thread_t *thr;
1345
1346                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1347                 if (!thr) {
1348                         res = -ENOMEM;
1349                         PRINT_ERROR("Failed to allocate thr %d", res);
1350                         goto out_error;
1351                 }
1352                 thr->cmd_thread = kthread_run(scst_cmd_thread,
1353                         &scst_main_cmd_lists, "scsi_tgt%d",
1354                         scst_thread_num++);
1355                 if (IS_ERR(thr->cmd_thread)) {
1356                         res = PTR_ERR(thr->cmd_thread);
1357                         PRINT_ERROR("kthread_create() failed: %d", res);
1358                         kfree(thr);
1359                         goto out_error;
1360                 }
1361                 list_add(&thr->thread_list_entry,
1362                         &scst_threads_info.cmd_threads_list);
1363                 scst_threads_info.nr_cmd_threads++;
1364         }
1365         res = 0;
1366
1367 out:
1368         TRACE_EXIT_RES(res);
1369         return res;
1370
1371 out_error:
1372         if (i > 0)
1373                         __scst_del_cmd_threads(i);
1374         goto out;
1375 }
1376
1377 int scst_add_cmd_threads(int num)
1378 {
1379         int res;
1380
1381         TRACE_ENTRY();
1382
1383         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1384         res = __scst_add_cmd_threads(num);
1385         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1386
1387         TRACE_EXIT_RES(res);
1388         return res;
1389 }
1390 EXPORT_SYMBOL(scst_add_cmd_threads);
1391
1392 void scst_del_cmd_threads(int num)
1393 {
1394         TRACE_ENTRY();
1395
1396         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1397         __scst_del_cmd_threads(num);
1398         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1399
1400         TRACE_EXIT();
1401         return;
1402 }
1403 EXPORT_SYMBOL(scst_del_cmd_threads);
1404
1405 static void scst_stop_all_threads(void)
1406 {
1407         TRACE_ENTRY();
1408
1409         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1410         __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1411         if (scst_threads_info.mgmt_cmd_thread)
1412                 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1413         if (scst_threads_info.mgmt_thread)
1414                 kthread_stop(scst_threads_info.mgmt_thread);
1415         if (scst_threads_info.init_cmd_thread)
1416                 kthread_stop(scst_threads_info.init_cmd_thread);
1417         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1418
1419         TRACE_EXIT();
1420         return;
1421 }
1422
1423 static int scst_start_all_threads(int num)
1424 {
1425         int res;
1426
1427         TRACE_ENTRY();
1428
1429         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1430         res = __scst_add_cmd_threads(num);
1431         if (res < 0)
1432                 goto out;
1433
1434         scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
1435                 NULL, "scsi_tgt_init");
1436         if (IS_ERR(scst_threads_info.init_cmd_thread)) {
1437                 res = PTR_ERR(scst_threads_info.init_cmd_thread);
1438                 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1439                 scst_threads_info.init_cmd_thread = NULL;
1440                 goto out;
1441         }
1442
1443         scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
1444                 NULL, "scsi_tgt_mc");
1445         if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
1446                 res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
1447                 PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
1448                 scst_threads_info.mgmt_cmd_thread = NULL;
1449                 goto out;
1450         }
1451
1452         scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
1453                 NULL, "scsi_tgt_mgmt");
1454         if (IS_ERR(scst_threads_info.mgmt_thread)) {
1455                 res = PTR_ERR(scst_threads_info.mgmt_thread);
1456                 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1457                 scst_threads_info.mgmt_thread = NULL;
1458                 goto out;
1459         }
1460
1461 out:
1462         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1463         TRACE_EXIT_RES(res);
1464         return res;
1465 }
1466
1467 void scst_get(void)
1468 {
1469         __scst_get(0);
1470 }
1471 EXPORT_SYMBOL(scst_get);
1472
1473 void scst_put(void)
1474 {
1475         __scst_put();
1476 }
1477 EXPORT_SYMBOL(scst_put);
1478
1479 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1480 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1481 #else
1482 static int scst_add(struct device *cdev, struct class_interface *intf)
1483 #endif
1484 {
1485         struct scsi_device *scsidp;
1486         int res = 0;
1487
1488         TRACE_ENTRY();
1489
1490 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1491         scsidp = to_scsi_device(cdev->dev);
1492 #else
1493         scsidp = to_scsi_device(cdev->parent);
1494 #endif
1495
1496         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1497                 res = scst_register_device(scsidp);
1498
1499         TRACE_EXIT();
1500         return res;
1501 }
1502
1503 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1504 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1505 #else
1506 static void scst_remove(struct device *cdev, struct class_interface *intf)
1507 #endif
1508 {
1509         struct scsi_device *scsidp;
1510
1511         TRACE_ENTRY();
1512
1513 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1514         scsidp = to_scsi_device(cdev->dev);
1515 #else
1516         scsidp = to_scsi_device(cdev->parent);
1517 #endif
1518
1519         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1520                 scst_unregister_device(scsidp);
1521
1522         TRACE_EXIT();
1523         return;
1524 }
1525
1526 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1527 static struct class_interface scst_interface = {
1528         .add = scst_add,
1529         .remove = scst_remove,
1530 };
1531 #else
1532 static struct class_interface scst_interface = {
1533         .add_dev = scst_add,
1534         .remove_dev = scst_remove,
1535 };
1536 #endif
1537
1538 static void __init scst_print_config(void)
1539 {
1540         char buf[128];
1541         int i, j;
1542
1543         i = snprintf(buf, sizeof(buf), "Enabled features: ");
1544         j = i;
1545
1546 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1547         i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1548 #endif
1549
1550 #ifdef CONFIG_SCST_EXTRACHECKS
1551         i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1552                 (j == i) ? "" : ", ");
1553 #endif
1554
1555 #ifdef CONFIG_SCST_TRACING
1556         i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1557                 (j == i) ? "" : ", ");
1558 #endif
1559
1560 #ifdef CONFIG_SCST_DEBUG
1561         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1562                 (j == i) ? "" : ", ");
1563 #endif
1564
1565 #ifdef CONFIG_SCST_DEBUG_TM
1566         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1567                 (j == i) ? "" : ", ");
1568 #endif
1569
1570 #ifdef CONFIG_SCST_DEBUG_RETRY
1571         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1572                 (j == i) ? "" : ", ");
1573 #endif
1574
1575 #ifdef CONFIG_SCST_DEBUG_OOM
1576         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1577                 (j == i) ? "" : ", ");
1578 #endif
1579
1580 #ifdef CONFIG_SCST_DEBUG_SN
1581         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1582                 (j == i) ? "" : ", ");
1583 #endif
1584
1585 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1586         i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1587                 (j == i) ? "" : ", ");
1588 #endif
1589
1590 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1591         i += snprintf(&buf[i], sizeof(buf) - i,
1592                 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1593                 (j == i) ? "" : ", ");
1594 #endif
1595
1596 #ifdef CONFIG_SCST_STRICT_SECURITY
1597         i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1598                 (j == i) ? "" : ", ");
1599 #endif
1600
1601         if (j != i)
1602                 PRINT_INFO("%s", buf);
1603 }
1604
1605 static int __init init_scst(void)
1606 {
1607         int res = 0, i;
1608         int scst_num_cpus;
1609
1610         TRACE_ENTRY();
1611
1612 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1613         {
1614                 struct scsi_request *req;
1615                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1616                         sizeof(req->sr_sense_buffer));
1617         }
1618 #else
1619         {
1620                 struct scsi_sense_hdr *shdr;
1621                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1622         }
1623 #endif
1624         {
1625                 struct scst_tgt_dev *t;
1626                 struct scst_cmd *c;
1627                 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1628                 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
1629         }
1630
1631         BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
1632         BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
1633         BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
1634         BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
1635
1636         mutex_init(&scst_mutex);
1637         INIT_LIST_HEAD(&scst_template_list);
1638         INIT_LIST_HEAD(&scst_dev_list);
1639         INIT_LIST_HEAD(&scst_dev_type_list);
1640         spin_lock_init(&scst_main_lock);
1641         INIT_LIST_HEAD(&scst_acg_list);
1642         spin_lock_init(&scst_init_lock);
1643         init_waitqueue_head(&scst_init_cmd_list_waitQ);
1644         INIT_LIST_HEAD(&scst_init_cmd_list);
1645 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1646         scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1647 #endif
1648         atomic_set(&scst_cmd_count, 0);
1649         spin_lock_init(&scst_mcmd_lock);
1650         INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1651         INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1652         init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1653         init_waitqueue_head(&scst_mgmt_waitQ);
1654         spin_lock_init(&scst_mgmt_lock);
1655         INIT_LIST_HEAD(&scst_sess_init_list);
1656         INIT_LIST_HEAD(&scst_sess_shut_list);
1657         init_waitqueue_head(&scst_dev_cmd_waitQ);
1658         mutex_init(&scst_suspend_mutex);
1659         INIT_LIST_HEAD(&scst_cmd_lists_list);
1660         scst_virt_dev_last_id = 1;
1661         spin_lock_init(&scst_temp_UA_lock);
1662
1663         spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1664         INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1665         init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1666         list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1667                 &scst_cmd_lists_list);
1668
1669         scst_num_cpus = num_online_cpus();
1670
1671         /* ToDo: register_cpu_notifier() */
1672
1673         if (scst_threads == 0)
1674                 scst_threads = scst_num_cpus;
1675
1676         if (scst_threads < 1) {
1677                 PRINT_ERROR("%s", "scst_threads cannot be less than 1");
1678                 scst_threads = scst_num_cpus;
1679         }
1680
1681         scst_threads_info_init();
1682
1683 #define INIT_CACHEP(p, s, o) do {                                       \
1684                 p = KMEM_CACHE(s, SCST_SLAB_FLAGS);                     \
1685                 TRACE_MEM("Slab create: %s at %p size %zd", #s, p,      \
1686                           sizeof(struct s));                            \
1687                 if (p == NULL) {                                        \
1688                         res = -ENOMEM;                                  \
1689                         goto o;                                         \
1690                 }                                                       \
1691         } while (0)
1692
1693         INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1694         INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1695                         out_destroy_mgmt_cache);
1696         INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1697                         out_destroy_mgmt_stub_cache);
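	/* KMEM_CACHE() needs a named struct, so wrap the raw sense buffer. */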
1698         {
1699                 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1700                 INIT_CACHEP(scst_sense_cachep, scst_sense,
1701                             out_destroy_ua_cache);
1702         }
1703         INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
1704         INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1705         INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1706         INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
1707
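	/*
	 * Back the management command, stub, UA and sense caches with
	 * mempools so a minimum reserve of objects (64/1024/64/128) stays
	 * allocatable even under memory pressure.
	 */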
1708         scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1709                 mempool_free_slab, scst_mgmt_cachep);
1710         if (scst_mgmt_mempool == NULL) {
1711                 res = -ENOMEM;
1712                 goto out_destroy_acg_cache;
1713         }
1714
1715         scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1716                 mempool_free_slab, scst_mgmt_stub_cachep);
1717         if (scst_mgmt_stub_mempool == NULL) {
1718                 res = -ENOMEM;
1719                 goto out_destroy_mgmt_mempool;
1720         }
1721
1722         scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
1723                 mempool_free_slab, scst_ua_cachep);
1724         if (scst_ua_mempool == NULL) {
1725                 res = -ENOMEM;
1726                 goto out_destroy_mgmt_stub_mempool;
1727         }
1728
1729         /*
1730          * Losing sense data may have fatal consequences, so let's have a big pool
1731          */
1732         scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
1733                 mempool_free_slab, scst_sense_cachep);
1734         if (scst_sense_mempool == NULL) {
1735                 res = -ENOMEM;
1736                 goto out_destroy_ua_mempool;
1737         }
1738
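	/*
	 * If no global command memory limit was given, default to roughly a
	 * quarter of total RAM, in MB: totalram (pages) << PAGE_SHIFT gives
	 * bytes, >> 20 gives MB, >> 2 divides by four. E.g. with 4 GB RAM
	 * and 4 KB pages the default becomes about 1024 MB. On 32-bit
	 * kernels the calculation is done in 64-bit arithmetic so HIGHMEM
	 * memory sizes don't overflow.
	 */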
1739         if (scst_max_cmd_mem == 0) {
1740                 struct sysinfo si;
1741                 si_meminfo(&si);
1742 #if BITS_PER_LONG == 32
1743                 scst_max_cmd_mem = min(
1744                         (((uint64_t)si.totalram << PAGE_SHIFT) >> 20) >> 2,
1745                         (uint64_t)1 << 30);
1746 #else
1747                 scst_max_cmd_mem = ((si.totalram << PAGE_SHIFT) >> 20) >> 2;
1748 #endif
1749         }
1750
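	/*
	 * A per-device limit larger than the global one makes no sense and
	 * is clamped to it. When not set, it defaults to 2/5 of the global
	 * limit, e.g. 1024 MB global -> 409 MB per device.
	 */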
1751         if (scst_max_dev_cmd_mem != 0) {
1752                 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1753                         PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1754                                 "scst_max_cmd_mem (%d)",
1755                                 scst_max_dev_cmd_mem,
1756                                 scst_max_cmd_mem);
1757                         scst_max_dev_cmd_mem = scst_max_cmd_mem;
1758                 }
1759         } else
1760                 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
1761
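	/*
	 * The first argument to scst_sgv_pools_init() is the limit converted
	 * from MB to pages: << 10 turns MB into KB, >> (PAGE_SHIFT - 10)
	 * turns KB into pages. With 4 KB pages, 1024 MB -> 262144 pages.
	 */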
1762         res = scst_sgv_pools_init(
1763                 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1764         if (res != 0)
1765                 goto out_destroy_sense_mempool;
1766
1767         scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1768         if (scst_default_acg == NULL) {
1769                 res = -ENOMEM;
1770                 goto out_destroy_sgv_pool;
1771         }
1772
1773         res = scsi_register_interface(&scst_interface);
1774         if (res != 0)
1775                 goto out_free_acg;
1776
1777         scst_scsi_op_list_init();
1778
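	/*
	 * Set up one tasklet, with its own lock and command list, for each
	 * entry of scst_tasklets[].
	 */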
1779         for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1780                 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1781                 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1782                 tasklet_init(&scst_tasklets[i].tasklet,
1783                              (void *)scst_cmd_tasklet,
1784                              (unsigned long)&scst_tasklets[i]);
1785         }
1786
1787         TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1788                 scst_threads);
1789
1790         res = scst_start_all_threads(scst_threads);
1791         if (res < 0)
1792                 goto out_thread_free;
1793
1794         res = scst_proc_init_module();
1795         if (res != 0)
1796                 goto out_thread_free;
1797
1798
1799         PRINT_INFO("SCST version %s loaded successfully (max mem for "
1800                 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1801                 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1802
1803         scst_print_config();
1804
1805 out:
1806         TRACE_EXIT_RES(res);
1807         return res;
1808
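	/*
	 * Error unwind: each label below undoes one init step and falls
	 * through to the next, so a failure at any point releases exactly
	 * what had been set up so far before jumping back to 'out'.
	 */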
1809 out_thread_free:
1810         scst_stop_all_threads();
1811
1812         scsi_unregister_interface(&scst_interface);
1813
1814 out_free_acg:
1815         scst_destroy_acg(scst_default_acg);
1816
1817 out_destroy_sgv_pool:
1818         scst_sgv_pools_deinit();
1819
1820 out_destroy_sense_mempool:
1821         mempool_destroy(scst_sense_mempool);
1822
1823 out_destroy_ua_mempool:
1824         mempool_destroy(scst_ua_mempool);
1825
1826 out_destroy_mgmt_stub_mempool:
1827         mempool_destroy(scst_mgmt_stub_mempool);
1828
1829 out_destroy_mgmt_mempool:
1830         mempool_destroy(scst_mgmt_mempool);
1831
1832 out_destroy_acg_cache:
1833         kmem_cache_destroy(scst_acgd_cachep);
1834
1835 out_destroy_tgt_cache:
1836         kmem_cache_destroy(scst_tgtd_cachep);
1837
1838 out_destroy_sess_cache:
1839         kmem_cache_destroy(scst_sess_cachep);
1840
1841 out_destroy_cmd_cache:
1842         kmem_cache_destroy(scst_cmd_cachep);
1843
1844 out_destroy_sense_cache:
1845         kmem_cache_destroy(scst_sense_cachep);
1846
1847 out_destroy_ua_cache:
1848         kmem_cache_destroy(scst_ua_cachep);
1849
1850 out_destroy_mgmt_stub_cache:
1851         kmem_cache_destroy(scst_mgmt_stub_cachep);
1852
1853 out_destroy_mgmt_cache:
1854         kmem_cache_destroy(scst_mgmt_cachep);
1855         goto out;
1856 }
1857
1858 static void __exit exit_scst(void)
1859 {
1860         TRACE_ENTRY();
1861
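	/*
	 * Tear down in reverse order of init_scst(): /proc entries, worker
	 * threads, the SCSI interface, the default ACG, the SGV pools, the
	 * mempools and finally the slab caches.
	 */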
1862         /* ToDo: unregister_cpu_notifier() */
1863
1864         scst_proc_cleanup_module();
1865
1866         scst_stop_all_threads();
1867
1868         scsi_unregister_interface(&scst_interface);
1869         scst_destroy_acg(scst_default_acg);
1870
1871         scst_sgv_pools_deinit();
1872
1873 #define DEINIT_CACHEP(p) do {           \
1874                 kmem_cache_destroy(p);  \
1875                 p = NULL;               \
1876         } while (0)
1877
1878         mempool_destroy(scst_mgmt_mempool);
1879         mempool_destroy(scst_mgmt_stub_mempool);
1880         mempool_destroy(scst_ua_mempool);
1881         mempool_destroy(scst_sense_mempool);
1882
1883         DEINIT_CACHEP(scst_mgmt_cachep);
1884         DEINIT_CACHEP(scst_mgmt_stub_cachep);
1885         DEINIT_CACHEP(scst_ua_cachep);
1886         DEINIT_CACHEP(scst_sense_cachep);
1887         DEINIT_CACHEP(scst_cmd_cachep);
1888         DEINIT_CACHEP(scst_sess_cachep);
1889         DEINIT_CACHEP(scst_tgtd_cachep);
1890         DEINIT_CACHEP(scst_acgd_cachep);
1891
1892         PRINT_INFO("%s", "SCST unloaded");
1893
1894         TRACE_EXIT();
1895         return;
1896 }
1897
1898
1899 module_init(init_scst);
1900 module_exit(exit_scst);
1901
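/*
 * Typical load/unload, assuming scst_threads and scst_max_cmd_mem are
 * exposed as module parameters elsewhere in this file (a hypothetical
 * illustration, not taken from this source):
 *
 *	modprobe scst scst_threads=4 scst_max_cmd_mem=512
 *	rmmod scst
 */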
1902 MODULE_AUTHOR("Vladislav Bolkhovitin");
1903 MODULE_LICENSE("GPL");
1904 MODULE_DESCRIPTION("SCSI target core");
1905 MODULE_VERSION(SCST_VERSION_STRING);