Patch from Bart Van Assche <bart.vanassche@gmail.com>:
scst/src/scst_main.c
1 /*
2  *  scst_main.c
3  *
4  *  Copyright (C) 2004-2007 Vladislav Bolkhovitin <vst@vlnb.net>
5  *                 and Leonid Stoljar
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation, version 2
10  *  of the License.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  *  GNU General Public License for more details.
16  */
17
18 #include <linux/module.h>
19
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/list.h>
24 #include <linux/spinlock.h>
25 #include <linux/slab.h>
26 #include <linux/sched.h>
27 #include <linux/unistd.h>
28 #include <linux/string.h>
29 #include <linux/kthread.h>
30
31 #include "scst.h"
32 #include "scst_priv.h"
33 #include "scst_mem.h"
34
35 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
36 #warning "HIGHMEM kernel configurations are fully supported, but not \
37         recommended for performance reasons. Consider change VMSPLIT \
38         option or use 64-bit configuration instead. See README file for \
39         details."
40 #endif
41
42 #ifdef SCST_HIGHMEM
43 #error "SCST_HIGHMEM configuration isn't supported and broken, because there \
44         is no real point to support it, at least it definitely doesn't worth \
45         the effort. Better use no-HIGHMEM kernel with VMSPLIT option \
46         or in 64-bit configuration instead. See README file for details."
47 #endif
48
49 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && !defined(STRICT_SERIALIZING)
50 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied on \
51         your kernel and STRICT_SERIALIZING isn't defined. Pass-through dev \
52         handlers will not be supported."
53 #endif
54
/**
 ** SCST global variables. They are all left uninitialized so that their
 ** layout in memory is exactly as specified. Otherwise the compiler would
 ** place zero-initialized variables separately from nonzero-initialized ones.
 **/
60
/*
 * All management of targets, devices and dev_types is done under this mutex.
 *
 * It must NOT be taken from any work items (schedule_work(), etc.), because
 * otherwise a deadlock (a double lock, actually) is possible, e.g., with
 * scst_user's detach_tgt(), which is called under scst_mutex and calls
 * flush_scheduled_work(). See the illustration below the declaration.
 */
69 struct mutex scst_mutex;
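/*
 * Illustration of the deadlock mentioned above (a sketch, not code from this
 * file): suppose a dev handler's detach_tgt() is invoked with scst_mutex held,
 * as scst_assign_dev_handler() below does, and detach_tgt() calls
 * flush_scheduled_work() while a queued work item itself needs scst_mutex:
 *
 *   thread A (holds scst_mutex)           queued work item
 *   ---------------------------           ----------------
 *   mutex_lock(&scst_mutex);
 *   handler->detach_tgt(tgt_dev);
 *     flush_scheduled_work();     ---->   mutex_lock(&scst_mutex);  blocks
 *
 * A waits for the work item to finish, the work item waits for A to drop
 * scst_mutex: a deadlock.
 */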
70
71 struct list_head scst_template_list;
72 struct list_head scst_dev_list;
73 struct list_head scst_dev_type_list;
74
75 spinlock_t scst_main_lock;
76
77 struct kmem_cache *scst_mgmt_cachep;
78 mempool_t *scst_mgmt_mempool;
79 struct kmem_cache *scst_mgmt_stub_cachep;
80 mempool_t *scst_mgmt_stub_mempool;
81 struct kmem_cache *scst_ua_cachep;
82 mempool_t *scst_ua_mempool;
83 struct kmem_cache *scst_sense_cachep;
84 mempool_t *scst_sense_mempool;
85 struct kmem_cache *scst_tgtd_cachep;
86 struct kmem_cache *scst_sess_cachep;
87 struct kmem_cache *scst_acgd_cachep;
88
89 struct list_head scst_acg_list;
90 struct scst_acg *scst_default_acg;
91
92 spinlock_t scst_init_lock;
93 wait_queue_head_t scst_init_cmd_list_waitQ;
94 struct list_head scst_init_cmd_list;
95 unsigned int scst_init_poll_cnt;
96
97 struct kmem_cache *scst_cmd_cachep;
98
99 #if defined(DEBUG) || defined(TRACING)
100 unsigned long scst_trace_flag;
101 #endif
102
103 unsigned long scst_flags;
104 atomic_t scst_cmd_count;
105
106 spinlock_t scst_cmd_mem_lock;
107 unsigned long scst_cur_cmd_mem, scst_cur_max_cmd_mem;
108 unsigned long scst_max_cmd_mem;
109
110 struct scst_cmd_lists scst_main_cmd_lists;
111
112 struct scst_tasklet scst_tasklets[NR_CPUS];
113
114 spinlock_t scst_mcmd_lock;
115 struct list_head scst_active_mgmt_cmd_list;
116 struct list_head scst_delayed_mgmt_cmd_list;
117 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
118
119 wait_queue_head_t scst_mgmt_waitQ;
120 spinlock_t scst_mgmt_lock;
121 struct list_head scst_sess_init_list;
122 struct list_head scst_sess_shut_list;
123
124 wait_queue_head_t scst_dev_cmd_waitQ;
125
126 struct mutex scst_suspend_mutex;
127 struct list_head scst_cmd_lists_list;
128
129 static int scst_threads;
130 struct scst_threads_info_t scst_threads_info;
131
132 static int suspend_count;
133
134 static int scst_virt_dev_last_id; /* protected by scst_mutex */
135
/*
 * This buffer and its lock are intended to avoid memory allocations, which
 * could fail at inopportune places.
 */
140 spinlock_t scst_temp_UA_lock;
141 uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
142
143 module_param_named(scst_threads, scst_threads, int, 0);
144 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
145
146 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, long, 0);
147 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
148         "the SCST commands at any given time in MB");
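/*
 * Example (a sketch; assumes the module is built as scst.ko): both parameters
 * can be set at module load time, e.g.:
 *
 *   modprobe scst scst_threads=4 scst_max_cmd_mem=256
 *
 * With scst_threads left at 0 one command thread per online CPU is started,
 * and with scst_max_cmd_mem left at 0 about a quarter of RAM is used (capped
 * at 1 GB on 32-bit kernels); see init_scst() below.
 */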
149
150 struct scst_dev_type scst_null_devtype = {
151         .name = "none",
152 };
153
154 int __scst_register_target_template(struct scst_tgt_template *vtt,
155         const char *version)
156 {
157         int res = 0;
158         struct scst_tgt_template *t;
159         static DEFINE_MUTEX(m);
160
161         TRACE_ENTRY();
162
163         INIT_LIST_HEAD(&vtt->tgt_list);
164
165         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
166                 PRINT_ERROR("Incorrect version of target %s", vtt->name);
167                 res = -EINVAL;
168                 goto out_err;
169         }
170
171         if (!vtt->detect) {
172                 PRINT_ERROR("Target driver %s doesn't have a "
173                         "detect() method.", vtt->name);
174                 res = -EINVAL;
175                 goto out_err;
176         }
177
178         if (!vtt->release) {
179                 PRINT_ERROR("Target driver %s doesn't have a "
180                         "release() method.", vtt->name);
181                 res = -EINVAL;
182                 goto out_err;
183         }
184
185         if (!vtt->xmit_response) {
                PRINT_ERROR("Target driver %s doesn't have an "
                        "xmit_response() method.", vtt->name);
188                 res = -EINVAL;
189                 goto out_err;
190         }
191
192         if (vtt->threads_num < 0) {
193                 PRINT_ERROR("Wrong threads_num value %d for "
194                         "target \"%s\"", vtt->threads_num,
195                         vtt->name);
196                 res = -EINVAL;
197                 goto out_err;
198         }
199
200         if (!vtt->no_proc_entry) {
201                 res = scst_build_proc_target_dir_entries(vtt);
202                 if (res < 0)
203                         goto out_err;
204         }
205
        if (mutex_lock_interruptible(&m) != 0) {
                res = -EINTR;
                goto out_err;
        }

        if (mutex_lock_interruptible(&scst_mutex) != 0) {
                res = -EINTR;
                goto out_m_up;
        }
211         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
                if (strcmp(t->name, vtt->name) == 0) {
                        PRINT_ERROR("Target driver %s already registered",
                                vtt->name);
                        res = -EEXIST;
                        mutex_unlock(&scst_mutex);
                        goto out_cleanup;
                }
218         }
219         mutex_unlock(&scst_mutex);
220
221         TRACE_DBG("%s", "Calling target driver's detect()");
222         res = vtt->detect(vtt);
223         TRACE_DBG("Target driver's detect() returned %d", res);
224         if (res < 0) {
225                 PRINT_ERROR("%s", "The detect() routine failed");
226                 res = -EINVAL;
227                 goto out_cleanup;
228         }
229
230         mutex_lock(&scst_mutex);
231         list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
232         mutex_unlock(&scst_mutex);
233
234         res = 0;
235
236         PRINT_INFO("Target template %s registered successfully", vtt->name);
237
238         mutex_unlock(&m);
239
240 out:
241         TRACE_EXIT_RES(res);
242         return res;
243
244 out_cleanup:
245         scst_cleanup_proc_target_dir_entries(vtt);
246
247 out_m_up:
248         mutex_unlock(&m);
249
250 out_err:
251         PRINT_ERROR("Failed to register target template %s", vtt->name);
252         goto out;
253 }
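/*
 * Usage sketch (hypothetical target driver; the names below are made up and
 * not part of this file): a target driver fills in a struct scst_tgt_template
 * with at least the detect(), release() and xmit_response() callbacks checked
 * above and registers it, normally through the scst_register_target_template()
 * wrapper from scst.h, which is assumed here to pass SCST_INTERFACE_VERSION:
 *
 *   static int my_detect(struct scst_tgt_template *tt)
 *   {
 *           ... probe HBAs, call scst_register() for each found target ...
 *           return 0;
 *   }
 *
 *   static struct scst_tgt_template my_tgt_template = {
 *           .name          = "my_tgt",
 *           .sg_tablesize  = 128,
 *           .detect        = my_detect,
 *           .release       = my_release,
 *           .xmit_response = my_xmit_response,
 *   };
 *
 *   err = scst_register_target_template(&my_tgt_template);
 *   ...
 *   scst_unregister_target_template(&my_tgt_template);
 */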
254
255 void scst_unregister_target_template(struct scst_tgt_template *vtt)
256 {
257         struct scst_tgt *tgt;
258         struct scst_tgt_template *t;
259         int found = 0;
260
261         TRACE_ENTRY();
262
263         mutex_lock(&scst_mutex);
264
265         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
266                 if (strcmp(t->name, vtt->name) == 0) {
267                         found = 1;
268                         break;
269                 }
270         }
271         if (!found) {
272                 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
273                 goto out_up;
274         }
275
276 restart:
277         list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
278                 mutex_unlock(&scst_mutex);
279                 scst_unregister(tgt);
280                 mutex_lock(&scst_mutex);
281                 goto restart;
282         }
283         list_del(&vtt->scst_template_list_entry);
284
285         PRINT_INFO("Target template %s unregistered successfully", vtt->name);
286
287 out_up:
288         mutex_unlock(&scst_mutex);
289
290         scst_cleanup_proc_target_dir_entries(vtt);
291
292         TRACE_EXIT();
293         return;
294 }
295
296 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
297         const char *target_name)
298 {
299         struct scst_tgt *tgt;
300
301         TRACE_ENTRY();
302
303         tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
304         if (tgt == NULL) {
305                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
306                 goto out_err;
307         }
308
309         INIT_LIST_HEAD(&tgt->sess_list);
310         init_waitqueue_head(&tgt->unreg_waitQ);
311         tgt->tgtt = vtt;
312         tgt->sg_tablesize = vtt->sg_tablesize;
313         spin_lock_init(&tgt->tgt_lock);
314         INIT_LIST_HEAD(&tgt->retry_cmd_list);
315         atomic_set(&tgt->finished_cmds, 0);
316         init_timer(&tgt->retry_timer);
317         tgt->retry_timer.data = (unsigned long)tgt;
318         tgt->retry_timer.function = scst_tgt_retry_timer_fn;
319
320         scst_suspend_activity();
321         mutex_lock(&scst_mutex);
322
323         if (target_name != NULL) {
324                 int len = strlen(target_name) + 1 +
325                         strlen(SCST_DEFAULT_ACG_NAME) + 1;
326
327                 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
328                 if (tgt->default_group_name == NULL) {
329                         TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
330                                 "group name failed");
331                         goto out_free_err;
332                 }
333                 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
334                         target_name);
335         }
336
337         if (scst_build_proc_target_entries(tgt) < 0)
338                 goto out_free_name;
339         else
340                 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
341
342         mutex_unlock(&scst_mutex);
343         scst_resume_activity();
344
345         PRINT_INFO("Target %s (%p) for template %s registered successfully",
346                 target_name, tgt, vtt->name);
347
348 out:
349         TRACE_EXIT();
350         return tgt;
351
352 out_free_name:
353         if (tgt->default_group_name)
354                 kfree(tgt->default_group_name);
355
356 out_free_err:
357         mutex_unlock(&scst_mutex);
358         scst_resume_activity();
359
360         kfree(tgt);
361         tgt = NULL;
362
363 out_err:
364         PRINT_ERROR("Failed to register target %s for template %s",
365                 target_name, vtt->name);
366         goto out;
367 }
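/*
 * Usage sketch (hypothetical; my_tgt_template and the port name are made up):
 * scst_register() is typically called from a target driver, e.g. from its
 * detect() callback, once per target port, and scst_unregister() undoes it,
 * typically from the driver's release() callback:
 *
 *   struct scst_tgt *tgt = scst_register(&my_tgt_template, "my_tgt_port0");
 *   if (tgt == NULL)
 *           return -ENOMEM;     (NULL is the only failure indication here)
 *   ...
 *   scst_unregister(tgt);
 */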
368
369 static inline int test_sess_list(struct scst_tgt *tgt)
370 {
371         int res;
372         mutex_lock(&scst_mutex);
373         res = list_empty(&tgt->sess_list);
374         mutex_unlock(&scst_mutex);
375         return res;
376 }
377
378 void scst_unregister(struct scst_tgt *tgt)
379 {
380         struct scst_session *sess;
381         struct scst_tgt_template *vtt = tgt->tgtt;
382
383         TRACE_ENTRY();
384
385         TRACE_DBG("%s", "Calling target driver's release()");
386         tgt->tgtt->release(tgt);
387         TRACE_DBG("%s", "Target driver's release() returned");
388
389         mutex_lock(&scst_mutex);
390         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
391                 sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
392         }
393         mutex_unlock(&scst_mutex);
394
395         TRACE_DBG("%s", "Waiting for sessions shutdown");
396         wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
397         TRACE_DBG("%s", "wait_event() returned");
398
399         scst_suspend_activity();
400         mutex_lock(&scst_mutex);
401
402         list_del(&tgt->tgt_list_entry);
403
404         scst_cleanup_proc_target_entries(tgt);
405
406         if (tgt->default_group_name)
407                 kfree(tgt->default_group_name);
408
409         mutex_unlock(&scst_mutex);
410         scst_resume_activity();
411
412         del_timer_sync(&tgt->retry_timer);
413
414         PRINT_INFO("Target %p for template %s unregistered successfully",
415                 tgt, vtt->name);
416
417         kfree(tgt);
418
419         TRACE_EXIT();
420         return;
421 }
422
423 void scst_suspend_activity(void)
424 {
425         TRACE_ENTRY();
426
427         mutex_lock(&scst_suspend_mutex);
428
429         TRACE_MGMT_DBG("suspend_count %d", suspend_count);
430         suspend_count++;
431         if (suspend_count > 1)
432                 goto out_up;
433
434         set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
435         set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
436         smp_mb__after_set_bit();
437
438         TRACE_MGMT_DBG("Waiting for %d active commands to complete",
439               atomic_read(&scst_cmd_count));
440         wait_event(scst_dev_cmd_waitQ, atomic_read(&scst_cmd_count) == 0);
441         TRACE_MGMT_DBG("%s", "wait_event() returned");
442
443         clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
444         smp_mb__after_clear_bit();
445
446         TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
447               atomic_read(&scst_cmd_count));
448         wait_event(scst_dev_cmd_waitQ, atomic_read(&scst_cmd_count) == 0);
449         TRACE_MGMT_DBG("%s", "wait_event() returned");
450
451 out_up:
452         mutex_unlock(&scst_suspend_mutex);
453
454         TRACE_EXIT();
455         return;
456 }
457
458 void scst_resume_activity(void)
459 {
460         struct scst_cmd_lists *l;
461
462         TRACE_ENTRY();
463
464         mutex_lock(&scst_suspend_mutex);
465
466         suspend_count--;
467         TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
468         if (suspend_count > 0)
469                 goto out_up;
470
471         clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
472         smp_mb__after_clear_bit();
473
474         list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
475                 wake_up_all(&l->cmd_list_waitQ);
476         }
477         wake_up_all(&scst_init_cmd_list_waitQ);
478
479         spin_lock_irq(&scst_mcmd_lock);
480         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
481                 struct scst_mgmt_cmd *m;
482                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
483                                 mgmt_cmd_list_entry);
484                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
485                         "mgmt cmd list", m);
486                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
487         }
488         spin_unlock_irq(&scst_mcmd_lock);
489         wake_up_all(&scst_mgmt_cmd_list_waitQ);
490
491 out_up:
492         mutex_unlock(&scst_suspend_mutex);
493
494         TRACE_EXIT();
495         return;
496 }
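/*
 * Usage pattern (as used throughout this file): scst_suspend_activity() and
 * scst_resume_activity() bracket a reconfiguration section that also takes
 * scst_mutex, e.g.:
 *
 *   scst_suspend_activity();
 *   mutex_lock(&scst_mutex);
 *   ... add/remove devices, reassign dev handlers, ...
 *   mutex_unlock(&scst_mutex);
 *   scst_resume_activity();
 *
 * The calls nest: only the outermost suspend actually waits for the command
 * count to drop to zero, and only the matching outermost resume wakes the
 * command lists up again (see suspend_count above).
 */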
497
498 static int scst_register_device(struct scsi_device *scsidp)
499 {
500         int res = 0;
501         struct scst_device *dev;
502         struct scst_dev_type *dt;
503
504         TRACE_ENTRY();
505
506         scst_suspend_activity();
507         mutex_lock(&scst_mutex);
508
509         res = scst_alloc_device(GFP_KERNEL, &dev);
510         if (res != 0)
511                 goto out_up;
512
513         dev->type = scsidp->type;
514
515         dev->rq_disk = alloc_disk(1);
516         if (dev->rq_disk == NULL) {
517                 res = -ENOMEM;
518                 goto out_free_dev;
519         }
520         dev->rq_disk->major = SCST_MAJOR;
521
522         dev->scsi_dev = scsidp;
523
524         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
525
526         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
527                 if (dt->type == scsidp->type) {
528                         res = scst_assign_dev_handler(dev, dt);
529                         if (res != 0)
530                                 goto out_free;
531                         break;
532                 }
533         }
534
535 out_up:
536         mutex_unlock(&scst_mutex);
537         scst_resume_activity();
538
539         if (res == 0) {
540                 PRINT_INFO("Attached SCSI target mid-level at "
541                     "scsi%d, channel %d, id %d, lun %d, type %d",
542                     scsidp->host->host_no, scsidp->channel, scsidp->id,
543                     scsidp->lun, scsidp->type);
544         } else {
545                 PRINT_ERROR("Failed to attach SCSI target mid-level "
546                     "at scsi%d, channel %d, id %d, lun %d, type %d",
547                     scsidp->host->host_no, scsidp->channel, scsidp->id,
548                     scsidp->lun, scsidp->type);
549         }
550
551         TRACE_EXIT_RES(res);
552         return res;
553
554 out_free:
555         list_del(&dev->dev_list_entry);
556         put_disk(dev->rq_disk);
557
558 out_free_dev:
559         scst_free_device(dev);
560         goto out_up;
561 }
562
563 static void scst_unregister_device(struct scsi_device *scsidp)
564 {
565         struct scst_device *d, *dev = NULL;
566         struct scst_acg_dev *acg_dev, *aa;
567
568         TRACE_ENTRY();
569
570         scst_suspend_activity();
571         mutex_lock(&scst_mutex);
572
573         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
574                 if (d->scsi_dev == scsidp) {
575                         dev = d;
576                         TRACE_DBG("Target device %p found", dev);
577                         break;
578                 }
579         }
580         if (dev == NULL) {
581                 PRINT_ERROR("%s", "Target device not found");
582                 goto out_unblock;
583         }
584
585         list_del(&dev->dev_list_entry);
586
587         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
588                                  dev_acg_dev_list_entry)
589         {
590                 scst_acg_remove_dev(acg_dev->acg, dev);
591         }
592
593         scst_assign_dev_handler(dev, &scst_null_devtype);
594
595         put_disk(dev->rq_disk);
596         scst_free_device(dev);
597
598         PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
599                 "id %d, lun %d, type %d", scsidp->host->host_no,
600                 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
601
602 out_unblock:
603         mutex_unlock(&scst_mutex);
604         scst_resume_activity();
605
606         TRACE_EXIT();
607         return;
608 }
609
610 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
611 {
612         int res = 0;
613
614         if (dev_handler->parse == NULL) {
615                 PRINT_ERROR("scst dev_type driver %s doesn't have a "
616                         "parse() method.", dev_handler->name);
617                 res = -EINVAL;
618                 goto out;
619         }
620
621         if (dev_handler->exec == NULL) {
622 #ifdef ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
623                 dev_handler->exec_atomic = 1;
624 #else
625                 dev_handler->exec_atomic = 0;
626 #endif
627         }
628
629         if (dev_handler->dev_done == NULL)
630                 dev_handler->dev_done_atomic = 1;
631
632 out:
633         TRACE_EXIT_RES(res);
634         return res;
635 }
636
637 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
638         const char *dev_name)
639 {
640         int res, rc;
641         struct scst_device *dev = NULL;
642
643         TRACE_ENTRY();
644
645         if (dev_handler == NULL) {
646                 PRINT_ERROR("%s: valid device handler must be supplied",
647                             __func__);
648                 res = -EINVAL;
649                 goto out;
650         }
651
652         if (dev_name == NULL) {
653                 PRINT_ERROR("%s: device name must be non-NULL", __func__);
654                 res = -EINVAL;
655                 goto out;
656         }
657
658         res = scst_dev_handler_check(dev_handler);
659         if (res != 0)
660                 goto out;
661
662         scst_suspend_activity();
663         if (mutex_lock_interruptible(&scst_mutex) != 0) {
664                 res = -EINTR;
665                 goto out_resume;
666         }
667
668         res = scst_alloc_device(GFP_KERNEL, &dev);
669         if (res != 0)
670                 goto out_up;
671
672         dev->type = dev_handler->type;
673         dev->scsi_dev = NULL;
674         dev->virt_name = dev_name;
675         dev->virt_id = scst_virt_dev_last_id++;
676
677         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
678
679         res = dev->virt_id;
680
681         rc = scst_assign_dev_handler(dev, dev_handler);
682         if (rc != 0) {
683                 res = rc;
684                 goto out_free_del;
685         }
686
687 out_up:
688         mutex_unlock(&scst_mutex);
689
690 out_resume:
691         scst_resume_activity();
692
693 out:
694         if (res > 0) {
695                 PRINT_INFO("Attached SCSI target mid-level to virtual "
696                     "device %s (id %d)", dev_name, dev->virt_id);
        } else {
                PRINT_ERROR("Failed to attach SCSI target mid-level to "
                    "virtual device %s", dev_name);
        }
701
702         TRACE_EXIT_RES(res);
703         return res;
704
705 out_free_del:
706         list_del(&dev->dev_list_entry);
707         scst_free_device(dev);
708         goto out_up;
709 }
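/*
 * Usage sketch (hypothetical virtual-device handler, in the spirit of
 * scst_vdisk; the names are made up): each backing device is registered under
 * its dev handler, and the returned positive virt_id is kept for the matching
 * scst_unregister_virtual_device() call:
 *
 *   int virt_id = scst_register_virtual_device(&my_vdev_devtype, "disk01");
 *   if (virt_id < 0)
 *           return virt_id;
 *   ...
 *   scst_unregister_virtual_device(virt_id);
 */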
710
711 void scst_unregister_virtual_device(int id)
712 {
713         struct scst_device *d, *dev = NULL;
714         struct scst_acg_dev *acg_dev, *aa;
715
716         TRACE_ENTRY();
717
718         scst_suspend_activity();
719         mutex_lock(&scst_mutex);
720
721         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
722                 if (d->virt_id == id) {
723                         dev = d;
724                         TRACE_DBG("Target device %p (id %d) found", dev, id);
725                         break;
726                 }
727         }
728         if (dev == NULL) {
729                 PRINT_ERROR("Target virtual device (id %d) not found", id);
730                 goto out_unblock;
731         }
732
733         list_del(&dev->dev_list_entry);
734
735         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
736                                  dev_acg_dev_list_entry)
737         {
738                 scst_acg_remove_dev(acg_dev->acg, dev);
739         }
740
741         scst_assign_dev_handler(dev, &scst_null_devtype);
742
743         PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
744                 "(id %d)", dev->virt_name, dev->virt_id);
745
746         scst_free_device(dev);
747
748 out_unblock:
749         mutex_unlock(&scst_mutex);
750         scst_resume_activity();
751
752         TRACE_EXIT();
753         return;
754 }
755
756 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
757         const char *version)
758 {
759         struct scst_dev_type *dt;
760         struct scst_device *dev;
761         int res;
762         int exist;
763
764         TRACE_ENTRY();
765
766         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
767                 PRINT_ERROR("Incorrect version of dev handler %s",
768                         dev_type->name);
769                 res = -EINVAL;
770                 goto out_error;
771         }
772
773         res = scst_dev_handler_check(dev_type);
774         if (res != 0)
775                 goto out_error;
776
777 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && !defined(STRICT_SERIALIZING)
778         if (dev_type->exec == NULL) {
                PRINT_ERROR("Pass-through dev handlers (handler \"%s\") are not "
                        "supported. Consider applying the patch "
                        "scst_exec_req_fifo-<kernel-version>.patch to your "
                        "kernel or defining STRICT_SERIALIZING", dev_type->name);
783                 res = -EINVAL;
784                 goto out;
785         }
786 #endif
787
788         scst_suspend_activity();
789         if (mutex_lock_interruptible(&scst_mutex) != 0) {
790                 res = -EINTR;
791                 goto out_err_res;
792         }
793
        exist = 0;
        list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
                if (strcmp(dt->name, dev_type->name) == 0) {
                        PRINT_ERROR("Device type handler \"%s\" already "
                                "exists", dt->name);
                        exist = 1;
                        break;
                }
        }
        if (exist) {
                res = -EEXIST;
                goto out_up;
        }
805
806         res = scst_build_proc_dev_handler_dir_entries(dev_type);
807         if (res < 0)
808                 goto out_up;
809
810         list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
811
812         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
813                 if ((dev->scsi_dev == NULL) || (dev->handler != &scst_null_devtype))
814                         continue;
815                 if (dev->scsi_dev->type == dev_type->type)
816                         scst_assign_dev_handler(dev, dev_type);
817         }
818
819         mutex_unlock(&scst_mutex);
820         scst_resume_activity();
821
822         if (res == 0) {
823                 PRINT_INFO("Device handler \"%s\" for type %d registered "
824                         "successfully", dev_type->name, dev_type->type);
825         }
826
827 out:
828         TRACE_EXIT_RES(res);
829         return res;
830
831 out_up:
832         mutex_unlock(&scst_mutex);
833
834 out_err_res:
835         scst_resume_activity();
836
837 out_error:
838         PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
839                 dev_type->name, dev_type->type);
840         goto out;
841 }
842
843 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
844 {
845         struct scst_device *dev;
846         struct scst_dev_type *dt;
847         int found = 0;
848
849         TRACE_ENTRY();
850
851         scst_suspend_activity();
852         mutex_lock(&scst_mutex);
853
854         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
855                 if (strcmp(dt->name, dev_type->name) == 0) {
856                         found = 1;
857                         break;
858                 }
859         }
860         if (!found) {
861                 PRINT_ERROR("Dev handler \"%s\" isn't registered",
862                         dev_type->name);
863                 goto out_up;
864         }
865
866         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
867                 if (dev->handler == dev_type) {
868                         scst_assign_dev_handler(dev, &scst_null_devtype);
869                         TRACE_DBG("Dev handler removed from device %p", dev);
870                 }
871         }
872
873         list_del(&dev_type->dev_type_list_entry);
874
875         mutex_unlock(&scst_mutex);
876         scst_resume_activity();
877
878         scst_cleanup_proc_dev_handler_dir_entries(dev_type);
879
880         PRINT_INFO("Device handler \"%s\" for type %d unloaded",
881                    dev_type->name, dev_type->type);
882
883 out:
884         TRACE_EXIT();
885         return;
886
887 out_up:
888         mutex_unlock(&scst_mutex);
889         scst_resume_activity();
890         goto out;
891 }
892
893 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
894         const char *version)
895 {
896         int res;
897
898         TRACE_ENTRY();
899
900         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
901                 PRINT_ERROR("Incorrect version of virtual dev handler %s",
902                         dev_type->name);
903                 res = -EINVAL;
904                 goto out_err;
905         }
906
907         res = scst_dev_handler_check(dev_type);
908         if (res != 0)
909                 goto out_err;
910
911         if (!dev_type->no_proc) {
912                 res = scst_build_proc_dev_handler_dir_entries(dev_type);
913                 if (res < 0)
914                         goto out_err;
915         }
916
917         if (dev_type->type != -1) {
918                 PRINT_INFO("Virtual device handler %s for type %d "
919                         "registered successfully", dev_type->name,
920                         dev_type->type);
921         } else {
922                 PRINT_INFO("Virtual device handler \"%s\" registered "
923                         "successfully", dev_type->name);
924         }
925
926 out:
927         TRACE_EXIT_RES(res);
928         return res;
929
930 out_err:
931         PRINT_ERROR("Failed to register virtual device handler \"%s\"",
932                 dev_type->name);
933         goto out;
934 }
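/*
 * Usage sketch (hypothetical dev handler; the names are made up): a virtual
 * dev handler fills in a struct scst_dev_type with at least a parse() method
 * (checked in scst_dev_handler_check() above) and registers it, normally
 * through the scst_register_virtual_dev_driver() wrapper from scst.h, which
 * is assumed here to pass SCST_INTERFACE_VERSION:
 *
 *   static struct scst_dev_type my_vdev_devtype = {
 *           .name     = "my_vdisk",
 *           .type     = TYPE_DISK,
 *           .parse    = my_parse,
 *           .exec     = my_exec,
 *           .dev_done = my_dev_done,
 *   };
 *
 *   err = scst_register_virtual_dev_driver(&my_vdev_devtype);
 *   ...
 *   scst_unregister_virtual_dev_driver(&my_vdev_devtype);
 */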
935
936 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
937 {
938         TRACE_ENTRY();
939
940         if (!dev_type->no_proc)
941                 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
942
943         PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
944
945         TRACE_EXIT();
946         return;
947 }
948
949 /* Called under scst_mutex */
950 int scst_add_dev_threads(struct scst_device *dev, int num)
951 {
952         int i, res = 0;
953         int n = 0;
954         struct scst_cmd_thread_t *thr;
955         char nm[12];
956
957         TRACE_ENTRY();
958
959         list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
960                 n++;
961         }
962
963         for (i = 0; i < num; i++) {
964                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
965                 if (!thr) {
966                         res = -ENOMEM;
                        PRINT_ERROR("Failed to allocate thr: %d", res);
968                         goto out;
969                 }
970                 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
971                 nm[ARRAY_SIZE(nm)-1] = '\0';
972                 thr->cmd_thread = kthread_run(scst_cmd_thread,
973                         &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
974                 if (IS_ERR(thr->cmd_thread)) {
975                         res = PTR_ERR(thr->cmd_thread);
976                         PRINT_ERROR("kthread_create() failed: %d", res);
977                         kfree(thr);
978                         goto out;
979                 }
980                 list_add(&thr->thread_list_entry, &dev->threads_list);
981         }
982
983 out:
984         TRACE_EXIT_RES(res);
985         return res;
986 }
987
988 /* Called under scst_mutex and suspended activity */
989 static int scst_create_dev_threads(struct scst_device *dev)
990 {
991         int res = 0;
992         int threads_num;
993
994         TRACE_ENTRY();
995
996         if (dev->handler->threads_num <= 0)
997                 goto out;
998
999         threads_num = dev->handler->threads_num;
1000
1001         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1002         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1003         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1004
1005         res = scst_add_dev_threads(dev, threads_num);
1006         if (res != 0)
1007                 goto out;
1008
1009         mutex_lock(&scst_suspend_mutex);
1010         list_add_tail(&dev->cmd_lists.lists_list_entry,
1011                 &scst_cmd_lists_list);
1012         mutex_unlock(&scst_suspend_mutex);
1013
1014         dev->p_cmd_lists = &dev->cmd_lists;
1015
1016 out:
1017         TRACE_EXIT_RES(res);
1018         return res;
1019 }
1020
1021 /* Called under scst_mutex */
1022 void scst_del_dev_threads(struct scst_device *dev, int num)
1023 {
1024         struct scst_cmd_thread_t *ct, *tmp;
1025         int i = 0;
1026
1027         TRACE_ENTRY();
1028
1029         list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1030                                 thread_list_entry) {
1031                 int rc = kthread_stop(ct->cmd_thread);
1032                 if (rc < 0)
1033                         TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1034                 list_del(&ct->thread_list_entry);
1035                 kfree(ct);
1036                 if ((num > 0) && (++i >= num))
1037                         break;
1038         }
1039
1040         TRACE_EXIT();
1041         return;
1042 }
1043
1044 /* Called under scst_mutex and suspended activity */
1045 static void scst_stop_dev_threads(struct scst_device *dev)
1046 {
1047         TRACE_ENTRY();
1048
1049         if (list_empty(&dev->threads_list))
1050                 goto out;
1051
1052         scst_del_dev_threads(dev, -1);
1053
1054         if (dev->p_cmd_lists == &dev->cmd_lists) {
1055                 mutex_lock(&scst_suspend_mutex);
1056                 list_del(&dev->cmd_lists.lists_list_entry);
1057                 mutex_unlock(&scst_suspend_mutex);
1058         }
1059
1060 out:
1061         TRACE_EXIT();
1062         return;
1063 }
1064
/* The activity is supposed to be suspended and scst_mutex held */
1066 int scst_assign_dev_handler(struct scst_device *dev,
1067         struct scst_dev_type *handler)
1068 {
1069         int res = 0;
1070         struct scst_tgt_dev *tgt_dev;
1071         LIST_HEAD(attached_tgt_devs);
1072
1073         TRACE_ENTRY();
1074
1075         sBUG_ON(handler == NULL);
1076
1077         if (dev->handler == handler)
1078                 goto out;
1079
1080         if (dev->handler && dev->handler->detach_tgt) {
1081                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1082                                 dev_tgt_dev_list_entry) {
1083                         TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1084                                 tgt_dev);
1085                         dev->handler->detach_tgt(tgt_dev);
1086                         TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1087                 }
1088         }
1089
1090         if (dev->handler && dev->handler->detach) {
1091                 TRACE_DBG("%s", "Calling dev handler's detach()");
1092                 dev->handler->detach(dev);
1093                 TRACE_DBG("%s", "Old handler's detach() returned");
1094         }
1095
1096         scst_stop_dev_threads(dev);
1097
1098         dev->handler = handler;
1099
1100         if (handler) {
1101                 res = scst_create_dev_threads(dev);
1102                 if (res != 0)
1103                         goto out_null;
1104         }
1105
1106         if (handler && handler->attach) {
1107                 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1108                 res = handler->attach(dev);
1109                 TRACE_DBG("New dev handler's attach() returned %d", res);
1110                 if (res != 0) {
1111                         PRINT_ERROR("New device handler's %s attach() "
1112                                 "failed: %d", handler->name, res);
1113                 }
1114                 goto out_thr_null;
1115         }
1116
1117         if (handler && handler->attach_tgt) {
1118                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1119                                 dev_tgt_dev_list_entry) {
1120                         TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1121                                 tgt_dev);
1122                         res = handler->attach_tgt(tgt_dev);
1123                         TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1124                         if (res != 0) {
1125                                 PRINT_ERROR("Device handler's %s attach_tgt() "
1126                                     "failed: %d", handler->name, res);
1127                                 goto out_err_detach_tgt;
1128                         }
1129                         list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1130                                 &attached_tgt_devs);
1131                 }
1132         }
1133
1134 out_thr_null:
1135         if (res != 0)
1136                 scst_stop_dev_threads(dev);
1137
1138 out_null:
1139         if (res != 0)
1140                 dev->handler = &scst_null_devtype;
1141
1142 out:
1143         TRACE_EXIT_RES(res);
1144         return res;
1145
1146 out_err_detach_tgt:
1147         if (handler && handler->detach_tgt) {
1148                 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1149                                  extra_tgt_dev_list_entry)
1150                 {
1151                         TRACE_DBG("Calling handler's detach_tgt(%p)",
1152                                 tgt_dev);
1153                         handler->detach_tgt(tgt_dev);
1154                         TRACE_DBG("%s", "Handler's detach_tgt() returned");
1155                 }
1156         }
1157         if (handler && handler->detach) {
1158                 TRACE_DBG("%s", "Calling handler's detach()");
1159                 handler->detach(dev);
1160                 TRACE_DBG("%s", "Handler's detach() returned");
1161         }
1162         goto out_null;
1163 }
1164
1165 int scst_cmd_threads_count(void)
1166 {
1167         int i;
1168
        /*
         * Just to narrow the race window in which the user could read a
         * just-changed value.
         */
1170         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1171         i = scst_threads_info.nr_cmd_threads;
1172         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1173         return i;
1174 }
1175
1176 static void scst_threads_info_init(void)
1177 {
1178         memset(&scst_threads_info, 0, sizeof(scst_threads_info));
1179         mutex_init(&scst_threads_info.cmd_threads_mutex);
1180         INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
1181 }
1182
/* scst_threads_info.cmd_threads_mutex is supposed to be held */
1184 void __scst_del_cmd_threads(int num)
1185 {
1186         struct scst_cmd_thread_t *ct, *tmp;
1187         int i;
1188
1189         TRACE_ENTRY();
1190
1191         i = scst_threads_info.nr_cmd_threads;
1192         if (num <= 0 || num > i) {
                PRINT_ERROR("Cannot delete %d cmd threads: only %d exist",
                        num, i);
1194                 return;
1195         }
1196
1197         list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1198                                 thread_list_entry) {
1199                 int res;
1200
1201                 res = kthread_stop(ct->cmd_thread);
1202                 if (res < 0)
1203                         TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1204                 list_del(&ct->thread_list_entry);
1205                 kfree(ct);
1206                 scst_threads_info.nr_cmd_threads--;
1207                 --num;
1208                 if (num == 0)
1209                         break;
1210         }
1211
1212         TRACE_EXIT();
1213         return;
1214 }
1215
/* scst_threads_info.cmd_threads_mutex is supposed to be held */
1217 int __scst_add_cmd_threads(int num)
1218 {
1219         int res = 0, i;
1220         static int scst_thread_num;
1221
1222         TRACE_ENTRY();
1223
1224         for (i = 0; i < num; i++) {
1225                 struct scst_cmd_thread_t *thr;
1226
1227                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1228                 if (!thr) {
1229                         res = -ENOMEM;
                        PRINT_ERROR("Failed to allocate thr: %d", res);
1231                         goto out_error;
1232                 }
1233                 thr->cmd_thread = kthread_run(scst_cmd_thread,
1234                         &scst_main_cmd_lists, "scsi_tgt%d",
1235                         scst_thread_num++);
1236                 if (IS_ERR(thr->cmd_thread)) {
1237                         res = PTR_ERR(thr->cmd_thread);
1238                         PRINT_ERROR("kthread_create() failed: %d", res);
1239                         kfree(thr);
1240                         goto out_error;
1241                 }
1242                 list_add(&thr->thread_list_entry,
1243                         &scst_threads_info.cmd_threads_list);
1244                 scst_threads_info.nr_cmd_threads++;
1245         }
1246         res = 0;
1247
1248 out:
1249         TRACE_EXIT_RES(res);
1250         return res;
1251
1252 out_error:
1253         if (i > 0)
                __scst_del_cmd_threads(i); /* i threads were added before the failure */
1255         goto out;
1256 }
1257
1258 int scst_add_cmd_threads(int num)
1259 {
1260         int res;
1261
1262         TRACE_ENTRY();
1263
1264         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1265         res = __scst_add_cmd_threads(num);
1266         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1267
1268         TRACE_EXIT_RES(res);
1269         return res;
1270 }
1271
1272 void scst_del_cmd_threads(int num)
1273 {
1274         TRACE_ENTRY();
1275
1276         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1277         __scst_del_cmd_threads(num);
1278         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1279
1280         TRACE_EXIT();
1281         return;
1282 }
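/*
 * Usage note (a sketch; both functions are exported at the end of this file):
 * a target driver or dev handler that needs more processing contexts for the
 * global command lists can grow and shrink the pool at runtime, e.g.:
 *
 *   scst_add_cmd_threads(2);      on its setup path
 *   ...
 *   scst_del_cmd_threads(2);      on the matching teardown path
 */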
1283
1284 static void scst_stop_all_threads(void)
1285 {
1286         TRACE_ENTRY();
1287
1288         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1289         __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1290         if (scst_threads_info.mgmt_cmd_thread)
1291                 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1292         if (scst_threads_info.mgmt_thread)
1293                 kthread_stop(scst_threads_info.mgmt_thread);
1294         if (scst_threads_info.init_cmd_thread)
1295                 kthread_stop(scst_threads_info.init_cmd_thread);
1296         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1297
1298         TRACE_EXIT();
1299         return;
1300 }
1301
1302 static int scst_start_all_threads(int num)
1303 {
1304         int res;
1305
1306         TRACE_ENTRY();
1307
1308         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1309         res = __scst_add_cmd_threads(num);
1310         if (res < 0)
1311                 goto out;
1312
1313         scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
1314                 NULL, "scsi_tgt_init");
1315         if (IS_ERR(scst_threads_info.init_cmd_thread)) {
1316                 res = PTR_ERR(scst_threads_info.init_cmd_thread);
1317                 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1318                 scst_threads_info.init_cmd_thread = NULL;
1319                 goto out;
1320         }
1321
1322         scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
1323                 NULL, "scsi_tgt_mc");
1324         if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
1325                 res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
1326                 PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
1327                 scst_threads_info.mgmt_cmd_thread = NULL;
1328                 goto out;
1329         }
1330
1331         scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
1332                 NULL, "scsi_tgt_mgmt");
1333         if (IS_ERR(scst_threads_info.mgmt_thread)) {
1334                 res = PTR_ERR(scst_threads_info.mgmt_thread);
1335                 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1336                 scst_threads_info.mgmt_thread = NULL;
1337                 goto out;
1338         }
1339
1340 out:
1341         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1342         TRACE_EXIT_RES(res);
1343         return res;
1344 }
1345
1346 void scst_get(void)
1347 {
1348         __scst_get(0);
1349 }
1350
1351 void scst_put(void)
1352 {
1353         __scst_put();
1354 }
1355
1356 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 15)
1357 static int scst_add(struct class_device *cdev)
1358 #else
1359 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1360 #endif
1361 {
1362         struct scsi_device *scsidp;
1363         int res = 0;
1364
1365         TRACE_ENTRY();
1366
1367         scsidp = to_scsi_device(cdev->dev);
1368         res = scst_register_device(scsidp);
1369
1370         TRACE_EXIT();
1371         return res;
1372 }
1373
1374 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 15)
1375 static void scst_remove(struct class_device *cdev)
1376 #else
1377 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1378 #endif
1379 {
1380         struct scsi_device *scsidp;
1381
1382         TRACE_ENTRY();
1383
1384         scsidp = to_scsi_device(cdev->dev);
1385         scst_unregister_device(scsidp);
1386
1387         TRACE_EXIT();
1388         return;
1389 }
1390
1391 static struct class_interface scst_interface = {
1392         .add = scst_add,
1393         .remove = scst_remove,
1394 };
1395
1396 static void __init scst_print_config(void)
1397 {
1398         char buf[128];
1399         int i, j;
1400
1401         i = snprintf(buf, sizeof(buf), "Enabled features: ");
1402         j = i;
1403
1404 #ifdef STRICT_SERIALIZING
1405         i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1406 #endif
1407
1408 #ifdef EXTRACHECKS
1409         i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1410                 (j == i) ? "" : ", ");
1411 #endif
1412
1413 #ifdef TRACING
1414         i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1415                 (j == i) ? "" : ", ");
1416 #endif
1417
1418 #ifdef DEBUG
1419         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1420                 (j == i) ? "" : ", ");
1421 #endif
1422
1423 #ifdef DEBUG_TM
1424         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1425                 (j == i) ? "" : ", ");
1426 #endif
1427
1428 #ifdef DEBUG_RETRY
1429         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1430                 (j == i) ? "" : ", ");
1431 #endif
1432
1433 #ifdef DEBUG_OOM
1434         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1435                 (j == i) ? "" : ", ");
1436 #endif
1437
1438 #ifdef DEBUG_SN
1439         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1440                 (j == i) ? "" : ", ");
1441 #endif
1442
1443 #ifdef USE_EXPECTED_VALUES
1444         i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1445                 (j == i) ? "" : ", ");
1446 #endif
1447
1448 #ifdef ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1449         i += snprintf(&buf[i], sizeof(buf) - i, "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1450                 (j == i) ? "" : ", ");
1451 #endif
1452
1453 #ifdef SCST_STRICT_SECURITY
1454         i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1455                 (j == i) ? "" : ", ");
1456 #endif
1457
1458 #ifdef SCST_HIGHMEM
1459         i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_HIGHMEM",
1460                 (j == i) ? "" : ", ");
1461 #endif
1462
1463         if (j != i)
1464                 PRINT_INFO("%s", buf);
1465 }
1466
1467 static int __init init_scst(void)
1468 {
1469         int res = 0, i;
1470         int scst_num_cpus;
1471
1472         TRACE_ENTRY();
1473
1474 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1475         {
1476                 struct scsi_request *req;
1477                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1478                         sizeof(req->sr_sense_buffer));
1479         }
1480 #else
1481         {
1482                 struct scsi_sense_hdr *shdr;
1483                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1484         }
1485 #endif
1486         {
1487                 struct scst_tgt_dev *t;
1488                 struct scst_cmd *c;
1489                 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1490                 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
1491         }
1492
1493         BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
1494         BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
1495         BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
1496         BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
1497
1498         mutex_init(&scst_mutex);
1499         INIT_LIST_HEAD(&scst_template_list);
1500         INIT_LIST_HEAD(&scst_dev_list);
1501         INIT_LIST_HEAD(&scst_dev_type_list);
1502         spin_lock_init(&scst_main_lock);
1503         INIT_LIST_HEAD(&scst_acg_list);
1504         spin_lock_init(&scst_init_lock);
1505         init_waitqueue_head(&scst_init_cmd_list_waitQ);
1506         INIT_LIST_HEAD(&scst_init_cmd_list);
1507 #if defined(DEBUG) || defined(TRACING)
1508         scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1509 #endif
1510         atomic_set(&scst_cmd_count, 0);
1511         spin_lock_init(&scst_cmd_mem_lock);
1512         spin_lock_init(&scst_mcmd_lock);
1513         INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1514         INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1515         init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1516         init_waitqueue_head(&scst_mgmt_waitQ);
1517         spin_lock_init(&scst_mgmt_lock);
1518         INIT_LIST_HEAD(&scst_sess_init_list);
1519         INIT_LIST_HEAD(&scst_sess_shut_list);
1520         init_waitqueue_head(&scst_dev_cmd_waitQ);
1521         mutex_init(&scst_suspend_mutex);
1522         INIT_LIST_HEAD(&scst_cmd_lists_list);
1523         scst_virt_dev_last_id = 1;
1524         spin_lock_init(&scst_temp_UA_lock);
1525
1526         spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1527         INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1528         init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1529         list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1530                 &scst_cmd_lists_list);
1531
1532         scst_num_cpus = num_online_cpus();
1533
1534         /* ToDo: register_cpu_notifier() */
1535
1536         if (scst_threads == 0)
1537                 scst_threads = scst_num_cpus;
1538
1539         if (scst_threads < 1) {
                PRINT_ERROR("%s", "scst_threads cannot be less than 1");
1541                 scst_threads = scst_num_cpus;
1542         }
1543
1544         scst_threads_info_init();
1545
1546 #define INIT_CACHEP(p, s, o) do {                                       \
1547                 p = KMEM_CACHE(s, SCST_SLAB_FLAGS);                     \
1548                 TRACE_MEM("Slab create: %s at %p size %zd", #s, p,      \
1549                           sizeof(struct s));                            \
1550                 if (p == NULL) {                                        \
1551                         res = -ENOMEM;                                  \
1552                         goto o;                                         \
1553                 }                                                       \
1554         } while (0)
1555
1556         INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1557         INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1558                         out_destroy_mgmt_cache);
1559         INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1560                         out_destroy_mgmt_stub_cache);
1561         {
1562                 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1563                 INIT_CACHEP(scst_sense_cachep, scst_sense, out_destroy_ua_cache);
1564         }
1565         INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
1566         INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1567         INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1568         INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
1569
1570         scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1571                 mempool_free_slab, scst_mgmt_cachep);
1572         if (scst_mgmt_mempool == NULL) {
1573                 res = -ENOMEM;
1574                 goto out_destroy_acg_cache;
1575         }
1576
1577         scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1578                 mempool_free_slab, scst_mgmt_stub_cachep);
1579         if (scst_mgmt_stub_mempool == NULL) {
1580                 res = -ENOMEM;
1581                 goto out_destroy_mgmt_mempool;
1582         }
1583
1584         scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
1585                 mempool_free_slab, scst_ua_cachep);
1586         if (scst_ua_mempool == NULL) {
1587                 res = -ENOMEM;
1588                 goto out_destroy_mgmt_stub_mempool;
1589         }
1590
        /* Losing sense data may have fatal consequences, so let's have a big pool */
1592         scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
1593                 mempool_free_slab, scst_sense_cachep);
1594         if (scst_sense_mempool == NULL) {
1595                 res = -ENOMEM;
1596                 goto out_destroy_ua_mempool;
1597         }
1598
1599         if (scst_max_cmd_mem == 0) {
1600                 struct sysinfo si;
1601                 si_meminfo(&si);
1602 #if BITS_PER_LONG == 32
1603                 scst_max_cmd_mem = min(((uint64_t)si.totalram << PAGE_SHIFT) >> 2,
1604                                         (uint64_t)1 << 30);
1605 #else
1606                 scst_max_cmd_mem = (si.totalram << PAGE_SHIFT) >> 2;
1607 #endif
1608         } else
1609                 scst_max_cmd_mem <<= 20;
1610
1611         res = scst_sgv_pools_init(scst_max_cmd_mem, 0);
1612         if (res != 0)
1613                 goto out_destroy_sense_mempool;
1614
1615         scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1616         if (scst_default_acg == NULL) {
1617                 res = -ENOMEM;
1618                 goto out_destroy_sgv_pool;
1619         }
1620
1621         res = scsi_register_interface(&scst_interface);
1622         if (res != 0)
1623                 goto out_free_acg;
1624
1625         scst_scsi_op_list_init();
1626
1627         for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1628                 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1629                 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1630                 tasklet_init(&scst_tasklets[i].tasklet, (void *)scst_cmd_tasklet,
1631                         (unsigned long)&scst_tasklets[i]);
1632         }
1633
1634         TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1635                 scst_threads);
1636
1637         res = scst_start_all_threads(scst_threads);
1638         if (res < 0)
1639                 goto out_thread_free;
1640
1641         res = scst_proc_init_module();
1642         if (res != 0)
1643                 goto out_thread_free;
1644
1645
        PRINT_INFO("SCST version %s loaded successfully (max mem for "
                "commands %lu MB)", SCST_VERSION_STRING, scst_max_cmd_mem >> 20);
1648
1649         scst_print_config();
1650
1651 out:
1652         TRACE_EXIT_RES(res);
1653         return res;
1654
1655 out_thread_free:
1656         scst_stop_all_threads();
1657
1658         scsi_unregister_interface(&scst_interface);
1659
1660 out_free_acg:
1661         scst_destroy_acg(scst_default_acg);
1662
1663 out_destroy_sgv_pool:
1664         scst_sgv_pools_deinit();
1665
1666 out_destroy_sense_mempool:
1667         mempool_destroy(scst_sense_mempool);
1668
1669 out_destroy_ua_mempool:
1670         mempool_destroy(scst_ua_mempool);
1671
1672 out_destroy_mgmt_stub_mempool:
1673         mempool_destroy(scst_mgmt_stub_mempool);
1674
1675 out_destroy_mgmt_mempool:
1676         mempool_destroy(scst_mgmt_mempool);
1677
1678 out_destroy_acg_cache:
1679         kmem_cache_destroy(scst_acgd_cachep);
1680
1681 out_destroy_tgt_cache:
1682         kmem_cache_destroy(scst_tgtd_cachep);
1683
1684 out_destroy_sess_cache:
1685         kmem_cache_destroy(scst_sess_cachep);
1686
1687 out_destroy_cmd_cache:
1688         kmem_cache_destroy(scst_cmd_cachep);
1689
1690 out_destroy_sense_cache:
1691         kmem_cache_destroy(scst_sense_cachep);
1692
1693 out_destroy_ua_cache:
1694         kmem_cache_destroy(scst_ua_cachep);
1695
1696 out_destroy_mgmt_stub_cache:
1697         kmem_cache_destroy(scst_mgmt_stub_cachep);
1698
1699 out_destroy_mgmt_cache:
1700         kmem_cache_destroy(scst_mgmt_cachep);
1701         goto out;
1702 }
1703
1704 static void __exit exit_scst(void)
1705 {
1706 #ifdef CONFIG_LOCKDEP
        static /* To hide lockdep's warning about a non-static key */
1708 #endif
1709         DECLARE_MUTEX_LOCKED(shm);
1710
1711         TRACE_ENTRY();
1712
1713         /* ToDo: unregister_cpu_notifier() */
1714
1715         scst_proc_cleanup_module();
1716
1717         scst_stop_all_threads();
1718
1719         scsi_unregister_interface(&scst_interface);
1720         scst_destroy_acg(scst_default_acg);
1721
1722         scst_sgv_pools_deinit();
1723
1724 #define DEINIT_CACHEP(p) do {           \
1725                 kmem_cache_destroy(p);  \
1726                 p = NULL;               \
1727         } while (0)
1728
1729         mempool_destroy(scst_mgmt_mempool);
1730         mempool_destroy(scst_mgmt_stub_mempool);
1731         mempool_destroy(scst_ua_mempool);
1732         mempool_destroy(scst_sense_mempool);
1733
1734         DEINIT_CACHEP(scst_mgmt_cachep);
1735         DEINIT_CACHEP(scst_mgmt_stub_cachep);
1736         DEINIT_CACHEP(scst_ua_cachep);
1737         DEINIT_CACHEP(scst_sense_cachep);
1738         DEINIT_CACHEP(scst_cmd_cachep);
1739         DEINIT_CACHEP(scst_sess_cachep);
1740         DEINIT_CACHEP(scst_tgtd_cachep);
1741         DEINIT_CACHEP(scst_acgd_cachep);
1742
1743         PRINT_INFO("%s", "SCST unloaded");
1744
1745         TRACE_EXIT();
1746         return;
1747 }
1748
1749 /*
1750  * Device Handler Side (i.e. scst_vdisk)
1751  */
1752 EXPORT_SYMBOL(__scst_register_dev_driver);
1753 EXPORT_SYMBOL(scst_unregister_dev_driver);
1754 EXPORT_SYMBOL(scst_register);
1755 EXPORT_SYMBOL(scst_unregister);
1756
1757 EXPORT_SYMBOL(scst_register_virtual_device);
1758 EXPORT_SYMBOL(scst_unregister_virtual_device);
1759 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
1760 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1761
1762 EXPORT_SYMBOL(scst_set_busy);
1763 EXPORT_SYMBOL(scst_set_cmd_error_status);
1764 EXPORT_SYMBOL(scst_set_cmd_error);
1765 EXPORT_SYMBOL(scst_set_resp_data_len);
1766 EXPORT_SYMBOL(scst_alloc_sense);
1767 EXPORT_SYMBOL(scst_alloc_set_sense);
1768 EXPORT_SYMBOL(scst_set_sense);
1769 EXPORT_SYMBOL(scst_set_cmd_error_sense);
1770
1771 EXPORT_SYMBOL(scst_process_active_cmd);
1772
1773 /*
1774  * Target Driver Side (i.e. HBA)
1775  */
1776 EXPORT_SYMBOL(scst_register_session);
1777 EXPORT_SYMBOL(scst_unregister_session_ex);
1778
1779 EXPORT_SYMBOL(__scst_register_target_template);
1780 EXPORT_SYMBOL(scst_unregister_target_template);
1781
1782 EXPORT_SYMBOL(scst_cmd_init_done);
1783 EXPORT_SYMBOL(scst_tgt_cmd_done);
1784 EXPORT_SYMBOL(scst_restart_cmd);
1785 EXPORT_SYMBOL(scst_rx_cmd);
1786 EXPORT_SYMBOL(scst_rx_data);
1787 EXPORT_SYMBOL(scst_rx_mgmt_fn);
1788
1789 EXPORT_SYMBOL(scst_find_cmd);
1790 EXPORT_SYMBOL(scst_find_cmd_by_tag);
1791
1792 /*
1793  * Global Commands
1794  */
1795 EXPORT_SYMBOL(scst_suspend_activity);
1796 EXPORT_SYMBOL(scst_resume_activity);
1797
1798 EXPORT_SYMBOL(scst_add_cmd_threads);
1799 EXPORT_SYMBOL(scst_del_cmd_threads);
1800
1801 #if defined(DEBUG) || defined(TRACING)
1802 EXPORT_SYMBOL(scst_proc_log_entry_read);
1803 EXPORT_SYMBOL(scst_proc_log_entry_write);
1804 #endif
1805
1806 EXPORT_SYMBOL(scst_create_proc_entry);
1807 EXPORT_SYMBOL(scst_single_seq_open);
1808
1809 EXPORT_SYMBOL(scst_get);
1810 EXPORT_SYMBOL(scst_put);
1811
1812 EXPORT_SYMBOL(scst_cmd_get);
1813 EXPORT_SYMBOL(scst_cmd_put);
1814
1815 EXPORT_SYMBOL(scst_alloc);
1816 EXPORT_SYMBOL(scst_free);
1817
1818 EXPORT_SYMBOL(scst_check_local_events);
1819
1820 /* Tgt_dev's threads local storage */
1821 EXPORT_SYMBOL(scst_add_thr_data);
1822 EXPORT_SYMBOL(scst_del_all_thr_data);
1823 EXPORT_SYMBOL(scst_dev_del_all_thr_data);
1824 EXPORT_SYMBOL(scst_find_thr_data);
1825
1826 /* SGV pool routines */
1827 EXPORT_SYMBOL(sgv_pool_create);
1828 EXPORT_SYMBOL(sgv_pool_destroy);
1829 EXPORT_SYMBOL(sgv_pool_set_allocator);
1830 EXPORT_SYMBOL(sgv_pool_alloc);
1831 EXPORT_SYMBOL(sgv_pool_free);
1832 EXPORT_SYMBOL(sgv_get_priv);
1833
1834 /* Generic parse() routines */
1835 EXPORT_SYMBOL(scst_calc_block_shift);
1836 EXPORT_SYMBOL(scst_sbc_generic_parse);
1837 EXPORT_SYMBOL(scst_cdrom_generic_parse);
1838 EXPORT_SYMBOL(scst_modisk_generic_parse);
1839 EXPORT_SYMBOL(scst_tape_generic_parse);
1840 EXPORT_SYMBOL(scst_changer_generic_parse);
1841 EXPORT_SYMBOL(scst_processor_generic_parse);
1842 EXPORT_SYMBOL(scst_raid_generic_parse);
1843
1844 /* Generic dev_done() routines */
1845 EXPORT_SYMBOL(scst_block_generic_dev_done);
1846 EXPORT_SYMBOL(scst_tape_generic_dev_done);
1847
1848 /*
1849  * Other Commands
1850  */
1851 EXPORT_SYMBOL(scst_get_cdb_info);
1852 EXPORT_SYMBOL(scst_cmd_get_tgt_priv_lock);
1853 EXPORT_SYMBOL(scst_cmd_set_tgt_priv_lock);
1854 EXPORT_SYMBOL(scst_obtain_device_parameters);
1855
1856 #ifdef DEBUG
1857 EXPORT_SYMBOL(scst_random);
1858 #endif
1859
1860 module_init(init_scst);
1861 module_exit(exit_scst);
1862
1863 MODULE_AUTHOR("Vladislav Bolkhovitin");
1864 MODULE_LICENSE("GPL");
1865 MODULE_DESCRIPTION("SCSI target core");
1866 MODULE_VERSION(SCST_VERSION_STRING);