/* scst/src/scst_main.c */
1 /*
2  *  scst_main.c
3  *
4  *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2007 - 2008 CMS Distribution Limited
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/module.h>
20
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
31
32 #include "scst.h"
33 #include "scst_priv.h"
34 #include "scst_mem.h"
35
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not \
38         recommended for performance reasons. Consider change VMSPLIT \
39         option or use 64-bit configuration instead. See README file for \
40         details."
41 #endif
42
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44     !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied on \
46         your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined. \
47         Pass-through dev handlers will not be supported."
48 #endif
49
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
51 #if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
52 #warning "Patch export_alloc_io_context-<kernel-version>.patch was not applied \
53         on your kernel. SCST will be working with not the best performance."
54 #endif
55 #endif
56
/**
 ** SCST global variables. They are all uninitialized to have their layout in
 ** memory be exactly as specified. Otherwise compiler puts zero-initialized
 ** variable separately from nonzero-initialized ones.
 **/

/*
 * All targets, devices and dev_types management is done under this mutex.
 *
 * It must NOT be used in any works (schedule_work(), etc.), because
 * otherwise a deadlock (double lock, actually) is possible, e.g., with
 * scst_user detach_tgt(), which is called under scst_mutex and calls
 * flush_scheduled_work().
 */
struct mutex scst_mutex;

/* Global registries of target templates, devices and device type
 * handlers; all three are manipulated under scst_mutex below. */
struct list_head scst_template_list;
struct list_head scst_dev_list;
struct list_head scst_dev_type_list;

spinlock_t scst_main_lock;

/* Slab caches and the mempools carved out of them.  The caches are
 * private to this file; the mempools are shared with other SCST units. */
static struct kmem_cache *scst_mgmt_cachep;
mempool_t *scst_mgmt_mempool;
static struct kmem_cache *scst_mgmt_stub_cachep;
mempool_t *scst_mgmt_stub_mempool;
static struct kmem_cache *scst_ua_cachep;
mempool_t *scst_ua_mempool;
static struct kmem_cache *scst_sense_cachep;
mempool_t *scst_sense_mempool;
struct kmem_cache *scst_tgtd_cachep;
struct kmem_cache *scst_sess_cachep;
struct kmem_cache *scst_acgd_cachep;

/* Access control groups; scst_default_acg is the fallback group. */
struct list_head scst_acg_list;
struct scst_acg *scst_default_acg;

/* State of the init-command processing path. */
spinlock_t scst_init_lock;
wait_queue_head_t scst_init_cmd_list_waitQ;
struct list_head scst_init_cmd_list;
unsigned int scst_init_poll_cnt;

struct kmem_cache *scst_cmd_cachep;

#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
unsigned long scst_trace_flag;
#endif

unsigned long scst_flags;
/* Number of SCSI commands currently in flight; drained to zero by
 * scst_suspend_activity()/scst_susp_wait() below. */
atomic_t scst_cmd_count;

struct scst_cmd_lists scst_main_cmd_lists;

struct scst_tasklet scst_tasklets[NR_CPUS];

/* Management command queues, protected by scst_mcmd_lock. */
spinlock_t scst_mcmd_lock;
struct list_head scst_active_mgmt_cmd_list;
struct list_head scst_delayed_mgmt_cmd_list;
wait_queue_head_t scst_mgmt_cmd_list_waitQ;

/* Session init/shutdown queues. */
wait_queue_head_t scst_mgmt_waitQ;
spinlock_t scst_mgmt_lock;
struct list_head scst_sess_init_list;
struct list_head scst_sess_shut_list;

/* Woken when scst_cmd_count reaches zero during activity suspension. */
wait_queue_head_t scst_dev_cmd_waitQ;

struct mutex scst_suspend_mutex;
/* List of command-list structures woken on resume; walked under
 * scst_suspend_mutex in __scst_resume_activity(). */
struct list_head scst_cmd_lists_list;

static int scst_threads;
struct scst_threads_info_t scst_threads_info;

/* Suspend nesting depth; protected by scst_suspend_mutex. */
static int suspend_count;

static int scst_virt_dev_last_id; /* protected by scst_mutex */

/*
 * This buffer and lock are intended to avoid memory allocation, which
 * could fail in improper places.
 */
spinlock_t scst_temp_UA_lock;
uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
static struct io_context *scst_ioc;
#endif
#endif

/* Command memory limits in MB, settable via the module parameters below. */
unsigned int scst_max_cmd_mem;
unsigned int scst_max_dev_cmd_mem;

module_param_named(scst_threads, scst_threads, int, 0);
MODULE_PARM_DESC(scst_threads, "SCSI target threads count");

module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
	"all SCSI commands of all devices at any given time in MB");

module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
	"by all SCSI commands of a device at any given time in MB");

/* Placeholder handler assigned to devices that have no real dev handler. */
struct scst_dev_type scst_null_devtype = {
	.name = "none",
};
164
165 static void __scst_resume_activity(void);
166
167 int __scst_register_target_template(struct scst_tgt_template *vtt,
168         const char *version)
169 {
170         int res = 0;
171         struct scst_tgt_template *t;
172         static DEFINE_MUTEX(m);
173
174         TRACE_ENTRY();
175
176         INIT_LIST_HEAD(&vtt->tgt_list);
177
178         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
179                 PRINT_ERROR("Incorrect version of target %s", vtt->name);
180                 res = -EINVAL;
181                 goto out_err;
182         }
183
184         if (!vtt->detect) {
185                 PRINT_ERROR("Target driver %s doesn't have a "
186                         "detect() method.", vtt->name);
187                 res = -EINVAL;
188                 goto out_err;
189         }
190
191         if (!vtt->release) {
192                 PRINT_ERROR("Target driver %s doesn't have a "
193                         "release() method.", vtt->name);
194                 res = -EINVAL;
195                 goto out_err;
196         }
197
198         if (!vtt->xmit_response) {
199                 PRINT_ERROR("Target driver %s doesn't have a "
200                         "xmit_response() method.", vtt->name);
201                 res = -EINVAL;
202                 goto out_err;
203         }
204
205         if (vtt->threads_num < 0) {
206                 PRINT_ERROR("Wrong threads_num value %d for "
207                         "target \"%s\"", vtt->threads_num,
208                         vtt->name);
209                 res = -EINVAL;
210                 goto out_err;
211         }
212
213         if (!vtt->no_proc_entry) {
214                 res = scst_build_proc_target_dir_entries(vtt);
215                 if (res < 0)
216                         goto out_err;
217         }
218
219         if (vtt->rdy_to_xfer == NULL)
220                 vtt->rdy_to_xfer_atomic = 1;
221
222         if (mutex_lock_interruptible(&m) != 0)
223                 goto out_err;
224
225         if (mutex_lock_interruptible(&scst_mutex) != 0)
226                 goto out_m_up;
227         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
228                 if (strcmp(t->name, vtt->name) == 0) {
229                         PRINT_ERROR("Target driver %s already registered",
230                                 vtt->name);
231                         mutex_unlock(&scst_mutex);
232                         goto out_cleanup;
233                 }
234         }
235         mutex_unlock(&scst_mutex);
236
237         TRACE_DBG("%s", "Calling target driver's detect()");
238         res = vtt->detect(vtt);
239         TRACE_DBG("Target driver's detect() returned %d", res);
240         if (res < 0) {
241                 PRINT_ERROR("%s", "The detect() routine failed");
242                 res = -EINVAL;
243                 goto out_cleanup;
244         }
245
246         mutex_lock(&scst_mutex);
247         list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
248         mutex_unlock(&scst_mutex);
249
250         res = 0;
251
252         PRINT_INFO("Target template %s registered successfully", vtt->name);
253
254         mutex_unlock(&m);
255
256 out:
257         TRACE_EXIT_RES(res);
258         return res;
259
260 out_cleanup:
261         scst_cleanup_proc_target_dir_entries(vtt);
262
263 out_m_up:
264         mutex_unlock(&m);
265
266 out_err:
267         PRINT_ERROR("Failed to register target template %s", vtt->name);
268         goto out;
269 }
270 EXPORT_SYMBOL(__scst_register_target_template);
271
272 void scst_unregister_target_template(struct scst_tgt_template *vtt)
273 {
274         struct scst_tgt *tgt;
275         struct scst_tgt_template *t;
276         int found = 0;
277
278         TRACE_ENTRY();
279
280         mutex_lock(&scst_mutex);
281
282         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
283                 if (strcmp(t->name, vtt->name) == 0) {
284                         found = 1;
285                         break;
286                 }
287         }
288         if (!found) {
289                 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
290                 goto out_up;
291         }
292
293 restart:
294         list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
295                 mutex_unlock(&scst_mutex);
296                 scst_unregister(tgt);
297                 mutex_lock(&scst_mutex);
298                 goto restart;
299         }
300         list_del(&vtt->scst_template_list_entry);
301
302         PRINT_INFO("Target template %s unregistered successfully", vtt->name);
303
304 out_up:
305         mutex_unlock(&scst_mutex);
306
307         scst_cleanup_proc_target_dir_entries(vtt);
308
309         TRACE_EXIT();
310         return;
311 }
312 EXPORT_SYMBOL(scst_unregister_target_template);
313
/**
 * scst_register() - create and register a target instance
 * @vtt:         already registered template the target belongs to
 * @target_name: name used to build the target's default security group
 *               name; may be NULL, in which case no default group name
 *               is allocated
 *
 * Returns the new target on success or NULL on failure.
 */
struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
	const char *target_name)
{
	struct scst_tgt *tgt;
	int rc = 0;

	TRACE_ENTRY();

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	if (tgt == NULL) {
		TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
		rc = -ENOMEM;
		goto out_err;
	}

	INIT_LIST_HEAD(&tgt->sess_list);
	init_waitqueue_head(&tgt->unreg_waitQ);
	tgt->tgtt = vtt;
	tgt->sg_tablesize = vtt->sg_tablesize;
	spin_lock_init(&tgt->tgt_lock);
	INIT_LIST_HEAD(&tgt->retry_cmd_list);
	atomic_set(&tgt->finished_cmds, 0);
	/* The retry timer carries the target itself as callback argument */
	init_timer(&tgt->retry_timer);
	tgt->retry_timer.data = (unsigned long)tgt;
	tgt->retry_timer.function = scst_tgt_retry_timer_fn;

	/* Quiesce command activity while the global lists are modified */
	rc = scst_suspend_activity(true);
	if (rc != 0)
		goto out_free_tgt_err;

	if (mutex_lock_interruptible(&scst_mutex) != 0) {
		rc = -EINTR;
		goto out_resume_free;
	}

	if (target_name != NULL) {
		/* "<SCST_DEFAULT_ACG_NAME>_<target_name>" plus trailing NUL */
		int len = strlen(target_name) + 1 +
			strlen(SCST_DEFAULT_ACG_NAME) + 1;

		tgt->default_group_name = kmalloc(len, GFP_KERNEL);
		if (tgt->default_group_name == NULL) {
			TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
				"group name failed");
			rc = -ENOMEM;
			goto out_unlock_resume;
		}
		sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
			target_name);
	}

	rc = scst_build_proc_target_entries(tgt);
	if (rc < 0)
		goto out_free_name;
	else
		list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	PRINT_INFO("Target %s (%p) for template %s registered successfully",
		target_name, tgt, vtt->name);

out:
	TRACE_EXIT();
	return tgt;

	/* Error unwinding: each label undoes one step of the setup above */
out_free_name:
	kfree(tgt->default_group_name);

out_unlock_resume:
	mutex_unlock(&scst_mutex);

out_resume_free:
	scst_resume_activity();

out_free_tgt_err:
	kfree(tgt);
	tgt = NULL;

out_err:
	PRINT_ERROR("Failed to register target %s for template %s (error %d)",
		target_name, vtt->name, rc);
	goto out;
}
EXPORT_SYMBOL(scst_register);
399
400 static inline int test_sess_list(struct scst_tgt *tgt)
401 {
402         int res;
403         mutex_lock(&scst_mutex);
404         res = list_empty(&tgt->sess_list);
405         mutex_unlock(&scst_mutex);
406         return res;
407 }
408
/**
 * scst_unregister() - deregister and free a target
 * @tgt: target previously created by scst_register()
 *
 * Calls the driver's release() callback, waits until all of the target's
 * sessions are gone, then removes the target from the global lists and
 * frees it.
 */
void scst_unregister(struct scst_tgt *tgt)
{
	struct scst_session *sess;
	struct scst_tgt_template *vtt = tgt->tgtt;

	TRACE_ENTRY();

	TRACE_DBG("%s", "Calling target driver's release()");
	tgt->tgtt->release(tgt);
	TRACE_DBG("%s", "Target driver's release() returned");

	/* After release() no session may still be in the READY phase */
	mutex_lock(&scst_mutex);
	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
	}
	mutex_unlock(&scst_mutex);

	TRACE_DBG("%s", "Waiting for sessions shutdown");
	wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
	TRACE_DBG("%s", "wait_event() returned");

	/* Quiesce command activity while the global lists are modified */
	scst_suspend_activity(false);
	mutex_lock(&scst_mutex);

	list_del(&tgt->tgt_list_entry);

	scst_cleanup_proc_target_entries(tgt);

	kfree(tgt->default_group_name);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	/* Make sure the retry timer cannot fire on a freed target */
	del_timer_sync(&tgt->retry_timer);

	PRINT_INFO("Target %p for template %s unregistered successfully",
		tgt, vtt->name);

	kfree(tgt);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_unregister);
453
454 static int scst_susp_wait(bool interruptible)
455 {
456         int res = 0;
457
458         TRACE_ENTRY();
459
460         if (interruptible) {
461                 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
462                         (atomic_read(&scst_cmd_count) == 0),
463                         SCST_SUSPENDING_TIMEOUT);
464                 if (res <= 0) {
465                         __scst_resume_activity();
466                         if (res == 0)
467                                 res = -EBUSY;
468                 } else
469                         res = 0;
470         } else
471                 wait_event(scst_dev_cmd_waitQ,
472                            atomic_read(&scst_cmd_count) == 0);
473
474         TRACE_MGMT_DBG("wait_event() returned %d", res);
475
476         TRACE_EXIT_RES(res);
477         return res;
478 }
479
/**
 * scst_suspend_activity() - suspend SCSI command processing
 * @interruptible: when true, waits may be interrupted by a signal and
 *                 the function can fail with -EINTR or -EBUSY
 *
 * Increments the global suspend count; the first caller sets the
 * SUSPENDING/SUSPENDED flags and waits (twice, see below) until all
 * outstanding commands have drained.  Must be paired with
 * scst_resume_activity().
 *
 * Returns 0 on success or a negative error code.
 */
int scst_suspend_activity(bool interruptible)
{
	int res = 0;
	bool rep = false;

	TRACE_ENTRY();

	if (interruptible) {
		if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
			res = -EINTR;
			goto out;
		}
	} else
		mutex_lock(&scst_suspend_mutex);

	TRACE_MGMT_DBG("suspend_count %d", suspend_count);
	suspend_count++;
	/* Nested suspend: activity is already stopped, nothing to wait for */
	if (suspend_count > 1)
		goto out_up;

	set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	smp_mb__after_set_bit();

	/*
	 * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
	 * information about scst_user behavior.
	 *
	 * ToDo: make the global suspending unneeded (Switch to per-device
	 * reference counting? That would mean to switch off from lockless
	 * implementation of scst_translate_lun().. )
	 */

	if (atomic_read(&scst_cmd_count) != 0) {
		PRINT_INFO("Waiting for %d active commands to complete... This "
			"might take few minutes for disks or few hours for "
			"tapes, if you use long executed commands, like "
			"REWIND or FORMAT. In case, if you have a hung user "
			"space device (i.e. made using scst_user module) not "
			"responding to any commands, if might take virtually "
			"forever until the corresponding user space "
			"program recovers and starts responding or gets "
			"killed.", atomic_read(&scst_cmd_count));
		rep = true;
	}

	/* First drain: commands that were in flight before SUSPENDING */
	res = scst_susp_wait(interruptible);
	if (res != 0)
		goto out_clear;

	clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	smp_mb__after_clear_bit();

	TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
		atomic_read(&scst_cmd_count));

	/* Second drain: commands that raced with the flag transitions */
	res = scst_susp_wait(interruptible);
	if (res != 0)
		goto out_clear;

	if (rep)
		PRINT_INFO("%s", "All active commands completed");

out_up:
	mutex_unlock(&scst_suspend_mutex);

out:
	TRACE_EXIT_RES(res);
	return res;

out_clear:
	/* scst_susp_wait() already dropped the suspend count on failure */
	clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
	smp_mb__after_clear_bit();
	goto out_up;
}
EXPORT_SYMBOL(scst_suspend_activity);
556
/*
 * Drops one suspend reference and, when the count reaches zero, clears
 * SCST_FLAG_SUSPENDED and wakes all waiters so command processing can
 * restart.  Caller must hold scst_suspend_mutex (all callers in this
 * file do, directly or via scst_suspend_activity()).
 */
static void __scst_resume_activity(void)
{
	struct scst_cmd_lists *l;

	TRACE_ENTRY();

	suspend_count--;
	TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
	if (suspend_count > 0)
		goto out;

	clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
	smp_mb__after_clear_bit();

	/* Kick all registered command lists plus the init cmd list */
	list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
		wake_up_all(&l->cmd_list_waitQ);
	}
	wake_up_all(&scst_init_cmd_list_waitQ);

	/* Requeue one delayed mgmt cmd to the head of the active list;
	 * only the first is moved here -- presumably its processing takes
	 * care of any remaining ones (TODO confirm against mgmt cmd code) */
	spin_lock_irq(&scst_mcmd_lock);
	if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
		struct scst_mgmt_cmd *m;
		m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
				mgmt_cmd_list_entry);
		TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
			"mgmt cmd list", m);
		list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
	}
	spin_unlock_irq(&scst_mcmd_lock);
	wake_up_all(&scst_mgmt_cmd_list_waitQ);

out:
	TRACE_EXIT();
	return;
}
592
/*
 * Counterpart of scst_suspend_activity(): drops one suspend reference
 * under scst_suspend_mutex.
 */
void scst_resume_activity(void)
{
	TRACE_ENTRY();

	mutex_lock(&scst_suspend_mutex);
	__scst_resume_activity();
	mutex_unlock(&scst_suspend_mutex);

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_resume_activity);
605
/**
 * scst_register_device() - attach SCST to a newly seen SCSI device
 * @scsidp: the SCSI mid-layer device to attach to
 *
 * Allocates an scst_device for the SCSI device, gives it a gendisk with
 * SCST_MAJOR, links it into the global device list and assigns the first
 * registered dev handler of the matching SCSI type, if any.
 *
 * Returns 0 on success or a negative error code.
 */
static int scst_register_device(struct scsi_device *scsidp)
{
	int res = 0;
	struct scst_device *dev;
	struct scst_dev_type *dt;

	TRACE_ENTRY();

	/* Quiesce command activity while the global lists are modified */
	res = scst_suspend_activity(true);
	if (res != 0)
		goto out_err;

	if (mutex_lock_interruptible(&scst_mutex) != 0) {
		res = -EINTR;
		goto out_resume;
	}

	res = scst_alloc_device(GFP_KERNEL, &dev);
	if (res != 0)
		goto out_up;

	dev->type = scsidp->type;

	dev->rq_disk = alloc_disk(1);
	if (dev->rq_disk == NULL) {
		res = -ENOMEM;
		goto out_free_dev;
	}
	dev->rq_disk->major = SCST_MAJOR;

	dev->scsi_dev = scsidp;

	list_add_tail(&dev->dev_list_entry, &scst_dev_list);

	/* Pick the first handler registered for this SCSI device type */
	list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
		if (dt->type == scsidp->type) {
			res = scst_assign_dev_handler(dev, dt);
			if (res != 0)
				goto out_free;
			break;
		}
	}

out_up:
	mutex_unlock(&scst_mutex);

out_resume:
	scst_resume_activity();

out_err:
	if (res == 0) {
		PRINT_INFO("Attached SCSI target mid-level at "
		    "scsi%d, channel %d, id %d, lun %d, type %d",
		    scsidp->host->host_no, scsidp->channel, scsidp->id,
		    scsidp->lun, scsidp->type);
	} else {
		PRINT_ERROR("Failed to attach SCSI target mid-level "
		    "at scsi%d, channel %d, id %d, lun %d, type %d",
		    scsidp->host->host_no, scsidp->channel, scsidp->id,
		    scsidp->lun, scsidp->type);
	}

	TRACE_EXIT_RES(res);
	return res;

	/* Error unwinding for failures after the device was allocated */
out_free:
	list_del(&dev->dev_list_entry);
	put_disk(dev->rq_disk);

out_free_dev:
	scst_free_device(dev);
	goto out_up;
}
679
680 static void scst_unregister_device(struct scsi_device *scsidp)
681 {
682         struct scst_device *d, *dev = NULL;
683         struct scst_acg_dev *acg_dev, *aa;
684
685         TRACE_ENTRY();
686
687         scst_suspend_activity(false);
688         mutex_lock(&scst_mutex);
689
690         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
691                 if (d->scsi_dev == scsidp) {
692                         dev = d;
693                         TRACE_DBG("Target device %p found", dev);
694                         break;
695                 }
696         }
697         if (dev == NULL) {
698                 PRINT_ERROR("%s", "Target device not found");
699                 goto out_unblock;
700         }
701
702         list_del(&dev->dev_list_entry);
703
704         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
705                                  dev_acg_dev_list_entry)
706         {
707                 scst_acg_remove_dev(acg_dev->acg, dev);
708         }
709
710         scst_assign_dev_handler(dev, &scst_null_devtype);
711
712         put_disk(dev->rq_disk);
713         scst_free_device(dev);
714
715         PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
716                 "id %d, lun %d, type %d", scsidp->host->host_no,
717                 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
718
719 out_unblock:
720         mutex_unlock(&scst_mutex);
721         scst_resume_activity();
722
723         TRACE_EXIT();
724         return;
725 }
726
727 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
728 {
729         int res = 0;
730
731         if (dev_handler->parse == NULL) {
732                 PRINT_ERROR("scst dev_type driver %s doesn't have a "
733                         "parse() method.", dev_handler->name);
734                 res = -EINVAL;
735                 goto out;
736         }
737
738         if (dev_handler->exec == NULL) {
739 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
740                 dev_handler->exec_atomic = 1;
741 #else
742                 dev_handler->exec_atomic = 0;
743 #endif
744         }
745
746         if (dev_handler->dev_done == NULL)
747                 dev_handler->dev_done_atomic = 1;
748
749 out:
750         TRACE_EXIT_RES(res);
751         return res;
752 }
753
754 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
755         const char *dev_name)
756 {
757         int res, rc;
758         struct scst_device *dev = NULL;
759
760         TRACE_ENTRY();
761
762         if (dev_handler == NULL) {
763                 PRINT_ERROR("%s: valid device handler must be supplied",
764                             __func__);
765                 res = -EINVAL;
766                 goto out;
767         }
768
769         if (dev_name == NULL) {
770                 PRINT_ERROR("%s: device name must be non-NULL", __func__);
771                 res = -EINVAL;
772                 goto out;
773         }
774
775         res = scst_dev_handler_check(dev_handler);
776         if (res != 0)
777                 goto out;
778
779         res = scst_suspend_activity(true);
780         if (res != 0)
781                 goto out;
782
783         if (mutex_lock_interruptible(&scst_mutex) != 0) {
784                 res = -EINTR;
785                 goto out_resume;
786         }
787
788         res = scst_alloc_device(GFP_KERNEL, &dev);
789         if (res != 0)
790                 goto out_up;
791
792         dev->type = dev_handler->type;
793         dev->scsi_dev = NULL;
794         dev->virt_name = dev_name;
795         dev->virt_id = scst_virt_dev_last_id++;
796
797         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
798
799         res = dev->virt_id;
800
801         rc = scst_assign_dev_handler(dev, dev_handler);
802         if (rc != 0) {
803                 res = rc;
804                 goto out_free_del;
805         }
806
807 out_up:
808         mutex_unlock(&scst_mutex);
809
810 out_resume:
811         scst_resume_activity();
812
813 out:
814         if (res > 0) {
815                 PRINT_INFO("Attached SCSI target mid-level to virtual "
816                     "device %s (id %d)", dev_name, dev->virt_id);
817         } else {
818                 PRINT_INFO("Failed to attach SCSI target mid-level to "
819                     "virtual device %s", dev_name);
820         }
821
822         TRACE_EXIT_RES(res);
823         return res;
824
825 out_free_del:
826         list_del(&dev->dev_list_entry);
827         scst_free_device(dev);
828         goto out_up;
829 }
830 EXPORT_SYMBOL(scst_register_virtual_device);
831
/**
 * scst_unregister_virtual_device() - deregister a virtual device
 * @id: id previously returned by scst_register_virtual_device()
 *
 * Removes the device from all access control groups, detaches its dev
 * handler (replaced by the "null" handler) and frees it.  Logs an error
 * if no device with that id exists.
 */
void scst_unregister_virtual_device(int id)
{
	struct scst_device *d, *dev = NULL;
	struct scst_acg_dev *acg_dev, *aa;

	TRACE_ENTRY();

	/* Quiesce command activity while the global lists are modified */
	scst_suspend_activity(false);
	mutex_lock(&scst_mutex);

	list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
		if (d->virt_id == id) {
			dev = d;
			TRACE_DBG("Target device %p (id %d) found", dev, id);
			break;
		}
	}
	if (dev == NULL) {
		PRINT_ERROR("Target virtual device (id %d) not found", id);
		goto out_unblock;
	}

	list_del(&dev->dev_list_entry);

	/* Drop the device from every ACG that exported it */
	list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
				 dev_acg_dev_list_entry)
	{
		scst_acg_remove_dev(acg_dev->acg, dev);
	}

	scst_assign_dev_handler(dev, &scst_null_devtype);

	PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
		"(id %d)", dev->virt_name, dev->virt_id);

	scst_free_device(dev);

out_unblock:
	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	TRACE_EXIT();
	return;
}
EXPORT_SYMBOL(scst_unregister_virtual_device);
877
878 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
879         const char *version)
880 {
881         struct scst_dev_type *dt;
882         struct scst_device *dev;
883         int res;
884         int exist;
885
886         TRACE_ENTRY();
887
888         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
889                 PRINT_ERROR("Incorrect version of dev handler %s",
890                         dev_type->name);
891                 res = -EINVAL;
892                 goto out_error;
893         }
894
895         res = scst_dev_handler_check(dev_type);
896         if (res != 0)
897                 goto out_error;
898
899 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
900     !defined(CONFIG_SCST_STRICT_SERIALIZING)
901         if (dev_type->exec == NULL) {
902                 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
903                         "supported. Consider applying on your kernel patch "
904                         "scst_exec_req_fifo-<kernel-version>.patch or define "
905                         "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
906                 res = -EINVAL;
907                 goto out;
908         }
909 #endif
910
911         res = scst_suspend_activity(true);
912         if (res != 0)
913                 goto out_error;
914
915         if (mutex_lock_interruptible(&scst_mutex) != 0) {
916                 res = -EINTR;
917                 goto out_err_res;
918         }
919
920         exist = 0;
921         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
922                 if (strcmp(dt->name, dev_type->name) == 0) {
923                         PRINT_ERROR("Device type handler \"%s\" already "
924                                 "exist", dt->name);
925                         exist = 1;
926                         break;
927                 }
928         }
929         if (exist)
930                 goto out_up;
931
932         res = scst_build_proc_dev_handler_dir_entries(dev_type);
933         if (res < 0)
934                 goto out_up;
935
936         list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
937
938         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
939                 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
940                         continue;
941                 if (dev->scsi_dev->type == dev_type->type)
942                         scst_assign_dev_handler(dev, dev_type);
943         }
944
945         mutex_unlock(&scst_mutex);
946         scst_resume_activity();
947
948         if (res == 0) {
949                 PRINT_INFO("Device handler \"%s\" for type %d registered "
950                         "successfully", dev_type->name, dev_type->type);
951         }
952
953 out:
954         TRACE_EXIT_RES(res);
955         return res;
956
957 out_up:
958         mutex_unlock(&scst_mutex);
959
960 out_err_res:
961         scst_resume_activity();
962
963 out_error:
964         PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
965                 dev_type->name, dev_type->type);
966         goto out;
967 }
968 EXPORT_SYMBOL(__scst_register_dev_driver);
969
/**
 * scst_unregister_dev_driver() - deregister a dev handler
 * @dev_type: handler previously registered via __scst_register_dev_driver()
 *
 * Reassigns every device using this handler to the "null" handler,
 * unlinks the handler from the global list and removes its /proc entries.
 * Logs an error if the handler was never registered.
 */
void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
{
	struct scst_device *dev;
	struct scst_dev_type *dt;
	int found = 0;

	TRACE_ENTRY();

	/* Quiesce command activity while the global lists are modified */
	scst_suspend_activity(false);
	mutex_lock(&scst_mutex);

	list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
		if (strcmp(dt->name, dev_type->name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found) {
		PRINT_ERROR("Dev handler \"%s\" isn't registered",
			dev_type->name);
		goto out_up;
	}

	/* Detach the handler from all devices still using it */
	list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
		if (dev->handler == dev_type) {
			scst_assign_dev_handler(dev, &scst_null_devtype);
			TRACE_DBG("Dev handler removed from device %p", dev);
		}
	}

	list_del(&dev_type->dev_type_list_entry);

	mutex_unlock(&scst_mutex);
	scst_resume_activity();

	scst_cleanup_proc_dev_handler_dir_entries(dev_type);

	PRINT_INFO("Device handler \"%s\" for type %d unloaded",
		   dev_type->name, dev_type->type);

out:
	TRACE_EXIT();
	return;

out_up:
	mutex_unlock(&scst_mutex);
	scst_resume_activity();
	goto out;
}
EXPORT_SYMBOL(scst_unregister_dev_driver);
1020
1021 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1022         const char *version)
1023 {
1024         int res;
1025
1026         TRACE_ENTRY();
1027
1028         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1029                 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1030                         dev_type->name);
1031                 res = -EINVAL;
1032                 goto out_err;
1033         }
1034
1035         res = scst_dev_handler_check(dev_type);
1036         if (res != 0)
1037                 goto out_err;
1038
1039         if (!dev_type->no_proc) {
1040                 res = scst_build_proc_dev_handler_dir_entries(dev_type);
1041                 if (res < 0)
1042                         goto out_err;
1043         }
1044
1045         if (dev_type->type != -1) {
1046                 PRINT_INFO("Virtual device handler %s for type %d "
1047                         "registered successfully", dev_type->name,
1048                         dev_type->type);
1049         } else {
1050                 PRINT_INFO("Virtual device handler \"%s\" registered "
1051                         "successfully", dev_type->name);
1052         }
1053
1054 out:
1055         TRACE_EXIT_RES(res);
1056         return res;
1057
1058 out_err:
1059         PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1060                 dev_type->name);
1061         goto out;
1062 }
1063 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
1064
1065 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1066 {
1067         TRACE_ENTRY();
1068
1069         if (!dev_type->no_proc)
1070                 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1071
1072         PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1073
1074         TRACE_EXIT();
1075         return;
1076 }
1077 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1078
/*
 * Creates and starts "num" additional command processing threads for "dev",
 * named "<handler>d<dev_num>_<n>" where <n> continues after the threads
 * already on dev->threads_list.  Called under scst_mutex.
 *
 * Returns 0 on success, negative error code on failure; threads created
 * before a failure are kept on dev->threads_list.
 */
int scst_add_dev_threads(struct scst_device *dev, int num)
{
	int i, res = 0;
	int n = 0;	/* count of threads already on dev->threads_list */
	struct scst_cmd_thread_t *thr;
	struct io_context *ioc = NULL;
	char nm[12];	/* truncated handler name used in thread names */

	TRACE_ENTRY();

	/* Count the existing threads so new ones get unique name suffixes */
	list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
		n++;
	}

	for (i = 0; i < num; i++) {
		thr = kmalloc(sizeof(*thr), GFP_KERNEL);
		if (!thr) {
			res = -ENOMEM;
			PRINT_ERROR("Failed to allocate thr %d", res);
			goto out;
		}
		/* Bounded copy; explicitly NUL-terminated on the next line */
		strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
		nm[ARRAY_SIZE(nm)-1] = '\0';
		thr->cmd_thread = kthread_create(scst_cmd_thread,
			&dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
		if (IS_ERR(thr->cmd_thread)) {
			res = PTR_ERR(thr->cmd_thread);
			PRINT_ERROR("kthread_create() failed: %d", res);
			kfree(thr);
			goto out;
		}

		list_add(&thr->thread_list_entry, &dev->threads_list);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
		/*
		 * It would be better to keep io_context in tgt_dev and
		 * dynamically assign it to the current thread on the IO
		 * submission time to let each initiator have own
		 * io_context. But, unfortunately, CFQ doesn't
		 * support if a task has dynamically switched
		 * io_context, it oopses on BUG_ON(!cic->dead_key) in
		 * cic_free_func(). So, we have to have the same io_context
		 * for all initiators.
		 */
		if (ioc == NULL) {
			/*
			 * One io_context allocated on the first iteration and
			 * shared by all threads created in this call.
			 * NOTE(review): alloc_io_context() may return NULL;
			 * the ioc_task_link() below looks unchecked - confirm.
			 */
			ioc = alloc_io_context(GFP_KERNEL, -1);
			TRACE_DBG("ioc %p (thr %d)", ioc, thr->cmd_thread->pid);
		}

		/* Drop the thread's default io_context, share ours instead */
		put_io_context(thr->cmd_thread->io_context);
		thr->cmd_thread->io_context = ioc_task_link(ioc);
		TRACE_DBG("Setting ioc %p on thr %d", ioc,
			thr->cmd_thread->pid);
#endif
#endif
		wake_up_process(thr->cmd_thread);
	}

out:
	/* Release this function's reference; threads keep their own links */
	put_io_context(ioc);

	TRACE_EXIT_RES(res);
	return res;
}
1146
1147 /* Called under scst_mutex and suspended activity */
1148 static int scst_create_dev_threads(struct scst_device *dev)
1149 {
1150         int res = 0;
1151         int threads_num;
1152
1153         TRACE_ENTRY();
1154
1155         if (dev->handler->threads_num <= 0)
1156                 goto out;
1157
1158         threads_num = dev->handler->threads_num;
1159
1160         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1161         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1162         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1163
1164         res = scst_add_dev_threads(dev, threads_num);
1165         if (res != 0)
1166                 goto out;
1167
1168         mutex_lock(&scst_suspend_mutex);
1169         list_add_tail(&dev->cmd_lists.lists_list_entry,
1170                 &scst_cmd_lists_list);
1171         mutex_unlock(&scst_suspend_mutex);
1172
1173         dev->p_cmd_lists = &dev->cmd_lists;
1174
1175 out:
1176         TRACE_EXIT_RES(res);
1177         return res;
1178 }
1179
1180 /* Called under scst_mutex */
1181 void scst_del_dev_threads(struct scst_device *dev, int num)
1182 {
1183         struct scst_cmd_thread_t *ct, *tmp;
1184         int i = 0;
1185
1186         TRACE_ENTRY();
1187
1188         list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1189                                 thread_list_entry) {
1190                 int rc = kthread_stop(ct->cmd_thread);
1191                 if (rc < 0)
1192                         TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1193                 list_del(&ct->thread_list_entry);
1194                 kfree(ct);
1195                 if ((num > 0) && (++i >= num))
1196                         break;
1197         }
1198
1199         TRACE_EXIT();
1200         return;
1201 }
1202
1203 /* Called under scst_mutex and suspended activity */
1204 static void scst_stop_dev_threads(struct scst_device *dev)
1205 {
1206         TRACE_ENTRY();
1207
1208         if (list_empty(&dev->threads_list))
1209                 goto out;
1210
1211         scst_del_dev_threads(dev, -1);
1212
1213         if (dev->p_cmd_lists == &dev->cmd_lists) {
1214                 mutex_lock(&scst_suspend_mutex);
1215                 list_del(&dev->cmd_lists.lists_list_entry);
1216                 mutex_unlock(&scst_suspend_mutex);
1217         }
1218
1219 out:
1220         TRACE_EXIT();
1221         return;
1222 }
1223
1224 /* The activity supposed to be suspended and scst_mutex held */
1225 int scst_assign_dev_handler(struct scst_device *dev,
1226         struct scst_dev_type *handler)
1227 {
1228         int res = 0;
1229         struct scst_tgt_dev *tgt_dev;
1230         LIST_HEAD(attached_tgt_devs);
1231
1232         TRACE_ENTRY();
1233
1234         sBUG_ON(handler == NULL);
1235
1236         if (dev->handler == handler)
1237                 goto out;
1238
1239         if (dev->handler && dev->handler->detach_tgt) {
1240                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1241                                 dev_tgt_dev_list_entry) {
1242                         TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1243                                 tgt_dev);
1244                         dev->handler->detach_tgt(tgt_dev);
1245                         TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1246                 }
1247         }
1248
1249         if (dev->handler && dev->handler->detach) {
1250                 TRACE_DBG("%s", "Calling dev handler's detach()");
1251                 dev->handler->detach(dev);
1252                 TRACE_DBG("%s", "Old handler's detach() returned");
1253         }
1254
1255         scst_stop_dev_threads(dev);
1256
1257         dev->handler = handler;
1258
1259         if (handler) {
1260                 res = scst_create_dev_threads(dev);
1261                 if (res != 0)
1262                         goto out_null;
1263         }
1264
1265         if (handler && handler->attach) {
1266                 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1267                 res = handler->attach(dev);
1268                 TRACE_DBG("New dev handler's attach() returned %d", res);
1269                 if (res != 0) {
1270                         PRINT_ERROR("New device handler's %s attach() "
1271                                 "failed: %d", handler->name, res);
1272                 }
1273                 goto out_thr_null;
1274         }
1275
1276         if (handler && handler->attach_tgt) {
1277                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1278                                 dev_tgt_dev_list_entry) {
1279                         TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1280                                 tgt_dev);
1281                         res = handler->attach_tgt(tgt_dev);
1282                         TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1283                         if (res != 0) {
1284                                 PRINT_ERROR("Device handler's %s attach_tgt() "
1285                                     "failed: %d", handler->name, res);
1286                                 goto out_err_detach_tgt;
1287                         }
1288                         list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1289                                 &attached_tgt_devs);
1290                 }
1291         }
1292
1293 out_thr_null:
1294         if (res != 0)
1295                 scst_stop_dev_threads(dev);
1296
1297 out_null:
1298         if (res != 0)
1299                 dev->handler = &scst_null_devtype;
1300
1301 out:
1302         TRACE_EXIT_RES(res);
1303         return res;
1304
1305 out_err_detach_tgt:
1306         if (handler && handler->detach_tgt) {
1307                 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1308                                  extra_tgt_dev_list_entry)
1309                 {
1310                         TRACE_DBG("Calling handler's detach_tgt(%p)",
1311                                 tgt_dev);
1312                         handler->detach_tgt(tgt_dev);
1313                         TRACE_DBG("%s", "Handler's detach_tgt() returned");
1314                 }
1315         }
1316         if (handler && handler->detach) {
1317                 TRACE_DBG("%s", "Calling handler's detach()");
1318                 handler->detach(dev);
1319                 TRACE_DBG("%s", "Handler's detach() returned");
1320         }
1321         goto out_null;
1322 }
1323
1324 int scst_cmd_threads_count(void)
1325 {
1326         int i;
1327
1328         /*
1329          * Just to lower the race window, when user can get just changed value
1330          */
1331         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1332         i = scst_threads_info.nr_cmd_threads;
1333         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1334         return i;
1335 }
1336
1337 static void scst_threads_info_init(void)
1338 {
1339         memset(&scst_threads_info, 0, sizeof(scst_threads_info));
1340         mutex_init(&scst_threads_info.cmd_threads_mutex);
1341         INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
1342 }
1343
1344 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1345 void __scst_del_cmd_threads(int num)
1346 {
1347         struct scst_cmd_thread_t *ct, *tmp;
1348         int i;
1349
1350         TRACE_ENTRY();
1351
1352         i = scst_threads_info.nr_cmd_threads;
1353         if (num <= 0 || num > i) {
1354                 PRINT_ERROR("can not del %d cmd threads from %d", num, i);
1355                 return;
1356         }
1357
1358         list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1359                                 thread_list_entry) {
1360                 int res;
1361
1362                 res = kthread_stop(ct->cmd_thread);
1363                 if (res < 0)
1364                         TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1365                 list_del(&ct->thread_list_entry);
1366                 kfree(ct);
1367                 scst_threads_info.nr_cmd_threads--;
1368                 --num;
1369                 if (num == 0)
1370                         break;
1371         }
1372
1373         TRACE_EXIT();
1374         return;
1375 }
1376
1377 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1378 int __scst_add_cmd_threads(int num)
1379 {
1380         int res = 0, i;
1381         static int scst_thread_num;
1382
1383         TRACE_ENTRY();
1384
1385         for (i = 0; i < num; i++) {
1386                 struct scst_cmd_thread_t *thr;
1387
1388                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1389                 if (!thr) {
1390                         res = -ENOMEM;
1391                         PRINT_ERROR("fail to allocate thr %d", res);
1392                         goto out_error;
1393                 }
1394                 thr->cmd_thread = kthread_create(scst_cmd_thread,
1395                         &scst_main_cmd_lists, "scsi_tgt%d",
1396                         scst_thread_num++);
1397                 if (IS_ERR(thr->cmd_thread)) {
1398                         res = PTR_ERR(thr->cmd_thread);
1399                         PRINT_ERROR("kthread_create() failed: %d", res);
1400                         kfree(thr);
1401                         goto out_error;
1402                 }
1403
1404                 list_add(&thr->thread_list_entry,
1405                         &scst_threads_info.cmd_threads_list);
1406                 scst_threads_info.nr_cmd_threads++;
1407
1408 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1409 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1410                 /* See comment in scst_add_dev_threads() */
1411                 if (scst_ioc == NULL) {
1412                         scst_ioc = alloc_io_context(GFP_KERNEL, -1);
1413                         TRACE_DBG("scst_ioc %p (thr %d)", scst_ioc,
1414                                 thr->cmd_thread->pid);
1415                 }
1416
1417                 put_io_context(thr->cmd_thread->io_context);
1418                 thr->cmd_thread->io_context = ioc_task_link(scst_ioc);
1419                 TRACE_DBG("Setting scst_ioc %p on thr %d",
1420                         scst_ioc, thr->cmd_thread->pid);
1421 #endif
1422 #endif
1423                 wake_up_process(thr->cmd_thread);
1424         }
1425         res = 0;
1426
1427 out:
1428         TRACE_EXIT_RES(res);
1429         return res;
1430
1431 out_error:
1432         if (i > 0)
1433                 __scst_del_cmd_threads(i - 1);
1434         goto out;
1435 }
1436
1437 int scst_add_cmd_threads(int num)
1438 {
1439         int res;
1440
1441         TRACE_ENTRY();
1442
1443         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1444         res = __scst_add_cmd_threads(num);
1445         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1446
1447         TRACE_EXIT_RES(res);
1448         return res;
1449 }
1450 EXPORT_SYMBOL(scst_add_cmd_threads);
1451
/* Locked wrapper around __scst_del_cmd_threads(); see that function */
void scst_del_cmd_threads(int num)
{
	TRACE_ENTRY();

	mutex_lock(&scst_threads_info.cmd_threads_mutex);
	__scst_del_cmd_threads(num);
	mutex_unlock(&scst_threads_info.cmd_threads_mutex);

	TRACE_EXIT();
	return;
}
1463 EXPORT_SYMBOL(scst_del_cmd_threads);
1464
1465 static void scst_stop_all_threads(void)
1466 {
1467         TRACE_ENTRY();
1468
1469         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1470         __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1471         if (scst_threads_info.mgmt_cmd_thread)
1472                 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1473         if (scst_threads_info.mgmt_thread)
1474                 kthread_stop(scst_threads_info.mgmt_thread);
1475         if (scst_threads_info.init_cmd_thread)
1476                 kthread_stop(scst_threads_info.init_cmd_thread);
1477         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1478
1479         TRACE_EXIT();
1480         return;
1481 }
1482
/*
 * Starts "num" command threads plus the three service threads (init cmd,
 * mgmt cmd, mgmt).  Returns 0 on success, negative error code otherwise.
 * On partial failure the already started threads are left running; the
 * caller (init_scst()) cleans up via scst_stop_all_threads().
 */
static int scst_start_all_threads(int num)
{
	int res;

	TRACE_ENTRY();

	mutex_lock(&scst_threads_info.cmd_threads_mutex);
	res = __scst_add_cmd_threads(num);
	if (res < 0)
		goto out;

	scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
		NULL, "scsi_tgt_init");
	if (IS_ERR(scst_threads_info.init_cmd_thread)) {
		res = PTR_ERR(scst_threads_info.init_cmd_thread);
		PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
		/* Reset to NULL so scst_stop_all_threads() skips it */
		scst_threads_info.init_cmd_thread = NULL;
		goto out;
	}

	scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
		NULL, "scsi_tgt_mc");
	if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
		res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
		PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
		/* Reset to NULL so scst_stop_all_threads() skips it */
		scst_threads_info.mgmt_cmd_thread = NULL;
		goto out;
	}

	scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
		NULL, "scsi_tgt_mgmt");
	if (IS_ERR(scst_threads_info.mgmt_thread)) {
		res = PTR_ERR(scst_threads_info.mgmt_thread);
		PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
		/* Reset to NULL so scst_stop_all_threads() skips it */
		scst_threads_info.mgmt_thread = NULL;
		goto out;
	}

out:
	mutex_unlock(&scst_threads_info.cmd_threads_mutex);
	TRACE_EXIT_RES(res);
	return res;
}
1526
/* Exported wrapper around __scst_get(0); see __scst_get() for semantics */
void scst_get(void)
{
	__scst_get(0);
}
1531 EXPORT_SYMBOL(scst_get);
1532
/* Exported wrapper around __scst_put(); see __scst_put() for semantics */
void scst_put(void)
{
	__scst_put();
}
1537 EXPORT_SYMBOL(scst_put);
1538
1539 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1540 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1541 #else
1542 static int scst_add(struct device *cdev, struct class_interface *intf)
1543 #endif
1544 {
1545         struct scsi_device *scsidp;
1546         int res = 0;
1547
1548         TRACE_ENTRY();
1549
1550 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1551         scsidp = to_scsi_device(cdev->dev);
1552 #else
1553         scsidp = to_scsi_device(cdev->parent);
1554 #endif
1555
1556         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1557                 res = scst_register_device(scsidp);
1558
1559         TRACE_EXIT();
1560         return res;
1561 }
1562
1563 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1564 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1565 #else
1566 static void scst_remove(struct device *cdev, struct class_interface *intf)
1567 #endif
1568 {
1569         struct scsi_device *scsidp;
1570
1571         TRACE_ENTRY();
1572
1573 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1574         scsidp = to_scsi_device(cdev->dev);
1575 #else
1576         scsidp = to_scsi_device(cdev->parent);
1577 #endif
1578
1579         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1580                 scst_unregister_device(scsidp);
1581
1582         TRACE_EXIT();
1583         return;
1584 }
1585
/*
 * Hooks registered with the SCSI mid-layer (via scsi_register_interface())
 * so SCST learns about SCSI device add/remove events; the field names of
 * struct class_interface changed in kernel 2.6.26.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
static struct class_interface scst_interface = {
	.add = scst_add,
	.remove = scst_remove,
};
#else
static struct class_interface scst_interface = {
	.add_dev = scst_add,
	.remove_dev = scst_remove,
};
#endif
1597
1598 static void __init scst_print_config(void)
1599 {
1600         char buf[128];
1601         int i, j;
1602
1603         i = snprintf(buf, sizeof(buf), "Enabled features: ");
1604         j = i;
1605
1606 #ifdef CONFIG_SCST_STRICT_SERIALIZING
1607         i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
1608 #endif
1609
1610 #ifdef CONFIG_SCST_EXTRACHECKS
1611         i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
1612                 (j == i) ? "" : ", ");
1613 #endif
1614
1615 #ifdef CONFIG_SCST_TRACING
1616         i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
1617                 (j == i) ? "" : ", ");
1618 #endif
1619
1620 #ifdef CONFIG_SCST_DEBUG
1621         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
1622                 (j == i) ? "" : ", ");
1623 #endif
1624
1625 #ifdef CONFIG_SCST_DEBUG_TM
1626         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
1627                 (j == i) ? "" : ", ");
1628 #endif
1629
1630 #ifdef CONFIG_SCST_DEBUG_RETRY
1631         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
1632                 (j == i) ? "" : ", ");
1633 #endif
1634
1635 #ifdef CONFIG_SCST_DEBUG_OOM
1636         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
1637                 (j == i) ? "" : ", ");
1638 #endif
1639
1640 #ifdef CONFIG_SCST_DEBUG_SN
1641         i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
1642                 (j == i) ? "" : ", ");
1643 #endif
1644
1645 #ifdef CONFIG_SCST_USE_EXPECTED_VALUES
1646         i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
1647                 (j == i) ? "" : ", ");
1648 #endif
1649
1650 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
1651         i += snprintf(&buf[i], sizeof(buf) - i,
1652                 "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
1653                 (j == i) ? "" : ", ");
1654 #endif
1655
1656 #ifdef CONFIG_SCST_STRICT_SECURITY
1657         i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
1658                 (j == i) ? "" : ", ");
1659 #endif
1660
1661         if (j != i)
1662                 PRINT_INFO("%s", buf);
1663 }
1664
1665 static int __init init_scst(void)
1666 {
1667         int res = 0, i;
1668         int scst_num_cpus;
1669
1670         TRACE_ENTRY();
1671
1672 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
1673         {
1674                 struct scsi_request *req;
1675                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
1676                         sizeof(req->sr_sense_buffer));
1677         }
1678 #else
1679         {
1680                 struct scsi_sense_hdr *shdr;
1681                 BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
1682         }
1683 #endif
1684         {
1685                 struct scst_tgt_dev *t;
1686                 struct scst_cmd *c;
1687                 BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
1688                 BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
1689         }
1690
1691         BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
1692         BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
1693         BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
1694         BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);
1695
1696         mutex_init(&scst_mutex);
1697         INIT_LIST_HEAD(&scst_template_list);
1698         INIT_LIST_HEAD(&scst_dev_list);
1699         INIT_LIST_HEAD(&scst_dev_type_list);
1700         spin_lock_init(&scst_main_lock);
1701         INIT_LIST_HEAD(&scst_acg_list);
1702         spin_lock_init(&scst_init_lock);
1703         init_waitqueue_head(&scst_init_cmd_list_waitQ);
1704         INIT_LIST_HEAD(&scst_init_cmd_list);
1705 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
1706         scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
1707 #endif
1708         atomic_set(&scst_cmd_count, 0);
1709         spin_lock_init(&scst_mcmd_lock);
1710         INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
1711         INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
1712         init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
1713         init_waitqueue_head(&scst_mgmt_waitQ);
1714         spin_lock_init(&scst_mgmt_lock);
1715         INIT_LIST_HEAD(&scst_sess_init_list);
1716         INIT_LIST_HEAD(&scst_sess_shut_list);
1717         init_waitqueue_head(&scst_dev_cmd_waitQ);
1718         mutex_init(&scst_suspend_mutex);
1719         INIT_LIST_HEAD(&scst_cmd_lists_list);
1720         scst_virt_dev_last_id = 1;
1721         spin_lock_init(&scst_temp_UA_lock);
1722
1723         spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
1724         INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
1725         init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
1726         list_add_tail(&scst_main_cmd_lists.lists_list_entry,
1727                 &scst_cmd_lists_list);
1728
1729         scst_num_cpus = num_online_cpus();
1730
1731         /* ToDo: register_cpu_notifier() */
1732
1733         if (scst_threads == 0)
1734                 scst_threads = scst_num_cpus;
1735
1736         if (scst_threads < 1) {
1737                 PRINT_ERROR("%s", "scst_threads can not be less than 1");
1738                 scst_threads = scst_num_cpus;
1739         }
1740
1741         scst_threads_info_init();
1742
1743 #define INIT_CACHEP(p, s, o) do {                                       \
1744                 p = KMEM_CACHE(s, SCST_SLAB_FLAGS);                     \
1745                 TRACE_MEM("Slab create: %s at %p size %zd", #s, p,      \
1746                           sizeof(struct s));                            \
1747                 if (p == NULL) {                                        \
1748                         res = -ENOMEM;                                  \
1749                         goto o;                                         \
1750                 }                                                       \
1751         } while (0)
1752
1753         INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
1754         INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
1755                         out_destroy_mgmt_cache);
1756         INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
1757                         out_destroy_mgmt_stub_cache);
1758         {
1759                 struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
1760                 INIT_CACHEP(scst_sense_cachep, scst_sense,
1761                             out_destroy_ua_cache);
1762         }
1763         INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
1764         INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
1765         INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
1766         INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
1767
1768         scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
1769                 mempool_free_slab, scst_mgmt_cachep);
1770         if (scst_mgmt_mempool == NULL) {
1771                 res = -ENOMEM;
1772                 goto out_destroy_acg_cache;
1773         }
1774
1775         scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
1776                 mempool_free_slab, scst_mgmt_stub_cachep);
1777         if (scst_mgmt_stub_mempool == NULL) {
1778                 res = -ENOMEM;
1779                 goto out_destroy_mgmt_mempool;
1780         }
1781
1782         scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
1783                 mempool_free_slab, scst_ua_cachep);
1784         if (scst_ua_mempool == NULL) {
1785                 res = -ENOMEM;
1786                 goto out_destroy_mgmt_stub_mempool;
1787         }
1788
1789         /*
1790          * Loosing sense may have fatal consequences, so let's have a big pool
1791          */
1792         scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
1793                 mempool_free_slab, scst_sense_cachep);
1794         if (scst_sense_mempool == NULL) {
1795                 res = -ENOMEM;
1796                 goto out_destroy_ua_mempool;
1797         }
1798
1799         if (scst_max_cmd_mem == 0) {
1800                 struct sysinfo si;
1801                 si_meminfo(&si);
1802 #if BITS_PER_LONG == 32
1803                 scst_max_cmd_mem = min(
1804                         (((uint64_t)si.totalram << PAGE_SHIFT) >> 20) >> 2,
1805                         (uint64_t)1 << 30);
1806 #else
1807                 scst_max_cmd_mem = ((si.totalram << PAGE_SHIFT) >> 20) >> 2;
1808 #endif
1809         }
1810
1811         if (scst_max_dev_cmd_mem != 0) {
1812                 if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
1813                         PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
1814                                 "scst_max_cmd_mem (%d)",
1815                                 scst_max_dev_cmd_mem,
1816                                 scst_max_cmd_mem);
1817                         scst_max_dev_cmd_mem = scst_max_cmd_mem;
1818                 }
1819         } else
1820                 scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
1821
1822         res = scst_sgv_pools_init(
1823                 ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
1824         if (res != 0)
1825                 goto out_destroy_sense_mempool;
1826
1827         scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
1828         if (scst_default_acg == NULL) {
1829                 res = -ENOMEM;
1830                 goto out_destroy_sgv_pool;
1831         }
1832
1833         res = scsi_register_interface(&scst_interface);
1834         if (res != 0)
1835                 goto out_free_acg;
1836
1837         scst_scsi_op_list_init();
1838
1839         for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
1840                 spin_lock_init(&scst_tasklets[i].tasklet_lock);
1841                 INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
1842                 tasklet_init(&scst_tasklets[i].tasklet,
1843                              (void *)scst_cmd_tasklet,
1844                              (unsigned long)&scst_tasklets[i]);
1845         }
1846
1847         TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
1848                 scst_threads);
1849
1850         res = scst_start_all_threads(scst_threads);
1851         if (res < 0)
1852                 goto out_thread_free;
1853
1854         res = scst_proc_init_module();
1855         if (res != 0)
1856                 goto out_thread_free;
1857
1858
1859         PRINT_INFO("SCST version %s loaded successfully (max mem for "
1860                 "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
1861                 scst_max_cmd_mem, scst_max_dev_cmd_mem);
1862
1863         scst_print_config();
1864
1865 out:
1866         TRACE_EXIT_RES(res);
1867         return res;
1868
1869 out_thread_free:
1870         scst_stop_all_threads();
1871
1872         scsi_unregister_interface(&scst_interface);
1873
1874 out_free_acg:
1875         scst_destroy_acg(scst_default_acg);
1876
1877 out_destroy_sgv_pool:
1878         scst_sgv_pools_deinit();
1879
1880 out_destroy_sense_mempool:
1881         mempool_destroy(scst_sense_mempool);
1882
1883 out_destroy_ua_mempool:
1884         mempool_destroy(scst_ua_mempool);
1885
1886 out_destroy_mgmt_stub_mempool:
1887         mempool_destroy(scst_mgmt_stub_mempool);
1888
1889 out_destroy_mgmt_mempool:
1890         mempool_destroy(scst_mgmt_mempool);
1891
1892 out_destroy_acg_cache:
1893         kmem_cache_destroy(scst_acgd_cachep);
1894
1895 out_destroy_tgt_cache:
1896         kmem_cache_destroy(scst_tgtd_cachep);
1897
1898 out_destroy_sess_cache:
1899         kmem_cache_destroy(scst_sess_cachep);
1900
1901 out_destroy_cmd_cache:
1902         kmem_cache_destroy(scst_cmd_cachep);
1903
1904 out_destroy_sense_cache:
1905         kmem_cache_destroy(scst_sense_cachep);
1906
1907 out_destroy_ua_cache:
1908         kmem_cache_destroy(scst_ua_cachep);
1909
1910 out_destroy_mgmt_stub_cache:
1911         kmem_cache_destroy(scst_mgmt_stub_cachep);
1912
1913 out_destroy_mgmt_cache:
1914         kmem_cache_destroy(scst_mgmt_cachep);
1915         goto out;
1916 }
1917
1918 static void __exit exit_scst(void)
1919 {
1920         TRACE_ENTRY();
1921
1922         /* ToDo: unregister_cpu_notifier() */
1923
1924         scst_proc_cleanup_module();
1925
1926         scst_stop_all_threads();
1927
1928         scsi_unregister_interface(&scst_interface);
1929         scst_destroy_acg(scst_default_acg);
1930
1931         scst_sgv_pools_deinit();
1932
1933 #define DEINIT_CACHEP(p) do {           \
1934                 kmem_cache_destroy(p);  \
1935                 p = NULL;               \
1936         } while (0)
1937
1938         mempool_destroy(scst_mgmt_mempool);
1939         mempool_destroy(scst_mgmt_stub_mempool);
1940         mempool_destroy(scst_ua_mempool);
1941         mempool_destroy(scst_sense_mempool);
1942
1943         DEINIT_CACHEP(scst_mgmt_cachep);
1944         DEINIT_CACHEP(scst_mgmt_stub_cachep);
1945         DEINIT_CACHEP(scst_ua_cachep);
1946         DEINIT_CACHEP(scst_sense_cachep);
1947         DEINIT_CACHEP(scst_cmd_cachep);
1948         DEINIT_CACHEP(scst_sess_cachep);
1949         DEINIT_CACHEP(scst_tgtd_cachep);
1950         DEINIT_CACHEP(scst_acgd_cachep);
1951
1952 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1953 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1954         put_io_context(scst_ioc);
1955 #endif
1956 #endif
1957
1958         PRINT_INFO("%s", "SCST unloaded");
1959
1960         TRACE_EXIT();
1961         return;
1962 }
1963
1964
/* Register the module entry/exit points with the kernel */
module_init(init_scst);
module_exit(exit_scst);

MODULE_AUTHOR("Vladislav Bolkhovitin");
MODULE_LICENSE("GPL"); /* GPL: required for the GPL-only kernel exports used above */
MODULE_DESCRIPTION("SCSI target core");
MODULE_VERSION(SCST_VERSION_STRING);