1 /*
2  *  scst_main.c
3  *
4  *  Copyright (C) 2004 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2004 - 2005 Leonid Stoljar
6  *  Copyright (C) 2007 - 2008 CMS Distribution Limited
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/module.h>
20
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/unistd.h>
29 #include <linux/string.h>
30 #include <linux/kthread.h>
31
32 #include "scst.h"
33 #include "scst_priv.h"
34 #include "scst_mem.h"
35
36 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
37 #warning "HIGHMEM kernel configurations are fully supported, but not \
38         recommended for performance reasons. Consider changing the VMSPLIT \
39         option or using a 64-bit configuration instead. See the README file \
40         for details."
41 #endif
42
43 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
44     !defined(CONFIG_SCST_STRICT_SERIALIZING)
45 #warning "Patch scst_exec_req_fifo-<kernel-version>.patch was not applied to \
46         your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined. \
47         Pass-through dev handlers will not be supported."
48 #endif
49
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
51 #if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
52 #warning "Patch export_alloc_io_context-<kernel-version>.patch was not applied \
53         to your kernel. SCST will run with reduced performance."
54 #endif
55 #endif
56
57 /**
58  ** SCST global variables. They are all left uninitialized so that their
59  ** layout in memory is exactly as declared. Otherwise the compiler places
60  ** zero-initialized variables separately from nonzero-initialized ones.
61  **/
62
63 /*
64  * All management of targets, devices and dev_types is done under this mutex.
65  *
66  * It must NOT be taken from work items (schedule_work(), etc.), because
67  * otherwise a deadlock (a double lock, actually) is possible, e.g., with
68  * scst_user detach_tgt(), which is called under scst_mutex and calls
69  * flush_scheduled_work().
70  */
71 struct mutex scst_mutex;
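
/*
 * Illustrative sketch (not part of the build) of the forbidden pattern the
 * comment above warns about. bad_work_fn() is a hypothetical work item:
 *
 *        static void bad_work_fn(struct work_struct *work)
 *        {
 *                mutex_lock(&scst_mutex);        NOT allowed from a work item
 *                ...
 *                mutex_unlock(&scst_mutex);
 *        }
 *
 * If a path that already holds scst_mutex (e.g. scst_user's detach_tgt())
 * then calls flush_scheduled_work(), it waits for bad_work_fn(), which in
 * turn waits for scst_mutex: a double-lock deadlock.
 */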
72
73 /* All 3 lists below are protected by scst_mutex */
74 static struct list_head scst_template_list;
75 struct list_head scst_dev_list;
76 struct list_head scst_dev_type_list;
77
78 spinlock_t scst_main_lock;
79
80 static struct kmem_cache *scst_mgmt_cachep;
81 mempool_t *scst_mgmt_mempool;
82 static struct kmem_cache *scst_mgmt_stub_cachep;
83 mempool_t *scst_mgmt_stub_mempool;
84 static struct kmem_cache *scst_ua_cachep;
85 mempool_t *scst_ua_mempool;
86 static struct kmem_cache *scst_sense_cachep;
87 mempool_t *scst_sense_mempool;
88 struct kmem_cache *scst_tgtd_cachep;
89 struct kmem_cache *scst_sess_cachep;
90 struct kmem_cache *scst_acgd_cachep;
91
92 struct list_head scst_acg_list;
93 struct scst_acg *scst_default_acg;
94
95 spinlock_t scst_init_lock;
96 wait_queue_head_t scst_init_cmd_list_waitQ;
97 struct list_head scst_init_cmd_list;
98 unsigned int scst_init_poll_cnt;
99
100 struct kmem_cache *scst_cmd_cachep;
101
102 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
103 unsigned long scst_trace_flag;
104 #endif
105
106 unsigned long scst_flags;
107 atomic_t scst_cmd_count;
108
109 struct scst_cmd_lists scst_main_cmd_lists;
110
111 struct scst_tasklet scst_tasklets[NR_CPUS];
112
113 spinlock_t scst_mcmd_lock;
114 struct list_head scst_active_mgmt_cmd_list;
115 struct list_head scst_delayed_mgmt_cmd_list;
116 wait_queue_head_t scst_mgmt_cmd_list_waitQ;
117
118 wait_queue_head_t scst_mgmt_waitQ;
119 spinlock_t scst_mgmt_lock;
120 struct list_head scst_sess_init_list;
121 struct list_head scst_sess_shut_list;
122
123 wait_queue_head_t scst_dev_cmd_waitQ;
124
125 static struct mutex scst_suspend_mutex;
126 /* protected by scst_suspend_mutex */
127 static struct list_head scst_cmd_lists_list;
128
129 static int scst_threads;
130 struct scst_threads_info_t scst_threads_info;
131
132 static int suspend_count;
133
134 static int scst_virt_dev_last_id; /* protected by scst_mutex */
135
136 /*
137  * This buffer and its lock exist to avoid memory allocations in places
138  * where an allocation failure could not be handled.
139  */
140 spinlock_t scst_temp_UA_lock;
141 uint8_t scst_temp_UA[SCST_SENSE_BUFFERSIZE];
142
143 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
144 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
145 static struct io_context *scst_ioc;
146 #endif
147 #endif
148
149 static unsigned int scst_max_cmd_mem;
150 unsigned int scst_max_dev_cmd_mem;
151
152 module_param_named(scst_threads, scst_threads, int, 0);
153 MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
154
155 module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, 0);
156 MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
157         "all SCSI commands of all devices at any given time in MB");
158
159 module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, 0);
160 MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
161         "by all SCSI commands of a device at any given time in MB");
162
163 struct scst_dev_type scst_null_devtype = {
164         .name = "none",
165 };
166
167 static void __scst_resume_activity(void);
168
169 int __scst_register_target_template(struct scst_tgt_template *vtt,
170         const char *version)
171 {
172         int res = 0;
173         struct scst_tgt_template *t;
174         static DEFINE_MUTEX(m);
175
176         TRACE_ENTRY();
177
178         INIT_LIST_HEAD(&vtt->tgt_list);
179
180         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
181                 PRINT_ERROR("Incorrect version of target driver %s", vtt->name);
182                 res = -EINVAL;
183                 goto out_err;
184         }
185
186         if (!vtt->detect) {
187                 PRINT_ERROR("Target driver %s doesn't have a "
188                         "detect() method.", vtt->name);
189                 res = -EINVAL;
190                 goto out_err;
191         }
192
193         if (!vtt->release) {
194                 PRINT_ERROR("Target driver %s doesn't have a "
195                         "release() method.", vtt->name);
196                 res = -EINVAL;
197                 goto out_err;
198         }
199
200         if (!vtt->xmit_response) {
201                 PRINT_ERROR("Target driver %s doesn't have an "
202                         "xmit_response() method.", vtt->name);
203                 res = -EINVAL;
204                 goto out_err;
205         }
206
207         if (vtt->threads_num < 0) {
208                 PRINT_ERROR("Wrong threads_num value %d for "
209                         "target \"%s\"", vtt->threads_num,
210                         vtt->name);
211                 res = -EINVAL;
212                 goto out_err;
213         }
214
215         if (!vtt->no_proc_entry) {
216                 res = scst_build_proc_target_dir_entries(vtt);
217                 if (res < 0)
218                         goto out_err;
219         }
220
221         if (vtt->rdy_to_xfer == NULL)
222                 vtt->rdy_to_xfer_atomic = 1;
223
224         if (mutex_lock_interruptible(&m) != 0)
225                 goto out_err;
226
227         if (mutex_lock_interruptible(&scst_mutex) != 0)
228                 goto out_m_up;
229         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
230                 if (strcmp(t->name, vtt->name) == 0) {
231                         PRINT_ERROR("Target driver %s already registered",
232                                 vtt->name);
                        res = -EEXIST; /* don't report success for a duplicate */
233                         mutex_unlock(&scst_mutex);
234                         goto out_cleanup;
235                 }
236         }
237         mutex_unlock(&scst_mutex);
238
239         TRACE_DBG("%s", "Calling target driver's detect()");
240         res = vtt->detect(vtt);
241         TRACE_DBG("Target driver's detect() returned %d", res);
242         if (res < 0) {
243                 PRINT_ERROR("%s", "The detect() routine failed");
244                 res = -EINVAL;
245                 goto out_cleanup;
246         }
247
248         mutex_lock(&scst_mutex);
249         list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
250         mutex_unlock(&scst_mutex);
251
252         res = 0;
253
254         PRINT_INFO("Target template %s registered successfully", vtt->name);
255
256         mutex_unlock(&m);
257
258 out:
259         TRACE_EXIT_RES(res);
260         return res;
261
262 out_cleanup:
263         scst_cleanup_proc_target_dir_entries(vtt);
264
265 out_m_up:
266         mutex_unlock(&m);
267
268 out_err:
269         PRINT_ERROR("Failed to register target template %s", vtt->name);
270         goto out;
271 }
272 EXPORT_SYMBOL(__scst_register_target_template);
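
/*
 * Illustrative sketch (not part of the build): a minimal target template
 * registration. Only the fields validated above (name, detect, release,
 * xmit_response) are shown; detect() is invoked as vtt->detect(vtt) above and
 * release() as tgt->tgtt->release(tgt) in scst_unregister(). The callback
 * signatures and the scst_register_target_template() wrapper that passes
 * SCST_INTERFACE_VERSION are defined in scst.h; treat the names below as
 * placeholders.
 *
 *        static struct scst_tgt_template example_tgt_template = {
 *                .name = "example_tgt",
 *                .detect = example_detect,
 *                .release = example_release,
 *                .xmit_response = example_xmit_response,
 *        };
 *
 *        static int __init example_tgt_init(void)
 *        {
 *                return __scst_register_target_template(&example_tgt_template,
 *                                                       SCST_INTERFACE_VERSION);
 *        }
 */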
273
274 void scst_unregister_target_template(struct scst_tgt_template *vtt)
275 {
276         struct scst_tgt *tgt;
277         struct scst_tgt_template *t;
278         int found = 0;
279
280         TRACE_ENTRY();
281
282         mutex_lock(&scst_mutex);
283
284         list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
285                 if (strcmp(t->name, vtt->name) == 0) {
286                         found = 1;
287                         break;
288                 }
289         }
290         if (!found) {
291                 PRINT_ERROR("Target driver %s isn't registered", vtt->name);
292                 goto out_up;
293         }
294
295 restart:
296         list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
297                 mutex_unlock(&scst_mutex);
298                 scst_unregister(tgt);
299                 mutex_lock(&scst_mutex);
300                 goto restart;
301         }
302         list_del(&vtt->scst_template_list_entry);
303
304         PRINT_INFO("Target template %s unregistered successfully", vtt->name);
305
306 out_up:
307         mutex_unlock(&scst_mutex);
308
309         scst_cleanup_proc_target_dir_entries(vtt);
310
311         TRACE_EXIT();
312         return;
313 }
314 EXPORT_SYMBOL(scst_unregister_target_template);
315
316 struct scst_tgt *scst_register(struct scst_tgt_template *vtt,
317         const char *target_name)
318 {
319         struct scst_tgt *tgt;
320         int rc = 0;
321
322         TRACE_ENTRY();
323
324         tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
325         if (tgt == NULL) {
326                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
327                 rc = -ENOMEM;
328                 goto out_err;
329         }
330
331         INIT_LIST_HEAD(&tgt->sess_list);
332         init_waitqueue_head(&tgt->unreg_waitQ);
333         tgt->tgtt = vtt;
334         tgt->sg_tablesize = vtt->sg_tablesize;
335         spin_lock_init(&tgt->tgt_lock);
336         INIT_LIST_HEAD(&tgt->retry_cmd_list);
337         atomic_set(&tgt->finished_cmds, 0);
338         init_timer(&tgt->retry_timer);
339         tgt->retry_timer.data = (unsigned long)tgt;
340         tgt->retry_timer.function = scst_tgt_retry_timer_fn;
341
342         rc = scst_suspend_activity(true);
343         if (rc != 0)
344                 goto out_free_tgt_err;
345
346         if (mutex_lock_interruptible(&scst_mutex) != 0) {
347                 rc = -EINTR;
348                 goto out_resume_free;
349         }
350
351         if (target_name != NULL) {
352                 int len = strlen(target_name) + 1 +
353                         strlen(SCST_DEFAULT_ACG_NAME) + 1;
354
355                 tgt->default_group_name = kmalloc(len, GFP_KERNEL);
356                 if (tgt->default_group_name == NULL) {
357                         TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of default "
358                                 "group name failed");
359                         rc = -ENOMEM;
360                         goto out_unlock_resume;
361                 }
362                 sprintf(tgt->default_group_name, "%s_%s", SCST_DEFAULT_ACG_NAME,
363                         target_name);
364         }
365
366         rc = scst_build_proc_target_entries(tgt);
367         if (rc < 0)
368                 goto out_free_name;
369         else
370                 list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
371
372         mutex_unlock(&scst_mutex);
373         scst_resume_activity();
374
375         PRINT_INFO("Target %s (%p) for template %s registered successfully",
376                 target_name, tgt, vtt->name);
377
378 out:
379         TRACE_EXIT();
380         return tgt;
381
382 out_free_name:
383         kfree(tgt->default_group_name);
384
385 out_unlock_resume:
386         mutex_unlock(&scst_mutex);
387
388 out_resume_free:
389         scst_resume_activity();
390
391 out_free_tgt_err:
392         kfree(tgt);
393         tgt = NULL;
394
395 out_err:
396         PRINT_ERROR("Failed to register target %s for template %s (error %d)",
397                 target_name, vtt->name, rc);
398         goto out;
399 }
400 EXPORT_SYMBOL(scst_register);
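
/*
 * Illustrative sketch (not part of the build): creating and destroying a
 * target instance for a registered template. scst_register() returns the new
 * scst_tgt pointer on success and NULL on failure (the exact cause is only
 * reported in the kernel log). example_tgt_template and the target name are
 * placeholders.
 *
 *        struct scst_tgt *tgt;
 *
 *        tgt = scst_register(&example_tgt_template, "example_target_0");
 *        if (tgt == NULL)
 *                return -ENOMEM;
 *        ...
 *        scst_unregister(tgt);
 */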
401
402 static inline int test_sess_list(struct scst_tgt *tgt)
403 {
404         int res;
405         mutex_lock(&scst_mutex);
406         res = list_empty(&tgt->sess_list);
407         mutex_unlock(&scst_mutex);
408         return res;
409 }
410
411 void scst_unregister(struct scst_tgt *tgt)
412 {
413         struct scst_session *sess;
414         struct scst_tgt_template *vtt = tgt->tgtt;
415
416         TRACE_ENTRY();
417
418         TRACE_DBG("%s", "Calling target driver's release()");
419         tgt->tgtt->release(tgt);
420         TRACE_DBG("%s", "Target driver's release() returned");
421
422         mutex_lock(&scst_mutex);
423         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
424                 sBUG_ON(sess->shut_phase == SCST_SESS_SPH_READY);
425         }
426         mutex_unlock(&scst_mutex);
427
428         TRACE_DBG("%s", "Waiting for sessions shutdown");
429         wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
430         TRACE_DBG("%s", "wait_event() returned");
431
432         scst_suspend_activity(false);
433         mutex_lock(&scst_mutex);
434
435         list_del(&tgt->tgt_list_entry);
436
437         scst_cleanup_proc_target_entries(tgt);
438
439         kfree(tgt->default_group_name);
440
441         mutex_unlock(&scst_mutex);
442         scst_resume_activity();
443
444         del_timer_sync(&tgt->retry_timer);
445
446         PRINT_INFO("Target %p for template %s unregistered successfully",
447                 tgt, vtt->name);
448
449         kfree(tgt);
450
451         TRACE_EXIT();
452         return;
453 }
454 EXPORT_SYMBOL(scst_unregister);
455
456 static int scst_susp_wait(bool interruptible)
457 {
458         int res = 0;
459
460         TRACE_ENTRY();
461
462         if (interruptible) {
463                 res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
464                         (atomic_read(&scst_cmd_count) == 0),
465                         SCST_SUSPENDING_TIMEOUT);
466                 if (res <= 0) {
467                         __scst_resume_activity();
468                         if (res == 0)
469                                 res = -EBUSY;
470                 } else
471                         res = 0;
472         } else
473                 wait_event(scst_dev_cmd_waitQ,
474                            atomic_read(&scst_cmd_count) == 0);
475
476         TRACE_MGMT_DBG("wait_event() returned %d", res);
477
478         TRACE_EXIT_RES(res);
479         return res;
480 }
481
482 int scst_suspend_activity(bool interruptible)
483 {
484         int res = 0;
485         bool rep = false;
486
487         TRACE_ENTRY();
488
489         if (interruptible) {
490                 if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
491                         res = -EINTR;
492                         goto out;
493                 }
494         } else
495                 mutex_lock(&scst_suspend_mutex);
496
497         TRACE_MGMT_DBG("suspend_count %d", suspend_count);
498         suspend_count++;
499         if (suspend_count > 1)
500                 goto out_up;
501
502         set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
503         set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
504         smp_mb__after_set_bit();
505
506         /*
507          * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
508          * information about scst_user behavior.
509          *
510          * ToDo: make the global suspend unnecessary (switch to per-device
511          * reference counting? That would mean giving up the lockless
512          * implementation of scst_translate_lun()...)
513          */
514
515         if (atomic_read(&scst_cmd_count) != 0) {
516                 PRINT_INFO("Waiting for %d active commands to complete... This "
517                         "might take a few minutes for disks or a few hours for "
518                         "tapes, if you use long-running commands, like "
519                         "REWIND or FORMAT. If you have a hung user "
520                         "space device (e.g., one made using the scst_user module) "
521                         "not responding to any commands, it might take virtually "
522                         "forever until the corresponding user space "
523                         "program recovers and starts responding or gets "
524                         "killed.", atomic_read(&scst_cmd_count));
525                 rep = true;
526         }
527
528         res = scst_susp_wait(interruptible);
529         if (res != 0)
530                 goto out_clear;
531
532         clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
533         smp_mb__after_clear_bit();
534
535         TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
536                 atomic_read(&scst_cmd_count));
537
538         res = scst_susp_wait(interruptible);
539         if (res != 0)
540                 goto out_clear;
541
542         if (rep)
543                 PRINT_INFO("%s", "All active commands completed");
544
545 out_up:
546         mutex_unlock(&scst_suspend_mutex);
547
548 out:
549         TRACE_EXIT_RES(res);
550         return res;
551
552 out_clear:
553         clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
554         smp_mb__after_clear_bit();
555         goto out_up;
556 }
557 EXPORT_SYMBOL(scst_suspend_activity);
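
/*
 * Illustrative sketch (not part of the build): the suspend/resume pairing
 * used throughout this file around configuration changes. With interruptible
 * set to true the caller must handle a nonzero return (-EINTR, -EBUSY, ...)
 * and must only resume after a successful suspend, exactly as
 * scst_register_device() below does.
 *
 *        res = scst_suspend_activity(true);
 *        if (res != 0)
 *                goto out;
 *
 *        mutex_lock(&scst_mutex);
 *        ... modify targets, devices or handlers ...
 *        mutex_unlock(&scst_mutex);
 *
 *        scst_resume_activity();
 */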
558
559 static void __scst_resume_activity(void)
560 {
561         struct scst_cmd_lists *l;
562
563         TRACE_ENTRY();
564
565         suspend_count--;
566         TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
567         if (suspend_count > 0)
568                 goto out;
569
570         clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
571         smp_mb__after_clear_bit();
572
573         list_for_each_entry(l, &scst_cmd_lists_list, lists_list_entry) {
574                 wake_up_all(&l->cmd_list_waitQ);
575         }
576         wake_up_all(&scst_init_cmd_list_waitQ);
577
578         spin_lock_irq(&scst_mcmd_lock);
579         if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
580                 struct scst_mgmt_cmd *m;
581                 m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
582                                 mgmt_cmd_list_entry);
583                 TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
584                         "mgmt cmd list", m);
585                 list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
586         }
587         spin_unlock_irq(&scst_mcmd_lock);
588         wake_up_all(&scst_mgmt_cmd_list_waitQ);
589
590 out:
591         TRACE_EXIT();
592         return;
593 }
594
595 void scst_resume_activity(void)
596 {
597         TRACE_ENTRY();
598
599         mutex_lock(&scst_suspend_mutex);
600         __scst_resume_activity();
601         mutex_unlock(&scst_suspend_mutex);
602
603         TRACE_EXIT();
604         return;
605 }
606 EXPORT_SYMBOL(scst_resume_activity);
607
608 static int scst_register_device(struct scsi_device *scsidp)
609 {
610         int res = 0;
611         struct scst_device *dev;
612         struct scst_dev_type *dt;
613
614         TRACE_ENTRY();
615
616         res = scst_suspend_activity(true);
617         if (res != 0)
618                 goto out_err;
619
620         if (mutex_lock_interruptible(&scst_mutex) != 0) {
621                 res = -EINTR;
622                 goto out_resume;
623         }
624
625         res = scst_alloc_device(GFP_KERNEL, &dev);
626         if (res != 0)
627                 goto out_up;
628
629         dev->type = scsidp->type;
630
631         dev->rq_disk = alloc_disk(1);
632         if (dev->rq_disk == NULL) {
633                 res = -ENOMEM;
634                 goto out_free_dev;
635         }
636         dev->rq_disk->major = SCST_MAJOR;
637
638         dev->scsi_dev = scsidp;
639
640         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
641
642         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
643                 if (dt->type == scsidp->type) {
644                         res = scst_assign_dev_handler(dev, dt);
645                         if (res != 0)
646                                 goto out_free;
647                         break;
648                 }
649         }
650
651 out_up:
652         mutex_unlock(&scst_mutex);
653
654 out_resume:
655         scst_resume_activity();
656
657 out_err:
658         if (res == 0) {
659                 PRINT_INFO("Attached SCSI target mid-level at "
660                     "scsi%d, channel %d, id %d, lun %d, type %d",
661                     scsidp->host->host_no, scsidp->channel, scsidp->id,
662                     scsidp->lun, scsidp->type);
663         } else {
664                 PRINT_ERROR("Failed to attach SCSI target mid-level "
665                     "at scsi%d, channel %d, id %d, lun %d, type %d",
666                     scsidp->host->host_no, scsidp->channel, scsidp->id,
667                     scsidp->lun, scsidp->type);
668         }
669
670         TRACE_EXIT_RES(res);
671         return res;
672
673 out_free:
674         list_del(&dev->dev_list_entry);
675         put_disk(dev->rq_disk);
676
677 out_free_dev:
678         scst_free_device(dev);
679         goto out_up;
680 }
681
682 static void scst_unregister_device(struct scsi_device *scsidp)
683 {
684         struct scst_device *d, *dev = NULL;
685         struct scst_acg_dev *acg_dev, *aa;
686
687         TRACE_ENTRY();
688
689         scst_suspend_activity(false);
690         mutex_lock(&scst_mutex);
691
692         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
693                 if (d->scsi_dev == scsidp) {
694                         dev = d;
695                         TRACE_DBG("Target device %p found", dev);
696                         break;
697                 }
698         }
699         if (dev == NULL) {
700                 PRINT_ERROR("%s", "Target device not found");
701                 goto out_unblock;
702         }
703
704         list_del(&dev->dev_list_entry);
705
706         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
707                                  dev_acg_dev_list_entry)
708         {
709                 scst_acg_remove_dev(acg_dev->acg, dev);
710         }
711
712         scst_assign_dev_handler(dev, &scst_null_devtype);
713
714         put_disk(dev->rq_disk);
715         scst_free_device(dev);
716
717         PRINT_INFO("Detached SCSI target mid-level from scsi%d, channel %d, "
718                 "id %d, lun %d, type %d", scsidp->host->host_no,
719                 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
720
721 out_unblock:
722         mutex_unlock(&scst_mutex);
723         scst_resume_activity();
724
725         TRACE_EXIT();
726         return;
727 }
728
729 static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
730 {
731         int res = 0;
732
733         if (dev_handler->parse == NULL) {
734                 PRINT_ERROR("scst dev_type driver %s doesn't have a "
735                         "parse() method.", dev_handler->name);
736                 res = -EINVAL;
737                 goto out;
738         }
739
740         if (dev_handler->exec == NULL) {
741 #ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
742                 dev_handler->exec_atomic = 1;
743 #else
744                 dev_handler->exec_atomic = 0;
745 #endif
746         }
747
748         if (dev_handler->dev_done == NULL)
749                 dev_handler->dev_done_atomic = 1;
750
751 out:
752         TRACE_EXIT_RES(res);
753         return res;
754 }
755
756 int scst_register_virtual_device(struct scst_dev_type *dev_handler,
757         const char *dev_name)
758 {
759         int res, rc;
760         struct scst_device *dev = NULL;
761
762         TRACE_ENTRY();
763
764         if (dev_handler == NULL) {
765                 PRINT_ERROR("%s: valid device handler must be supplied",
766                             __func__);
767                 res = -EINVAL;
768                 goto out;
769         }
770
771         if (dev_name == NULL) {
772                 PRINT_ERROR("%s: device name must be non-NULL", __func__);
773                 res = -EINVAL;
774                 goto out;
775         }
776
777         res = scst_dev_handler_check(dev_handler);
778         if (res != 0)
779                 goto out;
780
781         res = scst_suspend_activity(true);
782         if (res != 0)
783                 goto out;
784
785         if (mutex_lock_interruptible(&scst_mutex) != 0) {
786                 res = -EINTR;
787                 goto out_resume;
788         }
789
790         res = scst_alloc_device(GFP_KERNEL, &dev);
791         if (res != 0)
792                 goto out_up;
793
794         dev->type = dev_handler->type;
795         dev->scsi_dev = NULL;
796         dev->virt_name = dev_name;
797         dev->virt_id = scst_virt_dev_last_id++;
798
799         list_add_tail(&dev->dev_list_entry, &scst_dev_list);
800
801         res = dev->virt_id;
802
803         rc = scst_assign_dev_handler(dev, dev_handler);
804         if (rc != 0) {
805                 res = rc;
806                 goto out_free_del;
807         }
808
809 out_up:
810         mutex_unlock(&scst_mutex);
811
812 out_resume:
813         scst_resume_activity();
814
815 out:
816         if (res > 0) {
817                 PRINT_INFO("Attached SCSI target mid-level to virtual "
818                     "device %s (id %d)", dev_name, dev->virt_id);
819         } else {
820                 PRINT_INFO("Failed to attach SCSI target mid-level to "
821                     "virtual device %s", dev_name);
822         }
823
824         TRACE_EXIT_RES(res);
825         return res;
826
827 out_free_del:
828         list_del(&dev->dev_list_entry);
829         scst_free_device(dev);
830         goto out_up;
831 }
832 EXPORT_SYMBOL(scst_register_virtual_device);
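
/*
 * Illustrative sketch (not part of the build): how a virtual-device backend
 * might register a device with its handler. On success the return value is
 * the positive virt_id assigned above; a negative value is an error. The
 * handler and device name below are placeholders.
 *
 *        int virt_id;
 *
 *        virt_id = scst_register_virtual_device(&example_virt_dev_handler,
 *                                               "example_disk0");
 *        if (virt_id < 0)
 *                return virt_id;
 *        ...
 *        scst_unregister_virtual_device(virt_id);
 */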
833
834 void scst_unregister_virtual_device(int id)
835 {
836         struct scst_device *d, *dev = NULL;
837         struct scst_acg_dev *acg_dev, *aa;
838
839         TRACE_ENTRY();
840
841         scst_suspend_activity(false);
842         mutex_lock(&scst_mutex);
843
844         list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
845                 if (d->virt_id == id) {
846                         dev = d;
847                         TRACE_DBG("Target device %p (id %d) found", dev, id);
848                         break;
849                 }
850         }
851         if (dev == NULL) {
852                 PRINT_ERROR("Target virtual device (id %d) not found", id);
853                 goto out_unblock;
854         }
855
856         list_del(&dev->dev_list_entry);
857
858         list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
859                                  dev_acg_dev_list_entry)
860         {
861                 scst_acg_remove_dev(acg_dev->acg, dev);
862         }
863
864         scst_assign_dev_handler(dev, &scst_null_devtype);
865
866         PRINT_INFO("Detached SCSI target mid-level from virtual device %s "
867                 "(id %d)", dev->virt_name, dev->virt_id);
868
869         scst_free_device(dev);
870
871 out_unblock:
872         mutex_unlock(&scst_mutex);
873         scst_resume_activity();
874
875         TRACE_EXIT();
876         return;
877 }
878 EXPORT_SYMBOL(scst_unregister_virtual_device);
879
880 int __scst_register_dev_driver(struct scst_dev_type *dev_type,
881         const char *version)
882 {
883         struct scst_dev_type *dt;
884         struct scst_device *dev;
885         int res;
886         int exist;
887
888         TRACE_ENTRY();
889
890         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
891                 PRINT_ERROR("Incorrect version of dev handler %s",
892                         dev_type->name);
893                 res = -EINVAL;
894                 goto out_error;
895         }
896
897         res = scst_dev_handler_check(dev_type);
898         if (res != 0)
899                 goto out_error;
900
901 #if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
902     !defined(CONFIG_SCST_STRICT_SERIALIZING)
903         if (dev_type->exec == NULL) {
904                 PRINT_ERROR("Pass-through dev handlers (handler \"%s\") are not "
905                         "supported. Consider applying the patch "
906                         "scst_exec_req_fifo-<kernel-version>.patch to your kernel "
907                         "or defining CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
908                 res = -EINVAL;
909                 goto out;
910         }
911 #endif
912
913         res = scst_suspend_activity(true);
914         if (res != 0)
915                 goto out_error;
916
917         if (mutex_lock_interruptible(&scst_mutex) != 0) {
918                 res = -EINTR;
919                 goto out_err_res;
920         }
921
922         exist = 0;
923         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
924                 if (strcmp(dt->name, dev_type->name) == 0) {
925                         PRINT_ERROR("Device type handler \"%s\" already "
926                                 "exists", dt->name);
927                         exist = 1;
928                         break;
929                 }
930         }
931         if (exist)
932                 goto out_up;
933
934         res = scst_build_proc_dev_handler_dir_entries(dev_type);
935         if (res < 0)
936                 goto out_up;
937
938         list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
939
940         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
941                 if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
942                         continue;
943                 if (dev->scsi_dev->type == dev_type->type)
944                         scst_assign_dev_handler(dev, dev_type);
945         }
946
947         mutex_unlock(&scst_mutex);
948         scst_resume_activity();
949
950         if (res == 0) {
951                 PRINT_INFO("Device handler \"%s\" for type %d registered "
952                         "successfully", dev_type->name, dev_type->type);
953         }
954
955 out:
956         TRACE_EXIT_RES(res);
957         return res;
958
959 out_up:
960         mutex_unlock(&scst_mutex);
961
962 out_err_res:
963         scst_resume_activity();
964
965 out_error:
966         PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
967                 dev_type->name, dev_type->type);
968         goto out;
969 }
970 EXPORT_SYMBOL(__scst_register_dev_driver);
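
/*
 * Illustrative sketch (not part of the build): a minimal pass-through dev
 * handler registration. scst_dev_handler_check() above requires parse();
 * exec() and dev_done() are optional. The callback signatures and the
 * scst_register_dev_driver() wrapper that passes SCST_INTERFACE_VERSION are
 * defined in scst.h; the names below are placeholders.
 *
 *        static struct scst_dev_type example_disk_devtype = {
 *                .name = "example_disk",
 *                .type = TYPE_DISK,
 *                .parse = example_parse,
 *        };
 *
 *        static int __init example_handler_init(void)
 *        {
 *                return __scst_register_dev_driver(&example_disk_devtype,
 *                                                  SCST_INTERFACE_VERSION);
 *        }
 */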
971
972 void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
973 {
974         struct scst_device *dev;
975         struct scst_dev_type *dt;
976         int found = 0;
977
978         TRACE_ENTRY();
979
980         scst_suspend_activity(false);
981         mutex_lock(&scst_mutex);
982
983         list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
984                 if (strcmp(dt->name, dev_type->name) == 0) {
985                         found = 1;
986                         break;
987                 }
988         }
989         if (!found) {
990                 PRINT_ERROR("Dev handler \"%s\" isn't registered",
991                         dev_type->name);
992                 goto out_up;
993         }
994
995         list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
996                 if (dev->handler == dev_type) {
997                         scst_assign_dev_handler(dev, &scst_null_devtype);
998                         TRACE_DBG("Dev handler removed from device %p", dev);
999                 }
1000         }
1001
1002         list_del(&dev_type->dev_type_list_entry);
1003
1004         mutex_unlock(&scst_mutex);
1005         scst_resume_activity();
1006
1007         scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1008
1009         PRINT_INFO("Device handler \"%s\" for type %d unloaded",
1010                    dev_type->name, dev_type->type);
1011
1012 out:
1013         TRACE_EXIT();
1014         return;
1015
1016 out_up:
1017         mutex_unlock(&scst_mutex);
1018         scst_resume_activity();
1019         goto out;
1020 }
1021 EXPORT_SYMBOL(scst_unregister_dev_driver);
1022
1023 int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
1024         const char *version)
1025 {
1026         int res;
1027
1028         TRACE_ENTRY();
1029
1030         if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
1031                 PRINT_ERROR("Incorrect version of virtual dev handler %s",
1032                         dev_type->name);
1033                 res = -EINVAL;
1034                 goto out_err;
1035         }
1036
1037         res = scst_dev_handler_check(dev_type);
1038         if (res != 0)
1039                 goto out_err;
1040
1041         if (!dev_type->no_proc) {
1042                 res = scst_build_proc_dev_handler_dir_entries(dev_type);
1043                 if (res < 0)
1044                         goto out_err;
1045         }
1046
1047         if (dev_type->type != -1) {
1048                 PRINT_INFO("Virtual device handler %s for type %d "
1049                         "registered successfully", dev_type->name,
1050                         dev_type->type);
1051         } else {
1052                 PRINT_INFO("Virtual device handler \"%s\" registered "
1053                         "successfully", dev_type->name);
1054         }
1055
1056 out:
1057         TRACE_EXIT_RES(res);
1058         return res;
1059
1060 out_err:
1061         PRINT_ERROR("Failed to register virtual device handler \"%s\"",
1062                 dev_type->name);
1063         goto out;
1064 }
1065 EXPORT_SYMBOL(__scst_register_virtual_dev_driver);
1066
1067 void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
1068 {
1069         TRACE_ENTRY();
1070
1071         if (!dev_type->no_proc)
1072                 scst_cleanup_proc_dev_handler_dir_entries(dev_type);
1073
1074         PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
1075
1076         TRACE_EXIT();
1077         return;
1078 }
1079 EXPORT_SYMBOL(scst_unregister_virtual_dev_driver);
1080
1081 /* Called under scst_mutex */
1082 int scst_add_dev_threads(struct scst_device *dev, int num)
1083 {
1084         int i, res = 0;
1085         int n = 0;
1086         struct scst_cmd_thread_t *thr;
1087         struct io_context *ioc = NULL;
1088         char nm[12];
1089
1090         TRACE_ENTRY();
1091
1092         list_for_each_entry(thr, &dev->threads_list, thread_list_entry) {
1093                 n++;
1094         }
1095
1096         for (i = 0; i < num; i++) {
1097                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1098                 if (!thr) {
1099                         res = -ENOMEM;
1100                         PRINT_ERROR("Failed to allocate thr %d", res);
1101                         goto out;
1102                 }
1103                 strncpy(nm, dev->handler->name, ARRAY_SIZE(nm)-1);
1104                 nm[ARRAY_SIZE(nm)-1] = '\0';
1105                 thr->cmd_thread = kthread_create(scst_cmd_thread,
1106                         &dev->cmd_lists, "%sd%d_%d", nm, dev->dev_num, n++);
1107                 if (IS_ERR(thr->cmd_thread)) {
1108                         res = PTR_ERR(thr->cmd_thread);
1109                         PRINT_ERROR("kthread_create() failed: %d", res);
1110                         kfree(thr);
1111                         goto out;
1112                 }
1113
1114                 list_add(&thr->thread_list_entry, &dev->threads_list);
1115
1116 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1117 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1118                 /*
1119                  * It would be better to keep the io_context in tgt_dev and
1120                  * dynamically assign it to the current thread at IO
1121                  * submission time, so that each initiator gets its own
1122                  * io_context. Unfortunately, CFQ doesn't support a
1123                  * task switching its io_context dynamically: it
1124                  * oopses on BUG_ON(!cic->dead_key) in cic_free_func().
1125                  * So we have to use the same io_context for all
1126                  * initiators.
1127                  */
1128                 if (ioc == NULL) {
1129                         ioc = alloc_io_context(GFP_KERNEL, -1);
1130                         TRACE_DBG("ioc %p (thr %d)", ioc, thr->cmd_thread->pid);
1131                 }
1132
1133                 put_io_context(thr->cmd_thread->io_context);
1134                 thr->cmd_thread->io_context = ioc_task_link(ioc);
1135                 TRACE_DBG("Setting ioc %p on thr %d", ioc,
1136                         thr->cmd_thread->pid);
1137 #endif
1138 #endif
1139                 wake_up_process(thr->cmd_thread);
1140         }
1141
1142 out:
1143         put_io_context(ioc);
1144
1145         TRACE_EXIT_RES(res);
1146         return res;
1147 }
1148
1149 /* Called under scst_mutex and suspended activity */
1150 static int scst_create_dev_threads(struct scst_device *dev)
1151 {
1152         int res = 0;
1153         int threads_num;
1154
1155         TRACE_ENTRY();
1156
1157         if (dev->handler->threads_num <= 0)
1158                 goto out;
1159
1160         threads_num = dev->handler->threads_num;
1161
1162         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
1163         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
1164         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
1165
1166         res = scst_add_dev_threads(dev, threads_num);
1167         if (res != 0)
1168                 goto out;
1169
1170         mutex_lock(&scst_suspend_mutex);
1171         list_add_tail(&dev->cmd_lists.lists_list_entry,
1172                 &scst_cmd_lists_list);
1173         mutex_unlock(&scst_suspend_mutex);
1174
1175         dev->p_cmd_lists = &dev->cmd_lists;
1176
1177 out:
1178         TRACE_EXIT_RES(res);
1179         return res;
1180 }
1181
1182 /* Called under scst_mutex */
1183 void scst_del_dev_threads(struct scst_device *dev, int num)
1184 {
1185         struct scst_cmd_thread_t *ct, *tmp;
1186         int i = 0;
1187
1188         TRACE_ENTRY();
1189
1190         list_for_each_entry_safe(ct, tmp, &dev->threads_list,
1191                                 thread_list_entry) {
1192                 int rc = kthread_stop(ct->cmd_thread);
1193                 if (rc < 0)
1194                         TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
1195                 list_del(&ct->thread_list_entry);
1196                 kfree(ct);
1197                 if ((num > 0) && (++i >= num))
1198                         break;
1199         }
1200
1201         TRACE_EXIT();
1202         return;
1203 }
1204
1205 /* Called under scst_mutex and suspended activity */
1206 static void scst_stop_dev_threads(struct scst_device *dev)
1207 {
1208         TRACE_ENTRY();
1209
1210         if (list_empty(&dev->threads_list))
1211                 goto out;
1212
1213         scst_del_dev_threads(dev, -1);
1214
1215         if (dev->p_cmd_lists == &dev->cmd_lists) {
1216                 mutex_lock(&scst_suspend_mutex);
1217                 list_del(&dev->cmd_lists.lists_list_entry);
1218                 mutex_unlock(&scst_suspend_mutex);
1219         }
1220
1221 out:
1222         TRACE_EXIT();
1223         return;
1224 }
1225
1226 /* The activity supposed to be suspended and scst_mutex held */
1227 int scst_assign_dev_handler(struct scst_device *dev,
1228         struct scst_dev_type *handler)
1229 {
1230         int res = 0;
1231         struct scst_tgt_dev *tgt_dev;
1232         LIST_HEAD(attached_tgt_devs);
1233
1234         TRACE_ENTRY();
1235
1236         sBUG_ON(handler == NULL);
1237
1238         if (dev->handler == handler)
1239                 goto out;
1240
1241         if (dev->handler && dev->handler->detach_tgt) {
1242                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1243                                 dev_tgt_dev_list_entry) {
1244                         TRACE_DBG("Calling dev handler's detach_tgt(%p)",
1245                                 tgt_dev);
1246                         dev->handler->detach_tgt(tgt_dev);
1247                         TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
1248                 }
1249         }
1250
1251         if (dev->handler && dev->handler->detach) {
1252                 TRACE_DBG("%s", "Calling dev handler's detach()");
1253                 dev->handler->detach(dev);
1254                 TRACE_DBG("%s", "Old handler's detach() returned");
1255         }
1256
1257         scst_stop_dev_threads(dev);
1258
1259         dev->handler = handler;
1260
1261         if (handler) {
1262                 res = scst_create_dev_threads(dev);
1263                 if (res != 0)
1264                         goto out_null;
1265         }
1266
1267         if (handler && handler->attach) {
1268                 TRACE_DBG("Calling new dev handler's attach(%p)", dev);
1269                 res = handler->attach(dev);
1270                 TRACE_DBG("New dev handler's attach() returned %d", res);
1271                 if (res != 0) {
1272                         PRINT_ERROR("New device handler's %s attach() "
1273                                 "failed: %d", handler->name, res);
1274                 }
1275                 goto out_thr_null;
1276         }
1277
1278         if (handler && handler->attach_tgt) {
1279                 list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
1280                                 dev_tgt_dev_list_entry) {
1281                         TRACE_DBG("Calling dev handler's attach_tgt(%p)",
1282                                 tgt_dev);
1283                         res = handler->attach_tgt(tgt_dev);
1284                         TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
1285                         if (res != 0) {
1286                                 PRINT_ERROR("Device handler's %s attach_tgt() "
1287                                     "failed: %d", handler->name, res);
1288                                 goto out_err_detach_tgt;
1289                         }
1290                         list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
1291                                 &attached_tgt_devs);
1292                 }
1293         }
1294
1295 out_thr_null:
1296         if (res != 0)
1297                 scst_stop_dev_threads(dev);
1298
1299 out_null:
1300         if (res != 0)
1301                 dev->handler = &scst_null_devtype;
1302
1303 out:
1304         TRACE_EXIT_RES(res);
1305         return res;
1306
1307 out_err_detach_tgt:
1308         if (handler && handler->detach_tgt) {
1309                 list_for_each_entry(tgt_dev, &attached_tgt_devs,
1310                                  extra_tgt_dev_list_entry)
1311                 {
1312                         TRACE_DBG("Calling handler's detach_tgt(%p)",
1313                                 tgt_dev);
1314                         handler->detach_tgt(tgt_dev);
1315                         TRACE_DBG("%s", "Handler's detach_tgt() returned");
1316                 }
1317         }
1318         if (handler && handler->detach) {
1319                 TRACE_DBG("%s", "Calling handler's detach()");
1320                 handler->detach(dev);
1321                 TRACE_DBG("%s", "Handler's detach() returned");
1322         }
1323         goto out_null;
1324 }
1325
1326 int scst_cmd_threads_count(void)
1327 {
1328         int i;
1329
1330         /*
1331          * Taken only to narrow the race window in which the caller could read a just-changed value
1332          */
1333         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1334         i = scst_threads_info.nr_cmd_threads;
1335         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1336         return i;
1337 }
1338
1339 static void scst_threads_info_init(void)
1340 {
1341         memset(&scst_threads_info, 0, sizeof(scst_threads_info));
1342         mutex_init(&scst_threads_info.cmd_threads_mutex);
1343         INIT_LIST_HEAD(&scst_threads_info.cmd_threads_list);
1344 }
1345
1346 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1347 void __scst_del_cmd_threads(int num)
1348 {
1349         struct scst_cmd_thread_t *ct, *tmp;
1350         int i;
1351
1352         TRACE_ENTRY();
1353
1354         i = scst_threads_info.nr_cmd_threads;
1355         if (num <= 0 || num > i) {
1356                 PRINT_ERROR("Cannot delete %d cmd threads out of %d", num, i);
1357                 return;
1358         }
1359
1360         list_for_each_entry_safe(ct, tmp, &scst_threads_info.cmd_threads_list,
1361                                 thread_list_entry) {
1362                 int res;
1363
1364                 res = kthread_stop(ct->cmd_thread);
1365                 if (res < 0)
1366                         TRACE_MGMT_DBG("kthread_stop() failed: %d", res);
1367                 list_del(&ct->thread_list_entry);
1368                 kfree(ct);
1369                 scst_threads_info.nr_cmd_threads--;
1370                 --num;
1371                 if (num == 0)
1372                         break;
1373         }
1374
1375         TRACE_EXIT();
1376         return;
1377 }
1378
1379 /* scst_threads_info.cmd_threads_mutex supposed to be held */
1380 int __scst_add_cmd_threads(int num)
1381 {
1382         int res = 0, i;
1383         static int scst_thread_num;
1384
1385         TRACE_ENTRY();
1386
1387         for (i = 0; i < num; i++) {
1388                 struct scst_cmd_thread_t *thr;
1389
1390                 thr = kmalloc(sizeof(*thr), GFP_KERNEL);
1391                 if (!thr) {
1392                         res = -ENOMEM;
1393                         PRINT_ERROR("Failed to allocate thr %d", res);
1394                         goto out_error;
1395                 }
1396                 thr->cmd_thread = kthread_create(scst_cmd_thread,
1397                         &scst_main_cmd_lists, "scsi_tgt%d",
1398                         scst_thread_num++);
1399                 if (IS_ERR(thr->cmd_thread)) {
1400                         res = PTR_ERR(thr->cmd_thread);
1401                         PRINT_ERROR("kthread_create() failed: %d", res);
1402                         kfree(thr);
1403                         goto out_error;
1404                 }
1405
1406                 list_add(&thr->thread_list_entry,
1407                         &scst_threads_info.cmd_threads_list);
1408                 scst_threads_info.nr_cmd_threads++;
1409
1410 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
1411 #if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
1412                 /* See comment in scst_add_dev_threads() */
1413                 if (scst_ioc == NULL) {
1414                         scst_ioc = alloc_io_context(GFP_KERNEL, -1);
1415                         TRACE_DBG("scst_ioc %p (thr %d)", scst_ioc,
1416                                 thr->cmd_thread->pid);
1417                 }
1418
1419                 put_io_context(thr->cmd_thread->io_context);
1420                 thr->cmd_thread->io_context = ioc_task_link(scst_ioc);
1421                 TRACE_DBG("Setting scst_ioc %p on thr %d",
1422                         scst_ioc, thr->cmd_thread->pid);
1423 #endif
1424 #endif
1425                 wake_up_process(thr->cmd_thread);
1426         }
1427         res = 0;
1428
1429 out:
1430         TRACE_EXIT_RES(res);
1431         return res;
1432
1433 out_error:
1434         if (i > 0)
1435                 __scst_del_cmd_threads(i); /* i threads were added successfully */
1436         goto out;
1437 }
1438
1439 int scst_add_cmd_threads(int num)
1440 {
1441         int res;
1442
1443         TRACE_ENTRY();
1444
1445         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1446         res = __scst_add_cmd_threads(num);
1447         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1448
1449         TRACE_EXIT_RES(res);
1450         return res;
1451 }
1452 EXPORT_SYMBOL(scst_add_cmd_threads);
1453
1454 void scst_del_cmd_threads(int num)
1455 {
1456         TRACE_ENTRY();
1457
1458         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1459         __scst_del_cmd_threads(num);
1460         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1461
1462         TRACE_EXIT();
1463         return;
1464 }
1465 EXPORT_SYMBOL(scst_del_cmd_threads);
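
/*
 * Illustrative sketch (not part of the build): a caller can grow and shrink
 * the global command thread pool symmetrically with these helpers; using
 * vtt->threads_num as the count here is only an example, this file shows just
 * the add/del primitives themselves.
 *
 *        res = scst_add_cmd_threads(vtt->threads_num);
 *        if (res != 0)
 *                return res;
 *        ...
 *        scst_del_cmd_threads(vtt->threads_num);
 */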
1466
1467 static void scst_stop_all_threads(void)
1468 {
1469         TRACE_ENTRY();
1470
1471         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1472         __scst_del_cmd_threads(scst_threads_info.nr_cmd_threads);
1473         if (scst_threads_info.mgmt_cmd_thread)
1474                 kthread_stop(scst_threads_info.mgmt_cmd_thread);
1475         if (scst_threads_info.mgmt_thread)
1476                 kthread_stop(scst_threads_info.mgmt_thread);
1477         if (scst_threads_info.init_cmd_thread)
1478                 kthread_stop(scst_threads_info.init_cmd_thread);
1479         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1480
1481         TRACE_EXIT();
1482         return;
1483 }
1484
1485 static int scst_start_all_threads(int num)
1486 {
1487         int res;
1488
1489         TRACE_ENTRY();
1490
1491         mutex_lock(&scst_threads_info.cmd_threads_mutex);
1492         res = __scst_add_cmd_threads(num);
1493         if (res < 0)
1494                 goto out;
1495
1496         scst_threads_info.init_cmd_thread = kthread_run(scst_init_cmd_thread,
1497                 NULL, "scsi_tgt_init");
1498         if (IS_ERR(scst_threads_info.init_cmd_thread)) {
1499                 res = PTR_ERR(scst_threads_info.init_cmd_thread);
1500                 PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
1501                 scst_threads_info.init_cmd_thread = NULL;
1502                 goto out;
1503         }
1504
1505         scst_threads_info.mgmt_cmd_thread = kthread_run(scst_mgmt_cmd_thread,
1506                 NULL, "scsi_tgt_mc");
1507         if (IS_ERR(scst_threads_info.mgmt_cmd_thread)) {
1508                 res = PTR_ERR(scst_threads_info.mgmt_cmd_thread);
1509                 PRINT_ERROR("kthread_create() for mcmd failed: %d", res);
1510                 scst_threads_info.mgmt_cmd_thread = NULL;
1511                 goto out;
1512         }
1513
1514         scst_threads_info.mgmt_thread = kthread_run(scst_mgmt_thread,
1515                 NULL, "scsi_tgt_mgmt");
1516         if (IS_ERR(scst_threads_info.mgmt_thread)) {
1517                 res = PTR_ERR(scst_threads_info.mgmt_thread);
1518                 PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
1519                 scst_threads_info.mgmt_thread = NULL;
1520                 goto out;
1521         }
1522
1523 out:
1524         mutex_unlock(&scst_threads_info.cmd_threads_mutex);
1525         TRACE_EXIT_RES(res);
1526         return res;
1527 }
1528
1529 void scst_get(void)
1530 {
1531         __scst_get(0);
1532 }
1533 EXPORT_SYMBOL(scst_get);
1534
1535 void scst_put(void)
1536 {
1537         __scst_put();
1538 }
1539 EXPORT_SYMBOL(scst_put);
1540
1541 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1542 static int scst_add(struct class_device *cdev, struct class_interface *intf)
1543 #else
1544 static int scst_add(struct device *cdev, struct class_interface *intf)
1545 #endif
1546 {
1547         struct scsi_device *scsidp;
1548         int res = 0;
1549
1550         TRACE_ENTRY();
1551
1552 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1553         scsidp = to_scsi_device(cdev->dev);
1554 #else
1555         scsidp = to_scsi_device(cdev->parent);
1556 #endif
1557
1558         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1559                 res = scst_register_device(scsidp);
1560
1561         TRACE_EXIT();
1562         return res;
1563 }
1564
1565 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1566 static void scst_remove(struct class_device *cdev, struct class_interface *intf)
1567 #else
1568 static void scst_remove(struct device *cdev, struct class_interface *intf)
1569 #endif
1570 {
1571         struct scsi_device *scsidp;
1572
1573         TRACE_ENTRY();
1574
1575 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1576         scsidp = to_scsi_device(cdev->dev);
1577 #else
1578         scsidp = to_scsi_device(cdev->parent);
1579 #endif
1580
1581         if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
1582                 scst_unregister_device(scsidp);
1583
1584         TRACE_EXIT();
1585         return;
1586 }
1587
1588 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
1589 static struct class_interface scst_interface = {
1590         .add = scst_add,
1591         .remove = scst_remove,
1592 };
1593 #else
1594 static struct class_interface scst_interface = {
1595         .add_dev = scst_add,
1596         .remove_dev = scst_remove,
1597 };
1598 #endif
1599
static void __init scst_print_config(void)
{
        char buf[128];
        int i, j;

        i = snprintf(buf, sizeof(buf), "Enabled features: ");
        j = i;

#ifdef CONFIG_SCST_STRICT_SERIALIZING
        i += snprintf(&buf[i], sizeof(buf) - i, "Strict serializing");
#endif

#ifdef CONFIG_SCST_EXTRACHECKS
        i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_TRACING
        i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_DEBUG
        i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_DEBUG_TM
        i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_DEBUG_RETRY
        i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_DEBUG_OOM
        i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_DEBUG_SN
        i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
        i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
        i += snprintf(&buf[i], sizeof(buf) - i,
                "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
                (j == i) ? "" : ", ");
#endif

#ifdef CONFIG_SCST_STRICT_SECURITY
        i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
                (j == i) ? "" : ", ");
#endif

        if (j != i)
                PRINT_INFO("%s", buf);
}

static int __init init_scst(void)
{
        int res = 0, i;
        int scst_num_cpus;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
        {
                struct scsi_request *req;
                BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE !=
                        sizeof(req->sr_sense_buffer));
        }
#else
        {
                struct scsi_sense_hdr *shdr;
                BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
        }
#endif
        {
                struct scst_tgt_dev *t;
                struct scst_cmd *c;
                BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
                BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
        }

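        /*
         * SCST data direction values must be numerically identical to the
         * kernel's DMA data directions, so they can be passed to the SCSI
         * midlayer without conversion.
         */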
        BUILD_BUG_ON(SCST_DATA_UNKNOWN != DMA_BIDIRECTIONAL);
        BUILD_BUG_ON(SCST_DATA_WRITE != DMA_TO_DEVICE);
        BUILD_BUG_ON(SCST_DATA_READ != DMA_FROM_DEVICE);
        BUILD_BUG_ON(SCST_DATA_NONE != DMA_NONE);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if !defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
        PRINT_WARNING("%s", "Patch export_alloc_io_context was not applied on "
                "your kernel. SCST will work with reduced performance.");
#endif
#endif

        mutex_init(&scst_mutex);
        INIT_LIST_HEAD(&scst_template_list);
        INIT_LIST_HEAD(&scst_dev_list);
        INIT_LIST_HEAD(&scst_dev_type_list);
        spin_lock_init(&scst_main_lock);
        INIT_LIST_HEAD(&scst_acg_list);
        spin_lock_init(&scst_init_lock);
        init_waitqueue_head(&scst_init_cmd_list_waitQ);
        INIT_LIST_HEAD(&scst_init_cmd_list);
#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
        scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
#endif
        atomic_set(&scst_cmd_count, 0);
        spin_lock_init(&scst_mcmd_lock);
        INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
        INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
        init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
        init_waitqueue_head(&scst_mgmt_waitQ);
        spin_lock_init(&scst_mgmt_lock);
        INIT_LIST_HEAD(&scst_sess_init_list);
        INIT_LIST_HEAD(&scst_sess_shut_list);
        init_waitqueue_head(&scst_dev_cmd_waitQ);
        mutex_init(&scst_suspend_mutex);
        INIT_LIST_HEAD(&scst_cmd_lists_list);
        scst_virt_dev_last_id = 1;
        spin_lock_init(&scst_temp_UA_lock);

        spin_lock_init(&scst_main_cmd_lists.cmd_list_lock);
        INIT_LIST_HEAD(&scst_main_cmd_lists.active_cmd_list);
        init_waitqueue_head(&scst_main_cmd_lists.cmd_list_waitQ);
        list_add_tail(&scst_main_cmd_lists.lists_list_entry,
                &scst_cmd_lists_list);

        scst_num_cpus = num_online_cpus();

        /* ToDo: register_cpu_notifier() */

        if (scst_threads == 0)
                scst_threads = scst_num_cpus;

        if (scst_threads < 1) {
                PRINT_ERROR("%s", "scst_threads cannot be less than 1");
                scst_threads = scst_num_cpus;
        }

        scst_threads_info_init();

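/*
 * INIT_CACHEP creates a slab cache for struct 's', stores it in 'p' and,
 * on allocation failure, sets -ENOMEM and jumps to the cleanup label 'o'.
 */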
#define INIT_CACHEP(p, s, o) do {                                       \
                p = KMEM_CACHE(s, SCST_SLAB_FLAGS);                     \
                TRACE_MEM("Slab create: %s at %p size %zd", #s, p,      \
                          sizeof(struct s));                            \
                if (p == NULL) {                                        \
                        res = -ENOMEM;                                  \
                        goto o;                                         \
                }                                                       \
        } while (0)

        INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
        INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
                        out_destroy_mgmt_cache);
        INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
                        out_destroy_mgmt_stub_cache);
        {
                struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
                INIT_CACHEP(scst_sense_cachep, scst_sense,
                            out_destroy_ua_cache);
        }
        INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_sense_cache);
        INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
        INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
        INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);

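        /*
         * The mempools below guarantee a minimum number of preallocated
         * objects, so management commands, UAs and sense buffers can still
         * be allocated under memory pressure.
         */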
        scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
                mempool_free_slab, scst_mgmt_cachep);
        if (scst_mgmt_mempool == NULL) {
                res = -ENOMEM;
                goto out_destroy_acg_cache;
        }

        scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
                mempool_free_slab, scst_mgmt_stub_cachep);
        if (scst_mgmt_stub_mempool == NULL) {
                res = -ENOMEM;
                goto out_destroy_mgmt_mempool;
        }

        scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
                mempool_free_slab, scst_ua_cachep);
        if (scst_ua_mempool == NULL) {
                res = -ENOMEM;
                goto out_destroy_mgmt_stub_mempool;
        }

        /*
         * Losing sense data may have fatal consequences, so keep a big pool
         */
        scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
                mempool_free_slab, scst_sense_cachep);
        if (scst_sense_mempool == NULL) {
                res = -ENOMEM;
                goto out_destroy_ua_mempool;
        }

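        /*
         * If no limit was given, default the global command memory limit to
         * a quarter of system RAM, expressed in MB (with an additional clamp
         * on 32-bit kernels).
         */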
        if (scst_max_cmd_mem == 0) {
                struct sysinfo si;
                si_meminfo(&si);
#if BITS_PER_LONG == 32
                scst_max_cmd_mem = min(
                        (((uint64_t)si.totalram << PAGE_SHIFT) >> 20) >> 2,
                        (uint64_t)1 << 30);
#else
                scst_max_cmd_mem = ((si.totalram << PAGE_SHIFT) >> 20) >> 2;
#endif
        }

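        /*
         * The per-device limit defaults to 2/5 of the global limit and is
         * never allowed to exceed it.
         */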
        if (scst_max_dev_cmd_mem != 0) {
                if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
                        PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
                                "scst_max_cmd_mem (%d)",
                                scst_max_dev_cmd_mem,
                                scst_max_cmd_mem);
                        scst_max_dev_cmd_mem = scst_max_cmd_mem;
                }
        } else
                scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;

        res = scst_sgv_pools_init(
                ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
        if (res != 0)
                goto out_destroy_sense_mempool;

        scst_default_acg = scst_alloc_add_acg(SCST_DEFAULT_ACG_NAME);
        if (scst_default_acg == NULL) {
                res = -ENOMEM;
                goto out_destroy_sgv_pool;
        }

        res = scsi_register_interface(&scst_interface);
        if (res != 0)
                goto out_free_acg;

        scst_scsi_op_list_init();

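        /*
         * Initialize the scst_tasklets[] array: each entry gets its own
         * lock, command list and tasklet for softirq-context command
         * processing.
         */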
        for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
                spin_lock_init(&scst_tasklets[i].tasklet_lock);
                INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
                tasklet_init(&scst_tasklets[i].tasklet,
                             (void *)scst_cmd_tasklet,
                             (unsigned long)&scst_tasklets[i]);
        }

        TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
                scst_threads);

        res = scst_start_all_threads(scst_threads);
        if (res < 0)
                goto out_thread_free;

        res = scst_proc_init_module();
        if (res != 0)
                goto out_thread_free;

        PRINT_INFO("SCST version %s loaded successfully (max mem for "
                "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
                scst_max_cmd_mem, scst_max_dev_cmd_mem);

        scst_print_config();

out:
        TRACE_EXIT_RES(res);
        return res;

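        /* Error handling: undo completed initialization steps in reverse order */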
out_thread_free:
        scst_stop_all_threads();

        scsi_unregister_interface(&scst_interface);

out_free_acg:
        scst_destroy_acg(scst_default_acg);

out_destroy_sgv_pool:
        scst_sgv_pools_deinit();

out_destroy_sense_mempool:
        mempool_destroy(scst_sense_mempool);

out_destroy_ua_mempool:
        mempool_destroy(scst_ua_mempool);

out_destroy_mgmt_stub_mempool:
        mempool_destroy(scst_mgmt_stub_mempool);

out_destroy_mgmt_mempool:
        mempool_destroy(scst_mgmt_mempool);

out_destroy_acg_cache:
        kmem_cache_destroy(scst_acgd_cachep);

out_destroy_tgt_cache:
        kmem_cache_destroy(scst_tgtd_cachep);

out_destroy_sess_cache:
        kmem_cache_destroy(scst_sess_cachep);

out_destroy_cmd_cache:
        kmem_cache_destroy(scst_cmd_cachep);

out_destroy_sense_cache:
        kmem_cache_destroy(scst_sense_cachep);

out_destroy_ua_cache:
        kmem_cache_destroy(scst_ua_cachep);

out_destroy_mgmt_stub_cache:
        kmem_cache_destroy(scst_mgmt_stub_cachep);

out_destroy_mgmt_cache:
        kmem_cache_destroy(scst_mgmt_cachep);
        goto out;
}

static void __exit exit_scst(void)
{
        TRACE_ENTRY();

        /* ToDo: unregister_cpu_notifier() */

        scst_proc_cleanup_module();

        scst_stop_all_threads();

        scsi_unregister_interface(&scst_interface);
        scst_destroy_acg(scst_default_acg);

        scst_sgv_pools_deinit();

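/*
 * DEINIT_CACHEP destroys a slab cache and clears its pointer. The mempools
 * are destroyed first, before the caches that back them.
 */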
#define DEINIT_CACHEP(p) do {           \
                kmem_cache_destroy(p);  \
                p = NULL;               \
        } while (0)

        mempool_destroy(scst_mgmt_mempool);
        mempool_destroy(scst_mgmt_stub_mempool);
        mempool_destroy(scst_ua_mempool);
        mempool_destroy(scst_sense_mempool);

        DEINIT_CACHEP(scst_mgmt_cachep);
        DEINIT_CACHEP(scst_mgmt_stub_cachep);
        DEINIT_CACHEP(scst_ua_cachep);
        DEINIT_CACHEP(scst_sense_cachep);
        DEINIT_CACHEP(scst_cmd_cachep);
        DEINIT_CACHEP(scst_sess_cachep);
        DEINIT_CACHEP(scst_tgtd_cachep);
        DEINIT_CACHEP(scst_acgd_cachep);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#if defined(CONFIG_BLOCK) && defined(SCST_ALLOC_IO_CONTEXT_EXPORTED)
        put_io_context(scst_ioc);
#endif
#endif

        PRINT_INFO("%s", "SCST unloaded");

        TRACE_EXIT();
        return;
}

module_init(init_scst);
module_exit(exit_scst);

MODULE_AUTHOR("Vladislav Bolkhovitin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCSI target core");
MODULE_VERSION(SCST_VERSION_STRING);