scst/src/dev_handlers/scst_user.c
1 /*
2  *  scst_user.c
3  *
4  *  Copyright (C) 2007 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2007 - 2008 CMS Distribution Limited
6  *
7  *  SCSI virtual user space device handler
8  *
9  *  This program is free software; you can redistribute it and/or
10  *  modify it under the terms of the GNU General Public License
11  *  as published by the Free Software Foundation, version 2
12  *  of the License.
13  *
14  *  This program is distributed in the hope that it will be useful,
15  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  *  GNU General Public License for more details.
18  */
19
20 #include <linux/kthread.h>
21 #include <linux/delay.h>
22 #include <linux/poll.h>
23 #include <linux/stddef.h>
24
25 #define LOG_PREFIX              DEV_USER_NAME
26
27 #include "scst.h"
28 #include "scst_user.h"
29 #include "scst_dev_handler.h"
30
31 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
32 #warning "HIGHMEM kernel configurations are not supported by this module, \
33         because nowadays it isn't worth the effort. Consider changing the \
34         VMSPLIT option or using a 64-bit configuration instead. See the \
35         README file for details."
36 #endif
37
38 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
39 #define COMPLETION_INITIALIZER_ONSTACK(work) \
40         ({ init_completion(&work); work; })
41
42 /*
43  * Lockdep needs to run a non-constant initializer for on-stack
44  * completions - so we use the _ONSTACK() variant for those that
45  * are on the kernel stack:
46  */
47 #ifdef CONFIG_LOCKDEP
48 # define DECLARE_COMPLETION_ONSTACK(work) \
49         struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
50 #else
51 # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
52 #endif
53
54 #endif
55
56 #define DEV_USER_MAJOR                  237
57 #define DEV_USER_CMD_HASH_ORDER         6
58 #define DEV_USER_ATTACH_TIMEOUT         (5*HZ)
59
60 #define DEV_USER_HEADER_LEN     offsetof(struct scst_user_get_cmd, preply)
61
62 struct scst_user_dev {
63         struct rw_semaphore dev_rwsem;
64
65         struct scst_cmd_lists cmd_lists;
66
67         /* Protected by cmd_lists.cmd_list_lock */
68         struct list_head ready_cmd_list;
69
70         /* Protected by dev_rwsem or don't need any protection */
71         unsigned int blocking:1;
72         unsigned int cleanup_done:1;
73         unsigned int tst:3;
74         unsigned int queue_alg:4;
75         unsigned int tas:1;
76         unsigned int swp:1;
77         unsigned int has_own_order_mgmt:1;
78
79         int (*generic_parse)(struct scst_cmd *cmd,
80                 int (*get_block)(struct scst_cmd *cmd));
81
82         int block;
83         int def_block;
84
85         struct scst_mem_lim udev_mem_lim;
86         struct sgv_pool *pool;
87
88         uint8_t parse_type;
89         uint8_t on_free_cmd_type;
90         uint8_t memory_reuse_type;
91         uint8_t partial_transfers_type;
92         uint32_t partial_len;
93
94         struct scst_dev_type devtype;
95
96         /* Both protected by cmd_lists.cmd_list_lock */
97         unsigned int handle_counter;
98         struct list_head ucmd_hash[1 << DEV_USER_CMD_HASH_ORDER];
99
100         struct scst_device *sdev;
101
102         int virt_id;
103         struct list_head dev_list_entry;
104         char name[SCST_MAX_NAME];
105
106         struct list_head cleanup_list_entry;
107         /* ToDo: make it on-stack */
108         struct completion cleanup_cmpl;
109 };
110
111 /* Most fields are unprotected, since only one thread at a time can access them */
112 struct scst_user_cmd {
113         struct scst_cmd *cmd;
114         struct scst_user_dev *dev;
115
116         atomic_t ucmd_ref;
117
118         unsigned int buff_cached:1;
119         unsigned int buf_dirty:1;
120         unsigned int background_exec:1;
121         unsigned int aborted:1;
122
123         struct scst_user_cmd *buf_ucmd;
124
125         int cur_data_page;
126         int num_data_pages;
127         int first_page_offset;
128         unsigned long ubuff;
129         struct page **data_pages;
130         struct sgv_pool_obj *sgv;
131
132         /*
133          * Special flags, which can be accessed asynchronously (hence "long").
134          * Protected by cmd_lists.cmd_list_lock.
135          */
136         unsigned long sent_to_user:1;
137         unsigned long jammed:1;
138         unsigned long this_state_unjammed:1;
139         unsigned long seen_by_user:1; /* here only as a small optimization */
140
141         unsigned int state;
142
143         struct list_head ready_cmd_list_entry;
144
145         unsigned int h;
146         struct list_head hash_list_entry;
147
148         int user_cmd_payload_len;
149         struct scst_user_get_cmd user_cmd;
150
151         /* cmpl used only by ATTACH_SESS, mcmd used only by TM */
152         union {
153                 struct completion *cmpl;
154                 struct scst_mgmt_cmd *mcmd;
155         };
156         int result;
157 };
158
159 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
160         gfp_t gfp_mask);
161 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);
162
163 static int dev_user_parse(struct scst_cmd *cmd);
164 static int dev_user_exec(struct scst_cmd *cmd);
165 static void dev_user_on_free_cmd(struct scst_cmd *cmd);
166 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
167         struct scst_tgt_dev *tgt_dev);
168
169 static int dev_user_disk_done(struct scst_cmd *cmd);
170 static int dev_user_tape_done(struct scst_cmd *cmd);
171
172 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
173         gfp_t gfp_mask, void *priv);
174 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
175                                      void *priv);
176
177 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
178
179 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
180         unsigned long *flags);
181
182 static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd);
183 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
184         int status);
185 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
186 static int dev_user_register_dev(struct file *file,
187         const struct scst_user_dev_desc *dev_desc);
188 static int __dev_user_set_opt(struct scst_user_dev *dev,
189         const struct scst_user_opt *opt);
190 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
191 static int dev_user_get_opt(struct file *file, void __user *arg);
192
193 static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
194 static long dev_user_ioctl(struct file *file, unsigned int cmd,
195         unsigned long arg);
196 static int dev_user_release(struct inode *inode, struct file *file);
197
198 /** Data **/
199
200 static struct kmem_cache *user_cmd_cachep;
201 static struct kmem_cache *user_get_cmd_cachep;
202
203 static DEFINE_MUTEX(dev_priv_mutex);
204
205 static struct file_operations dev_user_fops = {
206         .poll           = dev_user_poll,
207         .unlocked_ioctl = dev_user_ioctl,
208 #ifdef CONFIG_COMPAT
209         .compat_ioctl   = dev_user_ioctl,
210 #endif
211         .release        = dev_user_release,
212 };
213
214 static struct class *dev_user_sysfs_class;
215
216 static DEFINE_SPINLOCK(dev_list_lock);
217 static LIST_HEAD(dev_list);
218
219 static DEFINE_SPINLOCK(cleanup_lock);
220 static LIST_HEAD(cleanup_list);
221 static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
222 static struct task_struct *cleanup_thread;
223
224 /*
225  * Returns true if this command is being destroyed and must be skipped.
226  * Must be called under cmd_lists.cmd_list_lock with IRQs off.
227  */
228 static inline bool ucmd_get_check(struct scst_user_cmd *ucmd)
229 {
230         int r = atomic_inc_return(&ucmd->ucmd_ref);
231         bool res;
232         if (unlikely(r == 1)) {
233                 TRACE_DBG("ucmd %p is being destroyed", ucmd);
234                 atomic_dec(&ucmd->ucmd_ref);
235                 res = true;
236                 /*
237                  * Necessary code is serialized by cmd_list_lock in
238                  * cmd_remove_hash()
239                  */
240         } else {
241                 TRACE_DBG("ucmd %p, new ref_cnt %d", ucmd,
242                         atomic_read(&ucmd->ucmd_ref));
243                 res = false;
244         }
245         return res;
246 }
247
248 static inline void __ucmd_get(struct scst_user_cmd *ucmd, bool barrier)
249 {
250         TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
251         atomic_inc(&ucmd->ucmd_ref);
252         if (barrier)
253                 smp_mb__after_atomic_inc();
254 }
255
256 static inline void ucmd_get_ordered(struct scst_user_cmd *ucmd)
257 {
258         __ucmd_get(ucmd, true);
259 }
260
261 static inline void ucmd_get(struct scst_user_cmd *ucmd)
262 {
263         __ucmd_get(ucmd, false);
264 }
265
266 /* Must not be called under cmd_list_lock!! */
267 static inline void ucmd_put(struct scst_user_cmd *ucmd)
268 {
269         TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
270
271         EXTRACHECKS_BUG_ON(atomic_read(&ucmd->ucmd_ref) == 0);
272
273         if (atomic_dec_and_test(&ucmd->ucmd_ref))
274                 dev_user_free_ucmd(ucmd);
275 }
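/*
 * Reference-counting sketch (derived from the surrounding code, not a formal
 * contract): dev_user_alloc_ucmd() initializes ucmd_ref to 1, every
 * ucmd_get()/ucmd_get_ordered() adds a reference, and the final ucmd_put()
 * frees the command via dev_user_free_ucmd(). This is why ucmd_get_check()
 * treats an increment that returns 1 as "the last reference is already gone",
 * i.e. the command is being destroyed and must be skipped by the caller.
 */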
276
277 static inline int calc_num_pg(unsigned long buf, int len)
278 {
279         len += buf & ~PAGE_MASK;
280         return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
281 }
282
283 static inline int is_need_offs_page(unsigned long buf, int len)
284 {
285         return ((buf & ~PAGE_MASK) != 0) &&
286                 ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
287 }
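/*
 * Worked example for the two helpers above, assuming 4 KiB pages: with
 * buf = 0x1ff0 and len = 0x30, calc_num_pg() first adds the in-page offset
 * (0xff0) to len, giving 0x1020, i.e. one full page plus a remainder, so it
 * returns 2. is_need_offs_page() returns true for the same values, because
 * the buffer does not start on a page boundary and its last byte falls on a
 * different page than its first byte.
 */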
288
289 static void __dev_user_not_reg(void)
290 {
291         PRINT_ERROR("%s", "Device not registered");
292         return;
293 }
294
295 static inline int dev_user_check_reg(struct scst_user_dev *dev)
296 {
297         if (dev == NULL) {
298                 __dev_user_not_reg();
299                 return -EINVAL;
300         }
301         return 0;
302 }
303
304 static inline int scst_user_cmd_hashfn(int h)
305 {
306         return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
307 }
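/*
 * Illustration with hypothetical handle values: DEV_USER_CMD_HASH_ORDER == 6
 * gives 64 buckets, so handles 5, 69 and 133 all hash to bucket 5 (h & 63).
 * cmd_insert_hash() below therefore keeps incrementing handle_counter until
 * it finds a handle that is not already present in its bucket.
 */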
308
309 static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
310         unsigned int h)
311 {
312         struct list_head *head;
313         struct scst_user_cmd *ucmd;
314
315         head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
316         list_for_each_entry(ucmd, head, hash_list_entry) {
317                 if (ucmd->h == h) {
318                         TRACE_DBG("Found ucmd %p", ucmd);
319                         return ucmd;
320                 }
321         }
322         return NULL;
323 }
324
325 static void cmd_insert_hash(struct scst_user_cmd *ucmd)
326 {
327         struct list_head *head;
328         struct scst_user_dev *dev = ucmd->dev;
329         struct scst_user_cmd *u;
330         unsigned long flags;
331
332         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
333         do {
334                 ucmd->h = dev->handle_counter++;
335                 u = __ucmd_find_hash(dev, ucmd->h);
336         } while (u != NULL);
337         head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
338         list_add_tail(&ucmd->hash_list_entry, head);
339         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
340
341         TRACE_DBG("Inserted ucmd %p, h=%d (dev %s)", ucmd, ucmd->h, dev->name);
342         return;
343 }
344
345 static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
346 {
347         unsigned long flags;
348
349         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
350         list_del(&ucmd->hash_list_entry);
351         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
352
353         TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
354         return;
355 }
356
357 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
358 {
359         TRACE_ENTRY();
360
361         TRACE_MEM("Freeing ucmd %p", ucmd);
362
363         cmd_remove_hash(ucmd);
364         EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);
365
366         kmem_cache_free(user_cmd_cachep, ucmd);
367
368         TRACE_EXIT();
369         return;
370 }
371
372 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
373         gfp_t gfp_mask, void *priv)
374 {
375         struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
376         int offset = 0;
377
378         TRACE_ENTRY();
379
380         /* *sg is supposed to be zeroed */
381
382         TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
383                 ucmd->ubuff, ucmd->cur_data_page);
384
385         if (ucmd->cur_data_page == 0) {
386                 TRACE_MEM("ucmd->first_page_offset %d",
387                         ucmd->first_page_offset);
388                 offset = ucmd->first_page_offset;
389                 ucmd_get(ucmd);
390         }
391
392         if (ucmd->cur_data_page >= ucmd->num_data_pages)
393                 goto out;
394
395         sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
396                 PAGE_SIZE - offset, offset);
397         ucmd->cur_data_page++;
398
399         TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
400                 sg->offset);
401         TRACE_BUFFER("Page data", sg_virt(sg), sg->length);
402
403 out:
404         TRACE_EXIT();
405         return sg_page(sg);
406 }
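/*
 * Note (an inference, not stated in this excerpt): dev_user_alloc_pages() and
 * dev_user_free_sg_entries() look like the sgv_pool allocation callbacks for
 * this device's pool, which is set up elsewhere in this file. Instead of
 * allocating fresh pages, the allocator hands out the already-mapped
 * user-space pages from ucmd->data_pages one sg entry at a time, taking an
 * extra ucmd reference on the first page; that reference is dropped again on
 * the free path (see __dev_user_free_sg_entries()).
 */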
407
408 static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
409 {
410         TRACE_ENTRY();
411
412         TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
413                 ucmd, ucmd->h, ucmd->ubuff);
414
415         ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.on_cached_mem_free);
416         ucmd->user_cmd.cmd_h = ucmd->h;
417         ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
418         ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;
419
420         ucmd->state = UCMD_STATE_ON_CACHE_FREEING;
421
422         dev_user_add_to_ready(ucmd);
423
424         TRACE_EXIT();
425         return;
426 }
427
428 static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
429 {
430         int i;
431
432         TRACE_ENTRY();
433
434         TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
435                 ucmd->ubuff, ucmd->num_data_pages);
436
437         for (i = 0; i < ucmd->num_data_pages; i++) {
438                 struct page *page = ucmd->data_pages[i];
439
440                 if (ucmd->buf_dirty)
441                         SetPageDirty(page);
442
443                 page_cache_release(page);
444         }
445
446         kfree(ucmd->data_pages);
447         ucmd->data_pages = NULL;
448
449         TRACE_EXIT();
450         return;
451 }
452
453 static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
454 {
455         TRACE_ENTRY();
456
457         sBUG_ON(ucmd->data_pages == NULL);
458
459         TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
460                 ucmd, ucmd->ubuff, ucmd->buff_cached);
461
462         dev_user_unmap_buf(ucmd);
463
464         if (ucmd->buff_cached)
465                 dev_user_on_cached_mem_free(ucmd);
466         else
467                 ucmd_put(ucmd);
468
469         TRACE_EXIT();
470         return;
471 }
472
473 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
474         void *priv)
475 {
476         struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
477
478         TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
479                 sg_count, ucmd);
480
481         __dev_user_free_sg_entries(ucmd);
482
483         return;
484 }
485
486 static inline int is_buff_cached(struct scst_user_cmd *ucmd)
487 {
488         int mem_reuse_type = ucmd->dev->memory_reuse_type;
489
490         if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
491             ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
492              (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
493             ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
494              (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE)))
495                 return 1;
496         else
497                 return 0;
498 }
499
500 /*
501  * Returns 0 on success, <0 on fatal failure, >0 if pages are still needed.
502  * Unmaps the buffer, if needed, in case of error.
503  */
504 static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
505 {
506         int res = 0;
507         struct scst_cmd *cmd = ucmd->cmd;
508         struct scst_user_dev *dev = ucmd->dev;
509         gfp_t gfp_mask;
510         int flags = 0;
511         int bufflen = cmd->bufflen;
512         int last_len = 0;
513
514         TRACE_ENTRY();
515
516         sBUG_ON(bufflen == 0);
517
518         gfp_mask = __GFP_NOWARN;
519         gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
520
521         if (cached_buff) {
522                 flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
523                 if (ucmd->ubuff == 0)
524                         flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
525         } else {
526                 TRACE_MEM("%s", "Not cached buff");
527                 flags |= SCST_POOL_ALLOC_NO_CACHED;
528                 if (ucmd->ubuff == 0) {
529                         res = 1;
530                         goto out;
531                 }
532                 bufflen += ucmd->first_page_offset;
533                 if (is_need_offs_page(ucmd->ubuff, cmd->bufflen))
534                         last_len = bufflen & ~PAGE_MASK;
535                 else
536                         last_len = cmd->bufflen & ~PAGE_MASK;
537                 if (last_len == 0)
538                         last_len = PAGE_SIZE;
539         }
540         ucmd->buff_cached = cached_buff;
541
542         cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
543                         &cmd->sg_cnt, &ucmd->sgv, &dev->udev_mem_lim, ucmd);
544         if (cmd->sg != NULL) {
545                 struct scst_user_cmd *buf_ucmd =
546                         (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
547
548                 TRACE_MEM("Buf ucmd %p", buf_ucmd);
549
550                 ucmd->ubuff = buf_ucmd->ubuff;
551                 ucmd->buf_ucmd = buf_ucmd;
552
553                 EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
554                                    (ucmd != buf_ucmd));
555
556                 if (last_len != 0) {
557                         /* We don't use clustering, so the assignment is safe */
558                         cmd->sg[cmd->sg_cnt-1].length = last_len;
559                 }
560
561                 TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
562                         "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff,
563                         last_len, cmd->sg[cmd->sg_cnt-1].length);
564
565                 if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
566                         static int ll;
567                         if (ll < 10) {
568                                 PRINT_INFO("Unable to complete command due to "
569                                         "SG IO count limitation (requested %d, "
570                                         "available %d, tgt lim %d)",
571                                         cmd->sg_cnt,
572                                         cmd->tgt_dev->max_sg_cnt,
573                                         cmd->tgt->sg_tablesize);
574                                 ll++;
575                         }
576                         cmd->sg = NULL;
577                         /* sgv will be freed in dev_user_free_sgv() */
578                         res = -1;
579                 }
580         } else {
581                 TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
582                         "sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
583                         ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
584                 if (unlikely(cmd->sg_cnt == 0)) {
585                         TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
586                         sBUG_ON(ucmd->sgv != NULL);
587                         res = -1;
588                 } else {
589                         switch (ucmd->state) {
590                         case UCMD_STATE_BUF_ALLOCING:
591                                 res = 1;
592                                 break;
593                         case UCMD_STATE_EXECING:
594                                 res = -1;
595                                 break;
596                         default:
597                                 sBUG();
598                                 break;
599                         }
600                 }
601         }
602
603 out:
604         TRACE_EXIT_RES(res);
605         return res;
606 }
607
608 static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
609 {
610         int rc, res = SCST_CMD_STATE_DEFAULT;
611         struct scst_cmd *cmd = ucmd->cmd;
612
613         TRACE_ENTRY();
614
615         ucmd->state = UCMD_STATE_BUF_ALLOCING;
616         cmd->dh_data_buf_alloced = 1;
617
618         rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
619         if (rc == 0)
620                 goto out;
621         else if (rc < 0) {
622                 scst_set_busy(cmd);
623                 res = scst_get_cmd_abnormal_done_state(cmd);
624                 goto out;
625         }
626
627         if ((cmd->data_direction != SCST_DATA_WRITE) &&
628             !scst_is_cmd_local(cmd)) {
629                 TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
630                 goto out;
631         }
632
633         ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.alloc_cmd);
634         ucmd->user_cmd.cmd_h = ucmd->h;
635         ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
636         ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
637         memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
638                 min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb)));
639         ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
640         ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
641                 (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
642         ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
643         ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
644         ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;
645
646         dev_user_add_to_ready(ucmd);
647
648         res = SCST_CMD_STATE_STOP;
649
650 out:
651         TRACE_EXIT_RES(res);
652         return res;
653 }
654
655 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
656         gfp_t gfp_mask)
657 {
658         struct scst_user_cmd *ucmd = NULL;
659
660         TRACE_ENTRY();
661
662 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
663         ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
664         if (ucmd != NULL)
665                 memset(ucmd, 0, sizeof(*ucmd));
666 #else
667         ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
668 #endif
669         if (unlikely(ucmd == NULL)) {
670                 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
671                         "user cmd (gfp_mask %x)", gfp_mask);
672                 goto out;
673         }
674         ucmd->dev = dev;
675         atomic_set(&ucmd->ucmd_ref, 1);
676
677         cmd_insert_hash(ucmd);
678
679         TRACE_MEM("ucmd %p allocated", ucmd);
680
681 out:
682         TRACE_EXIT_HRES((unsigned long)ucmd);
683         return ucmd;
684 }
685
686 static int dev_user_get_block(struct scst_cmd *cmd)
687 {
688         struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
689         /*
690          * No need for locks here, since *_detach() cannot be
691          * called while there are outstanding commands.
692          */
693         TRACE_EXIT_RES(dev->block);
694         return dev->block;
695 }
696
697 static int dev_user_parse(struct scst_cmd *cmd)
698 {
699         int rc, res = SCST_CMD_STATE_DEFAULT;
700         struct scst_user_cmd *ucmd;
701         int atomic = scst_cmd_atomic(cmd);
702         struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
703         gfp_t gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
704
705         TRACE_ENTRY();
706
707         if (cmd->dh_priv == NULL) {
708                 ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
709                 if (unlikely(ucmd == NULL)) {
710                         if (atomic) {
711                                 res = SCST_CMD_STATE_NEED_THREAD_CTX;
712                                 goto out;
713                         } else {
714                                 scst_set_busy(cmd);
715                                 goto out_error;
716                         }
717                 }
718                 ucmd->cmd = cmd;
719                 cmd->dh_priv = ucmd;
720         } else {
721                 ucmd = (struct scst_user_cmd *)cmd->dh_priv;
722                 TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
723         }
724
725         TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);
726
727         if (ucmd->state != UCMD_STATE_NEW)
728                 goto alloc;
729
730         switch (dev->parse_type) {
731         case SCST_USER_PARSE_STANDARD:
732                 TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
733                 rc = dev->generic_parse(cmd, dev_user_get_block);
734                 if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
735                         goto out_invalid;
736                 break;
737
738         case SCST_USER_PARSE_EXCEPTION:
739                 TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
740                 rc = dev->generic_parse(cmd, dev_user_get_block);
741                 if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
742                         break;
743                 else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
744                         TRACE_MEM("Restarting PARSE to thread context "
745                                 "(ucmd %p)", ucmd);
746                         res = SCST_CMD_STATE_NEED_THREAD_CTX;
747                         goto out;
748                 }
749                 /* else fall through */
750
751         case SCST_USER_PARSE_CALL:
752                 TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
753                         "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
754                 ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.parse_cmd);
755                 ucmd->user_cmd.cmd_h = ucmd->h;
756                 ucmd->user_cmd.subcode = SCST_USER_PARSE;
757                 ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
758                 memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
759                         min(sizeof(ucmd->user_cmd.parse_cmd.cdb),
760                             sizeof(cmd->cdb)));
761                 ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
762                 ucmd->user_cmd.parse_cmd.timeout = cmd->timeout / HZ;
763                 ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
764                 ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
765                 ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
766                 ucmd->user_cmd.parse_cmd.expected_values_set =
767                                         cmd->expected_values_set;
768                 ucmd->user_cmd.parse_cmd.expected_data_direction =
769                                         cmd->expected_data_direction;
770                 ucmd->user_cmd.parse_cmd.expected_transfer_len =
771                                         cmd->expected_transfer_len;
772                 ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
773                 ucmd->state = UCMD_STATE_PARSING;
774                 dev_user_add_to_ready(ucmd);
775                 res = SCST_CMD_STATE_STOP;
776                 goto out;
777
778         default:
779                 sBUG();
780                 goto out;
781         }
782
783 alloc:
784         if (cmd->data_direction != SCST_DATA_NONE)
785                 res = dev_user_alloc_space(ucmd);
786
787 out:
788         TRACE_EXIT_RES(res);
789         return res;
790
791 out_invalid:
792         PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
793                 cmd->op_flags & SCST_INFO_INVALID);
794         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
795
796 out_error:
797         res = scst_get_cmd_abnormal_done_state(cmd);
798         goto out;
799 }
800
801 static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
802 {
803         struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
804         unsigned long start = buf_ucmd->ubuff;
805         int i, bufflen = ucmd->cmd->bufflen;
806
807         TRACE_ENTRY();
808
809         if (start == 0)
810                 goto out;
811
812         /*
813          * Possibly, flushing all the pages from ucmd->cmd->sg could be
814          * faster, since it should be cache hot, while ucmd->buf_ucmd and
815          * buf_ucmd->data_pages are cache cold. On the other hand,
816          * sizeof(buf_ucmd->data_pages[0]) is considerably smaller than
817          * sizeof(ucmd->cmd->sg[0]), so for big buffers walking the
818          * data_pages array can lead to fewer cache misses. Real measurements
819          * are needed. ToDo.
820          */
821
822         for (i = 0; (bufflen > 0) && (i < buf_ucmd->num_data_pages); i++) {
823                 struct page *page;
824                 page = buf_ucmd->data_pages[i];
825 #ifdef ARCH_HAS_FLUSH_ANON_PAGE
826                 struct vm_area_struct *vma = find_vma(current->mm, start);
827                 if (vma != NULL)
828                         flush_anon_page(vma, page, start);
829 #endif
830                 flush_dcache_page(page);
831                 start += PAGE_SIZE;
832                 bufflen -= PAGE_SIZE;
833         }
834
835 out:
836         TRACE_EXIT();
837         return;
838 }
839
840 static int dev_user_exec(struct scst_cmd *cmd)
841 {
842         struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
843         int res = SCST_EXEC_COMPLETED;
844
845         TRACE_ENTRY();
846
847 #if 0 /* We set exec_atomic to 0 to let the SCST core know that we need a
848        * thread context to complete the necessary actions, but all we are going
849        * to do in this function is, in fact, atomic, so let's skip this check.
850        */
851         if (scst_cmd_atomic(cmd)) {
852                 TRACE_DBG("%s", "User exec() can not be called in atomic "
853                         "context, rescheduling to the thread");
854                 res = SCST_EXEC_NEED_THREAD;
855                 goto out;
856         }
857 #endif
858
859         TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
860                 "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
861                 cmd->bufflen, cmd->data_len, ucmd->ubuff);
862
863         if (cmd->data_direction == SCST_DATA_WRITE)
864                 dev_user_flush_dcache(ucmd);
865
866         ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.exec_cmd);
867         ucmd->user_cmd.cmd_h = ucmd->h;
868         ucmd->user_cmd.subcode = SCST_USER_EXEC;
869         ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
870         memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
871                 min(sizeof(ucmd->user_cmd.exec_cmd.cdb),
872                     sizeof(cmd->cdb)));
873         ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
874         ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
875         ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
876         ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
877         if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
878                 ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
879                         (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
880         }
881         ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
882         ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
883         ucmd->user_cmd.exec_cmd.partial = 0;
884         ucmd->user_cmd.exec_cmd.timeout = cmd->timeout / HZ;
885         ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;
886
887         ucmd->state = UCMD_STATE_EXECING;
888
889         dev_user_add_to_ready(ucmd);
890
891         TRACE_EXIT_RES(res);
892         return res;
893 }
894
895 static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
896 {
897         if (ucmd->sgv != NULL) {
898                 sgv_pool_free(ucmd->sgv, &ucmd->dev->udev_mem_lim);
899                 ucmd->sgv = NULL;
900         } else if (ucmd->data_pages != NULL) {
901                 /* We mapped pages, but for some reason didn't allocate them */
902                 ucmd_get(ucmd);
903                 __dev_user_free_sg_entries(ucmd);
904         }
905         return;
906 }
907
908 static void dev_user_on_free_cmd(struct scst_cmd *cmd)
909 {
910         struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
911
912         TRACE_ENTRY();
913
914         if (unlikely(ucmd == NULL))
915                 goto out;
916
917         TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
918                 ucmd->buff_cached, ucmd->ubuff);
919
920         ucmd->cmd = NULL;
921         if (cmd->data_direction == SCST_DATA_WRITE && ucmd->buf_ucmd != NULL)
922                 ucmd->buf_ucmd->buf_dirty = 1;
923
924         if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
925                 ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
926                 /* The state assignment must be before freeing sgv! */
927                 goto out_reply;
928         }
929
930         if (unlikely(!ucmd->seen_by_user)) {
931                 TRACE_MGMT_DBG("Not seen by user ucmd %p", ucmd);
932                 goto out_reply;
933         }
934
935         ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.on_free_cmd);
936         ucmd->user_cmd.cmd_h = ucmd->h;
937         ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;
938         ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
939         ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
940         ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
941         ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
942         ucmd->user_cmd.on_free_cmd.status = cmd->status;
943         ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;
944
945         ucmd->state = UCMD_STATE_ON_FREEING;
946
947         dev_user_add_to_ready(ucmd);
948
949 out:
950         TRACE_EXIT();
951         return;
952
953 out_reply:
954         dev_user_process_reply_on_free(ucmd);
955         goto out;
956 }
957
958 static void dev_user_set_block(struct scst_cmd *cmd, int block)
959 {
960         struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
961         /*
962          * No need for locks here, since *_detach() cannot be
963          * called while there are outstanding commands.
964          */
965         TRACE_DBG("dev %p, new block %d", dev, block);
966         if (block != 0)
967                 dev->block = block;
968         else
969                 dev->block = dev->def_block;
970         return;
971 }
972
973 static int dev_user_disk_done(struct scst_cmd *cmd)
974 {
975         int res = SCST_CMD_STATE_DEFAULT;
976
977         TRACE_ENTRY();
978
979         res = scst_block_generic_dev_done(cmd, dev_user_set_block);
980
981         TRACE_EXIT_RES(res);
982         return res;
983 }
984
985 static int dev_user_tape_done(struct scst_cmd *cmd)
986 {
987         int res = SCST_CMD_STATE_DEFAULT;
988
989         TRACE_ENTRY();
990
991         res = scst_tape_generic_dev_done(cmd, dev_user_set_block);
992
993         TRACE_EXIT_RES(res);
994         return res;
995 }
996
997 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
998 {
999         struct scst_user_dev *dev = ucmd->dev;
1000         unsigned long flags;
1001         int do_wake;
1002
1003         TRACE_ENTRY();
1004
1005         do_wake = (in_interrupt() ||
1006                    (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
1007         if (ucmd->cmd)
1008                 do_wake |= ucmd->cmd->preprocessing_only;
1009
1010         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
1011
1012         ucmd->this_state_unjammed = 0;
1013
1014         if ((ucmd->state == UCMD_STATE_PARSING) ||
1015             (ucmd->state == UCMD_STATE_BUF_ALLOCING)) {
1016                 /*
1017                  * If we don't put such commands at the head of the queue, then
1018                  * under high load we might delay threads waiting for memory
1019                  * allocations for too long and start losing NOPs, which would
1020                  * lead remote initiators to consider us unresponsive and stuck
1021                  * => broken connections, etc. If none of our commands completes
1022                  * within the NOP timeout to let the head commands go, then we
1023                  * are really overloaded and/or stuck.
1024                  */
1025                 TRACE_DBG("Adding ucmd %p (state %d) to head of ready "
1026                         "cmd list", ucmd, ucmd->state);
1027                 list_add(&ucmd->ready_cmd_list_entry,
1028                         &dev->ready_cmd_list);
1029                 do_wake = 1;
1030         } else if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
1031                    unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
1032                    unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
1033                 TRACE_MGMT_DBG("Adding mgmt ucmd %p (state %d) to head of "
1034                         "ready cmd list", ucmd, ucmd->state);
1035                 list_add(&ucmd->ready_cmd_list_entry,
1036                         &dev->ready_cmd_list);
1037                 do_wake = 1;
1038         } else if ((ucmd->cmd != NULL) &&
1039            unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
1040                 TRACE_DBG("Adding HQ ucmd %p to head of ready cmd list", ucmd);
1041                 list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
1042         } else {
1043                 TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
1044                 list_add_tail(&ucmd->ready_cmd_list_entry,
1045                               &dev->ready_cmd_list);
1046         }
1047
1048         if (do_wake) {
1049                 TRACE_DBG("Waking up dev %p", dev);
1050                 wake_up(&dev->cmd_lists.cmd_list_waitQ);
1051         }
1052
1053         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
1054
1055         TRACE_EXIT();
1056         return;
1057 }
1058
1059 static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
1060         int num_pg)
1061 {
1062         int res = 0, rc;
1063         int i;
1064         struct task_struct *tsk = current;
1065
1066         TRACE_ENTRY();
1067
1068         if (unlikely(ubuff == 0))
1069                 goto out_nomem;
1070
1071         sBUG_ON(ucmd->data_pages != NULL);
1072
1073         ucmd->num_data_pages = num_pg;
1074
1075         ucmd->data_pages =
1076                 kmalloc(sizeof(*ucmd->data_pages) * ucmd->num_data_pages,
1077                           GFP_KERNEL);
1078         if (ucmd->data_pages == NULL) {
1079                 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
1080                         "(num_data_pages=%d)", ucmd->num_data_pages);
1081                 res = -ENOMEM;
1082                 goto out_nomem;
1083         }
1084
1085         TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d,"
1086                 " first_page_offset %d, len %d)", ucmd, ubuff,
1087                 ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
1088                 ucmd->cmd->bufflen);
1089
1090         down_read(&tsk->mm->mmap_sem);
1091         rc = get_user_pages(tsk, tsk->mm, ubuff, ucmd->num_data_pages,
1092                 1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
1093         up_read(&tsk->mm->mmap_sem);
1094
1095         /* get_user_pages() flushes dcache */
1096
1097         if (rc < ucmd->num_data_pages)
1098                 goto out_unmap;
1099
1100         ucmd->ubuff = ubuff;
1101         ucmd->first_page_offset = (ubuff & ~PAGE_MASK);
1102
1103 out:
1104         TRACE_EXIT_RES(res);
1105         return res;
1106
1107 out_nomem:
1108         scst_set_busy(ucmd->cmd);
1109         /* fall through */
1110
1111 out_err:
1112         scst_set_cmd_abnormal_done_state(ucmd->cmd);
1113         goto out;
1114
1115 out_unmap:
1116         PRINT_ERROR("Failed to get %d user pages (rc %d)",
1117                 ucmd->num_data_pages, rc);
1118         if (rc > 0) {
1119                 for (i = 0; i < rc; i++)
1120                         page_cache_release(ucmd->data_pages[i]);
1121         }
1122         kfree(ucmd->data_pages);
1123         ucmd->data_pages = NULL;
1124         res = -EFAULT;
1125         scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1126         goto out_err;
1127 }
1128
1129 static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
1130         struct scst_user_reply_cmd *reply)
1131 {
1132         int res = 0;
1133         struct scst_cmd *cmd = ucmd->cmd;
1134
1135         TRACE_ENTRY();
1136
1137         TRACE_DBG("ucmd %p, pbuf %llx", ucmd, reply->alloc_reply.pbuf);
1138
1139         if (likely(reply->alloc_reply.pbuf != 0)) {
1140                 int pages;
1141                 if (ucmd->buff_cached) {
1142                         if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
1143                                 PRINT_ERROR("Supplied pbuf %llx isn't "
1144                                         "page aligned",
1145                                         reply->alloc_reply.pbuf);
1146                                 goto out_hwerr;
1147                         }
1148                         pages = cmd->sg_cnt;
1149                 } else
1150                         pages = calc_num_pg(reply->alloc_reply.pbuf,
1151                                             cmd->bufflen);
1152                 res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
1153         } else {
1154                 scst_set_busy(ucmd->cmd);
1155                 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1156         }
1157
1158 out_process:
1159         scst_process_active_cmd(cmd, false);
1160
1161         TRACE_EXIT_RES(res);
1162         return res;
1163
1164 out_hwerr:
1165         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1166         scst_set_cmd_abnormal_done_state(ucmd->cmd);
1167         res = -EINVAL;
1168         goto out_process;
1169 }
1170
1171 static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
1172         struct scst_user_reply_cmd *reply)
1173 {
1174         int res = 0;
1175         struct scst_user_scsi_cmd_reply_parse *preply =
1176                 &reply->parse_reply;
1177         struct scst_cmd *cmd = ucmd->cmd;
1178
1179         TRACE_ENTRY();
1180
1181         if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
1182                 goto out_inval;
1183
1184         if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
1185                      (preply->data_direction != SCST_DATA_READ) &&
1186                      (preply->data_direction != SCST_DATA_NONE)))
1187                 goto out_inval;
1188
1189         if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
1190                      (preply->bufflen == 0)))
1191                 goto out_inval;
1192
1193         if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
1194                 goto out_inval;
1195
1196         TRACE_DBG("ucmd %p, queue_type %x, data_direction %x, bufflen %d, "
1197                 "data_len %d, pbuf %llx", ucmd, preply->queue_type,
1198                 preply->data_direction, preply->bufflen, preply->data_len,
1199                 reply->alloc_reply.pbuf);
1200
1201         cmd->queue_type = preply->queue_type;
1202         cmd->data_direction = preply->data_direction;
1203         cmd->bufflen = preply->bufflen;
1204         cmd->data_len = preply->data_len;
1205
1206 out_process:
1207         scst_process_active_cmd(cmd, false);
1208
1209         TRACE_EXIT_RES(res);
1210         return res;
1211
1212 out_inval:
1213         PRINT_ERROR("Invalid parse_reply parameters (LUN %lld, op %x, cmd %p)",
1214                 (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
1215         PRINT_BUFFER("Invalid parse_reply", reply, sizeof(*reply));
1216         scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1217         scst_set_cmd_abnormal_done_state(cmd);
1218         res = -EINVAL;
1219         goto out_process;
1220 }
1221
1222 static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
1223 {
1224         int res = 0;
1225
1226         TRACE_ENTRY();
1227
1228         TRACE_DBG("ON FREE ucmd %p", ucmd);
1229
1230         dev_user_free_sgv(ucmd);
1231         ucmd_put(ucmd);
1232
1233         TRACE_EXIT_RES(res);
1234         return res;
1235 }
1236
1237 static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
1238 {
1239         int res = 0;
1240
1241         TRACE_ENTRY();
1242
1243         TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);
1244
1245         ucmd_put(ucmd);
1246
1247         TRACE_EXIT_RES(res);
1248         return res;
1249 }
1250
1251 static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
1252         struct scst_user_reply_cmd *reply)
1253 {
1254         int res = 0;
1255         struct scst_user_scsi_cmd_reply_exec *ereply =
1256                 &reply->exec_reply;
1257         struct scst_cmd *cmd = ucmd->cmd;
1258
1259         TRACE_ENTRY();
1260
1261         if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
1262                 if (ucmd->background_exec) {
1263                         TRACE_DBG("Background ucmd %p finished", ucmd);
1264                         ucmd_put(ucmd);
1265                         goto out;
1266                 }
1267                 if (unlikely(ereply->resp_data_len > cmd->bufflen))
1268                         goto out_inval;
1269                 if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
1270                              (ereply->resp_data_len != 0)))
1271                         goto out_inval;
1272         } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
1273                 if (unlikely(ucmd->background_exec))
1274                         goto out_inval;
1275                 if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
1276                              (cmd->resp_data_len != 0)))
1277                         goto out_inval;
1278                 /*
1279                  * background_exec assignment must be after ucmd get.
1280                  * Otherwise, due to reorder, in dev_user_process_reply()
1281                  * it is possible that ucmd is destroyed before it "got" here.
1282                  */
1283                 ucmd_get_ordered(ucmd);
1284                 ucmd->background_exec = 1;
1285                 TRACE_DBG("Background ucmd %p", ucmd);
1286                 goto out_compl;
1287         } else
1288                 goto out_inval;
1289
1290         TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
1291                 ereply->status, ereply->resp_data_len);
1292
1293          if (ereply->resp_data_len != 0) {
1294                 if (ucmd->ubuff == 0) {
1295                         int pages, rc;
1296                         if (unlikely(ereply->pbuf == 0))
1297                                 goto out_busy;
1298                         if (ucmd->buff_cached) {
1299                                 if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
1300                                         PRINT_ERROR("Supplied pbuf %llx isn't "
1301                                                 "page aligned", ereply->pbuf);
1302                                         goto out_hwerr;
1303                                 }
1304                                 pages = cmd->sg_cnt;
1305                         } else
1306                                 pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
1307                         rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
1308                         if ((rc != 0) || (ucmd->ubuff == 0))
1309                                 goto out_compl;
1310
1311                         rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
1312                         if (unlikely(rc != 0))
1313                                 goto out_busy;
1314                 } else
1315                         dev_user_flush_dcache(ucmd);
1316                 cmd->may_need_dma_sync = 1;
1317                 scst_set_resp_data_len(cmd, ereply->resp_data_len);
1318         } else if (cmd->resp_data_len != ereply->resp_data_len) {
1319                 if (ucmd->ubuff == 0)
1320                         cmd->resp_data_len = ereply->resp_data_len;
1321                 else
1322                         scst_set_resp_data_len(cmd, ereply->resp_data_len);
1323         }
1324
1325         cmd->status = ereply->status;
1326         if (ereply->sense_len != 0) {
1327                 res = scst_alloc_sense(cmd, 0);
1328                 if (res != 0)
1329                         goto out_compl;
1330                 res = copy_from_user(cmd->sense,
1331                         (void __user *)(unsigned long)ereply->psense_buffer,
1332                         min((unsigned int)SCST_SENSE_BUFFERSIZE,
1333                                 (unsigned int)ereply->sense_len));
1334                 if (res != 0) {
1335                         PRINT_ERROR("%s", "Unable to get sense data");
1336                         goto out_hwerr_res_set;
1337                 }
1338         }
1339
1340 out_compl:
1341         cmd->completed = 1;
1342         cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_DIRECT);
1343         /* !! At this point cmd can be already freed !! */
1344
1345 out:
1346         TRACE_EXIT_RES(res);
1347         return res;
1348
1349 out_inval:
1350         PRINT_ERROR("Invalid exec_reply parameters (LUN %lld, op %x, cmd %p)",
1351                 (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
1352         PRINT_BUFFER("Invalid exec_reply", reply, sizeof(*reply));
1353
1354 out_hwerr:
1355         res = -EINVAL;
1356
1357 out_hwerr_res_set:
1358         if (ucmd->background_exec) {
1359                 ucmd_put(ucmd);
1360                 goto out;
1361         } else {
1362                 scst_set_cmd_error(cmd,
1363                                    SCST_LOAD_SENSE(scst_sense_hardw_error));
1364                 goto out_compl;
1365         }
1366
1367 out_busy:
1368         scst_set_busy(cmd);
1369         goto out_compl;
1370 }
1371
1372 static int dev_user_process_reply(struct scst_user_dev *dev,
1373         struct scst_user_reply_cmd *reply)
1374 {
1375         int res = 0;
1376         struct scst_user_cmd *ucmd;
1377         int state;
1378
1379         TRACE_ENTRY();
1380
1381         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1382
1383         ucmd = __ucmd_find_hash(dev, reply->cmd_h);
1384         if (unlikely(ucmd == NULL)) {
1385                 TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
1386                 res = -ESRCH;
1387                 goto out_unlock;
1388         }
1389
1390         if (unlikely(ucmd_get_check(ucmd))) {
1391                 TRACE_MGMT_DBG("Found being destroyed cmd_h %d", reply->cmd_h);
1392                 res = -ESRCH;
1393                 goto out_unlock;
1394         }
1395
1396         /* To synchronize with dev_user_process_reply_exec(). See the comment there. */
1397         smp_mb();
1398         if (ucmd->background_exec) {
1399                 state = UCMD_STATE_EXECING;
1400                 goto unlock_process;
1401         }
1402
1403         if (unlikely(ucmd->this_state_unjammed)) {
1404                 TRACE_MGMT_DBG("Reply on unjammed ucmd %p, ignoring",
1405                         ucmd);
1406                 goto out_unlock_put;
1407         }
1408
1409         if (unlikely(!ucmd->sent_to_user)) {
1410                 TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
1411                         "state %x", ucmd, ucmd->state);
1412                 res = -EINVAL;
1413                 goto out_unlock_put;
1414         }
1415
1416         if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
1417                 goto out_wrong_state;
1418
1419         if (unlikely(_IOC_NR(reply->subcode) != ucmd->state))
1420                 goto out_wrong_state;
1421
1422         state = ucmd->state;
1423         ucmd->sent_to_user = 0;
1424
1425 unlock_process:
1426         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1427
1428         switch (state) {
1429         case UCMD_STATE_PARSING:
1430                 res = dev_user_process_reply_parse(ucmd, reply);
1431                 break;
1432
1433         case UCMD_STATE_BUF_ALLOCING:
1434                 res = dev_user_process_reply_alloc(ucmd, reply);
1435                 break;
1436
1437         case UCMD_STATE_EXECING:
1438                 res = dev_user_process_reply_exec(ucmd, reply);
1439                 break;
1440
1441         case UCMD_STATE_ON_FREEING:
1442                 res = dev_user_process_reply_on_free(ucmd);
1443                 break;
1444
1445         case UCMD_STATE_ON_CACHE_FREEING:
1446                 res = dev_user_process_reply_on_cache_free(ucmd);
1447                 break;
1448
1449         case UCMD_STATE_TM_EXECING:
1450                 res = dev_user_process_reply_tm_exec(ucmd, reply->result);
1451                 break;
1452
1453         case UCMD_STATE_ATTACH_SESS:
1454         case UCMD_STATE_DETACH_SESS:
1455                 res = dev_user_process_reply_sess(ucmd, reply->result);
1456                 break;
1457
1458         default:
1459                 sBUG();
1460                 break;
1461         }
1462
1463 out_put:
1464         ucmd_put(ucmd);
1465
1466 out:
1467         TRACE_EXIT_RES(res);
1468         return res;
1469
1470 out_wrong_state:
1471         PRINT_ERROR("Command's %p subcode %x doesn't match internal "
1472                 "command's state %x or reply->subcode (%x) != ucmd->subcode "
1473                 "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
1474                 reply->subcode, ucmd->user_cmd.subcode);
1475         res = -EINVAL;
1476         dev_user_unjam_cmd(ucmd, 0, NULL);
1477
1478 out_unlock_put:
1479         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1480         goto out_put;
1481
1482 out_unlock:
1483         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1484         goto out;
1485 }
1486
1487 static int dev_user_reply_cmd(struct file *file, void __user *arg)
1488 {
1489         int res = 0;
1490         struct scst_user_dev *dev;
1491         struct scst_user_reply_cmd reply;
1492
1493         TRACE_ENTRY();
1494
1495         mutex_lock(&dev_priv_mutex);
1496         dev = (struct scst_user_dev *)file->private_data;
1497         res = dev_user_check_reg(dev);
1498         if (unlikely(res != 0)) {
1499                 mutex_unlock(&dev_priv_mutex);
1500                 goto out;
1501         }
1502         down_read(&dev->dev_rwsem);
1503         mutex_unlock(&dev_priv_mutex);
1504
1505         if (unlikely(copy_from_user(&reply, arg, sizeof(reply)) != 0)) {
1506                 res = -EFAULT;
1507                 goto out_up;
1508         }
1509         TRACE_MGMT_DBG("Reply for dev %s", dev->name);
1510
1511         TRACE_BUFFER("Reply", &reply, sizeof(reply));
1512
1513         res = dev_user_process_reply(dev, &reply);
1514         if (unlikely(res < 0))
1515                 goto out_up;
1516
1517 out_up:
1518         up_read(&dev->dev_rwsem);
1519
1520 out:
1521         TRACE_EXIT_RES(res);
1522         return res;
1523 }
1524
1525 static int dev_user_process_scst_commands(struct scst_user_dev *dev)
1526         __releases(&dev->cmd_lists.cmd_list_lock)
1527         __acquires(&dev->cmd_lists.cmd_list_lock)
1528 {
1529         int res = 0;
1530
1531         TRACE_ENTRY();
1532
1533         while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
1534                 struct scst_cmd *cmd = list_entry(
1535                         dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
1536                         cmd_list_entry);
1537                 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
1538                 list_del(&cmd->cmd_list_entry);
1539                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1540                 scst_process_active_cmd(cmd, false);
1541                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1542                 res++;
1543         }
1544
1545         TRACE_EXIT_RES(res);
1546         return res;
1547 }
1548
1549 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1550 static struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
1551         __releases(&dev->cmd_lists.cmd_list_lock)
1552         __acquires(&dev->cmd_lists.cmd_list_lock)
1553 {
1554         struct scst_user_cmd *u;
1555
1556 again:
1557         u = NULL;
1558         if (!list_empty(cmd_list)) {
1559                 u = list_entry(cmd_list->next, typeof(*u),
1560                                ready_cmd_list_entry);
1561
1562                 TRACE_DBG("Found ready ucmd %p", u);
1563                 list_del(&u->ready_cmd_list_entry);
1564
1565                 EXTRACHECKS_BUG_ON(u->this_state_unjammed);
1566
1567                 if (u->cmd != NULL) {
1568                         if (u->state == UCMD_STATE_EXECING) {
1569                                 struct scst_user_dev *dev = u->dev;
1570                                 int rc;
1571
1572                                 EXTRACHECKS_BUG_ON(u->jammed);
1573
1574                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1575
1576                                 rc = scst_check_local_events(u->cmd);
1577                                 if (unlikely(rc != 0)) {
1578                                         u->cmd->scst_cmd_done(u->cmd,
1579                                                 SCST_CMD_STATE_DEFAULT,
1580                                                 SCST_CONTEXT_DIRECT);
1581                                         /*
1582                                          * !! At this point cmd & u can be !!
1583                                          * !! already freed                !!
1584                                          */
1585                                         spin_lock_irq(
1586                                                 &dev->cmd_lists.cmd_list_lock);
1587                                         goto again;
1588                                 }
1589
1590                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1591                         } else if (unlikely(test_bit(SCST_CMD_ABORTED,
1592                                         &u->cmd->cmd_flags))) {
1593                                 switch (u->state) {
1594                                 case UCMD_STATE_PARSING:
1595                                 case UCMD_STATE_BUF_ALLOCING:
1596                                         TRACE_MGMT_DBG("Aborting ucmd %p", u);
1597                                         dev_user_unjam_cmd(u, 0, NULL);
1598                                         goto again;
1599                                 case UCMD_STATE_EXECING:
1600                                         EXTRACHECKS_BUG_ON(1);
1601                                 }
1602                         }
1603                 }
1604                 u->sent_to_user = 1;
1605                 u->seen_by_user = 1;
1606         }
1607         return u;
1608 }
1609
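/* Wake-up condition for dev_user_get_next_cmd(): work, shutdown or signal */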
1610 static inline int test_cmd_lists(struct scst_user_dev *dev)
1611 {
1612         int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
1613                   !list_empty(&dev->ready_cmd_list) ||
1614                   !dev->blocking || dev->cleanup_done ||
1615                   signal_pending(current);
1616         return res;
1617 }
1618
1619 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1620 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
1621         struct scst_user_cmd **ucmd)
1622 {
1623         int res = 0;
1624         wait_queue_t wait;
1625
1626         TRACE_ENTRY();
1627
1628         init_waitqueue_entry(&wait, current);
1629
1630         while (1) {
1631                 if (!test_cmd_lists(dev)) {
1632                         add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
1633                                 &wait);
1634                         for (;;) {
1635                                 set_current_state(TASK_INTERRUPTIBLE);
1636                                 if (test_cmd_lists(dev))
1637                                         break;
1638                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1639                                 schedule();
1640                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1641                         }
1642                         set_current_state(TASK_RUNNING);
1643                         remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
1644                                 &wait);
1645                 }
1646
1647                 dev_user_process_scst_commands(dev);
1648
1649                 *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
1650                 if (*ucmd != NULL)
1651                         break;
1652
1653                 if (!dev->blocking || dev->cleanup_done) {
1654                         res = -EAGAIN;
1655                         TRACE_DBG("No ready commands, returning %d", res);
1656                         break;
1657                 }
1658
1659                 if (signal_pending(current)) {
1660                         res = -EINTR;
1661                         TRACE_DBG("Signal pending, returning %d", res);
1662                         break;
1663                 }
1664         }
1665
1666         TRACE_EXIT_RES(res);
1667         return res;
1668 }
1669
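/*
 * SCST_USER_REPLY_AND_GET_CMD ioctl handler: if preply is set, processes that
 * reply first, then waits for (or, for non-blocking devices, polls for) the
 * next ready command and copies it to user space.
 */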
1670 static int dev_user_reply_get_cmd(struct file *file, void __user *arg)
1671 {
1672         int res = 0;
1673         struct scst_user_dev *dev;
1674         struct scst_user_get_cmd *cmd;
1675         struct scst_user_reply_cmd *reply;
1676         struct scst_user_cmd *ucmd;
1677         uint64_t ureply;
1678
1679         TRACE_ENTRY();
1680
1681         mutex_lock(&dev_priv_mutex);
1682         dev = (struct scst_user_dev *)file->private_data;
1683         res = dev_user_check_reg(dev);
1684         if (unlikely(res != 0)) {
1685                 mutex_unlock(&dev_priv_mutex);
1686                 goto out;
1687         }
1688         down_read(&dev->dev_rwsem);
1689         mutex_unlock(&dev_priv_mutex);
1690
1691         /* get_user() can't be used with 64-bit values on x86_32 */
1692         res = copy_from_user(&ureply, (uint64_t __user *)
1693                 &((struct scst_user_get_cmd *)arg)->preply,
1694                 sizeof(ureply));
1695         if (unlikely(res < 0))
1696                 goto out_up;
1697
1698         TRACE_DBG("ureply %lld (dev %s)", (long long unsigned int)ureply,
1699                 dev->name);
1700
1701         cmd = kmem_cache_alloc(user_get_cmd_cachep, GFP_KERNEL);
1702         if (unlikely(cmd == NULL)) {
1703                 res = -ENOMEM;
1704                 goto out_up;
1705         }
1706
1707         if (ureply != 0) {
1708                 unsigned long u = (unsigned long)ureply;
1709                 reply = (struct scst_user_reply_cmd *)cmd;
1710                 res = copy_from_user(reply, (void __user *)u, sizeof(*reply));
1711                 if (unlikely(res < 0))
1712                         goto out_free;
1713
1714                 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1715
1716                 res = dev_user_process_reply(dev, reply);
1717                 if (unlikely(res < 0))
1718                         goto out_free;
1719         }
1720
1721         kmem_cache_free(user_get_cmd_cachep, cmd);
1722
1723         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1724         res = dev_user_get_next_cmd(dev, &ucmd);
1725         if (res == 0) {
1726                 int len;
1727                 /*
1728                  * A misbehaving user space handler can cause the ucmd to die
1729                  * immediately after we release the lock, which could lead to
1730                  * copying dead data to user space and, in turn, to a leak of
1731                  * sensitive information.
1732                  */
1733                 ucmd_get(ucmd);
1734                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1735
1736                 EXTRACHECKS_BUG_ON(ucmd->user_cmd_payload_len == 0);
1737
1738                 len = DEV_USER_HEADER_LEN + ucmd->user_cmd_payload_len;
1739                 TRACE_DBG("ucmd %p (user_cmd %p), payload_len %d (len %d)",
1740                         ucmd, &ucmd->user_cmd, ucmd->user_cmd_payload_len, len);
1741                 TRACE_BUFFER("UCMD", &ucmd->user_cmd, len);
1742                 res = copy_to_user(arg, &ucmd->user_cmd, len);
1743                 if (unlikely(res != 0)) {
1744                         /* Requeue ucmd back */
1745                         TRACE_DBG("Copy to user failed (%d), requeuing ucmd %p "
1746                                 "back to head of ready cmd list", res, ucmd);
1747                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1748                         list_add(&ucmd->ready_cmd_list_entry,
1749                                 &dev->ready_cmd_list);
1750                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1751                 }
1752 #ifdef CONFIG_SCST_EXTRACHECKS
1753                 else
1754                         ucmd->user_cmd_payload_len = 0;
1755 #endif
1756                 ucmd_put(ucmd);
1757         } else
1758                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1759
1760 out_up:
1761         up_read(&dev->dev_rwsem);
1762
1763 out:
1764         TRACE_EXIT_RES(res);
1765         return res;
1766
1767 out_free:
1768         kmem_cache_free(user_get_cmd_cachep, cmd);
1769         goto out_up;
1770 }
1771
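/*
 * Main ioctl dispatcher of the user space handler interface.
 *
 * A handler typically drives the device with a reply-and-get loop roughly
 * like the sketch below. This is an illustration only; the "/dev/scst_user"
 * node name, the reply preparation and all error handling are assumptions
 * here, see scst_user.h and the README for the authoritative interface:
 *
 *	int fd = open("/dev/scst_user", O_RDWR);
 *	struct scst_user_dev_desc desc = { ... };
 *	struct scst_user_get_cmd get;
 *	struct scst_user_reply_cmd reply;
 *
 *	ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc);
 *	get.preply = 0;		// nothing to reply to on the first pass
 *	while (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get) == 0) {
 *		// handle the command identified by get.cmd_h/get.subcode,
 *		// then fill in "reply" for it ...
 *		get.preply = (unsigned long)&reply;
 *	}
 */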
1772 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1773         unsigned long arg)
1774 {
1775         long res;
1776
1777         TRACE_ENTRY();
1778
1779         switch (cmd) {
1780         case SCST_USER_REPLY_AND_GET_CMD:
1781                 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1782                 res = dev_user_reply_get_cmd(file, (void __user *)arg);
1783                 break;
1784
1785         case SCST_USER_REPLY_CMD:
1786                 TRACE_DBG("%s", "REPLY_CMD");
1787                 res = dev_user_reply_cmd(file, (void __user *)arg);
1788                 break;
1789
1790         case SCST_USER_REGISTER_DEVICE:
1791         {
1792                 struct scst_user_dev_desc *dev_desc;
1793                 TRACE_DBG("%s", "REGISTER_DEVICE");
1794                 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1795                 if (dev_desc == NULL) {
1796                         res = -ENOMEM;
1797                         goto out;
1798                 }
1799                 res = copy_from_user(dev_desc, (void __user *)arg,
1800                                      sizeof(*dev_desc));
1801                 if (res < 0) {
1802                         kfree(dev_desc);
1803                         goto out;
1804                 }
1805                 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1806                 dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
1807                 res = dev_user_register_dev(file, dev_desc);
1808                 kfree(dev_desc);
1809                 break;
1810         }
1811
1812         case SCST_USER_SET_OPTIONS:
1813         {
1814                 struct scst_user_opt opt;
1815                 TRACE_DBG("%s", "SET_OPTIONS");
1816                 res = copy_from_user(&opt, (void __user *)arg, sizeof(opt));
1817                 if (res < 0)
1818                         goto out;
1819                 TRACE_BUFFER("opt", &opt, sizeof(opt));
1820                 res = dev_user_set_opt(file, &opt);
1821                 break;
1822         }
1823
1824         case SCST_USER_GET_OPTIONS:
1825                 TRACE_DBG("%s", "GET_OPTIONS");
1826                 res = dev_user_get_opt(file, (void __user *)arg);
1827                 break;
1828
1829         default:
1830                 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
1831                 res = -EINVAL;
1832                 goto out;
1833         }
1834
1835 out:
1836         TRACE_EXIT_RES(res);
1837         return res;
1838 }
1839
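/*
 * poll() support: reports POLLIN | POLLRDNORM when commands are waiting on
 * the ready or active lists, otherwise registers on cmd_list_waitQ and
 * rechecks.
 */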
1840 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
1841 {
1842         int res = 0;
1843         struct scst_user_dev *dev;
1844
1845         TRACE_ENTRY();
1846
1847         mutex_lock(&dev_priv_mutex);
1848         dev = (struct scst_user_dev *)file->private_data;
1849         res = dev_user_check_reg(dev);
1850         if (unlikely(res != 0)) {
1851                 mutex_unlock(&dev_priv_mutex);
1852                 goto out;
1853         }
1854         down_read(&dev->dev_rwsem);
1855         mutex_unlock(&dev_priv_mutex);
1856
1857         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1858
1859         if (!list_empty(&dev->ready_cmd_list) ||
1860             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1861                 res |= POLLIN | POLLRDNORM;
1862                 goto out_unlock;
1863         }
1864
1865         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1866
1867         TRACE_DBG("Before poll_wait() (dev %s)", dev->name);
1868         poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
1869         TRACE_DBG("After poll_wait() (dev %s)", dev->name);
1870
1871         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1872
1873         if (!list_empty(&dev->ready_cmd_list) ||
1874             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1875                 res |= POLLIN | POLLRDNORM;
1876                 goto out_unlock;
1877         }
1878
1879 out_unlock:
1880         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1881
1882         up_read(&dev->dev_rwsem);
1883
1884 out:
1885         TRACE_EXIT_HRES(res);
1886         return res;
1887 }
1888
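/*
 * "Unjamming" a command means forcibly resolving a ucmd that user space will
 * not answer anymore: depending on its state it is either failed back to SCST
 * (busy or hardware error sense) or its pending internal operation is
 * completed with a failure status.
 */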
1889 /*
1890  * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reacquire.
1891  */
1892 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
1893         unsigned long *flags)
1894         __releases(&dev->cmd_lists.cmd_list_lock)
1895         __acquires(&dev->cmd_lists.cmd_list_lock)
1896 {
1897         int state = ucmd->state;
1898         struct scst_user_dev *dev = ucmd->dev;
1899
1900         TRACE_ENTRY();
1901
1902         if (ucmd->this_state_unjammed)
1903                 goto out;
1904
1905         TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
1906                 state);
1907
1908         ucmd->jammed = 1;
1909         ucmd->this_state_unjammed = 1;
1910         ucmd->sent_to_user = 0;
1911
1912         switch (state) {
1913         case UCMD_STATE_PARSING:
1914         case UCMD_STATE_BUF_ALLOCING:
1915                 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1916                         ucmd->aborted = 1;
1917                 else {
1918                         if (busy)
1919                                 scst_set_busy(ucmd->cmd);
1920                         else
1921                                 scst_set_cmd_error(ucmd->cmd,
1922                                        SCST_LOAD_SENSE(scst_sense_hardw_error));
1923                 }
1924                 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1925
1926                 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
1927                 list_add(&ucmd->cmd->cmd_list_entry,
1928                         &ucmd->cmd->cmd_lists->active_cmd_list);
1929                 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
1930                 break;
1931
1932         case UCMD_STATE_EXECING:
1933                 if (flags != NULL)
1934                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock,
1935                                                *flags);
1936                 else
1937                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1938
1939                 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
1940
1941                 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1942                         ucmd->aborted = 1;
1943                 else {
1944                         if (busy)
1945                                 scst_set_busy(ucmd->cmd);
1946                         else
1947                                 scst_set_cmd_error(ucmd->cmd,
1948                                        SCST_LOAD_SENSE(scst_sense_hardw_error));
1949                 }
1950
1951                 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT,
1952                         SCST_CONTEXT_DIRECT);
1953                 /* !! At this point cmd and ucmd can be already freed !! */
1954
1955                 if (flags != NULL)
1956                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock,
1957                                           *flags);
1958                 else
1959                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1960                 break;
1961
1962         case UCMD_STATE_ON_FREEING:
1963         case UCMD_STATE_ON_CACHE_FREEING:
1964         case UCMD_STATE_TM_EXECING:
1965         case UCMD_STATE_ATTACH_SESS:
1966         case UCMD_STATE_DETACH_SESS:
1967         {
1968                 if (flags != NULL)
1969                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock,
1970                                                *flags);
1971                 else
1972                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1973
1974                 switch (state) {
1975                 case UCMD_STATE_ON_FREEING:
1976                         dev_user_process_reply_on_free(ucmd);
1977                         break;
1978
1979                 case UCMD_STATE_ON_CACHE_FREEING:
1980                         dev_user_process_reply_on_cache_free(ucmd);
1981                         break;
1982
1983                 case UCMD_STATE_TM_EXECING:
1984                         dev_user_process_reply_tm_exec(ucmd,
1985                                                        SCST_MGMT_STATUS_FAILED);
1986                         break;
1987
1988                 case UCMD_STATE_ATTACH_SESS:
1989                 case UCMD_STATE_DETACH_SESS:
1990                         dev_user_process_reply_sess(ucmd, -EFAULT);
1991                         break;
1992                 }
1993
1994                 if (flags != NULL)
1995                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock,
1996                                           *flags);
1997                 else
1998                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1999                 break;
2000         }
2001
2002         default:
2003                 PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
2004                 sBUG();
2005                 break;
2006         }
2007
2008 out:
2009         TRACE_EXIT();
2010         return;
2011 }
2012
2013 static void dev_user_unjam_dev(struct scst_user_dev *dev)
2014         __releases(&dev->cmd_lists.cmd_list_lock)
2015         __acquires(&dev->cmd_lists.cmd_list_lock)
2016 {
2017         int i;
2018         struct scst_user_cmd *ucmd;
2019
2020         TRACE_ENTRY();
2021
2022         TRACE_MGMT_DBG("Unjamming dev %p", dev);
2023
2024         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2025
2026 repeat:
2027         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2028                 struct list_head *head = &dev->ucmd_hash[i];
2029
2030                 list_for_each_entry(ucmd, head, hash_list_entry) {
2031                         if (!ucmd->sent_to_user)
2032                                 continue;
2033
2034                         if (ucmd_get_check(ucmd))
2035                                 continue;
2036
2037                         TRACE_MGMT_DBG("ucmd %p, state %x, scst_cmd %p", ucmd,
2038                                 ucmd->state, ucmd->cmd);
2039
2040                         dev_user_unjam_cmd(ucmd, 0, NULL);
2041
2042                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2043                         ucmd_put(ucmd);
2044                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2045
2046                         goto repeat;
2047                 }
2048         }
2049
2050         if (dev_user_process_scst_commands(dev) != 0)
2051                 goto repeat;
2052
2053         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2054
2055         TRACE_EXIT();
2056         return;
2057 }
2058
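/*
 * Completes an asynchronous task management command with the given status;
 * SCST_MGMT_STATUS_TASK_NOT_EXIST is translated to success, see below.
 */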
2059 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
2060         int status)
2061 {
2062         int res = 0;
2063
2064         TRACE_ENTRY();
2065
2066         TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
2067                 ucmd->user_cmd.tm_cmd.fn, status);
2068
2069         if (status == SCST_MGMT_STATUS_TASK_NOT_EXIST) {
2070                 /*
2071                  * It is possible that user space saw the TM cmd before the cmd
2072                  * to abort, or will never see it at all, because it was
2073                  * aborted on the way there. So it is safe to return
2074                  * success instead, because, if the TM cmd got to this
2075                  * point, the cmd to abort apparently does exist.
2076                  */
2077                 status = SCST_MGMT_STATUS_SUCCESS;
2078         }
2079
2080         scst_async_mcmd_completed(ucmd->mcmd, status);
2081
2082         ucmd_put(ucmd);
2083
2084         TRACE_EXIT_RES(res);
2085         return res;
2086 }
2087
2088 static void dev_user_abort_ready_commands(struct scst_user_dev *dev)
2089 {
2090         struct scst_user_cmd *ucmd;
2091         unsigned long flags;
2092
2093         TRACE_ENTRY();
2094
2095         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2096 again:
2097         list_for_each_entry(ucmd, &dev->ready_cmd_list, ready_cmd_list_entry) {
2098                 if ((ucmd->cmd != NULL) && !ucmd->seen_by_user &&
2099                     test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags)) {
2100                         switch (ucmd->state) {
2101                         case UCMD_STATE_PARSING:
2102                         case UCMD_STATE_BUF_ALLOCING:
2103                         case UCMD_STATE_EXECING:
2104                                 TRACE_MGMT_DBG("Aborting ready ucmd %p", ucmd);
2105                                 list_del(&ucmd->ready_cmd_list_entry);
2106                                 dev_user_unjam_cmd(ucmd, 0, &flags);
2107                                 goto again;
2108                         }
2109                 }
2110         }
2111
2112         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2113
2114         TRACE_EXIT();
2115         return;
2116 }
2117
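/*
 * Task management entry point: aborts still-queued ready commands that SCST
 * has already marked aborted, then queues an SCST_USER_TASK_MGMT request to
 * the user space handler and returns without waiting for its completion.
 */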
2118 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2119         struct scst_tgt_dev *tgt_dev)
2120 {
2121         struct scst_user_cmd *ucmd;
2122         struct scst_user_dev *dev =
2123                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2124         struct scst_user_cmd *ucmd_to_abort = NULL;
2125
2126         TRACE_ENTRY();
2127
2128         /*
2129          * In this approach we don't do anything with hung devices, i.e. ones
2130          * that stopped responding and/or have stuck commands. We forcibly abort
2131          * such commands only if they have not yet been sent to the user space
2132          * handler or if the device is being unloaded, e.g. if its handler
2133          * program gets killed. This is because it's pretty hard to distinguish
2134          * a stuck device from a temporarily overloaded one, for several reasons:
2135          *
2136          * 1. Some commands need a lot of time to complete (several hours),
2137          *    so to an impatient user such commands will always look
2138          *    stuck.
2139          *
2140          * 2. If we forcibly abort even a single command, i.e. abort it before
2141          *    it has actually completed in the user space, we have to take the
2142          *    whole device offline until we are sure that no previously aborted
2143          *    commands will still get executed. Otherwise we risk data
2144          *    corruption, when a command that was aborted and reported as
2145          *    completed actually gets executed *after* new commands sent
2146          *    after the forced abort. Many journaling file systems and
2147          *    databases rely on "provide the required command order via queue
2148          *    draining", and not taking the whole device offline after a forced
2149          *    abort would break that. This makes deciding whether a command is
2150          *    stuck or not very expensive.
2151          *
2152          * So, we leave the policy decision of whether a device is stuck to
2153          * the user space and simply let all commands live until they are
2154          * completed or their devices get closed/killed. This approach is
2155          * perfectly fine, but can affect management commands that need activity
2156          * suspending via the scst_suspend_activity() function, such as device or
2157          * target registration/removal. During normal operation such commands
2158          * should be rare. Plus, when possible, scst_suspend_activity() will
2159          * return an EBUSY status after a timeout, so its caller doesn't get
2160          * stuck forever either.
2161          *
2162          * Anyway, ToDo: we should reimplement this in the SCST core, so that
2163          * stuck commands would affect only the related devices.
2164          */
2165
2166         dev_user_abort_ready_commands(dev);
2167
2168         /* We can't afford to lose a TM command due to a memory shortage */
2169         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2170
2171         ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.tm_cmd);
2172         ucmd->user_cmd.cmd_h = ucmd->h;
2173         ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2174         ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2175         ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2176         ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
2177         ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
2178
2179         if (mcmd->cmd_to_abort != NULL) {
2180                 ucmd_to_abort =
2181                         (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
2182                 if (ucmd_to_abort != NULL)
2183                         ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2184         }
2185
2186         TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2187                 "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
2188                 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2189                 ucmd->user_cmd.tm_cmd.cmd_h_to_abort);
2190
2191         ucmd->mcmd = mcmd;
2192         ucmd->state = UCMD_STATE_TM_EXECING;
2193
2194         scst_prepare_async_mcmd(mcmd);
2195
2196         dev_user_add_to_ready(ucmd);
2197
2198         TRACE_EXIT();
2199         return SCST_DEV_TM_NOT_COMPLETED;
2200 }
2201
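/* Binds a registering SCST device to its scst_user_dev, found by virt_name */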
2202 static int dev_user_attach(struct scst_device *sdev)
2203 {
2204         int res = 0;
2205         struct scst_user_dev *dev = NULL, *d;
2206
2207         TRACE_ENTRY();
2208
2209         spin_lock(&dev_list_lock);
2210         list_for_each_entry(d, &dev_list, dev_list_entry) {
2211                 if (strcmp(d->name, sdev->virt_name) == 0) {
2212                         dev = d;
2213                         break;
2214                 }
2215         }
2216         spin_unlock(&dev_list_lock);
2217         if (dev == NULL) {
2218                 PRINT_ERROR("Device %s not found", sdev->virt_name);
2219                 res = -EINVAL;
2220                 goto out;
2221         }
2222
2223         sdev->p_cmd_lists = &dev->cmd_lists;
2224         sdev->dh_priv = dev;
2225         sdev->tst = dev->tst;
2226         sdev->queue_alg = dev->queue_alg;
2227         sdev->swp = dev->swp;
2228         sdev->tas = dev->tas;
2229         sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2230
2231         dev->sdev = sdev;
2232
2233         PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2234                 dev->name);
2235
2236 out:
2237         TRACE_EXIT();
2238         return res;
2239 }
2240
2241 static void dev_user_detach(struct scst_device *sdev)
2242 {
2243         struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
2244
2245         TRACE_ENTRY();
2246
2247         TRACE_DBG("virt_id %d", sdev->virt_id);
2248
2249         PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2250                 dev->name);
2251
2252         /* dev will be freed by the caller */
2253         sdev->dh_priv = NULL;
2254         dev->sdev = NULL;
2255
2256         TRACE_EXIT();
2257         return;
2258 }
2259
2260 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2261 {
2262         int res = 0;
2263         unsigned long flags;
2264
2265         TRACE_ENTRY();
2266
2267         TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2268
2269         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2270
2271         if (ucmd->state == UCMD_STATE_ATTACH_SESS) {
2272                 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2273                 ucmd->result = status;
2274         } else if (ucmd->state == UCMD_STATE_DETACH_SESS) {
2275                 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2276         } else
2277                 sBUG();
2278
2279         if (ucmd->cmpl != NULL)
2280                 complete_all(ucmd->cmpl);
2281
2282         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2283
2284         ucmd_put(ucmd);
2285
2286         TRACE_EXIT_RES(res);
2287         return res;
2288 }
2289
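/*
 * Sends SCST_USER_ATTACH_SESS to the user space handler and waits, with a
 * timeout, for its reply; the handler's status becomes the attach result.
 */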
2290 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2291 {
2292         struct scst_user_dev *dev =
2293                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2294         int res = 0, rc;
2295         struct scst_user_cmd *ucmd;
2296         DECLARE_COMPLETION_ONSTACK(cmpl);
2297
2298         TRACE_ENTRY();
2299
2300         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2301         if (ucmd == NULL)
2302                 goto out_nomem;
2303
2304         ucmd->cmpl = &cmpl;
2305
2306         ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.sess);
2307         ucmd->user_cmd.cmd_h = ucmd->h;
2308         ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2309         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2310         ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2311         ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2312         ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
2313         strncpy(ucmd->user_cmd.sess.initiator_name,
2314                 tgt_dev->sess->initiator_name,
2315                 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2316         ucmd->user_cmd.sess.initiator_name[
2317                 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2318         if (tgt_dev->sess->tgt->default_group_name != NULL) {
2319                 strncpy(ucmd->user_cmd.sess.target_name,
2320                         &tgt_dev->sess->tgt->default_group_name[sizeof(SCST_DEFAULT_ACG_NAME)],
2321                         sizeof(ucmd->user_cmd.sess.target_name)-1);
2322                 ucmd->user_cmd.sess.target_name[
2323                         sizeof(ucmd->user_cmd.sess.target_name)-1] = '\0';
2324         }
2325
2326         TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %llx, LUN %llx, "
2327                 "threads_num %d, rd_only_flag %d, initiator %s, target %s)",
2328                 ucmd, ucmd->h, ucmd->user_cmd.sess.sess_h,
2329                 ucmd->user_cmd.sess.lun, ucmd->user_cmd.sess.threads_num,
2330                 ucmd->user_cmd.sess.rd_only, ucmd->user_cmd.sess.initiator_name,
2331                 ucmd->user_cmd.sess.target_name);
2332
2333         ucmd->state = UCMD_STATE_ATTACH_SESS;
2334
2335         ucmd_get(ucmd);
2336
2337         dev_user_add_to_ready(ucmd);
2338
2339         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2340         if (rc > 0)
2341                 res = ucmd->result;
2342         else {
2343                 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2344                 res = -EFAULT;
2345         }
2346
2347         sBUG_ON(irqs_disabled());
2348
2349         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2350         ucmd->cmpl = NULL;
2351         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2352
2353         ucmd_put(ucmd);
2354
2355 out:
2356         TRACE_EXIT_RES(res);
2357         return res;
2358
2359 out_nomem:
2360         res = -ENOMEM;
2361         goto out;
2362 }
2363
2364 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2365 {
2366         struct scst_user_dev *dev =
2367                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2368         struct scst_user_cmd *ucmd;
2369
2370         TRACE_ENTRY();
2371
2372         /*
2373          * We can't afford to lose this command due to a memory shortage, since
2374          * that might lead to a memory leak in the user space handler.
2375          */
2376         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2377         if (ucmd == NULL)
2378                 goto out;
2379
2380         TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %llx)", ucmd,
2381                 ucmd->h, ucmd->user_cmd.sess.sess_h);
2382
2383         ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.sess);
2384         ucmd->user_cmd.cmd_h = ucmd->h;
2385         ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2386         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2387
2388         ucmd->state = UCMD_STATE_DETACH_SESS;
2389
2390         dev_user_add_to_ready(ucmd);
2391
2392 out:
2393         TRACE_EXIT();
2394         return;
2395 }
2396
2397 /* No locks are needed, but the activity must be suspended */
2398 static void dev_user_setup_functions(struct scst_user_dev *dev)
2399 {
2400         TRACE_ENTRY();
2401
2402         dev->devtype.parse = dev_user_parse;
2403         dev->devtype.dev_done = NULL;
2404
2405         if (dev->parse_type != SCST_USER_PARSE_CALL) {
2406                 switch (dev->devtype.type) {
2407                 case TYPE_DISK:
2408                         dev->generic_parse = scst_sbc_generic_parse;
2409                         dev->devtype.dev_done = dev_user_disk_done;
2410                         break;
2411
2412                 case TYPE_TAPE:
2413                         dev->generic_parse = scst_tape_generic_parse;
2414                         dev->devtype.dev_done = dev_user_tape_done;
2415                         break;
2416
2417                 case TYPE_MOD:
2418                         dev->generic_parse = scst_modisk_generic_parse;
2419                         dev->devtype.dev_done = dev_user_disk_done;
2420                         break;
2421
2422                 case TYPE_ROM:
2423                         dev->generic_parse = scst_cdrom_generic_parse;
2424                         dev->devtype.dev_done = dev_user_disk_done;
2425                         break;
2426
2427                 case TYPE_MEDIUM_CHANGER:
2428                         dev->generic_parse = scst_changer_generic_parse;
2429                         break;
2430
2431                 case TYPE_PROCESSOR:
2432                         dev->generic_parse = scst_processor_generic_parse;
2433                         break;
2434
2435                 case TYPE_RAID:
2436                         dev->generic_parse = scst_raid_generic_parse;
2437                         break;
2438
2439                 default:
2440                         PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2441                                 "for it", dev->devtype.type);
2442                         dev->parse_type = SCST_USER_PARSE_CALL;
2443                         break;
2444                 }
2445         } else {
2446                 dev->generic_parse = NULL;
2447                 dev->devtype.dev_done = NULL;
2448         }
2449
2450         TRACE_EXIT();
2451         return;
2452 }
2453
2454 static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
2455 {
2456         char ver[sizeof(DEV_USER_VERSION)+1];
2457         int res;
2458
2459         res = copy_from_user(ver,
2460                         (void __user *)(unsigned long)dev_desc->version_str,
2461                         sizeof(ver));
2462         if (res < 0) {
2463                 PRINT_ERROR("%s", "Unable to get version string");
2464                 goto out;
2465         }
2466         ver[sizeof(ver)-1] = '\0';
2467
2468         if (strcmp(ver, DEV_USER_VERSION) != 0) {
2469                 /* ->name already 0-terminated in dev_user_ioctl() */
2470                 PRINT_ERROR("Incorrect version of user device %s (%s)",
2471                         dev_desc->name, ver);
2472                 res = -EINVAL;
2473                 goto out;
2474         }
2475
2476 out:
2477         return res;
2478 }
2479
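/*
 * SCST_USER_REGISTER_DEVICE: validates the descriptor, allocates and
 * initializes the scst_user_dev (command lists, SGV pool, handler template),
 * registers the virtual device driver and device, and finally attaches the
 * new device to the file via file->private_data.
 */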
2480 static int dev_user_register_dev(struct file *file,
2481         const struct scst_user_dev_desc *dev_desc)
2482 {
2483         int res = -ENOMEM, i;
2484         struct scst_user_dev *dev, *d;
2485         int block;
2486
2487         TRACE_ENTRY();
2488
2489         res = dev_user_check_version(dev_desc);
2490         if (res != 0)
2491                 goto out;
2492
2493         switch (dev_desc->type) {
2494         case TYPE_DISK:
2495         case TYPE_ROM:
2496         case TYPE_MOD:
2497                 if (dev_desc->block_size == 0) {
2498                         PRINT_ERROR("Wrong block size %d",
2499                                     dev_desc->block_size);
2500                         res = -EINVAL;
2501                         goto out;
2502                 }
2503                 block = scst_calc_block_shift(dev_desc->block_size);
2504                 if (block == -1) {
2505                         res = -EINVAL;
2506                         goto out;
2507                 }
2508                 break;
2509         default:
2510                 block = dev_desc->block_size;
2511                 break;
2512         }
2513
2514         if (!try_module_get(THIS_MODULE)) {
2515                 PRINT_ERROR("%s", "Failed to get module");
2516                 goto out;
2517         }
2518
2519         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2520         if (dev == NULL)
2521                 goto out_put;
2522
2523         init_rwsem(&dev->dev_rwsem);
2524         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2525         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2526         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2527         INIT_LIST_HEAD(&dev->ready_cmd_list);
2528         if (file->f_flags & O_NONBLOCK) {
2529                 TRACE_DBG("%s", "Non-blocking operations");
2530                 dev->blocking = 0;
2531         } else
2532                 dev->blocking = 1;
2533         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2534                 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2535
2536         strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2537         dev->name[sizeof(dev->name)-1] = '\0';
2538
2539         scst_init_mem_lim(&dev->udev_mem_lim);
2540
2541         /*
2542          * We don't use a clustered pool, since it implies page reordering,
2543          * which isn't possible with user space supplied buffers. Although
2544          * it's still possible to cluster pages tail-to-head, it doesn't
2545          * seem worth the effort.
2546          */
2547         dev->pool = sgv_pool_create(dev->name, 0);
2548         if (dev->pool == NULL)
2549                 goto out_put;
2550         sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2551                 dev_user_free_sg_entries);
2552
2553         scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
2554                 dev->name);
2555         dev->devtype.type = dev_desc->type;
2556         dev->devtype.threads_num = -1;
2557         dev->devtype.parse_atomic = 1;
2558         dev->devtype.exec_atomic = 0; /* no point to make it 1 */
2559         dev->devtype.dev_done_atomic = 1;
2560         dev->devtype.no_proc = 1;
2561         dev->devtype.attach = dev_user_attach;
2562         dev->devtype.detach = dev_user_detach;
2563         dev->devtype.attach_tgt = dev_user_attach_tgt;
2564         dev->devtype.detach_tgt = dev_user_detach_tgt;
2565         dev->devtype.exec = dev_user_exec;
2566         dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2567         dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2568
2569         init_completion(&dev->cleanup_cmpl);
2570         dev->block = block;
2571         dev->def_block = block;
2572
2573         res = __dev_user_set_opt(dev, &dev_desc->opt);
2574
2575         TRACE_MEM("dev %p, name %s", dev, dev->name);
2576
2577         spin_lock(&dev_list_lock);
2578
2579         list_for_each_entry(d, &dev_list, dev_list_entry) {
2580                 if (strcmp(d->name, dev->name) == 0) {
2581                         PRINT_ERROR("Device %s already exists",
2582                                 dev->name);
2583                         res = -EEXIST;
2584                         spin_unlock(&dev_list_lock);
2585                         goto out_free;
2586                 }
2587         }
2588
2589         list_add_tail(&dev->dev_list_entry, &dev_list);
2590
2591         spin_unlock(&dev_list_lock);
2592
2593         if (res != 0)
2594                 goto out_del_free;
2595
2596         res = scst_register_virtual_dev_driver(&dev->devtype);
2597         if (res < 0)
2598                 goto out_del_free;
2599
2600         dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2601         if (dev->virt_id < 0) {
2602                 res = dev->virt_id;
2603                 goto out_unreg_handler;
2604         }
2605
2606         mutex_lock(&dev_priv_mutex);
2607         if (file->private_data != NULL) {
2608                 mutex_unlock(&dev_priv_mutex);
2609                 PRINT_ERROR("%s", "Device already registered");
2610                 res = -EINVAL;
2611                 goto out_unreg_drv;
2612         }
2613         file->private_data = dev;
2614         mutex_unlock(&dev_priv_mutex);
2615
2616 out:
2617         TRACE_EXIT_RES(res);
2618         return res;
2619
2620 out_unreg_drv:
2621         scst_unregister_virtual_device(dev->virt_id);
2622
2623 out_unreg_handler:
2624         scst_unregister_virtual_dev_driver(&dev->devtype);
2625
2626 out_del_free:
2627         spin_lock(&dev_list_lock);
2628         list_del(&dev->dev_list_entry);
2629         spin_unlock(&dev_list_lock);
2630
2631 out_free:
2632         sgv_pool_destroy(dev->pool);
2633         kfree(dev);
2634         goto out_put;
2635
2636 out_put:
2637         module_put(THIS_MODULE);
2638         goto out;
2639 }
2640
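/*
 * Validates and applies the handler options; SCSI mode settings are also
 * mirrored into the live scst_device, if one is attached.
 */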
2641 static int __dev_user_set_opt(struct scst_user_dev *dev,
2642         const struct scst_user_opt *opt)
2643 {
2644         int res = 0;
2645
2646         TRACE_ENTRY();
2647
2648         TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
2649                 "memory_reuse_type %x, partial_transfers_type %x, "
2650                 "partial_len %d", dev->name, opt->parse_type,
2651                 opt->on_free_cmd_type, opt->memory_reuse_type,
2652                 opt->partial_transfers_type, opt->partial_len);
2653
2654         if (opt->parse_type > SCST_USER_MAX_PARSE_OPT ||
2655             opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT ||
2656             opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT ||
2657             opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT) {
2658                 PRINT_ERROR("%s", "Invalid option");
2659                 res = -EINVAL;
2660                 goto out;
2661         }
2662
2663         if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
2664              (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
2665             ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
2666              (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
2667             (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1)) {
2668                 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x,"
2669                         " tas %x, has_own_order_mgmt %x)", opt->tst,
2670                         opt->queue_alg, opt->swp, opt->tas,
2671                         opt->has_own_order_mgmt);
2672                 res = -EINVAL;
2673                 goto out;
2674         }
2675
2676         dev->parse_type = opt->parse_type;
2677         dev->on_free_cmd_type = opt->on_free_cmd_type;
2678         dev->memory_reuse_type = opt->memory_reuse_type;
2679         dev->partial_transfers_type = opt->partial_transfers_type;
2680         dev->partial_len = opt->partial_len;
2681
2682         dev->tst = opt->tst;
2683         dev->queue_alg = opt->queue_alg;
2684         dev->swp = opt->swp;
2685         dev->tas = opt->tas;
2686         dev->has_own_order_mgmt = opt->has_own_order_mgmt;
2687         if (dev->sdev != NULL) {
2688                 dev->sdev->tst = opt->tst;
2689                 dev->sdev->queue_alg = opt->queue_alg;
2690                 dev->sdev->swp = opt->swp;
2691                 dev->sdev->tas = opt->tas;
2692                 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
2693         }
2694
2695         dev_user_setup_functions(dev);
2696
2697 out:
2698         TRACE_EXIT_RES(res);
2699         return res;
2700 }
2701
2702 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
2703 {
2704         int res = 0;
2705         struct scst_user_dev *dev;
2706
2707         TRACE_ENTRY();
2708
2709         mutex_lock(&dev_priv_mutex);
2710         dev = (struct scst_user_dev *)file->private_data;
2711         res = dev_user_check_reg(dev);
2712         if (res != 0) {
2713                 mutex_unlock(&dev_priv_mutex);
2714                 goto out;
2715         }
2716         down_write(&dev->dev_rwsem);
2717         mutex_unlock(&dev_priv_mutex);
2718
2719         res = scst_suspend_activity(true);
2720         if (res != 0)
2721                 goto out;
2722
2723         res = __dev_user_set_opt(dev, opt);
2724
2725         scst_resume_activity();
2726
2727         up_write(&dev->dev_rwsem);
2728
2729 out:
2730         TRACE_EXIT_RES(res);
2731         return res;
2732 }
2733
2734 static int dev_user_get_opt(struct file *file, void __user *arg)
2735 {
2736         int res = 0;
2737         struct scst_user_dev *dev;
2738         struct scst_user_opt opt;
2739
2740         TRACE_ENTRY();
2741
2742         mutex_lock(&dev_priv_mutex);
2743         dev = (struct scst_user_dev *)file->private_data;
2744         res = dev_user_check_reg(dev);
2745         if (res != 0) {
2746                 mutex_unlock(&dev_priv_mutex);
2747                 goto out;
2748         }
2749         down_read(&dev->dev_rwsem);
2750         mutex_unlock(&dev_priv_mutex);
2751
2752         opt.parse_type = dev->parse_type;
2753         opt.on_free_cmd_type = dev->on_free_cmd_type;
2754         opt.memory_reuse_type = dev->memory_reuse_type;
2755         opt.partial_transfers_type = dev->partial_transfers_type;
2756         opt.partial_len = dev->partial_len;
2757         opt.tst = dev->tst;
2758         opt.queue_alg = dev->queue_alg;
2759         opt.tas = dev->tas;
2760         opt.swp = dev->swp;
2761         opt.has_own_order_mgmt = dev->has_own_order_mgmt;
2762
2763         TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
2764                 "memory_reuse_type %x, partial_transfers_type %x, "
2765                 "partial_len %d", dev->name, opt.parse_type,
2766                 opt.on_free_cmd_type, opt.memory_reuse_type,
2767                 opt.partial_transfers_type, opt.partial_len);
2768
2769         res = copy_to_user(arg, &opt, sizeof(opt));
2770
2771         up_read(&dev->dev_rwsem);
2772 out:
2773         TRACE_EXIT_RES(res);
2774         return res;
2775 }
2776
2777 static int dev_usr_parse(struct scst_cmd *cmd)
2778 {
2779         sBUG();
2780         return SCST_CMD_STATE_DEFAULT;
2781 }
2782
2783 /* Needed only for /proc support */
2784 #define USR_TYPE {                      \
2785         .name =         DEV_USER_NAME,  \
2786         .type =         -1,             \
2787         .parse =        dev_usr_parse,  \
2788 }
2789
2790 static struct scst_dev_type dev_user_devtype = USR_TYPE;
2791
2792 static int dev_user_release(struct inode *inode, struct file *file)
2793 {
2794         int res = 0;
2795         struct scst_user_dev *dev;
2796
2797         TRACE_ENTRY();
2798
2799         mutex_lock(&dev_priv_mutex);
2800         dev = (struct scst_user_dev *)file->private_data;
2801         if (dev == NULL) {
2802                 mutex_unlock(&dev_priv_mutex);
2803                 goto out;
2804         }
2805         file->private_data = NULL;
2806
2807         TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
2808
2809         spin_lock(&dev_list_lock);
2810         list_del(&dev->dev_list_entry);
2811         spin_unlock(&dev_list_lock);
2812
2813         mutex_unlock(&dev_priv_mutex);
2814
2815         down_write(&dev->dev_rwsem);
2816
2817         spin_lock(&cleanup_lock);
2818         list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
2819         spin_unlock(&cleanup_lock);
2820
2821         wake_up(&cleanup_list_waitQ);
2822         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2823
2824         scst_unregister_virtual_device(dev->virt_id);
2825         scst_unregister_virtual_dev_driver(&dev->devtype);
2826
2827         sgv_pool_destroy(dev->pool);
2828
2829         TRACE_DBG("Unregistering finished (dev %p)", dev);
2830
2831         dev->cleanup_done = 1;
2832
2833         wake_up(&cleanup_list_waitQ);
2834         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2835
2836         wait_for_completion(&dev->cleanup_cmpl);
2837
2838         up_write(&dev->dev_rwsem); /* to make the debug check happy */
2839
2840         TRACE_DBG("Releasing completed (dev %p)", dev);
2841
2842         kfree(dev);
2843
2844         module_put(THIS_MODULE);
2845
2846 out:
2847         TRACE_EXIT_RES(res);
2848         return res;
2849 }
2850
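/*
 * Called by the cleanup thread: unjams the device and drains any remaining
 * commands. Returns 0 once the device is fully cleaned up, non-zero if it
 * has to be polled again.
 */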
2851 static int dev_user_process_cleanup(struct scst_user_dev *dev)
2852 {
2853         struct scst_user_cmd *ucmd;
2854         int rc, res = 1;
2855
2856         TRACE_ENTRY();
2857
2858         dev->blocking = 0;
2859
2860         while (1) {
2861                 TRACE_DBG("Cleaning up dev %p", dev);
2862
2863                 dev_user_unjam_dev(dev);
2864
2865                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2866
2867                 rc = dev_user_get_next_cmd(dev, &ucmd);
2868                 if (rc == 0)
2869                         dev_user_unjam_cmd(ucmd, 1, NULL);
2870
2871                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2872
2873                 if (rc == -EAGAIN) {
2874                         if (dev->cleanup_done)
2875                                 break;
2876                         else {
2877                                 TRACE_DBG("No more commands (dev %p)", dev);
2878                                 goto out;
2879                         }
2880                 }
2881         }
2882
2883 #ifdef CONFIG_SCST_EXTRACHECKS
2884 {
2885         int i;
2886         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2887                 struct list_head *head = &dev->ucmd_hash[i];
2888                 struct scst_user_cmd *ucmd2;
2889 again:
2890                 list_for_each_entry(ucmd2, head, hash_list_entry) {
2891                         PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd2,
2892                                 ucmd2->state, atomic_read(&ucmd2->ucmd_ref));
2893                         ucmd_put(ucmd2);
2894                         goto again;
2895                 }
2896         }
2897 }
2898 #endif
2899
2900         TRACE_DBG("Cleanup done (dev %p)", dev);
2901         complete_all(&dev->cleanup_cmpl);
2902         res = 0;
2903
2904 out:
2905         TRACE_EXIT_RES(res);
2906         return res;
2907 }
2908
2909 static inline int test_cleanup_list(void)
2910 {
2911         int res = !list_empty(&cleanup_list) ||
2912                   unlikely(kthread_should_stop());
2913         return res;
2914 }
2915
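/*
 * Kernel thread that finishes releasing devices queued on cleanup_list,
 * polling each one until dev_user_process_cleanup() reports completion.
 */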
2916 static int dev_user_cleanup_thread(void *arg)
2917 {
2918         TRACE_ENTRY();
2919
2920         PRINT_INFO("Cleanup thread started, PID %d", current->pid);
2921
2922         current->flags |= PF_NOFREEZE;
2923
2924         spin_lock(&cleanup_lock);
2925         while (!kthread_should_stop()) {
2926                 wait_queue_t wait;
2927                 init_waitqueue_entry(&wait, current);
2928
2929                 if (!test_cleanup_list()) {
2930                         add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
2931                         for (;;) {
2932                                 set_current_state(TASK_INTERRUPTIBLE);
2933                                 if (test_cleanup_list())
2934                                         break;
2935                                 spin_unlock(&cleanup_lock);
2936                                 schedule();
2937                                 spin_lock(&cleanup_lock);
2938                         }
2939                         set_current_state(TASK_RUNNING);
2940                         remove_wait_queue(&cleanup_list_waitQ, &wait);
2941                 }
2942
2943                 /*
2944                  * We have to poll the devices, because commands can arrive from
2945                  * the SCST core via cmd_list_waitQ and we have no practical way
2946                  * to detect them.
2947                  */
2948
2949                 while (1) {
2950                         struct scst_user_dev *dev;
2951                         LIST_HEAD(cl_devs);
2952
2953                         while (!list_empty(&cleanup_list)) {
2954                                 int rc;
2955
2956                                 dev = list_entry(cleanup_list.next,
2957                                         typeof(*dev), cleanup_list_entry);
2958                                 list_del(&dev->cleanup_list_entry);
2959
2960                                 spin_unlock(&cleanup_lock);
2961                                 rc = dev_user_process_cleanup(dev);
2962                                 spin_lock(&cleanup_lock);
2963
2964                                 if (rc != 0)
2965                                         list_add_tail(&dev->cleanup_list_entry,
2966                                                 &cl_devs);
2967                         }
2968
2969                         if (list_empty(&cl_devs))
2970                                 break;
2971
2972                         spin_unlock(&cleanup_lock);
2973                         msleep(100);
2974                         spin_lock(&cleanup_lock);
2975
2976                         while (!list_empty(&cl_devs)) {
2977                                 dev = list_entry(cl_devs.next, typeof(*dev),
2978                                         cleanup_list_entry);
2979                                 list_move_tail(&dev->cleanup_list_entry,
2980                                         &cleanup_list);
2981                         }
2982                 }
2983         }
2984         spin_unlock(&cleanup_lock);
2985
2986         /*
2987          * If kthread_should_stop() is true, we are guaranteed to be
2988          * in the module unload path, so cleanup_list must be empty.
2989          */
2990         sBUG_ON(!list_empty(&cleanup_list));
2991
2992         PRINT_INFO("Cleanup thread PID %d finished", current->pid);
2993
2994         TRACE_EXIT();
2995         return 0;
2996 }
2997
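/*
 * Module init: creates the command caches, registers the virtual device
 * handler and its /proc entries, creates the sysfs class and the character
 * device (major DEV_USER_MAJOR), and starts the cleanup thread.
 */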
2998 static int __init init_scst_user(void)
2999 {
3000         int res = 0;
3001         struct max_get_reply {
3002                 union {
3003                         struct scst_user_get_cmd g;
3004                         struct scst_user_reply_cmd r;
3005                 };
3006         };
3007 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3008         struct class_device *class_member;
3009 #else
3010         struct device *dev;
3011 #endif
3012
3013         TRACE_ENTRY();
3014
3015 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
3016         PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
3017                 "Consider changing the VMSPLIT option or using a 64-bit "
3018                 "configuration instead. See the README file for details.");
3019         res = -EINVAL;
3020         goto out;
3021 #endif
3022
3023         user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
3024         if (user_cmd_cachep == NULL) {
3025                 res = -ENOMEM;
3026                 goto out;
3027         }
3028
3029         user_get_cmd_cachep = KMEM_CACHE(max_get_reply, SCST_SLAB_FLAGS);
3030         if (user_get_cmd_cachep == NULL) {
3031                 res = -ENOMEM;
3032                 goto out_cache;
3033         }
3034
3035         dev_user_devtype.module = THIS_MODULE;
3036
3037         res = scst_register_virtual_dev_driver(&dev_user_devtype);
3038         if (res < 0)
3039                 goto out_cache1;
3040
3041         res = scst_dev_handler_build_std_proc(&dev_user_devtype);
3042         if (res != 0)
3043                 goto out_unreg;
3044
3045         dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
3046         if (IS_ERR(dev_user_sysfs_class)) {
3047                 PRINT_ERROR("%s", "Unable to create sysfs class for SCST "
3048                         "user space handler");
3049                 res = PTR_ERR(dev_user_sysfs_class);
3050                 goto out_proc;
3051         }
3052
3053         res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
3054         if (res) {
3055                 PRINT_ERROR("Unable to get major %d for SCST user space "
3056                             "handler", DEV_USER_MAJOR);
3057                 goto out_class;
3058         }
3059
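             /*
              * Kernels older than 2.6.21 get the class_device API here; newer
              * ones use struct device, and device_create() gained an extra
              * drvdata argument in 2.6.27, hence the nested version checks.
              */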
3060 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3061         class_member = class_device_create(dev_user_sysfs_class, NULL,
3062                                 MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
3063         if (IS_ERR(class_member)) {
3064                 res = PTR_ERR(class_member);
3065                 goto out_chrdev;
3066         }
3067 #else
3068         dev = device_create(dev_user_sysfs_class, NULL,
3069                             MKDEV(DEV_USER_MAJOR, 0),
3070 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3071                                 NULL,
3072 #endif
3073                                 DEV_USER_NAME);
3074         if (IS_ERR(dev)) {
3075                 res = PTR_ERR(dev);
3076                 goto out_chrdev;
3077         }
3078 #endif
3079
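             /*
              * Start the single cleanup thread; it sleeps on cleanup_list_waitQ
              * and runs dev_user_process_cleanup() for every device queued on
              * cleanup_list.
              */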
3080         cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
3081                 "scst_usr_cleanupd");
3082         if (IS_ERR(cleanup_thread)) {
3083                 res = PTR_ERR(cleanup_thread);
3084                 PRINT_ERROR("kthread_run() failed: %d", res);
3085                 goto out_dev;
3086         }
3087
3088 out:
3089         TRACE_EXIT_RES(res);
3090         return res;
3091
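             /* Error unwinding: undo the init steps above in reverse order. */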
3092 out_dev:
3093 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3094         class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3095 #else
3096         device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3097 #endif
3098
3099 out_chrdev:
3100         unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3101
3102 out_class:
3103         class_destroy(dev_user_sysfs_class);
3104
3105 out_proc:
3106         scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3107
3108 out_unreg:
3109         scst_unregister_virtual_dev_driver(&dev_user_devtype);
3110
3111 out_cache1:
3112         kmem_cache_destroy(user_get_cmd_cachep);
3113
3114 out_cache:
3115         kmem_cache_destroy(user_cmd_cachep);
3116         goto out;
3117 }
3118
3119 static void __exit exit_scst_user(void)
3120 {
3121         int rc;
3122
3123         TRACE_ENTRY();
3124
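             /* Wait for the cleanup thread to exit before tearing anything down. */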
3125         rc = kthread_stop(cleanup_thread);
3126         if (rc < 0)
3127                 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
3128
3129         unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3130 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3131         class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3132 #else
3133         device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3134 #endif
3135         class_destroy(dev_user_sysfs_class);
3136
3137         scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3138         scst_unregister_virtual_dev_driver(&dev_user_devtype);
3139
3140         kmem_cache_destroy(user_get_cmd_cachep);
3141         kmem_cache_destroy(user_cmd_cachep);
3142
3143         TRACE_EXIT();
3144         return;
3145 }
3146
3147 module_init(init_scst_user);
3148 module_exit(exit_scst_user);
3149
3150 MODULE_AUTHOR("Vladislav Bolkhovitin");
3151 MODULE_LICENSE("GPL");
3152 MODULE_DESCRIPTION("Virtual user space device handler for SCST");
3153 MODULE_VERSION(SCST_VERSION_STRING);
3154 MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);