/*
 *  scst_user.c
 *
 *  Copyright (C) 2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 *  SCSI virtual user space device handler
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/poll.h>

#define LOG_PREFIX              DEV_USER_NAME

#include "scst.h"
#include "scst_user.h"
#include "scst_dev_handler.h"

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
#warning "HIGHMEM kernel configurations are not supported by this module, \
        because nowadays it isn't worth the effort. Consider changing the \
        VMSPLIT option or using a 64-bit configuration instead. See the \
        README file for details."
#endif

#define DEV_USER_MAJOR                  237
#define DEV_USER_CMD_HASH_ORDER         6
#define DEV_USER_TM_TIMEOUT             (10*HZ)
#define DEV_USER_ATTACH_TIMEOUT         (5*HZ)
#define DEV_USER_DETACH_TIMEOUT         (5*HZ)
#define DEV_USER_PRE_UNREG_POLL_TIME    (HZ/10)

struct scst_user_dev {
        struct rw_semaphore dev_rwsem;

        struct scst_cmd_lists cmd_lists;
        /* All 3 protected by cmd_lists.cmd_list_lock */
        struct list_head ready_cmd_list;
        struct list_head prio_ready_cmd_list;
        wait_queue_head_t prio_cmd_list_waitQ;

        /* All, including detach_cmd_count, protected by cmd_lists.cmd_list_lock */
        unsigned short blocking:1;
        unsigned short cleaning:1;
        unsigned short cleanup_done:1;
        unsigned short attach_cmd_active:1;
        unsigned short tm_cmd_active:1;
        unsigned short internal_reset_active:1;
        unsigned short pre_unreg_sess_active:1; /* just a small optimization */

        unsigned short tst:3;
        unsigned short queue_alg:4;
        unsigned short tas:1;
        unsigned short swp:1;
        unsigned short has_own_order_mgmt:1;

        unsigned short detach_cmd_count;

        int (*generic_parse)(struct scst_cmd *cmd,
                int (*get_block)(struct scst_cmd *cmd));

        int block;
        int def_block;

        struct sgv_pool *pool;

        uint8_t parse_type;
        uint8_t on_free_cmd_type;
        uint8_t memory_reuse_type;
        uint8_t prio_queue_type;
        uint8_t partial_transfers_type;
        uint32_t partial_len;

        struct scst_dev_type devtype;

        /* Both protected by cmd_lists.cmd_list_lock */
        unsigned int handle_counter;
        struct list_head ucmd_hash[1<<DEV_USER_CMD_HASH_ORDER];

        struct scst_device *sdev;

        int virt_id;
        struct list_head dev_list_entry;
        char name[SCST_MAX_NAME];

        /* Protected by cmd_lists.cmd_list_lock */
        struct list_head pre_unreg_sess_list;

        struct list_head cleanup_list_entry;
        struct completion cleanup_cmpl;
};

struct scst_user_pre_unreg_sess_obj {
        struct scst_tgt_dev *tgt_dev;
        unsigned int active:1;
        unsigned int exit:1;
        struct list_head pre_unreg_sess_list_entry;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
        struct work_struct pre_unreg_sess_work;
#else
        struct delayed_work pre_unreg_sess_work;
#endif
};

/* Most fields are unprotected, since only one thread at a time can access them */
struct scst_user_cmd {
        struct scst_cmd *cmd;
        struct scst_user_dev *dev;

        atomic_t ucmd_ref;

        unsigned int buff_cached:1;
        unsigned int buf_dirty:1;
        unsigned int background_exec:1;
        unsigned int internal_reset_tm:1;
        unsigned int aborted:1;

        struct scst_user_cmd *buf_ucmd;

        int cur_data_page;
        int num_data_pages;
        int first_page_offset;
        unsigned long ubuff;
        struct page **data_pages;
        struct sgv_pool_obj *sgv;

        unsigned int state;

        struct list_head ready_cmd_list_entry;

        unsigned int h;
        struct list_head hash_list_entry;

        struct scst_user_get_cmd user_cmd;

        struct completion *cmpl;
        int result;
};

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask);
static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);

static int dev_user_parse(struct scst_cmd *cmd);
static int dev_user_exec(struct scst_cmd *cmd);
static void dev_user_on_free_cmd(struct scst_cmd *cmd);
static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_disk_done(struct scst_cmd *cmd);
static int dev_user_tape_done(struct scst_cmd *cmd);

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv);
static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
                                     void *priv);

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);

static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags);
static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status);
static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
static int dev_user_register_dev(struct file *file,
        const struct scst_user_dev_desc *dev_desc);
static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt);
static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
static int dev_user_get_opt(struct file *file, void *arg);

static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
static long dev_user_ioctl(struct file *file, unsigned int cmd,
        unsigned long arg);
static int dev_user_release(struct inode *inode, struct file *file);

/** Data **/

static struct kmem_cache *user_cmd_cachep;

static DEFINE_MUTEX(dev_priv_mutex);

static struct file_operations dev_user_fops = {
        .poll           = dev_user_poll,
        .unlocked_ioctl = dev_user_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = dev_user_ioctl,
#endif
        .release        = dev_user_release,
};

static struct class *dev_user_sysfs_class;

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);

static DEFINE_SPINLOCK(cleanup_lock);
static LIST_HEAD(cleanup_list);
static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
static struct task_struct *cleanup_thread;

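/*
 * Each ucmd is reference counted: the count starts at 1 on allocation and
 * the ucmd is freed (and removed from the per-device hash) once the count
 * drops to zero in ucmd_put().
 */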
static inline void ucmd_get(struct scst_user_cmd *ucmd, int barrier)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        atomic_inc(&ucmd->ucmd_ref);
        if (barrier)
                smp_mb__after_atomic_inc();
}

static inline void ucmd_put(struct scst_user_cmd *ucmd)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        if (atomic_dec_and_test(&ucmd->ucmd_ref))
                dev_user_free_ucmd(ucmd);
}

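/*
 * Number of pages spanned by the user buffer [buf, buf+len): e.g. a 4 KB
 * buffer that starts 512 bytes into a page spans two pages.
 */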
static inline int calc_num_pg(unsigned long buf, int len)
{
        len += buf & ~PAGE_MASK;
        return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
}

static inline int is_need_offs_page(unsigned long buf, int len)
{
        return ((buf & ~PAGE_MASK) != 0) &&
                ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
}

static void __dev_user_not_reg(void)
{
        PRINT_ERROR("%s", "Device not registered");
        return;
}

static inline int dev_user_check_reg(struct scst_user_dev *dev)
{
        if (dev == NULL) {
                __dev_user_not_reg();
                return -EINVAL;
        }
        return 0;
}

static inline int scst_user_cmd_hashfn(int h)
{
        return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
}

static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
        unsigned int h)
{
        struct list_head *head;
        struct scst_user_cmd *ucmd;

        head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
        list_for_each_entry(ucmd, head, hash_list_entry) {
                if (ucmd->h == h) {
                        TRACE_DBG("Found ucmd %p", ucmd);
                        return ucmd;
                }
        }
        return NULL;
}

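/*
 * Assign the ucmd a handle that is unique within the device (retrying on
 * the rare wraparound collision) and insert it into the hash, so that
 * replies from user space can be matched back to the ucmd by cmd_h.
 */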
static void cmd_insert_hash(struct scst_user_cmd *ucmd)
{
        struct list_head *head;
        struct scst_user_dev *dev = ucmd->dev;
        struct scst_user_cmd *u;
        unsigned long flags;

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
        do {
                ucmd->h = dev->handle_counter++;
                u = __ucmd_find_hash(dev, ucmd->h);
        } while (u != NULL);
        head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
        list_add_tail(&ucmd->hash_list_entry, head);
        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Inserted ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
{
        unsigned long flags;
        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
        list_del(&ucmd->hash_list_entry);
        spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Freeing ucmd %p", ucmd);

        cmd_remove_hash(ucmd);
        EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);

        kmem_cache_free(user_cmd_cachep, ucmd);

        TRACE_EXIT();
        return;
}

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
        int offset = 0;

        TRACE_ENTRY();

        /* *sg is supposed to be zeroed */

        TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
                ucmd->ubuff, ucmd->cur_data_page);

        if (ucmd->cur_data_page == 0) {
                TRACE_MEM("ucmd->first_page_offset %d",
                        ucmd->first_page_offset);
                offset = ucmd->first_page_offset;
                ucmd_get(ucmd, 0);
        }

        if (ucmd->cur_data_page >= ucmd->num_data_pages)
                goto out;

        sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
                PAGE_SIZE - offset, offset);
        ucmd->cur_data_page++;

        TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
                sg->offset);
        TRACE_BUFFER("Page data", sg_virt(sg), sg->length);

out:
        TRACE_EXIT();
        return sg_page(sg);
}

static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
                ucmd, ucmd->h, ucmd->ubuff);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
        ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;

        ucmd->state = UCMD_STATE_ON_CACHE_FREEING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
{
        int i;

        TRACE_ENTRY();

        TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
                ucmd->ubuff, ucmd->num_data_pages);

        for (i = 0; i < ucmd->num_data_pages; i++) {
                struct page *page = ucmd->data_pages[i];

                if (ucmd->buf_dirty)
                        SetPageDirty(page);

                page_cache_release(page);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;

        TRACE_EXIT();
        return;
}

static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        sBUG_ON(ucmd->data_pages == NULL);

        TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
                ucmd, ucmd->ubuff, ucmd->buff_cached);

        dev_user_unmap_buf(ucmd);

        if (ucmd->buff_cached)
                dev_user_on_cached_mem_free(ucmd);
        else
                ucmd_put(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;

        TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
                sg_count, ucmd);

        __dev_user_free_sg_entries(ucmd);

        return;
}

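/*
 * Whether this command's buffer may be kept cached in the SGV pool,
 * according to the device's memory_reuse_type and the command's data
 * direction.
 */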
static inline int is_buff_cached(struct scst_user_cmd *ucmd)
{
        int mem_reuse_type = ucmd->dev->memory_reuse_type;

        if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
            ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
            ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE)))
                return 1;
        else
                return 0;
}

/*
 * Returns 0 on success, <0 on fatal failure, >0 if pages are needed.
 * Unmaps the buffer, if needed, in case of error.
 */
static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;
        struct scst_user_dev *dev = ucmd->dev;
        int gfp_mask, flags = 0;
        int bufflen = cmd->bufflen;
        int last_len = 0;

        TRACE_ENTRY();

        gfp_mask = __GFP_NOWARN;
        gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);

        if (cached_buff) {
                flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
                if (ucmd->ubuff == 0)
                        flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
        } else {
                TRACE_MEM("%s", "Not cached buff");
                flags |= SCST_POOL_ALLOC_NO_CACHED;
                if (ucmd->ubuff == 0) {
                        res = 1;
                        goto out;
                }
                bufflen += ucmd->first_page_offset;
                if (is_need_offs_page(ucmd->ubuff, cmd->bufflen))
                        last_len = bufflen & ~PAGE_MASK;
                else
                        last_len = cmd->bufflen & ~PAGE_MASK;
                if (last_len == 0)
                        last_len = PAGE_SIZE;
        }
        ucmd->buff_cached = cached_buff;

        cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &ucmd->sgv, ucmd);
        if (cmd->sg != NULL) {
                struct scst_user_cmd *buf_ucmd =
                        (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);

                TRACE_MEM("Buf ucmd %p", buf_ucmd);

                ucmd->ubuff = buf_ucmd->ubuff;
                ucmd->buf_ucmd = buf_ucmd;

                EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
                                   (ucmd != buf_ucmd));

                if (last_len != 0) {
                        /* We don't use clustering, so the assignment is safe */
                        cmd->sg[cmd->sg_cnt-1].length = last_len;
                }

                TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
                        "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff,
                        last_len, cmd->sg[cmd->sg_cnt-1].length);

                if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
                        static int ll;
                        if (ll < 10) {
                                PRINT_INFO("Unable to complete command due to "
                                        "SG IO count limitation (requested %d, "
                                        "available %d, tgt lim %d)", cmd->sg_cnt,
                                        cmd->tgt_dev->max_sg_cnt,
                                        cmd->tgt->sg_tablesize);
                                ll++;
                        }
                        cmd->sg = NULL;
                        /* sgv will be freed in dev_user_free_sgv() */
                        res = -1;
                }
        } else {
                TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
                        "sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
                        ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
                if (unlikely(cmd->sg_cnt == 0)) {
                        TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
                        sBUG_ON(ucmd->sgv != NULL);
                        res = -1;
                } else {
                        switch (ucmd->state & ~UCMD_STATE_MASK) {
                        case UCMD_STATE_BUF_ALLOCING:
                                res = 1;
                                break;
                        case UCMD_STATE_EXECING:
                                res = -1;
                                break;
                        default:
                                sBUG();
                                break;
                        }
                }
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}

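/*
 * Allocate the command's data buffer: try the SGV pool first; if user-space
 * memory is required, queue an SCST_USER_ALLOC_MEM request and stop command
 * processing until user space replies.
 */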
static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(ucmd->cmd->data_buf_tgt_alloc)) {
                PRINT_ERROR("Target driver %s requested own memory "
                        "allocation", ucmd->cmd->tgtt->name);
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        ucmd->state = UCMD_STATE_BUF_ALLOCING;
        cmd->data_buf_alloced = 1;

        rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
        if (rc == 0)
                goto out;
        else if (rc < 0) {
                scst_set_busy(cmd);
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        if ((cmd->data_direction != SCST_DATA_WRITE) &&
            !scst_is_cmd_local(cmd)) {
                TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
        ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb)));
        ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
                (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;

        dev_user_add_to_ready(ucmd);

        res = SCST_CMD_STATE_STOP;

out:
        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask)
{
        struct scst_user_cmd *ucmd = NULL;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
        if (ucmd != NULL)
                memset(ucmd, 0, sizeof(*ucmd));
#else
        ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
#endif
        if (unlikely(ucmd == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
                        "user cmd (gfp_mask %x)", gfp_mask);
                goto out;
        }
        ucmd->dev = dev;
        atomic_set(&ucmd->ucmd_ref, 1);

        cmd_insert_hash(ucmd);

        TRACE_MEM("ucmd %p allocated", ucmd);

out:
        TRACE_EXIT_HRES((unsigned long)ucmd);
        return ucmd;
}

static int dev_user_get_block(struct scst_cmd *cmd)
{
        struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be called
         * while there are outstanding commands.
         */
        TRACE_EXIT_RES(dev->block);
        return dev->block;
}

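/*
 * PARSE can be handled in three ways, per dev->parse_type: entirely in the
 * kernel (STANDARD), in the kernel with a user-space fallback for invalid
 * or unknown CDBs (EXCEPTION), or always in user space (CALL).
 */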
static int dev_user_parse(struct scst_cmd *cmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_user_cmd *ucmd;
        int atomic = scst_cmd_atomic(cmd);
        struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
        int gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        if (cmd->dh_priv == NULL) {
                ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
                if (unlikely(ucmd == NULL)) {
                        if (atomic) {
                                res = SCST_CMD_STATE_NEED_THREAD_CTX;
                                goto out;
                        } else {
                                scst_set_busy(cmd);
                                goto out_error;
                        }
                }
                ucmd->cmd = cmd;
                cmd->dh_priv = ucmd;
        } else {
                ucmd = (struct scst_user_cmd *)cmd->dh_priv;
                TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
        }

        TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);

        if (ucmd->state != UCMD_STATE_NEW)
                goto alloc;

        switch (dev->parse_type) {
        case SCST_USER_PARSE_STANDARD:
                TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
                        goto out_invalid;
                break;

        case SCST_USER_PARSE_EXCEPTION:
                TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
                        break;
                else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
                        TRACE_MEM("Restarting PARSE to thread context "
                                "(ucmd %p)", ucmd);
                        res = SCST_CMD_STATE_NEED_THREAD_CTX;
                        goto out;
                }
                /* else fall through */

        case SCST_USER_PARSE_CALL:
                TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
                        "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
                ucmd->user_cmd.cmd_h = ucmd->h;
                ucmd->user_cmd.subcode = SCST_USER_PARSE;
                ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
                memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
                        min(sizeof(ucmd->user_cmd.parse_cmd.cdb),
                            sizeof(cmd->cdb)));
                ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
                ucmd->user_cmd.parse_cmd.timeout = cmd->timeout;
                ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
                ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
                ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
                ucmd->user_cmd.parse_cmd.expected_values_set =
                                        cmd->expected_values_set;
                ucmd->user_cmd.parse_cmd.expected_data_direction =
                                        cmd->expected_data_direction;
                ucmd->user_cmd.parse_cmd.expected_transfer_len =
                                        cmd->expected_transfer_len;
                ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
                ucmd->state = UCMD_STATE_PARSING;
                dev_user_add_to_ready(ucmd);
                res = SCST_CMD_STATE_STOP;
                goto out;

        default:
                sBUG();
                goto out;
        }

alloc:
        if (cmd->data_direction != SCST_DATA_NONE)
                res = dev_user_alloc_space(ucmd);

out:
        TRACE_EXIT_RES(res);
        return res;

out_invalid:
        PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
                cmd->op_flags & SCST_INFO_INVALID);
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));

out_error:
        res = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;
}

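/*
 * Flush CPU caches for every page of the data buffer, so that user space
 * and the kernel see coherent data.
 */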
static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
{
        struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
        unsigned long start = buf_ucmd->ubuff;
        int i;

        TRACE_ENTRY();

        if (start == 0)
                goto out;

        for (i = 0; i < buf_ucmd->num_data_pages; i++) {
                struct page *page;
                page = buf_ucmd->data_pages[i];
#ifdef ARCH_HAS_FLUSH_ANON_PAGE
                struct vm_area_struct *vma = find_vma(current->mm, start);
                if (vma != NULL)
                        flush_anon_page(vma, page, start);
#endif
                flush_dcache_page(page);
                start += PAGE_SIZE;
        }

out:
        TRACE_EXIT();
        return;
}

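/*
 * Hand the command over to user space for execution: flush the dcache for
 * WRITE data, fill in the SCST_USER_EXEC payload and queue it to the ready
 * list. Always returns SCST_EXEC_COMPLETED, since the command completes
 * asynchronously via the user-space reply.
 */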
static int dev_user_exec(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
        int res = SCST_EXEC_COMPLETED;

        TRACE_ENTRY();

#if 0 /* We set exec_atomic to 0 to let the SCST core know that we need a
       * thread context to complete the necessary actions, but all we are
       * going to do in this function is, in fact, atomic, so let's skip
       * this check.
       */
        if (scst_cmd_atomic(cmd)) {
                TRACE_DBG("%s", "User exec() can not be called in atomic "
                        "context, rescheduling to the thread");
                res = SCST_EXEC_NEED_THREAD;
                goto out;
        }
#endif

        TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
                "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
                cmd->bufflen, cmd->data_len, ucmd->ubuff);

        if (cmd->data_direction == SCST_DATA_WRITE)
                dev_user_flush_dcache(ucmd);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_EXEC;
        ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.exec_cmd.cdb),
                    sizeof(cmd->cdb)));
        ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
        ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
        ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
        if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
                ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
                        (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        }
        ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.exec_cmd.partial = 0;
        ucmd->user_cmd.exec_cmd.timeout = cmd->timeout;
        ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;

        ucmd->state = UCMD_STATE_EXECING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
{
        if (ucmd->sgv != NULL) {
                sgv_pool_free(ucmd->sgv);
                ucmd->sgv = NULL;
        } else if (ucmd->data_pages != NULL) {
                /* We mapped pages, but for some reason didn't allocate them */
                ucmd_get(ucmd, 0);
                __dev_user_free_sg_entries(ucmd);
        }
        return;
}

static void dev_user_on_free_cmd(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;

        TRACE_ENTRY();

        if (unlikely(ucmd == NULL))
                goto out;

        TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
                ucmd->buff_cached, ucmd->ubuff);

        ucmd->cmd = NULL;
        if ((cmd->data_direction == SCST_DATA_WRITE) && (ucmd->buf_ucmd != NULL))
                ucmd->buf_ucmd->buf_dirty = 1;

        if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
                ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
                /* The state assignment must be before freeing sgv! */
                dev_user_free_sgv(ucmd);
                ucmd_put(ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;

        ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
        ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
        ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
        ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
        ucmd->user_cmd.on_free_cmd.status = cmd->status;
        ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;

        ucmd->state = UCMD_STATE_ON_FREEING;

        dev_user_add_to_ready(ucmd);

out:
        TRACE_EXIT();
        return;
}

static void dev_user_set_block(struct scst_cmd *cmd, int block)
{
        struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be called
         * while there are outstanding commands.
         */
        TRACE_DBG("dev %p, new block %d", dev, block);
        if (block != 0)
                dev->block = block;
        else
                dev->block = dev->def_block;
        return;
}

static int dev_user_disk_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_block_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_tape_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_tape_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

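/*
 * Queue a ucmd for delivery to user space: management ucmds may go to the
 * separate prio queue, HEAD_OF_QUEUE commands jump to the head of the
 * ready list, and everything else is appended to its tail.
 */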
static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
{
        struct scst_user_dev *dev = ucmd->dev;
        unsigned long flags;
        int do_wake;

        TRACE_ENTRY();

        do_wake = (in_interrupt() ||
                   (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
        if (ucmd->cmd)
                do_wake |= ucmd->cmd->preprocessing_only;

        EXTRACHECKS_BUG_ON(ucmd->state & UCMD_STATE_JAMMED_MASK);

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);

        /* Hopefully, the compiler will turn this into a single test/jmp */
        if (unlikely(dev->attach_cmd_active || dev->tm_cmd_active ||
                     dev->internal_reset_active || dev->pre_unreg_sess_active ||
                     (dev->detach_cmd_count != 0))) {
                switch (ucmd->state) {
                case UCMD_STATE_PARSING:
                case UCMD_STATE_BUF_ALLOCING:
                case UCMD_STATE_EXECING:
                        if (dev->pre_unreg_sess_active &&
                            !(dev->attach_cmd_active || dev->tm_cmd_active ||
                              dev->internal_reset_active ||
                              (dev->detach_cmd_count != 0))) {
                                struct scst_user_pre_unreg_sess_obj *p, *found = NULL;
                                list_for_each_entry(p, &dev->pre_unreg_sess_list,
                                        pre_unreg_sess_list_entry) {
                                        if (p->tgt_dev == ucmd->cmd->tgt_dev) {
                                                if (p->active)
                                                        found = p;
                                                break;
                                        }
                                }
                                if (found == NULL) {
                                        TRACE_MGMT_DBG("No pre unreg sess "
                                                "active (ucmd %p)", ucmd);
                                        break;
                                } else {
                                        TRACE_MGMT_DBG("Pre unreg sess %p "
                                                "active (ucmd %p)", found, ucmd);
                                }
                        }
                        TRACE(TRACE_MGMT, "Mgmt cmd active, returning BUSY for "
                                "ucmd %p", ucmd);
                        dev_user_unjam_cmd(ucmd, 1, &flags);
                        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
                        goto out;
                }
        }

        if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
            unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
            unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
                if (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE) {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to prio ready cmd "
                                       "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->prio_ready_cmd_list);
                        wake_up(&dev->prio_cmd_list_waitQ);
                        do_wake = 0;
                } else {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to ready cmd "
                                "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->ready_cmd_list);
                        do_wake = 1;
                }
        } else if ((ucmd->cmd != NULL) &&
            unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
                TRACE_DBG("Adding ucmd %p to head ready cmd list", ucmd);
                list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        } else {
                TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
                list_add_tail(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        }

        if (do_wake) {
                TRACE_DBG("Waking up dev %p", dev);
                wake_up(&dev->cmd_lists.cmd_list_waitQ);
        }

        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

out:
        TRACE_EXIT();
        return;
}

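/*
 * Pin the user-space buffer at ubuff into ucmd->data_pages with
 * get_user_pages(), so that the pages can later back the command's SG list.
 */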
static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
        int num_pg)
{
        int res = 0, rc;
        int i;

        TRACE_ENTRY();

        if (unlikely(ubuff == 0))
                goto out_nomem;

        sBUG_ON(ucmd->data_pages != NULL);

        ucmd->num_data_pages = num_pg;

        ucmd->data_pages = kzalloc(sizeof(*ucmd->data_pages)*ucmd->num_data_pages,
                GFP_KERNEL);
        if (ucmd->data_pages == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
                        "(num_data_pages=%d)", ucmd->num_data_pages);
                res = -ENOMEM;
                goto out_nomem;
        }

        TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d, "
                "first_page_offset %d, len %d)", ucmd, ubuff,
                ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
                ucmd->cmd->bufflen);

        down_read(&current->mm->mmap_sem);
        rc = get_user_pages(current, current->mm, ubuff, ucmd->num_data_pages,
                1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* get_user_pages() flushes dcache */

        if (rc < ucmd->num_data_pages)
                goto out_unmap;

        ucmd->ubuff = ubuff;
        ucmd->first_page_offset = (ubuff & ~PAGE_MASK);

out:
        TRACE_EXIT_RES(res);
        return res;

out_nomem:
        scst_set_busy(ucmd->cmd);
        /* fall through */

out_err:
        ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;

out_unmap:
        PRINT_ERROR("Failed to get %d user pages (rc %d)",
                ucmd->num_data_pages, rc);
        if (rc > 0) {
                for (i = 0; i < rc; i++)
                        page_cache_release(ucmd->data_pages[i]);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;
        res = -EFAULT;
        scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        goto out_err;
}

static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        TRACE_DBG("ucmd %p, pbuf %Lx", ucmd, reply->alloc_reply.pbuf);

        if (likely(reply->alloc_reply.pbuf != 0)) {
                int pages;
                if (ucmd->buff_cached) {
                        if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
                                PRINT_ERROR("Supplied pbuf %Lx isn't "
                                        "page aligned", reply->alloc_reply.pbuf);
                                goto out_hwerr;
                        }
                        pages = cmd->sg_cnt;
                } else
                        pages = calc_num_pg(reply->alloc_reply.pbuf, cmd->bufflen);
                res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
        } else {
                scst_set_busy(ucmd->cmd);
                ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        }

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_hwerr:
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_parse *preply =
                &reply->parse_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
                     (preply->data_direction != SCST_DATA_READ) &&
                     (preply->data_direction != SCST_DATA_NONE)))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
                     (preply->bufflen == 0)))
                goto out_inval;

        if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
                goto out_inval;

        TRACE_DBG("ucmd %p, queue_type %x, data_direction %x, bufflen %d, "
                "data_len %d, pbuf %Lx", ucmd, preply->queue_type,
                preply->data_direction, preply->bufflen, preply->data_len,
                reply->alloc_reply.pbuf);

        cmd->queue_type = preply->queue_type;
        cmd->data_direction = preply->data_direction;
        cmd->bufflen = preply->bufflen;
        cmd->data_len = preply->data_len;

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid parse_reply parameter(s)");
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON FREE ucmd %p", ucmd);

        dev_user_free_sgv(ucmd);
        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);

        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_exec *ereply =
                &reply->exec_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
                if (ucmd->background_exec) {
                        TRACE_DBG("Background ucmd %p finished", ucmd);
                        ucmd_put(ucmd);
                        goto out;
                }
                if (unlikely(ereply->resp_data_len > cmd->bufflen))
                        goto out_inval;
                if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
                             (ereply->resp_data_len != 0)))
                        goto out_inval;
        } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
                if (unlikely(ucmd->background_exec))
                        goto out_inval;
                if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
                             (cmd->resp_data_len != 0)))
                        goto out_inval;
                ucmd_get(ucmd, 1);
                ucmd->background_exec = 1;
                TRACE_DBG("Background ucmd %p", ucmd);
                goto out_compl;
        } else
                goto out_inval;

        TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
                ereply->status, ereply->resp_data_len);

        if (ereply->resp_data_len != 0) {
                if (ucmd->ubuff == 0) {
                        int pages, rc;
                        if (unlikely(ereply->pbuf == 0))
                                goto out_busy;
                        if (ucmd->buff_cached) {
                                if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
                                        PRINT_ERROR("Supplied pbuf %Lx isn't "
                                                "page aligned", ereply->pbuf);
                                        goto out_hwerr;
                                }
                                pages = cmd->sg_cnt;
                        } else
                                pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
                        rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
                        if ((rc != 0) || (ucmd->ubuff == 0))
                                goto out_compl;

                        rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
                        if (unlikely(rc != 0))
                                goto out_busy;
                } else
                        dev_user_flush_dcache(ucmd);
                cmd->may_need_dma_sync = 1;
                scst_set_resp_data_len(cmd, ereply->resp_data_len);
        } else if (cmd->resp_data_len != ereply->resp_data_len) {
                if (ucmd->ubuff == 0)
                        cmd->resp_data_len = ereply->resp_data_len;
                else
                        scst_set_resp_data_len(cmd, ereply->resp_data_len);
        }

        cmd->status = ereply->status;
        if (ereply->sense_len != 0) {
                res = scst_alloc_sense(cmd, 0);
                if (res != 0)
                        goto out_compl;
                res = copy_from_user(cmd->sense,
                        (void *)(unsigned long)ereply->psense_buffer,
                        min((unsigned int)SCST_SENSE_BUFFERSIZE,
                                (unsigned int)ereply->sense_len));
                if (res != 0) {
                        /* copy_from_user() returns the number of uncopied bytes */
                        PRINT_ERROR("%s", "Unable to get sense data");
                        res = -EFAULT;
                        goto out_hwerr_res_set;
                }
        }

out_compl:
        cmd->completed = 1;
        cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
        /* !! At this point cmd may already be freed !! */

out:
        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid exec_reply parameter(s)");

out_hwerr:
        res = -EINVAL;

out_hwerr_res_set:
        if (ucmd->background_exec) {
                ucmd_put(ucmd);
                goto out;
        } else {
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                goto out_compl;
        }

out_busy:
        scst_set_busy(cmd);
        goto out_compl;
}

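/*
 * Match a reply from user space to its ucmd by cmd_h, verify that the reply
 * subcode corresponds to the state the ucmd was sent in, then dispatch to
 * the state-specific handler.
 */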
static int dev_user_process_reply(struct scst_user_dev *dev,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_cmd *ucmd;
        int state;

        TRACE_ENTRY();

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd = __ucmd_find_hash(dev, reply->cmd_h);
        if (ucmd == NULL) {
                TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
                res = -ESRCH;
                goto out_unlock;
        }

        if (ucmd->background_exec) {
                state = UCMD_STATE_EXECING;
                goto unlock_process;
        }

        if (unlikely(!(ucmd->state & UCMD_STATE_SENT_MASK))) {
                if (ucmd->state & UCMD_STATE_JAMMED_MASK) {
                        TRACE_MGMT_DBG("Reply on jammed ucmd %p, ignoring",
                                ucmd);
                } else {
                        TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
                                "state %x", ucmd, ucmd->state);
                        res = -EBUSY;
                }
                goto out_unlock;
        }

        if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
                goto out_wrong_state;

        if (unlikely(_IOC_NR(reply->subcode) !=
                        (ucmd->state & ~UCMD_STATE_SENT_MASK)))
                goto out_wrong_state;

        ucmd->state &= ~UCMD_STATE_SENT_MASK;
        state = ucmd->state;
        ucmd->state |= UCMD_STATE_RECV_MASK;

unlock_process:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        switch (state) {
        case UCMD_STATE_PARSING:
                res = dev_user_process_reply_parse(ucmd, reply);
                break;

        case UCMD_STATE_BUF_ALLOCING:
                res = dev_user_process_reply_alloc(ucmd, reply);
                break;

        case UCMD_STATE_EXECING:
                res = dev_user_process_reply_exec(ucmd, reply);
                break;

        case UCMD_STATE_ON_FREEING:
                res = dev_user_process_reply_on_free(ucmd);
                break;

        case UCMD_STATE_ON_CACHE_FREEING:
                res = dev_user_process_reply_on_cache_free(ucmd);
                break;

        case UCMD_STATE_TM_EXECING:
                res = dev_user_process_reply_tm_exec(ucmd, reply->result);
                break;

        case UCMD_STATE_ATTACH_SESS:
        case UCMD_STATE_DETACH_SESS:
                res = dev_user_process_reply_sess(ucmd, reply->result);
                break;

        default:
                sBUG();
                break;
        }
out:
        TRACE_EXIT_RES(res);
        return res;

out_wrong_state:
        PRINT_ERROR("Command's %p subcode %x doesn't match internal "
                "command's state %x or reply->subcode (%x) != ucmd->subcode "
                "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
                reply->subcode, ucmd->user_cmd.subcode);
        res = -EINVAL;
        dev_user_unjam_cmd(ucmd, 0, NULL);

out_unlock:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
        goto out;
}

static int dev_user_reply_cmd(struct file *file, unsigned long arg)
{
        int res = 0;
        struct scst_user_dev *dev;
        struct scst_user_reply_cmd *reply;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev *)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        reply = kzalloc(sizeof(*reply), GFP_KERNEL);
        if (reply == NULL) {
                res = -ENOMEM;
                goto out_up;
        }

        res = copy_from_user(reply, (void *)arg, sizeof(*reply));
        if (res != 0) {
                /* copy_from_user() returns the number of bytes not copied */
                res = -EFAULT;
                goto out_free;
        }

        TRACE_BUFFER("Reply", reply, sizeof(*reply));

        res = dev_user_process_reply(dev, reply);
        if (res < 0)
                goto out_free;

out_free:
        kfree(reply);

out_up:
        up_read(&dev->dev_rwsem);

out:
        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_scst_commands(struct scst_user_dev *dev)
{
        int res = 0;

        TRACE_ENTRY();

        while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
                struct scst_cmd *cmd = list_entry(
                        dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
                        cmd_list_entry);
                TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
                list_del(&cmd->cmd_list_entry);
                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT |
                                                 SCST_CONTEXT_PROCESSABLE);
                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                res++;
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* Called under cmd_lists.cmd_list_lock and IRQ off */
struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
{
        struct scst_user_cmd *u;

again:
        u = NULL;
        if (!list_empty(cmd_list)) {
                u = list_entry(cmd_list->next, typeof(*u), ready_cmd_list_entry);

                TRACE_DBG("Found ready ucmd %p", u);
                list_del(&u->ready_cmd_list_entry);

                EXTRACHECKS_BUG_ON(u->state & UCMD_STATE_JAMMED_MASK);

                if (u->cmd != NULL) {
                        if (u->state == UCMD_STATE_EXECING) {
                                struct scst_user_dev *dev = u->dev;
                                int rc;
                                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                                rc = scst_check_local_events(u->cmd);
                                if (unlikely(rc != 0)) {
                                        u->cmd->scst_cmd_done(u->cmd,
                                                SCST_CMD_STATE_DEFAULT);
                                        /*
                                         * !! At this point cmd & u can be !!
                                         * !! already freed                !!
                                         */
                                        spin_lock_irq(
                                                &dev->cmd_lists.cmd_list_lock);
                                        goto again;
                                }
                                /*
                                 * There is no real need to lock again here, but
                                 * let's do it for simplicity.
                                 */
                                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                        } else if (unlikely(test_bit(SCST_CMD_ABORTED,
                                        &u->cmd->cmd_flags))) {
                                switch (u->state) {
                                case UCMD_STATE_PARSING:
                                case UCMD_STATE_BUF_ALLOCING:
                                        TRACE_MGMT_DBG("Aborting ucmd %p", u);
                                        dev_user_unjam_cmd(u, 0, NULL);
                                        goto again;
                                case UCMD_STATE_EXECING:
                                        EXTRACHECKS_BUG_ON(1);
                                }
                        }
                }
                u->state |= UCMD_STATE_SENT_MASK;
        }
        return u;
}

static inline int test_cmd_lists(struct scst_user_dev *dev)
{
        int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
                  !list_empty(&dev->ready_cmd_list) ||
                  !dev->blocking || dev->cleanup_done ||
                  signal_pending(current);
        return res;
}

/* Called under cmd_lists.cmd_list_lock and IRQ off */
static int dev_user_get_next_cmd(struct scst_user_dev *dev,
        struct scst_user_cmd **ucmd)
{
        int res = 0;
        wait_queue_t wait;

        TRACE_ENTRY();

        init_waitqueue_entry(&wait, current);

        while (1) {
                if (!test_cmd_lists(dev)) {
                        add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
                                &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_cmd_lists(dev))
                                        break;
                                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                                schedule();
                                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
                                &wait);
                }

                dev_user_process_scst_commands(dev);

                *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
                if (*ucmd != NULL)
                        break;

                if (!dev->blocking || dev->cleanup_done) {
                        res = -EAGAIN;
                        TRACE_DBG("No ready commands, returning %d", res);
1589                         break;
1590                 }
1591
1592                 if (signal_pending(current)) {
1593                         res = -EINTR;
1594                         TRACE_DBG("Signal pending, returning %d", res);
1595                         break;
1596                 }
1597         }
1598
1599         TRACE_EXIT_RES(res);
1600         return res;
1601 }
1602
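     /*
      * The wait loop above (and its prio twin below) is the classic
      * open-coded wait_event pattern: cmd_list_lock must be dropped around
      * schedule() and re-taken before the condition is re-checked, which
      * plain wait_event_interruptible() cannot do, hence the manual
      * add_wait_queue_exclusive()/schedule() dance.
      */
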
1603 static inline int test_prio_cmd_list(struct scst_user_dev *dev)
1604 {
1605         /*
1606          * The prio queue is always blocking, because poll() doesn't seem
1607          * to support different threads waiting with different event
1608          * masks: only one thread is woken up on each event, and if it
1609          * isn't interested in such events, another (interested) one
1610          * will not be woken up. Unclear if that's a bug or a feature.
1611          */
1612         int res = !list_empty(&dev->prio_ready_cmd_list) ||
1613                   dev->cleaning || dev->cleanup_done ||
1614                   signal_pending(current);
1615         return res;
1616 }
1617
1618 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1619 static int dev_user_get_next_prio_cmd(struct scst_user_dev *dev,
1620         struct scst_user_cmd **ucmd)
1621 {
1622         int res = 0;
1623         wait_queue_t wait;
1624
1625         TRACE_ENTRY();
1626
1627         init_waitqueue_entry(&wait, current);
1628
1629         while (1) {
1630                 if (!test_prio_cmd_list(dev)) {
1631                         add_wait_queue_exclusive(&dev->prio_cmd_list_waitQ,
1632                                 &wait);
1633                         for (;;) {
1634                                 set_current_state(TASK_INTERRUPTIBLE);
1635                                 if (test_prio_cmd_list(dev))
1636                                         break;
1637                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1638                                 schedule();
1639                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1640                         }
1641                         set_current_state(TASK_RUNNING);
1642                         remove_wait_queue(&dev->prio_cmd_list_waitQ, &wait);
1643                 }
1644
1645                 *ucmd = __dev_user_get_next_cmd(&dev->prio_ready_cmd_list);
1646                 if (*ucmd != NULL)
1647                         break;
1648
1649                 if (dev->cleaning || dev->cleanup_done) {
1650                         res = -EAGAIN;
1651                         TRACE_DBG("No ready commands, returning %d", res);
1652                         break;
1653                 }
1654
1655                 if (signal_pending(current)) {
1656                         res = -EINTR;
1657                         TRACE_DBG("Signal pending, returning %d", res);
1658                         break;
1659                 }
1660         }
1661
1662         TRACE_EXIT_RES(res);
1663         return res;
1664 }
1665
1666 static int dev_user_reply_get_cmd(struct file *file, unsigned long arg,
1667         int prio)
1668 {
1669         int res = 0;
1670         struct scst_user_dev *dev;
1671         struct scst_user_get_cmd *cmd;
1672         struct scst_user_reply_cmd *reply;
1673         struct scst_user_cmd *ucmd;
1674         uint64_t ureply;
1675
1676         TRACE_ENTRY();
1677
1678         mutex_lock(&dev_priv_mutex);
1679         dev = (struct scst_user_dev *)file->private_data;
1680         res = dev_user_check_reg(dev);
1681         if (res != 0) {
1682                 mutex_unlock(&dev_priv_mutex);
1683                 goto out;
1684         }
1685         down_read(&dev->dev_rwsem);
1686         mutex_unlock(&dev_priv_mutex);
1687
1688         res = copy_from_user(&ureply, (void *)arg, sizeof(ureply)) ? -EFAULT : 0;
1689         if (res != 0)
1690                 goto out_up;
1691
1692         TRACE_DBG("ureply %llu", (unsigned long long)ureply);
1693
1694         cmd = kzalloc(max(sizeof(*cmd), sizeof(*reply)), GFP_KERNEL);
1695         if (cmd == NULL) {
1696                 res = -ENOMEM;
1697                 goto out_up;
1698         }
1699
1700         if (ureply != 0) {
1701                 unsigned long u = (unsigned long)ureply;
1702                 reply = (struct scst_user_reply_cmd *)cmd;
1703                 res = copy_from_user(reply, (void *)u, sizeof(*reply)) ? -EFAULT : 0;
1704                 if (res != 0)
1705                         goto out_free;
1706
1707                 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1708
1709                 res = dev_user_process_reply(dev, reply);
1710                 if (res < 0)
1711                         goto out_free;
1712         }
1713
1714         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1715         if (prio && (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE))
1716                 res = dev_user_get_next_prio_cmd(dev, &ucmd);
1717         else
1718                 res = dev_user_get_next_cmd(dev, &ucmd);
1719         if (res == 0) {
1720                 *cmd = ucmd->user_cmd;
1721                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1722                 TRACE_BUFFER("UCMD", cmd, sizeof(*cmd));
1723                 res = copy_to_user((void *)arg, cmd, sizeof(*cmd)) ? -EFAULT : 0;
1724         } else
1725                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1726
1727 out_free:
1728         kfree(cmd);
1729
1730 out_up:
1731         up_read(&dev->dev_rwsem);
1732
1733 out:
1734         TRACE_EXIT_RES(res);
1735         return res;
1736 }
1737
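     /*
      * Illustrative user space main loop (a sketch, assuming the
      * scst_user_get_cmd layout from scst_user.h, whose leading 64-bit
      * "preply" field carries the address of the optional reply passed in
      * together with the request for the next command; 0, as on the first
      * iteration, makes the call a pure fetch):
      *
      *   struct scst_user_get_cmd get;
      *   struct scst_user_reply_cmd reply;
      *
      *   memset(&get, 0, sizeof(get));
      *   for (;;) {
      *           if (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get) != 0)
      *                   break;             // EAGAIN/EINTR end the loop
      *           process_cmd(&get, &reply); // hypothetical dispatcher
      *           get.preply = (unsigned long)&reply;
      *   }
      */
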
1738 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1739         unsigned long arg)
1740 {
1741         long res;
1742
1743         TRACE_ENTRY();
1744
1745         switch (cmd) {
1746         case SCST_USER_REPLY_AND_GET_CMD:
1747                 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1748                 res = dev_user_reply_get_cmd(file, arg, 0);
1749                 break;
1750
1751         case SCST_USER_REPLY_CMD:
1752                 TRACE_DBG("%s", "REPLY_CMD");
1753                 res = dev_user_reply_cmd(file, arg);
1754                 break;
1755
1756         case SCST_USER_REPLY_AND_GET_PRIO_CMD:
1757                 TRACE_DBG("%s", "REPLY_AND_GET_PRIO_CMD");
1758                 res = dev_user_reply_get_cmd(file, arg, 1);
1759                 break;
1760
1761         case SCST_USER_REGISTER_DEVICE:
1762         {
1763                 struct scst_user_dev_desc *dev_desc;
1764                 TRACE_DBG("%s", "REGISTER_DEVICE");
1765                 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1766                 if (dev_desc == NULL) {
1767                         res = -ENOMEM;
1768                         goto out;
1769                 }
1770                 res = copy_from_user(dev_desc, (void *)arg, sizeof(*dev_desc)) ? -EFAULT : 0;
1771                 if (res != 0) {
1772                         kfree(dev_desc);
1773                         goto out;
1774                 }
1775                 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1776                 dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
1777                 res = dev_user_register_dev(file, dev_desc);
1778                 kfree(dev_desc);
1779                 break;
1780         }
1781
1782         case SCST_USER_SET_OPTIONS:
1783         {
1784                 struct scst_user_opt opt;
1785                 TRACE_DBG("%s", "SET_OPTIONS");
1786                 res = copy_from_user(&opt, (void *)arg, sizeof(opt)) ? -EFAULT : 0;
1787                 if (res != 0)
1788                         goto out;
1789                 TRACE_BUFFER("opt", &opt, sizeof(opt));
1790                 res = dev_user_set_opt(file, &opt);
1791                 break;
1792         }
1793
1794         case SCST_USER_GET_OPTIONS:
1795                 TRACE_DBG("%s", "GET_OPTIONS");
1796                 res = dev_user_get_opt(file, (void *)arg);
1797                 break;
1798
1799         default:
1800                 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
1801                 res = -EINVAL;
1802                 goto out;
1803         }
1804
1805 out:
1806         TRACE_EXIT_RES(res);
1807         return res;
1808 }
1809
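     /*
      * Illustrative registration sketch (user space; "fd" is a hypothetical
      * descriptor of the opened scst_user device node). The version
      * handshake is mandatory: version_str must point to a string equal to
      * DEV_USER_VERSION, and name should be NUL-terminated (the kernel
      * re-terminates it defensively above):
      *
      *   struct scst_user_dev_desc desc;
      *
      *   memset(&desc, 0, sizeof(desc));
      *   desc.version_str = (unsigned long)DEV_USER_VERSION;
      *   desc.type = TYPE_DISK;
      *   desc.block_size = 512;
      *   strncpy(desc.name, "my_vdev", sizeof(desc.name) - 1);
      *   // desc.opt: tst, queue_alg, parse_type etc., see struct scst_user_opt
      *   if (ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc) != 0)
      *           perror("SCST_USER_REGISTER_DEVICE");
      */
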
1810 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
1811 {
1812         int res = 0;
1813         struct scst_user_dev *dev;
1814
1815         TRACE_ENTRY();
1816
1817         mutex_lock(&dev_priv_mutex);
1818         dev = (struct scst_user_dev *)file->private_data;
1819         res = dev_user_check_reg(dev);
1820         if (res != 0) {
1821                 mutex_unlock(&dev_priv_mutex);
                     res = POLLERR; /* ->poll() must return a mask, not -errno */
1822                 goto out;
1823         }
1824         down_read(&dev->dev_rwsem);
1825         mutex_unlock(&dev_priv_mutex);
1826
1827         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1828
1829         if (!list_empty(&dev->ready_cmd_list) ||
1830             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1831                 res |= POLLIN | POLLRDNORM;
1832                 goto out_unlock;
1833         }
1834
1835         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1836
1837         TRACE_DBG("Before poll_wait() (dev %p)", dev);
1838         poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
1839         TRACE_DBG("After poll_wait() (dev %p)", dev);
1840
1841         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1842
1843         if (!list_empty(&dev->ready_cmd_list) ||
1844             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1845                 res |= POLLIN | POLLRDNORM;
1846                 goto out_unlock;
1847         }
1848
1849 out_unlock:
1850         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1851
1852         up_read(&dev->dev_rwsem);
1853
1854 out:
1855         TRACE_EXIT_HRES(res);
1856         return res;
1857 }
1858
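     /*
      * Illustrative non-blocking usage (sketch): a handler that opened the
      * device with O_NONBLOCK can multiplex it with poll(2).
      * POLLIN/POLLRDNORM are reported while ready or active commands are
      * queued, and SCST_USER_REPLY_AND_GET_CMD fails with EAGAIN once they
      * are drained:
      *
      *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
      *
      *   if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
      *           // fetch and process commands until EAGAIN
      *   }
      */
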
1859 /*
1860  * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reacquire it.
1861  */
1862 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
1863         unsigned long *flags)
1864 {
1865         int state = ucmd->state & ~UCMD_STATE_MASK;
1866         struct scst_user_dev *dev = ucmd->dev;
1867
1868         TRACE_ENTRY();
1869
1870         if (ucmd->state & UCMD_STATE_JAMMED_MASK)
1871                 goto out;
1872
1873         TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
1874                 ucmd->state);
1875
1876         ucmd->state = state | UCMD_STATE_JAMMED_MASK;
1877
1878         switch (state) {
1879         case UCMD_STATE_PARSING:
1880         case UCMD_STATE_BUF_ALLOCING:
1881                 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1882                         ucmd->aborted = 1;
1883                 else {
1884                         if (busy)
1885                                 scst_set_busy(ucmd->cmd);
1886                         else
1887                                 scst_set_cmd_error(ucmd->cmd,
1888                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1889                 }
1890                 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
1891                 list_add(&ucmd->cmd->cmd_list_entry,
1892                         &ucmd->cmd->cmd_lists->active_cmd_list);
1893                 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
1894                 break;
1895
1896         case UCMD_STATE_EXECING:
1897                 if (flags != NULL)
1898                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1899                 else
1900                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1901
1902                 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
1903
1904                 if (test_bit(SCST_CMD_ABORTED,  &ucmd->cmd->cmd_flags))
1905                         ucmd->aborted = 1;
1906                 else {
1907                         if (busy)
1908                                 scst_set_busy(ucmd->cmd);
1909                         else
1910                                 scst_set_cmd_error(ucmd->cmd,
1911                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1912                 }
1913
1914                 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT);
1915                 /* !! At this point cmd and ucmd may already be freed !! */
1916
1917                 if (flags != NULL)
1918                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1919                 else
1920                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1921                 break;
1922
1923         case UCMD_STATE_ON_FREEING:
1924         case UCMD_STATE_ON_CACHE_FREEING:
1925         case UCMD_STATE_TM_EXECING:
1926         case UCMD_STATE_ATTACH_SESS:
1927         case UCMD_STATE_DETACH_SESS:
1928         {
1929                 if (flags != NULL)
1930                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1931                 else
1932                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1933
1934                 switch (state) {
1935                 case UCMD_STATE_ON_FREEING:
1936                         dev_user_process_reply_on_free(ucmd);
1937                         break;
1938
1939                 case UCMD_STATE_ON_CACHE_FREEING:
1940                         dev_user_process_reply_on_cache_free(ucmd);
1941                         break;
1942
1943                 case UCMD_STATE_TM_EXECING:
1944                         dev_user_process_reply_tm_exec(ucmd, SCST_MGMT_STATUS_FAILED);
1945                         break;
1946
1947                 case UCMD_STATE_ATTACH_SESS:
1948                 case UCMD_STATE_DETACH_SESS:
1949                         dev_user_process_reply_sess(ucmd, -EFAULT);
1950                         break;
1951                 }
1952
1953                 if (flags != NULL)
1954                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1955                 else
1956                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1957                 break;
1958         }
1959
1960         default:
1961                 PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
1962                 sBUG();
1963                 break;
1964         }
1965
1966 out:
1967         TRACE_EXIT();
1968         return;
1969 }
1970
1971 static int __unjam_check_tgt_dev(struct scst_user_cmd *ucmd, int state,
1972         struct scst_tgt_dev *tgt_dev)
1973 {
1974         int res = 0;
1975
1976         if (ucmd->cmd == NULL)
1977                 goto out;
1978
1979         if (ucmd->cmd->tgt_dev != tgt_dev)
1980                 goto out;
1981
1982         switch (state & ~UCMD_STATE_MASK) {
1983         case UCMD_STATE_PARSING:
1984         case UCMD_STATE_BUF_ALLOCING:
1985         case UCMD_STATE_EXECING:
1986                 break;
1987         default:
1988                 goto out;
1989         }
1990
1991         res = 1;
1992 out:
1993         return res;
1994 }
1995
1996 static int __unjam_check_tm(struct scst_user_cmd *ucmd, int state)
1997 {
1998         int res = 0;
1999
2000         switch (state & ~UCMD_STATE_MASK) {
2001         case UCMD_STATE_PARSING:
2002         case UCMD_STATE_BUF_ALLOCING:
2003         case UCMD_STATE_EXECING:
2004                 if ((ucmd->cmd != NULL) &&
2005                     (!test_bit(SCST_CMD_ABORTED,
2006                                 &ucmd->cmd->cmd_flags)))
2007                         goto out;
2008                 break;
2009         default:
2010                 goto out;
2011         }
2012
2013         res = 1;
2014 out:
2015         return res;
2016 }
2017
2018 static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
2019         struct scst_tgt_dev *tgt_dev)
2020 {
2021         int i;
2022         unsigned long flags;
2023         struct scst_user_cmd *ucmd;
2024
2025         TRACE_ENTRY();
2026
2027         TRACE_MGMT_DBG("Unjamming dev %p", dev);
2028
2029         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2030
2031 repeat:
2032         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2033                 struct list_head *head = &dev->ucmd_hash[i];
2034                 list_for_each_entry(ucmd, head, hash_list_entry) {
2035                         TRACE_DBG("ALL: ucmd %p, state %x, scst_cmd %p",
2036                                 ucmd, ucmd->state, ucmd->cmd);
2037                         if (ucmd->state & UCMD_STATE_SENT_MASK) {
2038                                 int st = ucmd->state & ~UCMD_STATE_SENT_MASK;
2039                                 if (tgt_dev != NULL) {
2040                                         if (__unjam_check_tgt_dev(ucmd, st,
2041                                                         tgt_dev) == 0)
2042                                                 continue;
2043                                 } else if (tm) {
2044                                         if (__unjam_check_tm(ucmd, st) == 0)
2045                                                 continue;
2046                                 }
2047                                 dev_user_unjam_cmd(ucmd, 0, &flags);
2048                                 goto repeat;
2049                         }
2050                 }
2051         }
2052
2053         if ((tgt_dev != NULL) || tm) {
2054                 list_for_each_entry(ucmd, &dev->ready_cmd_list,
2055                                 ready_cmd_list_entry) {
2056                         TRACE_DBG("READY: ucmd %p, state %x, scst_cmd %p",
2057                                 ucmd, ucmd->state, ucmd->cmd);
2058                         if (tgt_dev != NULL) {
2059                                 if (__unjam_check_tgt_dev(ucmd, ucmd->state,
2060                                                 tgt_dev) == 0)
2061                                         continue;
2062                         } else if (tm) {
2063                                 if (__unjam_check_tm(ucmd, ucmd->state) == 0)
2064                                         continue;
2065                         }
2066                         list_del(&ucmd->ready_cmd_list_entry);
2067                         dev_user_unjam_cmd(ucmd, 0, &flags);
2068                         goto repeat;
2069                 }
2070         }
2071
2072         if (dev_user_process_scst_commands(dev) != 0)
2073                 goto repeat;
2074
2075         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2076
2077         TRACE_EXIT();
2078         return;
2079 }
2080
2081 /**
2082  ** To deal with user space handler hangups we rely on the remote
2083  ** initiators, which are supposed to issue a task management command
2084  ** when a command doesn't respond for too long; on that event we can
2085  ** "unjam" the command. To keep the TM command itself from stalling,
2086  ** we use a timer, and to avoid piling up queued TM commands we
2087  ** enqueue at most 2 of them: the first with the requested TM
2088  ** function, the second with TARGET_RESET as the most comprehensive.
2089  **
2090  ** The only exception is the DETACH_SESS subcode, where no TM commands
2091  ** can be expected, so after a timeout we must manually "unjam" all
2092  ** the commands on the device.
2093  **
2094  ** We also queue at most one ATTACH_SESS command, failing it on timeout.
2095  **/
2096
2097 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
2098         int status)
2099 {
2100         int res = 0;
2101         unsigned long flags;
2102
2103         TRACE_ENTRY();
2104
2105         TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
2106                 ucmd->user_cmd.tm_cmd.fn, status);
2107
2108         ucmd->result = status;
2109
2110         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2111
2112         if (ucmd->internal_reset_tm) {
2113                 TRACE_MGMT_DBG("Internal TM ucmd %p finished", ucmd);
2114                 ucmd->dev->internal_reset_active = 0;
2115         } else {
2116                 TRACE_MGMT_DBG("TM ucmd %p finished", ucmd);
2117                 ucmd->dev->tm_cmd_active = 0;
2118         }
2119
2120         if (ucmd->cmpl != NULL)
2121                 complete_all(ucmd->cmpl);
2122
2123         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2124
2125         ucmd_put(ucmd);
2126
2127         TRACE_EXIT_RES(res);
2128         return res;
2129 }
2130
2131 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2132         struct scst_tgt_dev *tgt_dev)
2133 {
2134         int res, rc;
2135         struct scst_user_cmd *ucmd;
2136         struct scst_user_dev *dev = (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2137         struct scst_user_cmd *ucmd_to_abort = NULL;
2138
2139         TRACE_ENTRY();
2140
2141         /* We can't afford missing TM command due to memory shortage */
2142         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2143         ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL|__GFP_NOFAIL);
2144
2145         init_completion(ucmd->cmpl);
2146
2147         ucmd->user_cmd.cmd_h = ucmd->h;
2148         ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2149         ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2150         ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2151         ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
2152         ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
2153
2154         if (mcmd->cmd_to_abort != NULL) {
2155                 ucmd_to_abort = (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
2156                 if (ucmd_to_abort != NULL)
2157                         ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2158         }
2159
2160         TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2161                 "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
2162                 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2163                 ucmd->user_cmd.tm_cmd.cmd_h_to_abort);
2164
2165         ucmd->state = UCMD_STATE_TM_EXECING;
2166
2167         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2168         if (dev->internal_reset_active) {
2169                 PRINT_ERROR("Losing TM cmd %d, because there are other "
2170                         "unprocessed TM commands", mcmd->fn);
2171                 res = SCST_MGMT_STATUS_FAILED;
2172                 goto out_locked_free;
2173         } else if (dev->tm_cmd_active) {
2174                 /*
2175                  * We are going to miss some TM commands, so replace this
2176                  * one with the heaviest: TARGET_RESET.
2177                  */
2178                 PRINT_ERROR("Replacing TM cmd %d with TARGET_RESET, because "
2179                         "there is another unprocessed TM command", mcmd->fn);
2180                 ucmd->user_cmd.tm_cmd.fn = SCST_TARGET_RESET;
2181                 ucmd->internal_reset_tm = 1;
2182                 dev->internal_reset_active = 1;
2183         } else
2184                 dev->tm_cmd_active = 1;
2185         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2186
2187         ucmd_get(ucmd, 0);
2188         dev_user_add_to_ready(ucmd);
2189
2190         /*
2191          * Since the user space handler should not wait for the affected
2192          * tasks to complete, it shall complete the TM request ASAP;
2193          * otherwise the device will be considered stalled.
2194          */
2195         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_TM_TIMEOUT);
2196         if (rc > 0)
2197                 res = ucmd->result;
2198         else {
2199                 PRINT_ERROR("Task management command %p timeout", ucmd);
2200                 res = SCST_MGMT_STATUS_FAILED;
2201         }
2202
2203         sBUG_ON(irqs_disabled());
2204
2205         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2206
2207 out_locked_free:
2208         kfree(ucmd->cmpl);
2209         ucmd->cmpl = NULL;
2210         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2211
2212         dev_user_unjam_dev(ucmd->dev, 1, NULL);
2213
2214         ucmd_put(ucmd);
2215
2216         TRACE_EXIT();
2217         return res;
2218 }
2219
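     /*
      * Illustrative user space TM handling (sketch; "get" and "reply" are
      * the hypothetical fetch/reply buffers of the main loop, and
      * SCST_MGMT_STATUS_SUCCESS is assumed to be the success counterpart
      * of the _FAILED status used above). Per the comment before
      * dev_user_task_mgmt_fn(), the reply must come within
      * DEV_USER_TM_TIMEOUT, without waiting for the affected commands:
      *
      *   case SCST_USER_TASK_MGMT:
      *           // note get.tm_cmd.cmd_h_to_abort, if set, for bookkeeping
      *           memset(&reply, 0, sizeof(reply));
      *           reply.cmd_h = get.cmd_h;
      *           reply.subcode = SCST_USER_TASK_MGMT;
      *           reply.result = SCST_MGMT_STATUS_SUCCESS;
      *           break;
      */
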
2220 static int dev_user_attach(struct scst_device *sdev)
2221 {
2222         int res = 0;
2223         struct scst_user_dev *dev = NULL, *d;
2224
2225         TRACE_ENTRY();
2226
2227         spin_lock(&dev_list_lock);
2228         list_for_each_entry(d, &dev_list, dev_list_entry) {
2229                 if (strcmp(d->name, sdev->virt_name) == 0) {
2230                         dev = d;
2231                         break;
2232                 }
2233         }
2234         spin_unlock(&dev_list_lock);
2235         if (dev == NULL) {
2236                 PRINT_ERROR("Device %s not found", sdev->virt_name);
2237                 res = -EINVAL;
2238                 goto out;
2239         }
2240
2241         sdev->p_cmd_lists = &dev->cmd_lists;
2242         sdev->dh_priv = dev;
2243         sdev->tst = dev->tst;
2244         sdev->queue_alg = dev->queue_alg;
2245         sdev->swp = dev->swp;
2246         sdev->tas = dev->tas;
2247         sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2248
2249         dev->sdev = sdev;
2250
2251         PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2252                 dev->name);
2253
2254 out:
2255         TRACE_EXIT();
2256         return res;
2257 }
2258
2259 static void dev_user_detach(struct scst_device *sdev)
2260 {
2261         struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
2262
2263         TRACE_ENTRY();
2264
2265         TRACE_DBG("virt_id %d", sdev->virt_id);
2266
2267         PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2268                 dev->name);
2269
2270         /* dev will be freed by the caller */
2271         sdev->dh_priv = NULL;
2272         dev->sdev = NULL;
2273
2274         TRACE_EXIT();
2275         return;
2276 }
2277
2278 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2279 {
2280         int res = 0;
2281         unsigned long flags;
2282
2283         TRACE_ENTRY();
2284
2285         TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2286
2287         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2288
2289         if ((ucmd->state & ~UCMD_STATE_MASK) ==
2290                         UCMD_STATE_ATTACH_SESS) {
2291                 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2292                 ucmd->result = status;
2293                 ucmd->dev->attach_cmd_active = 0;
2294         } else if ((ucmd->state & ~UCMD_STATE_MASK) ==
2295                         UCMD_STATE_DETACH_SESS) {
2296                 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2297                 ucmd->dev->detach_cmd_count--;
2298         } else
2299                 sBUG();
2300
2301         if (ucmd->cmpl != NULL)
2302                 complete_all(ucmd->cmpl);
2303
2304         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2305
2306         ucmd_put(ucmd);
2307
2308         TRACE_EXIT_RES(res);
2309         return res;
2310 }
2311
2312 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2313 {
2314         struct scst_user_dev *dev =
2315                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2316         int res = 0, rc;
2317         struct scst_user_cmd *ucmd;
2318
2319         TRACE_ENTRY();
2320
2321         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2322         if (ucmd == NULL)
2323                 goto out_nomem;
2324
2325         ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL);
2326         if (ucmd->cmpl == NULL)
2327                 goto out_put_nomem;
2328
2329         init_completion(ucmd->cmpl);
2330
2331         ucmd->user_cmd.cmd_h = ucmd->h;
2332         ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2333         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2334         ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2335         ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2336         ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
2337         strncpy(ucmd->user_cmd.sess.initiator_name,
2338                 tgt_dev->sess->initiator_name,
2339                 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2340         ucmd->user_cmd.sess.initiator_name[
2341                 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2342
2343         TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %Lx, LUN %Lx, "
2344                 "threads_num %d, rd_only_flag %d, initiator %s)", ucmd, ucmd->h,
2345                 ucmd->user_cmd.sess.sess_h, ucmd->user_cmd.sess.lun,
2346                 ucmd->user_cmd.sess.threads_num, ucmd->user_cmd.sess.rd_only,
2347                 ucmd->user_cmd.sess.initiator_name);
2348
2349         ucmd->state = UCMD_STATE_ATTACH_SESS;
2350
2351         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2352         if (dev->attach_cmd_active) {
2353                 PRINT_ERROR("%s", "ATTACH_SESS command failed, because "
2354                         "there is another unprocessed ATTACH_SESS command");
2355                 res = -EBUSY;
2356                 goto out_locked_free;
2357         }
2358         dev->attach_cmd_active = 1;
2359         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2360
2361         ucmd_get(ucmd, 0);
2362         dev_user_add_to_ready(ucmd);
2363
2364         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2365         if (rc > 0)
2366                 res = ucmd->result;
2367         else {
2368                 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2369                 res = -EFAULT;
2370         }
2371
2372         sBUG_ON(irqs_disabled());
2373
2374         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2375 out_locked_free:
2376         kfree(ucmd->cmpl);
2377         ucmd->cmpl = NULL;
2378         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2379
2380         ucmd_put(ucmd);
2381
2382 out:
2383         TRACE_EXIT_RES(res);
2384         return res;
2385
2386 out_put_nomem:
2387         ucmd_put(ucmd);
2388
2389 out_nomem:
2390         res = -ENOMEM;
2391         goto out;
2392 }
2393
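     /*
      * Illustrative user space session handling (sketch, same hypothetical
      * buffers as above): ATTACH_SESS is delivered like any other command,
      * with subcode SCST_USER_ATTACH_SESS, and must be answered within
      * DEV_USER_ATTACH_TIMEOUT or the attach fails:
      *
      *   case SCST_USER_ATTACH_SESS:
      *           // set up per-session state, keyed by get.sess.sess_h
      *           memset(&reply, 0, sizeof(reply));
      *           reply.cmd_h = get.cmd_h;
      *           reply.subcode = SCST_USER_ATTACH_SESS;
      *           reply.result = 0;      // non-zero refuses the session
      *           break;
      *
      * DETACH_SESS is answered the same way with subcode
      * SCST_USER_DETACH_SESS; its result is not checked.
      */
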
2394 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
2395 static void dev_user_pre_unreg_sess_work_fn(void *p)
2396 #else
2397 static void dev_user_pre_unreg_sess_work_fn(struct work_struct *work)
2398 #endif
2399 {
2400 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
2401         struct scst_user_pre_unreg_sess_obj *pd = (struct scst_user_pre_unreg_sess_obj *)p;
2402 #else
2403         struct scst_user_pre_unreg_sess_obj *pd = container_of(
2404                 (struct delayed_work *)work, struct scst_user_pre_unreg_sess_obj,
2405                 pre_unreg_sess_work);
2406 #endif
2407         struct scst_user_dev *dev =
2408                 (struct scst_user_dev *)pd->tgt_dev->dev->dh_priv;
2409
2410         TRACE_ENTRY();
2411
2412         TRACE_MGMT_DBG("Unreg sess: unjamming dev %p (tgt_dev %p)", dev,
2413                 pd->tgt_dev);
2414
2415         pd->active = 1;
2416
2417         dev_user_unjam_dev(dev, 0, pd->tgt_dev);
2418
2419         if (!pd->exit) {
2420                 TRACE_MGMT_DBG("Rescheduling pre_unreg_sess work %p (dev %p, "
2421                         "tgt_dev %p)", pd, dev, pd->tgt_dev);
2422                 schedule_delayed_work(&pd->pre_unreg_sess_work,
2423                         DEV_USER_PRE_UNREG_POLL_TIME);
2424         }
2425
2426         TRACE_EXIT();
2427         return;
2428 }
2429
2430 static void dev_user_pre_unreg_sess(struct scst_tgt_dev *tgt_dev)
2431 {
2432         struct scst_user_dev *dev =
2433                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2434         struct scst_user_pre_unreg_sess_obj *pd;
2435
2436         TRACE_ENTRY();
2437
2438         /* We can't afford missing DETACH command due to memory shortage */
2439         pd = kzalloc(sizeof(*pd), GFP_KERNEL|__GFP_NOFAIL);
2440
2441         pd->tgt_dev = tgt_dev;
2442 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
2443         INIT_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn, pd);
2444 #else
2445         INIT_DELAYED_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn);
2446 #endif
2447
2448         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2449         dev->pre_unreg_sess_active = 1;
2450         list_add_tail(&pd->pre_unreg_sess_list_entry, &dev->pre_unreg_sess_list);
2451         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2452
2453         TRACE_MGMT_DBG("Scheduling pre_unreg_sess work %p (dev %p, tgt_dev %p)",
2454                 pd, dev, pd->tgt_dev);
2455
2456         schedule_delayed_work(&pd->pre_unreg_sess_work, DEV_USER_DETACH_TIMEOUT);
2457
2458         TRACE_EXIT();
2459         return;
2460 }
2461
2462 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2463 {
2464         struct scst_user_dev *dev =
2465                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2466         struct scst_user_cmd *ucmd;
2467         struct scst_user_pre_unreg_sess_obj *pd = NULL, *p;
2468
2469         TRACE_ENTRY();
2470
2471         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2472         list_for_each_entry(p, &dev->pre_unreg_sess_list,
2473                         pre_unreg_sess_list_entry) {
2474                 if (p->tgt_dev == tgt_dev) {
2475                         list_del(&p->pre_unreg_sess_list_entry);
2476                         if (list_empty(&dev->pre_unreg_sess_list))
2477                                 dev->pre_unreg_sess_active = 0;
2478                         pd = p;
2479                         break;
2480                 }
2481         }
2482         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2483
2484         if (pd != NULL) {
2485                 pd->exit = 1;
2486                 TRACE_MGMT_DBG("Canceling pre unreg work %p", pd);
2487                 cancel_delayed_work(&pd->pre_unreg_sess_work);
2488                 flush_scheduled_work();
2489                 kfree(pd);
2490         }
2491
2492         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2493         if (ucmd == NULL)
2494                 goto out;
2495
2496         ucmd->user_cmd.cmd_h = ucmd->h;
2497         ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2498         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2499
2500         TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %Lx)", ucmd,
2501                 ucmd->h, ucmd->user_cmd.sess.sess_h);
2502
2503         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2504         dev->detach_cmd_count++;
2505         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2506
2507         ucmd->state = UCMD_STATE_DETACH_SESS;
2508
2509         dev_user_add_to_ready(ucmd);
2510
2511 out:
2512         TRACE_EXIT();
2513         return;
2514 }
2515
2516 /* No locks are needed, but the activity must be suspended */
2517 static void dev_user_setup_functions(struct scst_user_dev *dev)
2518 {
2519         TRACE_ENTRY();
2520
2521         dev->devtype.parse = dev_user_parse;
2522         dev->devtype.dev_done = NULL;
2523
2524         if (dev->parse_type != SCST_USER_PARSE_CALL) {
2525                 switch (dev->devtype.type) {
2526                 case TYPE_DISK:
2527                         dev->generic_parse = scst_sbc_generic_parse;
2528                         dev->devtype.dev_done = dev_user_disk_done;
2529                         break;
2530
2531                 case TYPE_TAPE:
2532                         dev->generic_parse = scst_tape_generic_parse;
2533                         dev->devtype.dev_done = dev_user_tape_done;
2534                         break;
2535
2536                 case TYPE_MOD:
2537                         dev->generic_parse = scst_modisk_generic_parse;
2538                         dev->devtype.dev_done = dev_user_disk_done;
2539                         break;
2540
2541                 case TYPE_ROM:
2542                         dev->generic_parse = scst_cdrom_generic_parse;
2543                         dev->devtype.dev_done = dev_user_disk_done;
2544                         break;
2545
2546                 case TYPE_MEDIUM_CHANGER:
2547                         dev->generic_parse = scst_changer_generic_parse;
2548                         break;
2549
2550                 case TYPE_PROCESSOR:
2551                         dev->generic_parse = scst_processor_generic_parse;
2552                         break;
2553
2554                 case TYPE_RAID:
2555                         dev->generic_parse = scst_raid_generic_parse;
2556                         break;
2557
2558                 default:
2559                         PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2560                                 "for it", dev->devtype.type);
2561                         dev->parse_type = SCST_USER_PARSE_CALL;
2562                         break;
2563                 }
2564         } else {
2565                 dev->generic_parse = NULL;
2566                 dev->devtype.dev_done = NULL;
2567         }
2568
2569         TRACE_EXIT();
2570         return;
2571 }
2572
2573 static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
2574 {
2575         char ver[sizeof(DEV_USER_VERSION)+1];
2576         int res;
2577
2578         res = copy_from_user(ver, (void *)(unsigned long)dev_desc->version_str,
2579                                 sizeof(ver)) ? -EFAULT : 0;
2580         if (res != 0) {
2581                 PRINT_ERROR("%s", "Unable to get version string");
2582                 goto out;
2583         }
2584         ver[sizeof(ver)-1] = '\0';
2585
2586         if (strcmp(ver, DEV_USER_VERSION) != 0) {
2587                 /* ->name already 0-terminated in dev_user_ioctl() */
2588                 PRINT_ERROR("Incorrect version of user device %s (%s)",
2589                         dev_desc->name, ver);
2590                 res = -EINVAL;
2591                 goto out;
2592         }
2593
2594 out:
2595         return res;
2596 }
2597
2598 static int dev_user_register_dev(struct file *file,
2599         const struct scst_user_dev_desc *dev_desc)
2600 {
2601         int res = -ENOMEM, i;
2602         struct scst_user_dev *dev, *d;
2603         int block;
2604
2605         TRACE_ENTRY();
2606
2607         res = dev_user_check_version(dev_desc);
2608         if (res != 0)
2609                 goto out;
2610
2611         switch (dev_desc->type) {
2612         case TYPE_DISK:
2613         case TYPE_ROM:
2614         case TYPE_MOD:
2615                 if (dev_desc->block_size == 0) {
2616                         PRINT_ERROR("Wrong block size %d", dev_desc->block_size);
2617                         res = -EINVAL;
2618                         goto out;
2619                 }
2620                 block = scst_calc_block_shift(dev_desc->block_size);
2621                 if (block == -1) {
2622                         res = -EINVAL;
2623                         goto out;
2624                 }
2625                 break;
2626         default:
2627                 block = dev_desc->block_size;
2628                 break;
2629         }
2630
2631         if (!try_module_get(THIS_MODULE)) {
2632                 PRINT_ERROR("%s", "Failed to get module");
                     res = -ENODEV;
2633                 goto out;
2634         }
2635
2636         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2637         if (dev == NULL) {
                     res = -ENOMEM;
2638                 goto out_put;
             }
2639
2640         init_rwsem(&dev->dev_rwsem);
2641         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2642         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2643         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2644         INIT_LIST_HEAD(&dev->ready_cmd_list);
2645         INIT_LIST_HEAD(&dev->prio_ready_cmd_list);
2646         init_waitqueue_head(&dev->prio_cmd_list_waitQ);
2647         if (file->f_flags & O_NONBLOCK) {
2648                 TRACE_DBG("%s", "Non-blocking operations");
2649                 dev->blocking = 0;
2650         } else
2651                 dev->blocking = 1;
2652         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2653                 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2654         INIT_LIST_HEAD(&dev->pre_unreg_sess_list);
2655
2656         strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2657         dev->name[sizeof(dev->name)-1] = '\0';
2658
2659         /*
2660          * We don't use a clustered pool, since it implies reordering of
2661          * pages, which isn't possible with user space supplied buffers.
2662          * Although it's still possible to cluster pages by the tails of
2663          * each other, it doesn't seem worth the effort.
2664          */
2665         dev->pool = sgv_pool_create(dev->name, 0);
2666         if (dev->pool == NULL) {
                     res = -ENOMEM;
2667                 goto out_free;
             }
2668         sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2669                 dev_user_free_sg_entries);
2670
2671         scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
2672                 dev->name);
2673         dev->devtype.type = dev_desc->type;
2674         dev->devtype.threads_num = -1;
2675         dev->devtype.parse_atomic = 1;
2676         dev->devtype.exec_atomic = 0; /* no point to make it 1 */
2677         dev->devtype.dev_done_atomic = 1;
2678         dev->devtype.no_proc = 1;
2679         dev->devtype.attach = dev_user_attach;
2680         dev->devtype.detach = dev_user_detach;
2681         dev->devtype.attach_tgt = dev_user_attach_tgt;
2682         dev->devtype.pre_unreg_sess = dev_user_pre_unreg_sess;
2683         dev->devtype.detach_tgt = dev_user_detach_tgt;
2684         dev->devtype.exec = dev_user_exec;
2685         dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2686         dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2687
2688         init_completion(&dev->cleanup_cmpl);
2689         dev->block = block;
2690         dev->def_block = dev->block;
2691
2692         res = __dev_user_set_opt(dev, &dev_desc->opt);
2693
2694         TRACE_MEM("dev %p, name %s", dev, dev->name);
2695
2696         spin_lock(&dev_list_lock);
2697
2698         list_for_each_entry(d, &dev_list, dev_list_entry) {
2699                 if (strcmp(d->name, dev->name) == 0) {
2700                         PRINT_ERROR("Device %s already exists",
2701                                 dev->name);
2702                         res = -EEXIST;
2703                         spin_unlock(&dev_list_lock);
2704                         goto out_free;
2705                 }
2706         }
2707
2708         list_add_tail(&dev->dev_list_entry, &dev_list);
2709
2710         spin_unlock(&dev_list_lock);
2711
2712         if (res != 0)
2713                 goto out_del_free;
2714
2715         res = scst_register_virtual_dev_driver(&dev->devtype);
2716         if (res < 0)
2717                 goto out_del_free;
2718
2719         dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2720         if (dev->virt_id < 0) {
2721                 res = dev->virt_id;
2722                 goto out_unreg_handler;
2723         }
2724
2725         mutex_lock(&dev_priv_mutex);
2726         if (file->private_data != NULL) {
2727                 mutex_unlock(&dev_priv_mutex);
2728                 PRINT_ERROR("%s", "Device already registered");
2729                 res = -EINVAL;
2730                 goto out_unreg_drv;
2731         }
2732         file->private_data = dev;
2733         mutex_unlock(&dev_priv_mutex);
2734
2735 out:
2736         TRACE_EXIT_RES(res);
2737         return res;
2738
2739 out_unreg_drv:
2740         scst_unregister_virtual_device(dev->virt_id);
2741
2742 out_unreg_handler:
2743         scst_unregister_virtual_dev_driver(&dev->devtype);
2744
2745 out_del_free:
2746         spin_lock(&dev_list_lock);
2747         list_del(&dev->dev_list_entry);
2748         spin_unlock(&dev_list_lock);
2749
2750 out_free:
2751         if (dev->pool != NULL)
                     sgv_pool_destroy(dev->pool);
2752         kfree(dev);
2753         goto out_put;
2754
2755 out_put:
2756         module_put(THIS_MODULE);
2757         goto out;
2758 }
2759
2760 static int __dev_user_set_opt(struct scst_user_dev *dev,
2761         const struct scst_user_opt *opt)
2762 {
2763         int res = 0;
2764
2765         TRACE_ENTRY();
2766
2767         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2768                 "partial_transfers_type %x, partial_len %d", opt->parse_type,
2769                 opt->on_free_cmd_type, opt->memory_reuse_type,
2770                 opt->partial_transfers_type, opt->partial_len);
2771
2772         if ((opt->parse_type > SCST_USER_MAX_PARSE_OPT) ||
2773             (opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT) ||
2774             (opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT) ||
2775             (opt->prio_queue_type > SCST_USER_MAX_PRIO_QUEUE_OPT) ||
2776             (opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT)) {
2777                 PRINT_ERROR("%s", "Invalid option");
2778                 res = -EINVAL;
2779                 goto out;
2780         }
2781
2782         if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
2783              (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
2784             ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
2785              (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
2786             (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1)) {
2787                 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x, "
2788                         "tas %x, has_own_order_mgmt %x)", opt->tst,
2789                         opt->queue_alg, opt->swp, opt->tas, opt->has_own_order_mgmt);
2790                 res = -EINVAL;
2791                 goto out;
2792         }
2793
2794         if ((dev->prio_queue_type != opt->prio_queue_type) &&
2795             (opt->prio_queue_type == SCST_USER_PRIO_QUEUE_SINGLE)) {
2796                 struct scst_user_cmd *u, *t;
2797                 /* No need for lock, the activity is suspended */
2798                 list_for_each_entry_safe(u, t, &dev->prio_ready_cmd_list,
2799                                 ready_cmd_list_entry) {
2800                         list_move_tail(&u->ready_cmd_list_entry,
2801                                 &dev->ready_cmd_list);
2802                 }
2803         }
2804
2805         dev->prio_queue_type = opt->prio_queue_type;
2806         dev->parse_type = opt->parse_type;
2807         dev->on_free_cmd_type = opt->on_free_cmd_type;
2808         dev->memory_reuse_type = opt->memory_reuse_type;
2809         dev->partial_transfers_type = opt->partial_transfers_type;
2810         dev->partial_len = opt->partial_len;
2811
2812         dev->tst = opt->tst;
2813         dev->queue_alg = opt->queue_alg;
2814         dev->swp = opt->swp;
2815         dev->tas = opt->tas;
2816         dev->has_own_order_mgmt = opt->has_own_order_mgmt;
2817         if (dev->sdev != NULL) {
2818                 dev->sdev->tst = opt->tst;
2819                 dev->sdev->queue_alg = opt->queue_alg;
2820                 dev->sdev->swp = opt->swp;
2821                 dev->sdev->tas = opt->tas;
2822                 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
2823         }
2824
2825         dev_user_setup_functions(dev);
2826
2827 out:
2828         TRACE_EXIT_RES(res);
2829         return res;
2830 }
2831
2832 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
2833 {
2834         int res = 0;
2835         struct scst_user_dev *dev;
2836
2837         TRACE_ENTRY();
2838
2839         mutex_lock(&dev_priv_mutex);
2840         dev = (struct scst_user_dev *)file->private_data;
2841         res = dev_user_check_reg(dev);
2842         if (res != 0) {
2843                 mutex_unlock(&dev_priv_mutex);
2844                 goto out;
2845         }
2846         down_read(&dev->dev_rwsem);
2847         mutex_unlock(&dev_priv_mutex);
2848
2849         scst_suspend_activity();
2850         res = __dev_user_set_opt(dev, opt);
2851         scst_resume_activity();
2852
2853         up_read(&dev->dev_rwsem);
2854
2855 out:
2856         TRACE_EXIT_RES(res);
2857         return res;
2858 }
2859
2860 static int dev_user_get_opt(struct file *file, void *arg)
2861 {
2862         int res = 0;
2863         struct scst_user_dev *dev;
2864         struct scst_user_opt opt;
2865
2866         TRACE_ENTRY();
2867
2868         mutex_lock(&dev_priv_mutex);
2869         dev = (struct scst_user_dev *)file->private_data;
2870         res = dev_user_check_reg(dev);
2871         if (res != 0) {
2872                 mutex_unlock(&dev_priv_mutex);
2873                 goto out;
2874         }
2875         down_read(&dev->dev_rwsem);
2876         mutex_unlock(&dev_priv_mutex);
2877
2878         opt.parse_type = dev->parse_type;
2879         opt.on_free_cmd_type = dev->on_free_cmd_type;
2880         opt.memory_reuse_type = dev->memory_reuse_type;
2881         opt.prio_queue_type = dev->prio_queue_type;
2882         opt.partial_transfers_type = dev->partial_transfers_type;
2883         opt.partial_len = dev->partial_len;
2884         opt.tst = dev->tst;
2885         opt.queue_alg = dev->queue_alg;
2886         opt.tas = dev->tas;
2887         opt.swp = dev->swp;
2888         opt.has_own_order_mgmt = dev->has_own_order_mgmt;
2889
2890         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2891                 "partial_transfers_type %x, partial_len %d", opt.parse_type,
2892                 opt.on_free_cmd_type, opt.memory_reuse_type,
2893                 opt.partial_transfers_type, opt.partial_len);
2894
2895         res = copy_to_user(arg, &opt, sizeof(opt));
2896
2897         up_read(&dev->dev_rwsem);
2898 out:
2899         TRACE_EXIT_RES(res);
2900         return res;
2901 }
2902
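     /*
      * Illustrative read-modify-write of the options (sketch): fetch the
      * current set, change a field, write it back. The kernel suspends SCST
      * activity around the update (see dev_user_set_opt() above), so this
      * is safe at runtime:
      *
      *   struct scst_user_opt opt;
      *
      *   if (ioctl(fd, SCST_USER_GET_OPTIONS, &opt) == 0) {
      *           opt.queue_alg = SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER;
      *           if (ioctl(fd, SCST_USER_SET_OPTIONS, &opt) != 0)
      *                   perror("SCST_USER_SET_OPTIONS");
      *   }
      */
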
2903 static int dev_usr_parse(struct scst_cmd *cmd)
2904 {
2905         sBUG();
2906         return SCST_CMD_STATE_DEFAULT;
2907 }
2908
2909 /* Needed only for /proc support */
2910 #define USR_TYPE {                      \
2911         .name =         DEV_USER_NAME,  \
2912         .type =         -1,             \
2913         .parse =        dev_usr_parse,  \
2914 }
2915
2916 static struct scst_dev_type dev_user_devtype = USR_TYPE;
2917
2918 static int dev_user_release(struct inode *inode, struct file *file)
2919 {
2920         int res = 0;
2921         struct scst_user_dev *dev;
2922
2923         TRACE_ENTRY();
2924
2925         mutex_lock(&dev_priv_mutex);
2926         dev = (struct scst_user_dev *)file->private_data;
2927         if (dev == NULL) {
2928                 mutex_unlock(&dev_priv_mutex);
2929                 goto out;
2930         }
2931         file->private_data = NULL;
2932
2933         TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
2934
2935         spin_lock(&dev_list_lock);
2936         list_del(&dev->dev_list_entry);
2937         spin_unlock(&dev_list_lock);
2938
2939         mutex_unlock(&dev_priv_mutex);
2940
2941         down_write(&dev->dev_rwsem);
2942
2943         spin_lock(&cleanup_lock);
2944         list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
2945         spin_unlock(&cleanup_lock);
2946
2947         wake_up(&cleanup_list_waitQ);
2948         wake_up(&dev->prio_cmd_list_waitQ);
2949         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2950
2951         scst_unregister_virtual_device(dev->virt_id);
2952         scst_unregister_virtual_dev_driver(&dev->devtype);
2953
2954         sgv_pool_destroy(dev->pool);
2955
2956         TRACE_DBG("Unregistering finished (dev %p)", dev);
2957
2958         dev->cleanup_done = 1;
2959         wake_up(&cleanup_list_waitQ);
2960         wake_up(&dev->prio_cmd_list_waitQ);
2961         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2962         wait_for_completion(&dev->cleanup_cmpl);
2963
2964         up_write(&dev->dev_rwsem); /* to make the debug check happy */
2965
2966         TRACE_DBG("Releasing completed (dev %p)", dev);
2967
2968         kfree(dev);
2969
2970         module_put(THIS_MODULE);
2971
2972 out:
2973         TRACE_EXIT_RES(res);
2974         return res;
2975 }
2976
2977 static void dev_user_process_cleanup(struct scst_user_dev *dev)
2978 {
2979         struct scst_user_cmd *ucmd;
2980         int rc;
2981
2982         TRACE_ENTRY();
2983
2984         dev->prio_queue_type = SCST_USER_PRIO_QUEUE_SINGLE;
2985         dev->cleaning = 1;
2986         dev->blocking = 1;
2987
2988         while (1) {
2989                 TRACE_DBG("Cleaning up dev %p", dev);
2990
2991                 dev_user_unjam_dev(dev, 0, NULL);
2992
2993                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2994                 rc = dev_user_get_next_prio_cmd(dev, &ucmd);
2995                 if (rc != 0)
2996                         rc = dev_user_get_next_cmd(dev, &ucmd);
2997                 if (rc == 0)
2998                         dev_user_unjam_cmd(ucmd, 1, NULL);
2999                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
3000                 if ((rc == -EAGAIN) && dev->cleanup_done)
3001                         break;
3002         }
3003
3004 #ifdef EXTRACHECKS
3005 {
3006         int i;
3007         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
3008                 struct list_head *head = &dev->ucmd_hash[i];
3009                 struct scst_user_cmd *ucmd, *t;
3010                 list_for_each_entry_safe(ucmd, t, head, hash_list_entry) {
3011                         PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd,
3012                                 ucmd->state, atomic_read(&ucmd->ucmd_ref));
3013                         ucmd_put(ucmd);
3014                 }
3015         }
3016 }
3017 #endif
3018
3019         TRACE_DBG("Cleanuping done (dev %p)", dev);
3020         complete_all(&dev->cleanup_cmpl);
3021
3022         TRACE_EXIT();
3023         return;
3024 }
3025
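/* Wakeup condition for the cleanup thread; also true on kthread_stop(). */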
3026 static inline int test_cleanup_list(void)
3027 {
3028         int res = !list_empty(&cleanup_list) ||
3029                   unlikely(kthread_should_stop());
3030         return res;
3031 }
3032
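/*
 * Kernel thread that services cleanup_list.  The wait is open-coded
 * instead of using wait_event() because the condition must be rechecked
 * under cleanup_lock; the lock is dropped only around schedule() and
 * around dev_user_process_cleanup().
 */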
3033 static int dev_user_cleanup_thread(void *arg)
3034 {
3035         struct scst_user_dev *dev;
3036
3037         TRACE_ENTRY();
3038
3039         PRINT_INFO("Cleanup thread started, PID %d", current->pid);
3040
3041         current->flags |= PF_NOFREEZE;
3042
3043         spin_lock(&cleanup_lock);
3044         while (!kthread_should_stop()) {
3045                 wait_queue_t wait;
3046                 init_waitqueue_entry(&wait, current);
3047
3048                 if (!test_cleanup_list()) {
3049                         add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
3050                         for (;;) {
3051                                 set_current_state(TASK_INTERRUPTIBLE);
3052                                 if (test_cleanup_list())
3053                                         break;
3054                                 spin_unlock(&cleanup_lock);
3055                                 schedule();
3056                                 spin_lock(&cleanup_lock);
3057                         }
3058                         set_current_state(TASK_RUNNING);
3059                         remove_wait_queue(&cleanup_list_waitQ, &wait);
3060                 }
3061                 while (!list_empty(&cleanup_list)) {
3062                         dev = list_entry(cleanup_list.next,
3063                                 struct scst_user_dev, cleanup_list_entry);
3064                         list_del(&dev->cleanup_list_entry);
3065                         spin_unlock(&cleanup_lock);
3066                         dev_user_process_cleanup(dev);
3067                         spin_lock(&cleanup_lock);
3068                 }
3069         }
3070         spin_unlock(&cleanup_lock);
3071
3072         /*
3073          * If kthread_should_stop() is true, we are guaranteed to be
3074          * in module unload, so cleanup_list must be empty.
3075          */
3076         sBUG_ON(!list_empty(&cleanup_list));
3077
3078         PRINT_INFO("Cleanup thread PID %d finished", current->pid);
3079
3080         TRACE_EXIT();
3081         return 0;
3082 }
3083
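/*
 * Module init: allocate the command cache, register the driver-level
 * devtype and its /proc entries, create the sysfs class and the control
 * character device (major DEV_USER_MAJOR), and finally start the cleanup
 * thread.  The error paths unwind in strict reverse order of these steps.
 */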
3084 static int __init init_scst_user(void)
3085 {
3086         int res = 0;
3087         struct class_device *class_member;
3088
3089         TRACE_ENTRY();
3090
3091 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
3092         PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
3093                 "Consider change VMSPLIT option or use 64-bit "
3094                 "configuration instead. See README file for details.");
3095         res = -EINVAL;
3096         goto out;
3097 #endif
3098
3099         user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
3100         if (user_cmd_cachep == NULL) {
3101                 res = -ENOMEM;
3102                 goto out;
3103         }
3104
3105         dev_user_devtype.module = THIS_MODULE;
3106
3107         res = scst_register_virtual_dev_driver(&dev_user_devtype);
3108         if (res < 0)
3109                 goto out_cache;
3110
3111         res = scst_dev_handler_build_std_proc(&dev_user_devtype);
3112         if (res != 0)
3113                 goto out_unreg;
3114
3115         dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
3116         if (IS_ERR(dev_user_sysfs_class)) {
3117                 PRINT_ERROR("%s", "Unable create sysfs class for SCST user "
3118                         "space handler");
3119                 res = PTR_ERR(dev_user_sysfs_class);
3120                 goto out_proc;
3121         }
3122
3123         res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
3124         if (res) {
3125                 PRINT_ERROR("Unable to get major %d for SCSI tapes", DEV_USER_MAJOR);
3126                 goto out_class;
3127         }
3128
3129         class_member = class_device_create(dev_user_sysfs_class, NULL,
3130                 MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
3131         if (IS_ERR(class_member)) {
3132                 res = PTR_ERR(class_member);
3133                 goto out_chrdev;
3134         }
3135
3136         cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
3137                 "scst_usr_cleanupd");
3138         if (IS_ERR(cleanup_thread)) {
3139                 res = PTR_ERR(cleanup_thread);
3140                 PRINT_ERROR("kthread_create() failed: %d", res);
3141                 goto out_dev;
3142         }
3143
3144 out:
3145         TRACE_EXIT_RES(res);
3146         return res;
3147
3148 out_dev:
3149         class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3150
3151 out_chrdev:
3152         unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3153
3154 out_class:
3155         class_destroy(dev_user_sysfs_class);
3156
3157 out_proc:
3158         scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3159
3160 out_unreg:
3161         scst_unregister_virtual_dev_driver(&dev_user_devtype);
3162
3163 out_cache:
3164         kmem_cache_destroy(user_cmd_cachep);
3165         goto out;
3166 }
3167
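/*
 * Module exit: stop the cleanup thread first (kthread_stop() waits for it
 * to return), then undo init_scst_user() in reverse order.
 */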
3168 static void __exit exit_scst_user(void)
3169 {
3170         int rc;
3171
3172         TRACE_ENTRY();
3173
3174         rc = kthread_stop(cleanup_thread);
3175         if (rc < 0)
3176                 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
3177
3178         class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3179         unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3180         class_destroy(dev_user_sysfs_class);
3181
3182         scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3183         scst_unregister_virtual_dev_driver(&dev_user_devtype);
3184
3185         kmem_cache_destroy(user_cmd_cachep);
3186
3187         TRACE_EXIT();
3188         return;
3189 }
3190
3191 module_init(init_scst_user);
3192 module_exit(exit_scst_user);
3193
3194 MODULE_AUTHOR("Vladislav Bolkhovitin");
3195 MODULE_LICENSE("GPL");
3196 MODULE_DESCRIPTION("Virtual user space device handler for SCST");
3197 MODULE_VERSION(SCST_VERSION_STRING);
3198 MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);
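/*
 * Usage note, a minimal user space sketch (assuming DEV_USER_NAME expands
 * to "scst_user", i.e. the node is /dev/scst_user with major
 * DEV_USER_MAJOR):
 *
 *      int fd = open("/dev/scst_user", O_RDWR);
 *      if (fd < 0)
 *              return -errno;
 *      // ... drive the device via the ioctl interface from scst_user.h ...
 *      close(fd);  // triggers dev_user_release() and the cleanup above
 */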