Patch from Bart Van Assche <bart.vanassche@gmail.com>:
File: scst/src/dev_handlers/scst_user.c
/*
 *  scst_user.c
 *
 *  Copyright (C) 2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 *  SCSI virtual user space device handler
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/poll.h>

#define LOG_PREFIX              DEV_USER_NAME

#include "scst.h"
#include "scst_user.h"
#include "scst_dev_handler.h"

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
#warning "HIGHMEM kernel configurations are not supported by this module, \
        because nowadays it isn't worth the effort. Consider changing the \
        VMSPLIT option or using a 64-bit configuration instead. See the README \
        file for details."
#endif

#define DEV_USER_MAJOR                  237
#define DEV_USER_CMD_HASH_ORDER         6
#define DEV_USER_TM_TIMEOUT             (10*HZ)
#define DEV_USER_ATTACH_TIMEOUT         (5*HZ)
#define DEV_USER_DETACH_TIMEOUT         (5*HZ)
#define DEV_USER_PRE_UNREG_POLL_TIME    (HZ/10)
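
/*
 * The timeouts above are in jiffies; multiplying by HZ makes them
 * HZ-independent wall-clock values. For example, with HZ == 250,
 * DEV_USER_TM_TIMEOUT = 10*HZ = 2500 jiffies = 10 s, and
 * DEV_USER_PRE_UNREG_POLL_TIME = HZ/10 = 25 jiffies = 100 ms.
 */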

struct scst_user_dev
{
        struct rw_semaphore dev_rwsem;

        struct scst_cmd_lists cmd_lists;
        /* All 3 protected by cmd_lists.cmd_list_lock */
        struct list_head ready_cmd_list;
        struct list_head prio_ready_cmd_list;
        wait_queue_head_t prio_cmd_list_waitQ;

        /* All, including detach_cmd_count, protected by cmd_lists.cmd_list_lock */
        unsigned short blocking:1;
        unsigned short cleaning:1;
        unsigned short cleanup_done:1;
        unsigned short attach_cmd_active:1;
        unsigned short tm_cmd_active:1;
        unsigned short internal_reset_active:1;
        unsigned short pre_unreg_sess_active:1; /* just a small optimization */

        unsigned short tst:3;
        unsigned short queue_alg:4;
        unsigned short tas:1;
        unsigned short swp:1;
        unsigned short has_own_order_mgmt:1;

        unsigned short detach_cmd_count;

        int (*generic_parse)(struct scst_cmd *cmd,
                int (*get_block)(struct scst_cmd *cmd));

        int block;
        int def_block;

        struct sgv_pool *pool;

        uint8_t parse_type;
        uint8_t on_free_cmd_type;
        uint8_t memory_reuse_type;
        uint8_t prio_queue_type;
        uint8_t partial_transfers_type;
        uint32_t partial_len;

        struct scst_dev_type devtype;

        /* Both protected by cmd_lists.cmd_list_lock */
        unsigned int handle_counter;
        struct list_head ucmd_hash[1<<DEV_USER_CMD_HASH_ORDER];

        struct scst_device *sdev;

        int virt_id;
        struct list_head dev_list_entry;
        char name[SCST_MAX_NAME];

        /* Protected by cmd_lists.cmd_list_lock */
        struct list_head pre_unreg_sess_list;

        struct list_head cleanup_list_entry;
        struct completion cleanup_cmpl;
};

struct scst_user_pre_unreg_sess_obj
{
        struct scst_tgt_dev *tgt_dev;
        unsigned int active:1;
        unsigned int exit:1;
        struct list_head pre_unreg_sess_list_entry;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        struct work_struct pre_unreg_sess_work;
#else
        struct delayed_work pre_unreg_sess_work;
#endif
};

/* Most fields are unprotected, since only one thread at a time can access them */
struct scst_user_cmd
{
        struct scst_cmd *cmd;
        struct scst_user_dev *dev;

        atomic_t ucmd_ref;

        unsigned int buff_cached:1;
        unsigned int buf_dirty:1;
        unsigned int background_exec:1;
        unsigned int internal_reset_tm:1;
        unsigned int aborted:1;

        struct scst_user_cmd *buf_ucmd;

        int cur_data_page;
        int num_data_pages;
        int first_page_offset;
        unsigned long ubuff;
        struct page **data_pages;
        struct sgv_pool_obj *sgv;

        unsigned int state;

        struct list_head ready_cmd_list_entry;

        unsigned int h;
        struct list_head hash_list_entry;

        struct scst_user_get_cmd user_cmd;

        struct completion *cmpl;
        int result;
};

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask);
static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);

static int dev_user_parse(struct scst_cmd *cmd);
static int dev_user_exec(struct scst_cmd *cmd);
static void dev_user_on_free_cmd(struct scst_cmd *cmd);
static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_disk_done(struct scst_cmd *cmd);
static int dev_user_tape_done(struct scst_cmd *cmd);

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv);
static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv);

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);

static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags);
static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status);
static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
static int dev_user_register_dev(struct file *file,
        const struct scst_user_dev_desc *dev_desc);
static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt);
static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
static int dev_user_get_opt(struct file *file, void *arg);

static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
static long dev_user_ioctl(struct file *file, unsigned int cmd,
        unsigned long arg);
static int dev_user_release(struct inode *inode, struct file *file);

/** Data **/

static struct kmem_cache *user_cmd_cachep;

static DEFINE_MUTEX(dev_priv_mutex);

static struct file_operations dev_user_fops = {
        .owner          = THIS_MODULE,
        .poll           = dev_user_poll,
        .unlocked_ioctl = dev_user_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = dev_user_ioctl,
#endif
        .release        = dev_user_release,
};
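
/*
 * Note that, as registered here, all communication with user space goes
 * through poll() and ioctl(): there is no read()/write() path, and the
 * same ioctl handler doubles as the compat handler on 64-bit kernels.
 */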

static struct class *dev_user_sysfs_class;

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);

static DEFINE_SPINLOCK(cleanup_lock);
static LIST_HEAD(cleanup_list);
static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
static struct task_struct *cleanup_thread;

static inline void ucmd_get(struct scst_user_cmd *ucmd, int barrier)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        atomic_inc(&ucmd->ucmd_ref);
        if (barrier)
                smp_mb__after_atomic_inc();
}

static inline void ucmd_put(struct scst_user_cmd *ucmd)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        if (atomic_dec_and_test(&ucmd->ucmd_ref))
                dev_user_free_ucmd(ucmd);
}
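
/*
 * Rough lifetime sketch (an illustration, not an exhaustive list): a ucmd
 * is created with ucmd_ref == 1 by dev_user_alloc_ucmd(); users that need
 * it to stay alive across a window where the owner may drop it (e.g. the
 * buf_ucmd of cached buffers, or background EXEC replies) take an extra
 * reference with ucmd_get(), and the final ucmd_put() frees it via
 * dev_user_free_ucmd(). The optional barrier in ucmd_get() orders the
 * increment against subsequent accesses; atomic_dec_and_test() is already
 * fully ordered, so ucmd_put() needs no explicit barrier.
 */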

static inline int calc_num_pg(unsigned long buf, int len)
{
        len += buf & ~PAGE_MASK;
        return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
}
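
/*
 * calc_num_pg() returns how many pages a user buffer touches, including
 * partially used first and last pages. Worked example, assuming
 * PAGE_SIZE == 4096: buf = 0x10000804, len = 0x1000. The in-page offset
 * (buf & ~PAGE_MASK) is 0x804, so len becomes 0x1804; 0x1804 >> 12 gives
 * 1 full page, and the non-zero remainder adds one more: 2 pages total.
 */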

static inline int is_need_offs_page(unsigned long buf, int len)
{
        return ((buf & ~PAGE_MASK) != 0) &&
                ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
}
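
/*
 * is_need_offs_page() is true when the buffer both starts at a non-zero
 * in-page offset and ends on a different page than it starts on, i.e.
 * when the unaligned start pushes the tail of the buffer onto an extra
 * page. E.g. (PAGE_SIZE == 4096) buf = 0x10000804, len = 0x1000 -> true,
 * while buf = 0x10000800, len = 0x100 stays on one page -> false.
 */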

static void __dev_user_not_reg(void)
{
        PRINT_ERROR("%s", "Device not registered");
        return;
}

static inline int dev_user_check_reg(struct scst_user_dev *dev)
{
        if (dev == NULL) {
                __dev_user_not_reg();
                return -EINVAL;
        }
        return 0;
}

static inline int scst_user_cmd_hashfn(int h)
{
        return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
}
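
/*
 * With DEV_USER_CMD_HASH_ORDER == 6 this is simply h & 63, i.e. the low
 * six bits of a command handle select one of the 64 ucmd_hash buckets.
 */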

static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
        unsigned int h)
{
        struct list_head *head;
        struct scst_user_cmd *ucmd;

        head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
        list_for_each_entry(ucmd, head, hash_list_entry) {
                if (ucmd->h == h) {
                        TRACE_DBG("Found ucmd %p", ucmd);
                        return ucmd;
                }
        }
        return NULL;
}

static void cmd_insert_hash(struct scst_user_cmd *ucmd)
{
        struct list_head *head;
        struct scst_user_dev *dev = ucmd->dev;
        struct scst_user_cmd *u;
        unsigned long flags;

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
        do {
                ucmd->h = dev->handle_counter++;
                u = __ucmd_find_hash(dev, ucmd->h);
        } while (u != NULL);
        head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
        list_add_tail(&ucmd->hash_list_entry, head);
        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Inserted ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}
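
/*
 * The do/while above keeps incrementing handle_counter (under
 * cmd_list_lock) until it produces a handle not already present in the
 * hash, so handles stay unique even after the 32-bit counter wraps, as
 * long as fewer than 2^32 commands are outstanding at once.
 */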

static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
{
        unsigned long flags;
        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
        list_del(&ucmd->hash_list_entry);
        spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Freeing ucmd %p", ucmd);

        cmd_remove_hash(ucmd);
        EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);

        kmem_cache_free(user_cmd_cachep, ucmd);

        TRACE_EXIT();
        return;
}

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;
        int offset = 0;

        TRACE_ENTRY();

        /* *sg is supposed to be zeroed */

        TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
                ucmd->ubuff, ucmd->cur_data_page);

        if (ucmd->cur_data_page == 0) {
                TRACE_MEM("ucmd->first_page_offset %d",
                        ucmd->first_page_offset);
                offset = ucmd->first_page_offset;
                ucmd_get(ucmd, 0);
        }

        if (ucmd->cur_data_page >= ucmd->num_data_pages)
                goto out;

        sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
                PAGE_SIZE - offset, offset);
        ucmd->cur_data_page++;

        TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
                sg->offset);
        TRACE_BUFFER("Page data", sg_virt(sg), sg->length);

out:
        TRACE_EXIT();
        return sg_page(sg);
}

static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
                ucmd, ucmd->h, ucmd->ubuff);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
        ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;

        ucmd->state = UCMD_STATE_ON_CACHE_FREEING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
{
        int i;

        TRACE_ENTRY();

        TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
                ucmd->ubuff, ucmd->num_data_pages);

        for (i = 0; i < ucmd->num_data_pages; i++) {
                struct page *page = ucmd->data_pages[i];

                if (ucmd->buf_dirty)
                        SetPageDirty(page);

                page_cache_release(page);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;

        TRACE_EXIT();
        return;
}

static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        sBUG_ON(ucmd->data_pages == NULL);

        TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
                ucmd, ucmd->ubuff, ucmd->buff_cached);

        dev_user_unmap_buf(ucmd);

        if (ucmd->buff_cached)
                dev_user_on_cached_mem_free(ucmd);
        else
                ucmd_put(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;

        TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
                sg_count, ucmd);

        __dev_user_free_sg_entries(ucmd);

        return;
}

static inline int is_buff_cached(struct scst_user_cmd *ucmd)
{
        int mem_reuse_type = ucmd->dev->memory_reuse_type;

        if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
            ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
            ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE))) {
                return 1;
        } else
                return 0;
}

/*
 * Returns 0 on success, < 0 on fatal failure, > 0 if user pages are needed.
 * Unmaps the buffer, if needed, in case of error.
 */
static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;
        struct scst_user_dev *dev = ucmd->dev;
        int gfp_mask, flags = 0;
        int bufflen = cmd->bufflen;
        int last_len = 0;

        TRACE_ENTRY();

        gfp_mask = __GFP_NOWARN;
        gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);

        if (cached_buff) {
                flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
                if (ucmd->ubuff == 0)
                        flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
        } else {
                TRACE_MEM("%s", "Not cached buff");
                flags |= SCST_POOL_ALLOC_NO_CACHED;
                if (ucmd->ubuff == 0) {
                        res = 1;
                        goto out;
                }
                bufflen += ucmd->first_page_offset;
                if (is_need_offs_page(ucmd->ubuff, cmd->bufflen))
                        last_len = bufflen & ~PAGE_MASK;
                else
                        last_len = cmd->bufflen & ~PAGE_MASK;
                if (last_len == 0)
                        last_len = PAGE_SIZE;
        }
        ucmd->buff_cached = cached_buff;

        cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &ucmd->sgv, ucmd);
        if (cmd->sg != NULL) {
                struct scst_user_cmd *buf_ucmd =
                        (struct scst_user_cmd*)sgv_get_priv(ucmd->sgv);

                TRACE_MEM("Buf ucmd %p", buf_ucmd);

                ucmd->ubuff = buf_ucmd->ubuff;
                ucmd->buf_ucmd = buf_ucmd;

                EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
                                   (ucmd != buf_ucmd));

                if (last_len != 0) {
                        /* We don't use clustering, so the assignment is safe */
                        cmd->sg[cmd->sg_cnt-1].length = last_len;
                }

                TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
                        "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff,
                        last_len, cmd->sg[cmd->sg_cnt-1].length);

                if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
                        static int ll;
                        if (ll < 10) {
                                PRINT_INFO("Unable to complete command due to "
                                        "SG IO count limitation (requested %d, "
                                        "available %d, tgt lim %d)", cmd->sg_cnt,
                                        cmd->tgt_dev->max_sg_cnt,
                                        cmd->tgt->sg_tablesize);
                                ll++;
                        }
                        cmd->sg = NULL;
                        /* sgv will be freed in dev_user_free_sgv() */
                        res = -1;
                }
        } else {
                TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
                        "sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
                        ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
                if (unlikely(cmd->sg_cnt == 0)) {
                        TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
                        sBUG_ON(ucmd->sgv != NULL);
                        res = -1;
                } else {
                        switch (ucmd->state & ~UCMD_STATE_MASK) {
                        case UCMD_STATE_BUF_ALLOCING:
                                res = 1;
                                break;
                        case UCMD_STATE_EXECING:
                                res = -1;
                                break;
                        default:
                                sBUG();
                                break;
                        }
                }
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
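
/*
 * Worked example of the last_len math above for a non-cached buffer,
 * assuming PAGE_SIZE == 4096: first_page_offset == 0x804 and
 * cmd->bufflen == 0x1000. Then bufflen becomes 0x1804 and
 * is_need_offs_page() is true, so last_len = 0x1804 & ~PAGE_MASK = 0x804:
 * exactly the number of payload bytes that land on the final page, which
 * is what the last SG entry gets trimmed to.
 */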

static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(ucmd->cmd->data_buf_tgt_alloc)) {
                PRINT_ERROR("Target driver %s requested own memory "
                        "allocation", ucmd->cmd->tgtt->name);
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        ucmd->state = UCMD_STATE_BUF_ALLOCING;
        cmd->data_buf_alloced = 1;

        rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
        if (rc == 0)
                goto out;
        else if (rc < 0) {
                scst_set_busy(cmd);
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        if ((cmd->data_direction != SCST_DATA_WRITE) &&
            !scst_is_cmd_local(cmd)) {
                TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
        ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb)));
        ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
                (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;

        dev_user_add_to_ready(ucmd);

        res = SCST_CMD_STATE_STOP;

out:
        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask)
{
        struct scst_user_cmd *ucmd = NULL;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
        if (ucmd != NULL)
                memset(ucmd, 0, sizeof(*ucmd));
#else
        ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
#endif
        if (unlikely(ucmd == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
                        "user cmd (gfp_mask %x)", gfp_mask);
                goto out;
        }
        ucmd->dev = dev;
        atomic_set(&ucmd->ucmd_ref, 1);

        cmd_insert_hash(ucmd);

        TRACE_MEM("ucmd %p allocated", ucmd);

out:
        TRACE_EXIT_HRES((unsigned long)ucmd);
        return ucmd;
}

static int dev_user_get_block(struct scst_cmd *cmd)
{
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are existing commands.
         */
        TRACE_EXIT_RES(dev->block);
        return dev->block;
}

static int dev_user_parse(struct scst_cmd *cmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_user_cmd *ucmd;
        int atomic = scst_cmd_atomic(cmd);
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        int gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        if (cmd->dh_priv == NULL) {
                ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
                if (unlikely(ucmd == NULL)) {
                        if (atomic) {
                                res = SCST_CMD_STATE_NEED_THREAD_CTX;
                                goto out;
                        } else {
                                scst_set_busy(cmd);
                                goto out_error;
                        }
                }
                ucmd->cmd = cmd;
                cmd->dh_priv = ucmd;
        } else {
                ucmd = (struct scst_user_cmd*)cmd->dh_priv;
                TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
        }

        TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);

        if (ucmd->state != UCMD_STATE_NEW)
                goto alloc;

        switch (dev->parse_type) {
        case SCST_USER_PARSE_STANDARD:
                TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
                        goto out_invalid;
                break;

        case SCST_USER_PARSE_EXCEPTION:
                TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
                        break;
                else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
                        TRACE_MEM("Restarting PARSE to thread context "
                                "(ucmd %p)", ucmd);
                        res = SCST_CMD_STATE_NEED_THREAD_CTX;
                        goto out;
                }
                /* else fall through */

        case SCST_USER_PARSE_CALL:
                TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
                        "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
                ucmd->user_cmd.cmd_h = ucmd->h;
                ucmd->user_cmd.subcode = SCST_USER_PARSE;
                ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
                memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
                        min(sizeof(ucmd->user_cmd.parse_cmd.cdb),
                            sizeof(cmd->cdb)));
                ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
                ucmd->user_cmd.parse_cmd.timeout = cmd->timeout;
                ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
                ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
                ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
                ucmd->user_cmd.parse_cmd.expected_values_set =
                                        cmd->expected_values_set;
                ucmd->user_cmd.parse_cmd.expected_data_direction =
                                        cmd->expected_data_direction;
                ucmd->user_cmd.parse_cmd.expected_transfer_len =
                                        cmd->expected_transfer_len;
                ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
                ucmd->state = UCMD_STATE_PARSING;
                dev_user_add_to_ready(ucmd);
                res = SCST_CMD_STATE_STOP;
                goto out;

        default:
                sBUG();
                goto out;
        }

alloc:
        if (cmd->data_direction != SCST_DATA_NONE)
                res = dev_user_alloc_space(ucmd);

out:
        TRACE_EXIT_RES(res);
        return res;

out_invalid:
        PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
                cmd->op_flags & SCST_INFO_INVALID);
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));

out_error:
        res = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;
}

static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
{
        struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
        unsigned long start = buf_ucmd->ubuff;
        int i;

        TRACE_ENTRY();

        if (start == 0)
                goto out;

        for (i = 0; i < buf_ucmd->num_data_pages; i++) {
                struct page *page;
                page = buf_ucmd->data_pages[i];
#ifdef ARCH_HAS_FLUSH_ANON_PAGE
                struct vm_area_struct *vma = find_vma(current->mm, start);
                if (vma != NULL)
                        flush_anon_page(vma, page, start);
#endif
                flush_dcache_page(page);
                start += PAGE_SIZE;
        }

out:
        TRACE_EXIT();
        return;
}

static int dev_user_exec(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;
        int res = SCST_EXEC_COMPLETED;

        TRACE_ENTRY();

#if 0 /* We set exec_atomic to 0 to let the SCST core know that we need a
       * thread context to complete the necessary actions, but all we are
       * going to do in this function is, in fact, atomic, so let's skip
       * this check.
       */
        if (scst_cmd_atomic(cmd)) {
                TRACE_DBG("%s", "User exec() can not be called in atomic "
                        "context, rescheduling to the thread");
                res = SCST_EXEC_NEED_THREAD;
                goto out;
        }
#endif

        TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
                "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
                cmd->bufflen, cmd->data_len, ucmd->ubuff);

        if (cmd->data_direction == SCST_DATA_WRITE)
                dev_user_flush_dcache(ucmd);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_EXEC;
        ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.exec_cmd.cdb),
                    sizeof(cmd->cdb)));
        ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
        ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
        ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
        if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
                ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
                        (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        }
        ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.exec_cmd.partial = 0;
        ucmd->user_cmd.exec_cmd.timeout = cmd->timeout;
        ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;

        ucmd->state = UCMD_STATE_EXECING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}
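
/*
 * A sketch of the round trip this starts (illustrative; the exact ioctl
 * used to fetch commands is defined in scst_user.h): the ucmd is queued
 * on the ready list, user space picks up a struct scst_user_get_cmd with
 * subcode SCST_USER_EXEC, performs the actual I/O against pbuf, and
 * answers with a struct scst_user_reply_cmd whose exec_reply carries
 * status, resp_data_len and, optionally, sense data; that reply is
 * handled by dev_user_process_reply_exec() below.
 */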

static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
{
        if (ucmd->sgv != NULL) {
                sgv_pool_free(ucmd->sgv);
                ucmd->sgv = NULL;
        } else if (ucmd->data_pages != NULL) {
                /* We mapped pages, but for some reason didn't allocate them */
                ucmd_get(ucmd, 0);
                __dev_user_free_sg_entries(ucmd);
        }
        return;
}

static void dev_user_on_free_cmd(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;

        TRACE_ENTRY();

        if (unlikely(ucmd == NULL))
                goto out;

        TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
                ucmd->buff_cached, ucmd->ubuff);

        ucmd->cmd = NULL;
        if ((cmd->data_direction == SCST_DATA_WRITE) && (ucmd->buf_ucmd != NULL))
                ucmd->buf_ucmd->buf_dirty = 1;

        if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
                ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
                /* The state assignment must be before freeing sgv! */
                dev_user_free_sgv(ucmd);
                ucmd_put(ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;

        ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
        ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
        ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
        ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
        ucmd->user_cmd.on_free_cmd.status = cmd->status;
        ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;

        ucmd->state = UCMD_STATE_ON_FREEING;

        dev_user_add_to_ready(ucmd);

out:
        TRACE_EXIT();
        return;
}

static void dev_user_set_block(struct scst_cmd *cmd, int block)
{
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are existing commands.
         */
        TRACE_DBG("dev %p, new block %d", dev, block);
        if (block != 0)
                dev->block = block;
        else
                dev->block = dev->def_block;
        return;
}

static int dev_user_disk_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_block_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_tape_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_tape_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
{
        struct scst_user_dev *dev = ucmd->dev;
        unsigned long flags;
        int do_wake;

        TRACE_ENTRY();

        do_wake = (in_interrupt() ||
                   (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
        if (ucmd->cmd)
                do_wake |= ucmd->cmd->preprocessing_only;

        EXTRACHECKS_BUG_ON(ucmd->state & UCMD_STATE_JAMMED_MASK);

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);

        /* Hopefully, the compiler will turn this into a single test/jmp */
        if (unlikely(dev->attach_cmd_active || dev->tm_cmd_active ||
                     dev->internal_reset_active || dev->pre_unreg_sess_active ||
                     (dev->detach_cmd_count != 0))) {
                switch (ucmd->state) {
                case UCMD_STATE_PARSING:
                case UCMD_STATE_BUF_ALLOCING:
                case UCMD_STATE_EXECING:
                        if (dev->pre_unreg_sess_active &&
                            !(dev->attach_cmd_active || dev->tm_cmd_active ||
                              dev->internal_reset_active ||
                              (dev->detach_cmd_count != 0))) {
                                struct scst_user_pre_unreg_sess_obj *p, *found = NULL;
                                list_for_each_entry(p, &dev->pre_unreg_sess_list,
                                        pre_unreg_sess_list_entry) {
                                        if (p->tgt_dev == ucmd->cmd->tgt_dev) {
                                                if (p->active)
                                                        found = p;
                                                break;
                                        }
                                }
                                if (found == NULL) {
                                        TRACE_MGMT_DBG("No pre unreg sess "
                                                "active (ucmd %p)", ucmd);
                                        break;
                                } else {
                                        TRACE_MGMT_DBG("Pre unreg sess %p "
                                                "active (ucmd %p)", found, ucmd);
                                }
                        }
                        TRACE(TRACE_MGMT, "Mgmt cmd active, returning BUSY for "
                                "ucmd %p", ucmd);
                        dev_user_unjam_cmd(ucmd, 1, &flags);
                        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
                        goto out;
                }
        }

        if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
            unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
            unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
                if (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE) {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to prio ready cmd "
                                "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->prio_ready_cmd_list);
                        wake_up(&dev->prio_cmd_list_waitQ);
                        do_wake = 0;
                } else {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to ready cmd "
                                "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->ready_cmd_list);
                        do_wake = 1;
                }
        } else if ((ucmd->cmd != NULL) &&
            unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
                TRACE_DBG("Adding ucmd %p to head ready cmd list", ucmd);
                list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        } else {
                TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
                list_add_tail(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        }

        if (do_wake) {
                TRACE_DBG("Waking up dev %p", dev);
                wake_up(&dev->cmd_lists.cmd_list_waitQ);
        }

        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

out:
        TRACE_EXIT();
        return;
}
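
/*
 * Queueing policy recap: while a management command (attach/detach, TM,
 * internal reset, pre-unreg session) is active, new PARSE/ALLOC/EXEC
 * ucmds are bounced back BUSY via dev_user_unjam_cmd(); TM and
 * attach/detach ucmds go either to the separate prio list (waking
 * prio_cmd_list_waitQ) or to the tail of the regular ready list;
 * HEAD_OF_QUEUE commands jump to the head of the ready list; everything
 * else is appended to the tail.
 */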

static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
        int num_pg)
{
        int res = 0, rc;
        int i;

        TRACE_ENTRY();

        if (unlikely(ubuff == 0))
                goto out_nomem;

        sBUG_ON(ucmd->data_pages != NULL);

        ucmd->num_data_pages = num_pg;

        ucmd->data_pages = kzalloc(sizeof(*ucmd->data_pages)*ucmd->num_data_pages,
                GFP_KERNEL);
        if (ucmd->data_pages == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
                        "(num_data_pages=%d)", ucmd->num_data_pages);
                res = -ENOMEM;
                goto out_nomem;
        }

        TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d, "
                "first_page_offset %d, len %d)", ucmd, ubuff,
                ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
                ucmd->cmd->bufflen);

        down_read(&current->mm->mmap_sem);
        rc = get_user_pages(current, current->mm, ubuff, ucmd->num_data_pages,
                1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* get_user_pages() flushes dcache */

        if (rc < ucmd->num_data_pages)
                goto out_unmap;

        ucmd->ubuff = ubuff;
        ucmd->first_page_offset = (ubuff & ~PAGE_MASK);

out:
        TRACE_EXIT_RES(res);
        return res;

out_nomem:
        scst_set_busy(ucmd->cmd);
        /* fall through */

out_err:
        ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;

out_unmap:
        PRINT_ERROR("Failed to get %d user pages (rc %d)",
                ucmd->num_data_pages, rc);
        if (rc > 0) {
                for (i = 0; i < rc; i++)
                        page_cache_release(ucmd->data_pages[i]);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;
        res = -EFAULT;
        scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        goto out_err;
}
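
/*
 * dev_user_map_buf() pins the user pages with get_user_pages() so the
 * kernel side can build SG entries over them; the pin is dropped page by
 * page with page_cache_release() in dev_user_unmap_buf(), after marking
 * the pages dirty for buffers user space may have written into.
 */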

static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        TRACE_DBG("ucmd %p, pbuf %Lx", ucmd, reply->alloc_reply.pbuf);

        if (likely(reply->alloc_reply.pbuf != 0)) {
                int pages;
                if (ucmd->buff_cached) {
                        if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
                                PRINT_ERROR("Supplied pbuf %Lx isn't "
                                        "page aligned", reply->alloc_reply.pbuf);
                                goto out_hwerr;
                        }
                        pages = cmd->sg_cnt;
                } else
                        pages = calc_num_pg(reply->alloc_reply.pbuf, cmd->bufflen);
                res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
        } else {
                scst_set_busy(ucmd->cmd);
                ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        }

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_hwerr:
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_parse *preply =
                &reply->parse_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
                     (preply->data_direction != SCST_DATA_READ) &&
                     (preply->data_direction != SCST_DATA_NONE)))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
                     (preply->bufflen == 0)))
                goto out_inval;

        if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
                goto out_inval;

        TRACE_DBG("ucmd %p, queue_type %x, data_direction %x, bufflen %d, "
                "data_len %d, pbuf %Lx", ucmd, preply->queue_type,
                preply->data_direction, preply->bufflen, preply->data_len,
                reply->alloc_reply.pbuf);

        cmd->queue_type = preply->queue_type;
        cmd->data_direction = preply->data_direction;
        cmd->bufflen = preply->bufflen;
        cmd->data_len = preply->data_len;

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid parse_reply parameter(s)");
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}
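
/*
 * For illustration, a user-space PARSE responder could fill its reply
 * roughly like this (a sketch only: the field names match those used
 * above, but 'get_cmd', the reply ioctl and the example values are
 * assumptions):
 *
 *      struct scst_user_reply_cmd reply;
 *
 *      memset(&reply, 0, sizeof(reply));
 *      reply.cmd_h = get_cmd.cmd_h;
 *      reply.subcode = SCST_USER_PARSE;
 *      reply.parse_reply.queue_type = get_cmd.parse_cmd.queue_type;
 *      reply.parse_reply.data_direction = SCST_DATA_READ;
 *      reply.parse_reply.bufflen = 512;
 *      reply.parse_reply.data_len = 512;
 *
 * and then hand 'reply' back through the driver's reply ioctl, which
 * lands in dev_user_process_reply() below.
 */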

static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON FREE ucmd %p", ucmd);

        dev_user_free_sgv(ucmd);
        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);

        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_exec *ereply =
                &reply->exec_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
                if (ucmd->background_exec) {
                        TRACE_DBG("Background ucmd %p finished", ucmd);
                        ucmd_put(ucmd);
                        goto out;
                }
                if (unlikely(ereply->resp_data_len > cmd->bufflen))
                        goto out_inval;
                if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
                             (ereply->resp_data_len != 0)))
                        goto out_inval;
        } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
                if (unlikely(ucmd->background_exec))
                        goto out_inval;
                if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
                             (cmd->resp_data_len != 0)))
                        goto out_inval;
                ucmd_get(ucmd, 1);
                ucmd->background_exec = 1;
                TRACE_DBG("Background ucmd %p", ucmd);
                goto out_compl;
        } else
                goto out_inval;

        TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
                ereply->status, ereply->resp_data_len);

        if (ereply->resp_data_len != 0) {
                if (ucmd->ubuff == 0) {
                        int pages, rc;
                        if (unlikely(ereply->pbuf == 0))
                                goto out_busy;
                        if (ucmd->buff_cached) {
                                if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
                                        PRINT_ERROR("Supplied pbuf %Lx isn't "
                                                "page aligned", ereply->pbuf);
                                        goto out_hwerr;
                                }
                                pages = cmd->sg_cnt;
                        } else
                                pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
                        rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
                        if ((rc != 0) || (ucmd->ubuff == 0))
                                goto out_compl;

                        rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
                        if (unlikely(rc != 0))
                                goto out_busy;
                } else
                        dev_user_flush_dcache(ucmd);
                cmd->may_need_dma_sync = 1;
                scst_set_resp_data_len(cmd, ereply->resp_data_len);
        } else if (cmd->resp_data_len != ereply->resp_data_len) {
                if (ucmd->ubuff == 0)
                        cmd->resp_data_len = ereply->resp_data_len;
                else
                        scst_set_resp_data_len(cmd, ereply->resp_data_len);
        }

        cmd->status = ereply->status;
        if (ereply->sense_len != 0) {
                res = scst_alloc_sense(cmd, 0);
                if (res != 0)
                        goto out_compl;
                res = copy_from_user(cmd->sense,
                        (void*)(unsigned long)ereply->psense_buffer,
                        min((unsigned int)SCST_SENSE_BUFFERSIZE,
                                (unsigned int)ereply->sense_len));
                if (res != 0) {
                        /* copy_from_user() returns the uncopied byte count */
                        PRINT_ERROR("%s", "Unable to get sense data");
                        res = -EFAULT;
                        goto out_hwerr_res_set;
                }
        }

out_compl:
        cmd->completed = 1;
        cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
        /* !! At this point cmd can be already freed !! */

out:
        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid exec_reply parameter(s)");

out_hwerr:
        res = -EINVAL;

out_hwerr_res_set:
        if (ucmd->background_exec) {
                ucmd_put(ucmd);
                goto out;
        } else {
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                goto out_compl;
        }

out_busy:
        scst_set_busy(cmd);
        goto out_compl;
}

static int dev_user_process_reply(struct scst_user_dev *dev,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_cmd *ucmd;
        int state;

        TRACE_ENTRY();

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd = __ucmd_find_hash(dev, reply->cmd_h);
        if (ucmd == NULL) {
                TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
                res = -ESRCH;
                goto out_unlock;
        }

        if (ucmd->background_exec) {
                state = UCMD_STATE_EXECING;
                goto unlock_process;
        }

        if (unlikely(!(ucmd->state & UCMD_STATE_SENT_MASK))) {
                if (ucmd->state & UCMD_STATE_JAMMED_MASK) {
                        TRACE_MGMT_DBG("Reply on jammed ucmd %p, ignoring",
                                ucmd);
                } else {
                        TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
                                "state %x", ucmd, ucmd->state);
                        res = -EBUSY;
                }
                goto out_unlock;
        }

        if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
                goto out_wrong_state;

        if (unlikely(_IOC_NR(reply->subcode) !=
                        (ucmd->state & ~UCMD_STATE_SENT_MASK)))
                goto out_wrong_state;

        ucmd->state &= ~UCMD_STATE_SENT_MASK;
        state = ucmd->state;
        ucmd->state |= UCMD_STATE_RECV_MASK;

unlock_process:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        switch (state) {
        case UCMD_STATE_PARSING:
                res = dev_user_process_reply_parse(ucmd, reply);
                break;

        case UCMD_STATE_BUF_ALLOCING:
                res = dev_user_process_reply_alloc(ucmd, reply);
                break;

        case UCMD_STATE_EXECING:
                res = dev_user_process_reply_exec(ucmd, reply);
                break;

        case UCMD_STATE_ON_FREEING:
                res = dev_user_process_reply_on_free(ucmd);
                break;

        case UCMD_STATE_ON_CACHE_FREEING:
                res = dev_user_process_reply_on_cache_free(ucmd);
                break;

        case UCMD_STATE_TM_EXECING:
                res = dev_user_process_reply_tm_exec(ucmd, reply->result);
                break;

        case UCMD_STATE_ATTACH_SESS:
        case UCMD_STATE_DETACH_SESS:
                res = dev_user_process_reply_sess(ucmd, reply->result);
                break;

        default:
                sBUG();
                break;
        }
out:
        TRACE_EXIT_RES(res);
        return res;

out_wrong_state:
        PRINT_ERROR("Command's %p subcode %x doesn't match internal "
                "command's state %x or reply->subcode (%x) != ucmd->subcode "
                "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
                reply->subcode, ucmd->user_cmd.subcode);
        res = -EINVAL;
        dev_user_unjam_cmd(ucmd, 0, NULL);

out_unlock:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
        goto out;
}

static int dev_user_reply_cmd(struct file *file, unsigned long arg)
{
        int res = 0;
        struct scst_user_dev *dev;
        struct scst_user_reply_cmd *reply;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev*)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        reply = kzalloc(sizeof(*reply), GFP_KERNEL);
        if (reply == NULL) {
                res = -ENOMEM;
                goto out_up;
        }

        res = copy_from_user(reply, (void*)arg, sizeof(*reply));
        if (res != 0) {
                /* copy_from_user() returns the uncopied byte count */
                res = -EFAULT;
                goto out_free;
        }

        TRACE_BUFFER("Reply", reply, sizeof(*reply));

        res = dev_user_process_reply(dev, reply);
        if (res < 0)
                goto out_free;

out_free:
        kfree(reply);

out_up:
        up_read(&dev->dev_rwsem);

out:
        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_scst_commands(struct scst_user_dev *dev)
{
        int res = 0;

        TRACE_ENTRY();

        while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
                struct scst_cmd *cmd = list_entry(
                        dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
                        cmd_list_entry);
                TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
                list_del(&cmd->cmd_list_entry);
                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT |
                                                 SCST_CONTEXT_PROCESSABLE);
                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                res++;
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* Called under cmd_lists.cmd_list_lock and IRQ off */
struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
{
        struct scst_user_cmd *u;

again:
        u = NULL;
        if (!list_empty(cmd_list)) {
                u = list_entry(cmd_list->next, typeof(*u), ready_cmd_list_entry);

                TRACE_DBG("Found ready ucmd %p", u);
                list_del(&u->ready_cmd_list_entry);

                EXTRACHECKS_BUG_ON(u->state & UCMD_STATE_JAMMED_MASK);

                if (u->cmd != NULL) {
                        if (u->state == UCMD_STATE_EXECING) {
                                struct scst_user_dev *dev = u->dev;
                                int rc;
                                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                                rc = scst_check_local_events(u->cmd);
                                if (unlikely(rc != 0)) {
                                        u->cmd->scst_cmd_done(u->cmd,
                                                SCST_CMD_STATE_DEFAULT);
                                        /*
                                         * !! At this point cmd & u can be !!
                                         * !! already freed                !!
                                         */
                                        spin_lock_irq(
                                                &dev->cmd_lists.cmd_list_lock);
                                        goto again;
                                }
                                /*
                                 * There is no real need to lock again here, but
                                 * let's do it for simplicity.
                                 */
                                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                        } else if (unlikely(test_bit(SCST_CMD_ABORTED,
                                        &u->cmd->cmd_flags))) {
                                switch (u->state) {
                                case UCMD_STATE_PARSING:
                                case UCMD_STATE_BUF_ALLOCING:
                                        TRACE_MGMT_DBG("Aborting ucmd %p", u);
                                        dev_user_unjam_cmd(u, 0, NULL);
                                        goto again;
                                case UCMD_STATE_EXECING:
                                        EXTRACHECKS_BUG_ON(1);
                                }
                        }
                }
                u->state |= UCMD_STATE_SENT_MASK;
        }
        return u;
}

static inline int test_cmd_lists(struct scst_user_dev *dev)
{
        int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
                  !list_empty(&dev->ready_cmd_list) ||
                  !dev->blocking || dev->cleanup_done ||
                  signal_pending(current);
        return res;
}

/* Called under cmd_lists.cmd_list_lock and IRQ off */
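/*
 * Returns 0 with *ucmd set, -EAGAIN if nothing is ready and the device
 * is non-blocking (or already being cleaned up), or -EINTR on a pending
 * signal. May drop and reacquire the lock while sleeping.
 */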
static int dev_user_get_next_cmd(struct scst_user_dev *dev,
        struct scst_user_cmd **ucmd)
{
        int res = 0;
        wait_queue_t wait;

        TRACE_ENTRY();

        init_waitqueue_entry(&wait, current);

        while(1) {
                if (!test_cmd_lists(dev)) {
                        add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
                                &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_cmd_lists(dev))
                                        break;
                                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                                schedule();
                                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
                                &wait);
                }

                dev_user_process_scst_commands(dev);

                *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
                if (*ucmd != NULL)
                        break;

                if (!dev->blocking || dev->cleanup_done) {
                        res = -EAGAIN;
                        TRACE_DBG("No ready commands, returning %d", res);
                        break;
                }

                if (signal_pending(current)) {
                        res = -EINTR;
                        TRACE_DBG("Signal pending, returning %d", res);
                        break;
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

static inline int test_prio_cmd_list(struct scst_user_dev *dev)
{
        /*
         * The prio queue is always blocking, because poll() doesn't seem to
         * support different threads waiting with different event masks. Only
         * one thread is woken up on each event, and if it isn't interested
         * in that event, another (interested) one will not be woken up.
         * It's unclear whether that's a bug or a feature.
         */
        int res = !list_empty(&dev->prio_ready_cmd_list) ||
                  dev->cleaning || dev->cleanup_done ||
                  signal_pending(current);
        return res;
}

/* Called under cmd_lists.cmd_list_lock and IRQ off */
static int dev_user_get_next_prio_cmd(struct scst_user_dev *dev,
        struct scst_user_cmd **ucmd)
{
        int res = 0;
        wait_queue_t wait;

        TRACE_ENTRY();

        init_waitqueue_entry(&wait, current);

        while(1) {
                if (!test_prio_cmd_list(dev)) {
                        add_wait_queue_exclusive(&dev->prio_cmd_list_waitQ,
                                &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_prio_cmd_list(dev))
                                        break;
                                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                                schedule();
                                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&dev->prio_cmd_list_waitQ, &wait);
                }

                *ucmd = __dev_user_get_next_cmd(&dev->prio_ready_cmd_list);
                if (*ucmd != NULL)
                        break;

                if (dev->cleaning || dev->cleanup_done) {
                        res = -EAGAIN;
                        TRACE_DBG("No ready commands, returning %d", res);
                        break;
                }

                if (signal_pending(current)) {
                        res = -EINTR;
                        TRACE_DBG("Signal pending, returning %d", res);
                        break;
                }
        }

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_reply_get_cmd(struct file *file, unsigned long arg,
        int prio)
{
        int res = 0;
        struct scst_user_dev *dev;
        struct scst_user_get_cmd *cmd;
        struct scst_user_reply_cmd *reply;
        struct scst_user_cmd *ucmd;
        uint64_t ureply;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev*)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        res = copy_from_user(&ureply, (void*)arg, sizeof(ureply));
        if (res != 0) {
                res = -EFAULT;
                goto out_up;
        }

        TRACE_DBG("ureply %Ld", ureply);

        cmd = kzalloc(max(sizeof(*cmd), sizeof(*reply)), GFP_KERNEL);
        if (cmd == NULL) {
                res = -ENOMEM;
                goto out_up;
        }

        if (ureply != 0) {
                unsigned long u = (unsigned long)ureply;
                reply = (struct scst_user_reply_cmd*)cmd;
                res = copy_from_user(reply, (void*)u, sizeof(*reply));
                if (res != 0) {
                        res = -EFAULT;
                        goto out_free;
                }

                TRACE_BUFFER("Reply", reply, sizeof(*reply));

                res = dev_user_process_reply(dev, reply);
                if (res < 0)
                        goto out_free;
        }

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
        if (prio && (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE))
                res = dev_user_get_next_prio_cmd(dev, &ucmd);
        else
                res = dev_user_get_next_cmd(dev, &ucmd);
        if (res == 0) {
                *cmd = ucmd->user_cmd;
                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                TRACE_BUFFER("UCMD", cmd, sizeof(*cmd));
                if (copy_to_user((void*)arg, cmd, sizeof(*cmd)) != 0)
                        res = -EFAULT;
        } else
                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

out_free:
        kfree(cmd);

out_up:
        up_read(&dev->dev_rwsem);

out:
        TRACE_EXIT_RES(res);
        return res;
}

static long dev_user_ioctl(struct file *file, unsigned int cmd,
        unsigned long arg)
{
        long res;

        TRACE_ENTRY();

        switch (cmd) {
        case SCST_USER_REPLY_AND_GET_CMD:
                TRACE_DBG("%s", "REPLY_AND_GET_CMD");
                res = dev_user_reply_get_cmd(file, arg, 0);
                break;

        case SCST_USER_REPLY_CMD:
                TRACE_DBG("%s", "REPLY_CMD");
                res = dev_user_reply_cmd(file, arg);
                break;

        case SCST_USER_REPLY_AND_GET_PRIO_CMD:
                TRACE_DBG("%s", "REPLY_AND_GET_PRIO_CMD");
                res = dev_user_reply_get_cmd(file, arg, 1);
                break;

        case SCST_USER_REGISTER_DEVICE:
        {
                struct scst_user_dev_desc *dev_desc;
                TRACE_DBG("%s", "REGISTER_DEVICE");
                dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
                if (dev_desc == NULL) {
                        res = -ENOMEM;
                        goto out;
                }
                res = copy_from_user(dev_desc, (void*)arg, sizeof(*dev_desc));
                if (res != 0) {
                        res = -EFAULT;
                        kfree(dev_desc);
                        goto out;
                }
                TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
                dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
                res = dev_user_register_dev(file, dev_desc);
                kfree(dev_desc);
                break;
        }

        case SCST_USER_SET_OPTIONS:
        {
                struct scst_user_opt opt;
                TRACE_DBG("%s", "SET_OPTIONS");
                res = copy_from_user(&opt, (void*)arg, sizeof(opt));
                if (res != 0) {
                        res = -EFAULT;
                        goto out;
                }
                TRACE_BUFFER("opt", &opt, sizeof(opt));
                res = dev_user_set_opt(file, &opt);
                break;
        }

        case SCST_USER_GET_OPTIONS:
                TRACE_DBG("%s", "GET_OPTIONS");
                res = dev_user_get_opt(file, (void*)arg);
                break;

        default:
                PRINT_ERROR("Invalid ioctl cmd %x", cmd);
                res = -EINVAL;
                goto out;
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
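
/*
 * For illustration, a minimal user space sketch of the reply-and-get loop
 * served by the ioctls above; the device node name and the process_cmd()
 * helper are assumptions of this sketch, not definitions of this module
 * (see scst_user.h and the README for the authoritative interface). As
 * dev_user_reply_get_cmd() shows, the first 64 bits of the exchanged
 * buffer carry a pointer to the previous reply (0 if there is none), so
 * in steady state a single ioctl per command both delivers the reply and
 * fetches the next command:
 *
 *      int fd = open("/dev/scst_user", O_RDWR);  // assumed node name
 *      struct scst_user_get_cmd cmd;
 *      uint64_t prev_reply = 0;                  // no reply on first call
 *
 *      for (;;) {
 *              memcpy(&cmd, &prev_reply, sizeof(prev_reply));
 *              if (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &cmd) != 0) {
 *                      if (errno == EINTR)
 *                              continue;
 *                      break;  // e.g. EAGAIN during device cleanup
 *              }
 *              // process_cmd() is a hypothetical handler returning a
 *              // pointer to a filled-in struct scst_user_reply_cmd
 *              prev_reply = (unsigned long)process_cmd(&cmd);
 *      }
 */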

static unsigned int dev_user_poll(struct file *file, poll_table *wait)
{
        int res = 0;
        struct scst_user_dev *dev;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev*)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

        if (!list_empty(&dev->ready_cmd_list) ||
            !list_empty(&dev->cmd_lists.active_cmd_list)) {
                res |= POLLIN | POLLRDNORM;
                goto out_unlock;
        }

        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        TRACE_DBG("Before poll_wait() (dev %p)", dev);
        poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
        TRACE_DBG("After poll_wait() (dev %p)", dev);

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

        if (!list_empty(&dev->ready_cmd_list) ||
            !list_empty(&dev->cmd_lists.active_cmd_list)) {
                res |= POLLIN | POLLRDNORM;
                goto out_unlock;
        }

out_unlock:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        up_read(&dev->dev_rwsem);

out:
        TRACE_EXIT_HRES(res);
        return res;
}
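
/*
 * The handler can thus multiplex over several devices with poll()/select():
 * POLLIN | POLLRDNORM is reported as soon as either the ready or the
 * active command list is non-empty. A minimal user space sketch, assuming
 * fd refers to the opened device node:
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *              // at least one command is waiting; fetch it with
 *              // SCST_USER_REPLY_AND_GET_CMD as sketched above
 *      }
 */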

/*
 * Called under cmd_lists.cmd_list_lock, but may drop it inside and then
 * reacquire it.
 */
static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags)
{
        int state = ucmd->state & ~UCMD_STATE_MASK;
        struct scst_user_dev *dev = ucmd->dev;

        TRACE_ENTRY();

        if (ucmd->state & UCMD_STATE_JAMMED_MASK)
                goto out;

        TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
                ucmd->state);

        ucmd->state = state | UCMD_STATE_JAMMED_MASK;

        switch(state) {
        case UCMD_STATE_PARSING:
        case UCMD_STATE_BUF_ALLOCING:
                if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
                        ucmd->aborted = 1;
                else {
                        if (busy)
                                scst_set_busy(ucmd->cmd);
                        else
                                scst_set_cmd_error(ucmd->cmd,
                                        SCST_LOAD_SENSE(scst_sense_hardw_error));
                }
                TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
                list_add(&ucmd->cmd->cmd_list_entry,
                        &ucmd->cmd->cmd_lists->active_cmd_list);
                wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
                break;

        case UCMD_STATE_EXECING:
                if (flags != NULL)
                        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
                else
                        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

                TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);

                if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
                        ucmd->aborted = 1;
                else {
                        if (busy)
                                scst_set_busy(ucmd->cmd);
                        else
                                scst_set_cmd_error(ucmd->cmd,
                                        SCST_LOAD_SENSE(scst_sense_hardw_error));
                }

                ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT);
                /* !! At this point cmd and ucmd can be already freed !! */

                if (flags != NULL)
                        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
                else
                        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                break;

        case UCMD_STATE_ON_FREEING:
        case UCMD_STATE_ON_CACHE_FREEING:
        case UCMD_STATE_TM_EXECING:
        case UCMD_STATE_ATTACH_SESS:
        case UCMD_STATE_DETACH_SESS:
        {
                if (flags != NULL)
                        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
                else
                        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

                switch(state) {
                case UCMD_STATE_ON_FREEING:
                        dev_user_process_reply_on_free(ucmd);
                        break;

                case UCMD_STATE_ON_CACHE_FREEING:
                        dev_user_process_reply_on_cache_free(ucmd);
                        break;

                case UCMD_STATE_TM_EXECING:
                        dev_user_process_reply_tm_exec(ucmd, SCST_MGMT_STATUS_FAILED);
                        break;

                case UCMD_STATE_ATTACH_SESS:
                case UCMD_STATE_DETACH_SESS:
                        dev_user_process_reply_sess(ucmd, -EFAULT);
                        break;
                }

                if (flags != NULL)
                        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
                else
                        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                break;
        }

        default:
                PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
                sBUG();
                break;
        }

out:
        TRACE_EXIT();
        return;
}

static int __unjam_check_tgt_dev(struct scst_user_cmd *ucmd, int state,
        struct scst_tgt_dev *tgt_dev)
{
        int res = 0;

        if (ucmd->cmd == NULL)
                goto out;

        if (ucmd->cmd->tgt_dev != tgt_dev)
                goto out;

        switch(state & ~UCMD_STATE_MASK) {
        case UCMD_STATE_PARSING:
        case UCMD_STATE_BUF_ALLOCING:
        case UCMD_STATE_EXECING:
                break;
        default:
                goto out;
        }

        res = 1;
out:
        return res;
}

static int __unjam_check_tm(struct scst_user_cmd *ucmd, int state)
{
        int res = 0;

        switch(state & ~UCMD_STATE_MASK) {
        case UCMD_STATE_PARSING:
        case UCMD_STATE_BUF_ALLOCING:
        case UCMD_STATE_EXECING:
                if ((ucmd->cmd != NULL) &&
                    (!test_bit(SCST_CMD_ABORTED,
                                &ucmd->cmd->cmd_flags)))
                        goto out;
                break;
        default:
                goto out;
        }

        res = 1;
out:
        return res;
}

static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
        struct scst_tgt_dev *tgt_dev)
{
        int i;
        unsigned long flags;
        struct scst_user_cmd *ucmd;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Unjamming dev %p", dev);

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);

repeat:
        for(i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
                struct list_head *head = &dev->ucmd_hash[i];
                list_for_each_entry(ucmd, head, hash_list_entry) {
                        TRACE_DBG("ALL: ucmd %p, state %x, scst_cmd %p",
                                ucmd, ucmd->state, ucmd->cmd);
                        if (ucmd->state & UCMD_STATE_SENT_MASK) {
                                int st = ucmd->state & ~UCMD_STATE_SENT_MASK;
                                if (tgt_dev != NULL) {
                                        if (__unjam_check_tgt_dev(ucmd, st,
                                                        tgt_dev) == 0)
                                                continue;
                                } else if (tm) {
                                        if (__unjam_check_tm(ucmd, st) == 0)
                                                continue;
                                }
                                dev_user_unjam_cmd(ucmd, 0, &flags);
                                goto repeat;
                        }
                }
        }

        if ((tgt_dev != NULL) || tm) {
                list_for_each_entry(ucmd, &dev->ready_cmd_list,
                                ready_cmd_list_entry) {
                        TRACE_DBG("READY: ucmd %p, state %x, scst_cmd %p",
                                ucmd, ucmd->state, ucmd->cmd);
                        if (tgt_dev != NULL) {
                                if (__unjam_check_tgt_dev(ucmd, ucmd->state,
                                                tgt_dev) == 0)
                                        continue;
                        } else if (tm) {
                                if (__unjam_check_tm(ucmd, ucmd->state) == 0)
                                        continue;
                        }
                        list_del(&ucmd->ready_cmd_list_entry);
                        dev_user_unjam_cmd(ucmd, 0, &flags);
                        goto repeat;
                }
        }

        if (dev_user_process_scst_commands(dev) != 0)
                goto repeat;

        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

        TRACE_EXIT();
        return;
}

/**
 ** To deal with user space handler hangups we rely on the remote
 ** initiators, which are supposed to issue a task management command if
 ** a command doesn't respond for too long; on that event we can "unjam"
 ** the stuck command. To prevent the TM command itself from stalling, we
 ** use a timer. To avoid too many queued TM commands, we enqueue at most
 ** 2 of them: the first one with the requested TM function, the second
 ** with TARGET_RESET as the most comprehensive function.
 **
 ** The only exception is the DETACH_SESS subcode, where no TM commands
 ** can be expected, so after a timeout we have to manually "unjam" all
 ** the commands on the device.
 **
 ** We also don't queue more than one ATTACH_SESS command and fail it
 ** after a timeout.
 **/

static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status)
{
        int res = 0;
        unsigned long flags;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
                ucmd->user_cmd.tm_cmd.fn, status);

        ucmd->result = status;

        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        if (ucmd->internal_reset_tm) {
                TRACE_MGMT_DBG("Internal TM ucmd %p finished", ucmd);
                ucmd->dev->internal_reset_active = 0;
        } else {
                TRACE_MGMT_DBG("TM ucmd %p finished", ucmd);
                ucmd->dev->tm_cmd_active = 0;
        }

        if (ucmd->cmpl != NULL)
                complete_all(ucmd->cmpl);

        spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev)
{
        int res, rc;
        struct scst_user_cmd *ucmd;
        struct scst_user_dev *dev = (struct scst_user_dev*)tgt_dev->dev->dh_priv;
        struct scst_user_cmd *ucmd_to_abort = NULL;

        TRACE_ENTRY();

        /* We can't afford missing a TM command due to memory shortage */
        ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
        ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL|__GFP_NOFAIL);

        init_completion(ucmd->cmpl);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
        ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
        ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
        ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
        ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;

        if (mcmd->cmd_to_abort != NULL) {
                ucmd_to_abort = (struct scst_user_cmd*)mcmd->cmd_to_abort->dh_priv;
                if (ucmd_to_abort != NULL)
                        ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
        }

        TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
                "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
                mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
                ucmd->user_cmd.tm_cmd.cmd_h_to_abort);

        ucmd->state = UCMD_STATE_TM_EXECING;

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
        if (dev->internal_reset_active) {
                PRINT_ERROR("Losing TM cmd %d, because there are other "
                        "unprocessed TM commands", mcmd->fn);
                res = SCST_MGMT_STATUS_FAILED;
                goto out_locked_free;
        } else if (dev->tm_cmd_active) {
                /*
                 * We are going to miss some TM commands, so replace this one
                 * with the hardest one.
                 */
                PRINT_ERROR("Replacing TM cmd %d with TARGET_RESET, because "
                        "there is another unprocessed TM command", mcmd->fn);
                ucmd->user_cmd.tm_cmd.fn = SCST_TARGET_RESET;
                ucmd->internal_reset_tm = 1;
                dev->internal_reset_active = 1;
        } else
                dev->tm_cmd_active = 1;
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd_get(ucmd, 0);
        dev_user_add_to_ready(ucmd);

        /*
         * Since the user space handler should not wait for the affected tasks
         * to complete, it shall complete the TM request ASAP; otherwise the
         * device will be considered stalled.
         */
        rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_TM_TIMEOUT);
        if (rc > 0)
                res = ucmd->result;
        else {
                PRINT_ERROR("Task management command %p timed out", ucmd);
                res = SCST_MGMT_STATUS_FAILED;
        }

        sBUG_ON(irqs_disabled());

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

out_locked_free:
        kfree(ucmd->cmpl);
        ucmd->cmpl = NULL;
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        dev_user_unjam_dev(ucmd->dev, 1, NULL);

        ucmd_put(ucmd);

        TRACE_EXIT();
        return res;
}
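
/*
 * On the user space side a TM request arrives as a command with subcode
 * SCST_USER_TASK_MGMT, carrying the tm_cmd fields filled in above (fn,
 * sess_h, cmd_h_to_abort etc.). As the comment block above explains, the
 * handler must not wait for the affected commands to finish: it should
 * record what to abort and reply immediately with a status
 * (SCST_MGMT_STATUS_FAILED on failure), otherwise the DEV_USER_TM_TIMEOUT
 * wait above expires and the TM function is failed.
 */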

static int dev_user_attach(struct scst_device *sdev)
{
        int res = 0;
        struct scst_user_dev *dev = NULL, *d;

        TRACE_ENTRY();

        spin_lock(&dev_list_lock);
        list_for_each_entry(d, &dev_list, dev_list_entry) {
                if (strcmp(d->name, sdev->virt_name) == 0) {
                        dev = d;
                        break;
                }
        }
        spin_unlock(&dev_list_lock);
        if (dev == NULL) {
                PRINT_ERROR("Device %s not found", sdev->virt_name);
                res = -EINVAL;
                goto out;
        }

        sdev->p_cmd_lists = &dev->cmd_lists;
        sdev->dh_priv = dev;
        sdev->tst = dev->tst;
        sdev->queue_alg = dev->queue_alg;
        sdev->swp = dev->swp;
        sdev->tas = dev->tas;
        sdev->has_own_order_mgmt = dev->has_own_order_mgmt;

        dev->sdev = sdev;

        PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
                dev->name);

out:
        TRACE_EXIT();
        return res;
}

static void dev_user_detach(struct scst_device *sdev)
{
        struct scst_user_dev *dev = (struct scst_user_dev*)sdev->dh_priv;

        TRACE_ENTRY();

        TRACE_DBG("virt_id %d", sdev->virt_id);

        PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
                dev->name);

        /* dev will be freed by the caller */
        sdev->dh_priv = NULL;
        dev->sdev = NULL;

        TRACE_EXIT();
        return;
}

static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
{
        int res = 0;
        unsigned long flags;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);

        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        if ((ucmd->state & ~UCMD_STATE_MASK) ==
                        UCMD_STATE_ATTACH_SESS) {
                TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
                ucmd->result = status;
                ucmd->dev->attach_cmd_active = 0;
        } else if ((ucmd->state & ~UCMD_STATE_MASK) ==
                        UCMD_STATE_DETACH_SESS) {
                TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
                ucmd->dev->detach_cmd_count--;
        } else
                sBUG();

        if (ucmd->cmpl != NULL)
                complete_all(ucmd->cmpl);

        spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
{
        struct scst_user_dev *dev =
                (struct scst_user_dev*)tgt_dev->dev->dh_priv;
        int res = 0, rc;
        struct scst_user_cmd *ucmd;

        TRACE_ENTRY();

        ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
        if (ucmd == NULL)
                goto out_nomem;

        ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL);
        if (ucmd->cmpl == NULL)
                goto out_put_nomem;

        init_completion(ucmd->cmpl);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
        ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
        ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
        ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
        ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
        strncpy(ucmd->user_cmd.sess.initiator_name,
                tgt_dev->sess->initiator_name,
                sizeof(ucmd->user_cmd.sess.initiator_name)-1);
        ucmd->user_cmd.sess.initiator_name[
                sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';

        TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %Lx, LUN %Lx, "
                "threads_num %d, rd_only_flag %d, initiator %s)", ucmd, ucmd->h,
                ucmd->user_cmd.sess.sess_h, ucmd->user_cmd.sess.lun,
                ucmd->user_cmd.sess.threads_num, ucmd->user_cmd.sess.rd_only,
                ucmd->user_cmd.sess.initiator_name);

        ucmd->state = UCMD_STATE_ATTACH_SESS;

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
        if (dev->attach_cmd_active) {
                PRINT_ERROR("%s", "ATTACH_SESS command failed, because "
                        "there is another unprocessed ATTACH_SESS command");
                res = -EBUSY;
                goto out_locked_free;
        }
        dev->attach_cmd_active = 1;
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd_get(ucmd, 0);
        dev_user_add_to_ready(ucmd);

        rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
        if (rc > 0)
                res = ucmd->result;
        else {
                PRINT_ERROR("%s", "ATTACH_SESS command timed out");
                res = -EFAULT;
        }

        sBUG_ON(irqs_disabled());

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
out_locked_free:
        kfree(ucmd->cmpl);
        ucmd->cmpl = NULL;
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd_put(ucmd);

out:
        TRACE_EXIT_RES(res);
        return res;

out_put_nomem:
        ucmd_put(ucmd);

out_nomem:
        res = -ENOMEM;
        goto out;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void dev_user_pre_unreg_sess_work_fn(void *p)
#else
static void dev_user_pre_unreg_sess_work_fn(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        struct scst_user_pre_unreg_sess_obj *pd = (struct scst_user_pre_unreg_sess_obj*)p;
#else
        struct scst_user_pre_unreg_sess_obj *pd = container_of(
                (struct delayed_work*)work, struct scst_user_pre_unreg_sess_obj,
                pre_unreg_sess_work);
#endif
        struct scst_user_dev *dev =
                (struct scst_user_dev*)pd->tgt_dev->dev->dh_priv;

        TRACE_ENTRY();

        TRACE_MGMT_DBG("Unreg sess: unjamming dev %p (tgt_dev %p)", dev,
                pd->tgt_dev);

        pd->active = 1;

        dev_user_unjam_dev(dev, 0, pd->tgt_dev);

        if (!pd->exit) {
                TRACE_MGMT_DBG("Rescheduling pre_unreg_sess work %p (dev %p, "
                        "tgt_dev %p)", pd, dev, pd->tgt_dev);
                schedule_delayed_work(&pd->pre_unreg_sess_work,
                        DEV_USER_PRE_UNREG_POLL_TIME);
        }

        TRACE_EXIT();
        return;
}

static void dev_user_pre_unreg_sess(struct scst_tgt_dev *tgt_dev)
{
        struct scst_user_dev *dev =
                (struct scst_user_dev*)tgt_dev->dev->dh_priv;
        struct scst_user_pre_unreg_sess_obj *pd;

        TRACE_ENTRY();

        /* We can't afford missing a DETACH command due to memory shortage */
        pd = kzalloc(sizeof(*pd), GFP_KERNEL|__GFP_NOFAIL);

        pd->tgt_dev = tgt_dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        INIT_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn, pd);
#else
        INIT_DELAYED_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn);
#endif

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
        dev->pre_unreg_sess_active = 1;
        list_add_tail(&pd->pre_unreg_sess_list_entry, &dev->pre_unreg_sess_list);
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        TRACE_MGMT_DBG("Scheduling pre_unreg_sess work %p (dev %p, tgt_dev %p)",
                pd, dev, pd->tgt_dev);

        schedule_delayed_work(&pd->pre_unreg_sess_work, DEV_USER_DETACH_TIMEOUT);

        TRACE_EXIT();
        return;
}
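
/*
 * The two INIT_*WORK() variants above reflect the workqueue API change in
 * 2.6.20: before it, INIT_WORK() took an explicit data pointer that the
 * callback received directly; from 2.6.20 on, the callback gets the
 * work_struct itself, and dev_user_pre_unreg_sess_work_fn() recovers the
 * containing object with container_of().
 */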

static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
{
        struct scst_user_dev *dev =
                (struct scst_user_dev*)tgt_dev->dev->dh_priv;
        struct scst_user_cmd *ucmd;
        struct scst_user_pre_unreg_sess_obj *pd = NULL, *p;

        TRACE_ENTRY();

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
        list_for_each_entry(p, &dev->pre_unreg_sess_list,
                        pre_unreg_sess_list_entry) {
                if (p->tgt_dev == tgt_dev) {
                        list_del(&p->pre_unreg_sess_list_entry);
                        if (list_empty(&dev->pre_unreg_sess_list))
                                dev->pre_unreg_sess_active = 0;
                        pd = p;
                        break;
                }
        }
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        if (pd != NULL) {
                pd->exit = 1;
                TRACE_MGMT_DBG("Canceling pre unreg work %p", pd);
                cancel_delayed_work(&pd->pre_unreg_sess_work);
                flush_scheduled_work();
                kfree(pd);
        }

        ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
        if (ucmd == NULL)
                goto out;

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
        ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;

        /* Trace after sess_h has been filled in, so the logged value is valid */
        TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %Lx)", ucmd,
                ucmd->h, ucmd->user_cmd.sess.sess_h);

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
        dev->detach_cmd_count++;
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd->state = UCMD_STATE_DETACH_SESS;

        dev_user_add_to_ready(ucmd);

out:
        TRACE_EXIT();
        return;
}

/* No locks are needed, but the activity must be suspended */
static void dev_user_setup_functions(struct scst_user_dev *dev)
{
        TRACE_ENTRY();

        dev->devtype.parse = dev_user_parse;
        dev->devtype.dev_done = NULL;

        if (dev->parse_type != SCST_USER_PARSE_CALL) {
                switch(dev->devtype.type) {
                case TYPE_DISK:
                        dev->generic_parse = scst_sbc_generic_parse;
                        dev->devtype.dev_done = dev_user_disk_done;
                        break;

                case TYPE_TAPE:
                        dev->generic_parse = scst_tape_generic_parse;
                        dev->devtype.dev_done = dev_user_tape_done;
                        break;

                case TYPE_MOD:
                        dev->generic_parse = scst_modisk_generic_parse;
                        dev->devtype.dev_done = dev_user_disk_done;
                        break;

                case TYPE_ROM:
                        dev->generic_parse = scst_cdrom_generic_parse;
                        dev->devtype.dev_done = dev_user_disk_done;
                        break;

                case TYPE_MEDIUM_CHANGER:
                        dev->generic_parse = scst_changer_generic_parse;
                        break;

                case TYPE_PROCESSOR:
                        dev->generic_parse = scst_processor_generic_parse;
                        break;

                case TYPE_RAID:
                        dev->generic_parse = scst_raid_generic_parse;
                        break;

                default:
                        PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
                                "for it", dev->devtype.type);
                        dev->parse_type = SCST_USER_PARSE_CALL;
                        break;
                }
        } else {
                dev->generic_parse = NULL;
                dev->devtype.dev_done = NULL;
        }

        TRACE_EXIT();
        return;
}

static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
{
        char ver[sizeof(DEV_USER_VERSION)+1];
        int res;

        res = copy_from_user(ver, (void*)(unsigned long)dev_desc->version_str,
                                sizeof(ver));
        if (res != 0) {
                PRINT_ERROR("%s", "Unable to get version string");
                res = -EFAULT;
                goto out;
        }
        ver[sizeof(ver)-1] = '\0';

        if (strcmp(ver, DEV_USER_VERSION) != 0) {
                /* ->name already 0-terminated in dev_user_ioctl() */
                PRINT_ERROR("Incorrect version of user device %s (%s)",
                        dev_desc->name, ver);
                res = -EINVAL;
                goto out;
        }

out:
        return res;
}

static int dev_user_register_dev(struct file *file,
        const struct scst_user_dev_desc *dev_desc)
{
        int res = -ENOMEM, i;
        struct scst_user_dev *dev, *d;
        int block;

        TRACE_ENTRY();

        res = dev_user_check_version(dev_desc);
        if (res != 0)
                goto out;

        switch(dev_desc->type) {
        case TYPE_DISK:
        case TYPE_ROM:
        case TYPE_MOD:
                if (dev_desc->block_size == 0) {
                        PRINT_ERROR("Wrong block size %d", dev_desc->block_size);
                        res = -EINVAL;
                        goto out;
                }
                block = scst_calc_block_shift(dev_desc->block_size);
                if (block == -1) {
                        res = -EINVAL;
                        goto out;
                }
                break;
        default:
                block = dev_desc->block_size;
                break;
        }
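        /*
         * For the disk-like types "block" is thus a shift: e.g. a 512-byte
         * block size gives block 9 (512 == 1 << 9), and
         * scst_calc_block_shift() returns -1 for sizes it can't convert.
         * For the remaining types block_size is used as-is.
         */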

        if (!try_module_get(THIS_MODULE)) {
                PRINT_ERROR("%s", "Failed to get module");
                res = -ENODEV;
                goto out;
        }

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (dev == NULL) {
                res = -ENOMEM;
                goto out_put;
        }

        init_rwsem(&dev->dev_rwsem);
        spin_lock_init(&dev->cmd_lists.cmd_list_lock);
        INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
        init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
        INIT_LIST_HEAD(&dev->ready_cmd_list);
        INIT_LIST_HEAD(&dev->prio_ready_cmd_list);
        init_waitqueue_head(&dev->prio_cmd_list_waitQ);
        if (file->f_flags & O_NONBLOCK) {
                TRACE_DBG("%s", "Non-blocking operations");
                dev->blocking = 0;
        } else
                dev->blocking = 1;
        for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
                INIT_LIST_HEAD(&dev->ucmd_hash[i]);
        INIT_LIST_HEAD(&dev->pre_unreg_sess_list);

        strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
        dev->name[sizeof(dev->name)-1] = '\0';

        /*
         * We don't use a clustered pool, since it implies page reordering,
         * which isn't possible with user-space-supplied buffers. Although
         * it's still possible to cluster pages tail-to-head, it doesn't
         * seem worth the effort.
         */
        dev->pool = sgv_pool_create(dev->name, 0);
        if (dev->pool == NULL) {
                res = -ENOMEM;
                kfree(dev);
                goto out_put;
        }
        sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
                dev_user_free_sg_entries);

        scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
                dev->name);
        dev->devtype.type = dev_desc->type;
        dev->devtype.threads_num = -1;
        dev->devtype.parse_atomic = 1;
        dev->devtype.exec_atomic = 0; /* no point in making it 1 */
        dev->devtype.dev_done_atomic = 1;
        dev->devtype.no_proc = 1;
        dev->devtype.attach = dev_user_attach;
        dev->devtype.detach = dev_user_detach;
        dev->devtype.attach_tgt = dev_user_attach_tgt;
        dev->devtype.pre_unreg_sess = dev_user_pre_unreg_sess;
        dev->devtype.detach_tgt = dev_user_detach_tgt;
        dev->devtype.exec = dev_user_exec;
        dev->devtype.on_free_cmd = dev_user_on_free_cmd;
        dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;

        init_completion(&dev->cleanup_cmpl);
        dev->block = block;
        dev->def_block = dev->block;

        res = __dev_user_set_opt(dev, &dev_desc->opt);

        TRACE_MEM("dev %p, name %s", dev, dev->name);

        spin_lock(&dev_list_lock);

        list_for_each_entry(d, &dev_list, dev_list_entry) {
                if (strcmp(d->name, dev->name) == 0) {
                        PRINT_ERROR("Device %s already exists",
                                dev->name);
                        res = -EEXIST;
                        spin_unlock(&dev_list_lock);
                        goto out_free;
                }
        }

        list_add_tail(&dev->dev_list_entry, &dev_list);

        spin_unlock(&dev_list_lock);

        if (res != 0)
                goto out_del_free;

        res = scst_register_virtual_dev_driver(&dev->devtype);
        if (res < 0)
                goto out_del_free;

        dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
        if (dev->virt_id < 0) {
                res = dev->virt_id;
                goto out_unreg_handler;
        }

        mutex_lock(&dev_priv_mutex);
        if (file->private_data != NULL) {
                mutex_unlock(&dev_priv_mutex);
                PRINT_ERROR("%s", "Device already registered");
                res = -EINVAL;
                goto out_unreg_drv;
        }
        file->private_data = dev;
        mutex_unlock(&dev_priv_mutex);

out:
        TRACE_EXIT_RES(res);
        return res;

out_unreg_drv:
        scst_unregister_virtual_device(dev->virt_id);

out_unreg_handler:
        scst_unregister_virtual_dev_driver(&dev->devtype);

out_del_free:
        spin_lock(&dev_list_lock);
        list_del(&dev->dev_list_entry);
        spin_unlock(&dev_list_lock);

out_free:
        sgv_pool_destroy(dev->pool);
        kfree(dev);
        goto out_put;

out_put:
        module_put(THIS_MODULE);
        goto out;
}

static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
                "partial_transfers_type %x, partial_len %d", opt->parse_type,
                opt->on_free_cmd_type, opt->memory_reuse_type,
                opt->partial_transfers_type, opt->partial_len);

        if ((opt->parse_type > SCST_USER_MAX_PARSE_OPT) ||
            (opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT) ||
            (opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT) ||
            (opt->prio_queue_type > SCST_USER_MAX_PRIO_QUEUE_OPT) ||
            (opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT)) {
                PRINT_ERROR("%s", "Invalid option");
                res = -EINVAL;
                goto out;
        }

        if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
             (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
            ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
             (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
            (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1)) {
                PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x, "
                        "tas %x, has_own_order_mgmt %x)", opt->tst,
                        opt->queue_alg, opt->swp, opt->tas, opt->has_own_order_mgmt);
                res = -EINVAL;
                goto out;
        }

        if ((dev->prio_queue_type != opt->prio_queue_type) &&
            (opt->prio_queue_type == SCST_USER_PRIO_QUEUE_SINGLE)) {
                struct scst_user_cmd *u, *t;
                /* No need for lock, the activity is suspended */
                list_for_each_entry_safe(u, t, &dev->prio_ready_cmd_list,
                                ready_cmd_list_entry) {
                        list_move_tail(&u->ready_cmd_list_entry,
                                &dev->ready_cmd_list);
                }
        }

        dev->prio_queue_type = opt->prio_queue_type;
        dev->parse_type = opt->parse_type;
        dev->on_free_cmd_type = opt->on_free_cmd_type;
        dev->memory_reuse_type = opt->memory_reuse_type;
        dev->partial_transfers_type = opt->partial_transfers_type;
        dev->partial_len = opt->partial_len;

        dev->tst = opt->tst;
        dev->queue_alg = opt->queue_alg;
        dev->swp = opt->swp;
        dev->tas = opt->tas;
        dev->has_own_order_mgmt = opt->has_own_order_mgmt;
        if (dev->sdev != NULL) {
                dev->sdev->tst = opt->tst;
                dev->sdev->queue_alg = opt->queue_alg;
                dev->sdev->swp = opt->swp;
                dev->sdev->tas = opt->tas;
                dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
        }

        dev_user_setup_functions(dev);

out:
        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
{
        int res = 0;
        struct scst_user_dev *dev;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev*)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        scst_suspend_activity();
        res = __dev_user_set_opt(dev, opt);
        scst_resume_activity();

        up_read(&dev->dev_rwsem);

out:
        TRACE_EXIT_RES(res);
        return res;
}
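
/*
 * A minimal user space sketch of tweaking options at runtime; fd is the
 * assumed descriptor of a registered device, and the option value below
 * is only an example (see scst_user.h for the real constants). Reading
 * the current options first keeps the untouched fields valid, since
 * SET_OPTIONS validates and applies the whole structure:
 *
 *      struct scst_user_opt opt;
 *
 *      if (ioctl(fd, SCST_USER_GET_OPTIONS, &opt) == 0) {
 *              opt.parse_type = SCST_USER_PARSE_CALL;
 *              if (ioctl(fd, SCST_USER_SET_OPTIONS, &opt) != 0)
 *                      perror("SCST_USER_SET_OPTIONS");
 *      }
 */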

static int dev_user_get_opt(struct file *file, void *arg)
{
        int res = 0;
        struct scst_user_dev *dev;
        struct scst_user_opt opt;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev*)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        opt.parse_type = dev->parse_type;
        opt.on_free_cmd_type = dev->on_free_cmd_type;
        opt.memory_reuse_type = dev->memory_reuse_type;
        opt.prio_queue_type = dev->prio_queue_type;
        opt.partial_transfers_type = dev->partial_transfers_type;
        opt.partial_len = dev->partial_len;
        opt.tst = dev->tst;
        opt.queue_alg = dev->queue_alg;
        opt.tas = dev->tas;
        opt.swp = dev->swp;
        opt.has_own_order_mgmt = dev->has_own_order_mgmt;

        TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
                "partial_transfers_type %x, partial_len %d", opt.parse_type,
                opt.on_free_cmd_type, opt.memory_reuse_type,
                opt.partial_transfers_type, opt.partial_len);

        res = copy_to_user(arg, &opt, sizeof(opt));
        if (res != 0)
                res = -EFAULT;

        up_read(&dev->dev_rwsem);
out:
        TRACE_EXIT_RES(res);
        return res;
}

static int dev_usr_parse(struct scst_cmd *cmd)
{
        sBUG();
        return SCST_CMD_STATE_DEFAULT;
}

/* Needed only for /proc support */
#define USR_TYPE {                      \
        .name =         DEV_USER_NAME,  \
        .type =         -1,             \
        .parse =        dev_usr_parse,  \
}

static struct scst_dev_type dev_user_devtype = USR_TYPE;

static int dev_user_release(struct inode *inode, struct file *file)
{
        int res = 0;
        struct scst_user_dev *dev;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev*)file->private_data;
        if (dev == NULL) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        file->private_data = NULL;

        TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);

        spin_lock(&dev_list_lock);
        list_del(&dev->dev_list_entry);
        spin_unlock(&dev_list_lock);

        mutex_unlock(&dev_priv_mutex);

        down_write(&dev->dev_rwsem);

        spin_lock(&cleanup_lock);
        list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
        spin_unlock(&cleanup_lock);

        wake_up(&cleanup_list_waitQ);
        wake_up(&dev->prio_cmd_list_waitQ);
        wake_up(&dev->cmd_lists.cmd_list_waitQ);

        scst_unregister_virtual_device(dev->virt_id);
        scst_unregister_virtual_dev_driver(&dev->devtype);

        sgv_pool_destroy(dev->pool);

        TRACE_DBG("Unregistering finished (dev %p)", dev);

        dev->cleanup_done = 1;
        wake_up(&cleanup_list_waitQ);
        wake_up(&dev->prio_cmd_list_waitQ);
        wake_up(&dev->cmd_lists.cmd_list_waitQ);
        wait_for_completion(&dev->cleanup_cmpl);

        up_write(&dev->dev_rwsem); /* to make the debug check happy */

        TRACE_DBG("Releasing completed (dev %p)", dev);

        kfree(dev);

        module_put(THIS_MODULE);

out:
        TRACE_EXIT_RES(res);
        return res;
}
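
/*
 * Release hands the device over to the cleanup thread: the device is
 * queued on cleanup_list, both wait queues are woken so that any handler
 * thread blocked in a get-cmd wait can drop out, and release() then
 * blocks on cleanup_cmpl until dev_user_process_cleanup() below has
 * unjammed and drained every outstanding command.
 */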
2979
static void dev_user_process_cleanup(struct scst_user_dev *dev)
{
        struct scst_user_cmd *ucmd;
        int rc;

        TRACE_ENTRY();

        dev->prio_queue_type = SCST_USER_PRIO_QUEUE_SINGLE;
        dev->cleaning = 1;
        dev->blocking = 1;

        while (1) {
                TRACE_DBG("Cleaning up dev %p", dev);

                dev_user_unjam_dev(dev, 0, NULL);

                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                rc = dev_user_get_next_prio_cmd(dev, &ucmd);
                if (rc != 0)
                        rc = dev_user_get_next_cmd(dev, &ucmd);
                if (rc == 0)
                        dev_user_unjam_cmd(ucmd, 1, NULL);
                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                if ((rc == -EAGAIN) && dev->cleanup_done)
                        break;
        }

#ifdef EXTRACHECKS
{
        int i;
        for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
                struct list_head *head = &dev->ucmd_hash[i];
                struct scst_user_cmd *ucmd, *t;
                list_for_each_entry_safe(ucmd, t, head, hash_list_entry) {
                        PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd,
                                ucmd->state, atomic_read(&ucmd->ucmd_ref));
                        ucmd_put(ucmd);
                }
        }
}
#endif

        TRACE_DBG("Cleanup done (dev %p)", dev);
        complete_all(&dev->cleanup_cmpl);

        TRACE_EXIT();
        return;
}

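/* Wakeup condition for the cleanup thread's wait loop below */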
static inline int test_cleanup_list(void)
{
        int res = !list_empty(&cleanup_list) ||
                  unlikely(kthread_should_stop());
        return res;
}

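/*
 * Kernel thread that performs the final cleanup of released devices.
 * It sleeps on cleanup_list_waitQ and processes cleanup_list entries
 * one at a time, dropping cleanup_lock around the actual cleanup work.
 */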
static int dev_user_cleanup_thread(void *arg)
{
        struct scst_user_dev *dev;

        TRACE_ENTRY();

        PRINT_INFO("Cleanup thread started, PID %d", current->pid);

        current->flags |= PF_NOFREEZE;

        spin_lock(&cleanup_lock);
        while (!kthread_should_stop()) {
                wait_queue_t wait;
                init_waitqueue_entry(&wait, current);

                if (!test_cleanup_list()) {
                        add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_cleanup_list())
                                        break;
                                spin_unlock(&cleanup_lock);
                                schedule();
                                spin_lock(&cleanup_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&cleanup_list_waitQ, &wait);
                }
restart:
                list_for_each_entry(dev, &cleanup_list, cleanup_list_entry) {
                        list_del(&dev->cleanup_list_entry);
                        spin_unlock(&cleanup_lock);
                        dev_user_process_cleanup(dev);
                        spin_lock(&cleanup_lock);
                        goto restart;
                }
        }
        spin_unlock(&cleanup_lock);

        /*
         * If kthread_should_stop() is true, we are guaranteed to be
         * in module unload, so cleanup_list must be empty.
         */
        sBUG_ON(!list_empty(&cleanup_list));

        PRINT_INFO("Cleanup thread PID %d finished", current->pid);

        TRACE_EXIT();
        return 0;
}

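/*
 * Module init: allocate the command slab, register the virtual device
 * driver and its /proc entries, create the sysfs class and character
 * device, then start the cleanup thread. The error path unwinds these
 * steps in reverse order.
 */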
static int __init init_scst_user(void)
{
        int res = 0;
        struct class_device *class_member;

        TRACE_ENTRY();

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
        PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
                "Consider changing the VMSPLIT option or using a 64-bit "
                "configuration instead. See the README file for details.");
        res = -EINVAL;
        goto out;
#endif

        user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
        if (user_cmd_cachep == NULL) {
                res = -ENOMEM;
                goto out;
        }

        dev_user_devtype.module = THIS_MODULE;

        res = scst_register_virtual_dev_driver(&dev_user_devtype);
        if (res < 0)
                goto out_cache;

        res = scst_dev_handler_build_std_proc(&dev_user_devtype);
        if (res != 0)
                goto out_unreg;

        dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
        if (IS_ERR(dev_user_sysfs_class)) {
                PRINT_ERROR("%s", "Unable to create sysfs class for the SCST "
                        "user space handler");
                res = PTR_ERR(dev_user_sysfs_class);
                goto out_proc;
        }

        res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
        if (res) {
                PRINT_ERROR("Unable to get major %d for the SCST user space "
                        "handler", DEV_USER_MAJOR);
                goto out_class;
        }

        class_member = class_device_create(dev_user_sysfs_class, NULL,
                MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
        if (IS_ERR(class_member)) {
                res = PTR_ERR(class_member);
                goto out_chrdev;
        }

        cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
                "scst_usr_cleanupd");
        if (IS_ERR(cleanup_thread)) {
                res = PTR_ERR(cleanup_thread);
                PRINT_ERROR("kthread_run() failed: %d", res);
                goto out_dev;
        }

out:
        TRACE_EXIT_RES(res);
        return res;

out_dev:
        class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));

out_chrdev:
        unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);

out_class:
        class_destroy(dev_user_sysfs_class);

out_proc:
        scst_dev_handler_destroy_std_proc(&dev_user_devtype);

out_unreg:
        scst_unregister_virtual_dev_driver(&dev_user_devtype);

out_cache:
        kmem_cache_destroy(user_cmd_cachep);
        goto out;
}

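/*
 * Module exit: stop the cleanup thread, then tear everything down in
 * the reverse order of init_scst_user().
 */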
static void __exit exit_scst_user(void)
{
        int rc;

        TRACE_ENTRY();

        rc = kthread_stop(cleanup_thread);
        if (rc < 0)
                TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);

        unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
        class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
        class_destroy(dev_user_sysfs_class);

        scst_dev_handler_destroy_std_proc(&dev_user_devtype);
        scst_unregister_virtual_dev_driver(&dev_user_devtype);

        kmem_cache_destroy(user_cmd_cachep);

        TRACE_EXIT();
        return;
}

module_init(init_scst_user);
module_exit(exit_scst_user);

MODULE_AUTHOR("Vladislav Bolkhovitin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Virtual user space device handler for SCST");
MODULE_VERSION(SCST_VERSION_STRING);
MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);