/*
 *  scst_user.c
 *
 *  Copyright (C) 2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 *  SCSI virtual user space device handler
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/poll.h>

#define LOG_PREFIX              DEV_USER_NAME

#include "scsi_tgt.h"
#include "scst_user.h"
#include "scst_dev_handler.h"

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
#warning HIGHMEM kernel configurations are not supported by this module, \
        because nowadays it is not worth the effort. Consider changing the \
        VMSPLIT option or using a 64-bit configuration instead. See the \
        README file for details.
#endif

#define DEV_USER_MAJOR                  237
#define DEV_USER_CMD_HASH_ORDER         6
#define DEV_USER_TM_TIMEOUT             (10*HZ)
#define DEV_USER_ATTACH_TIMEOUT         (5*HZ)
#define DEV_USER_DETACH_TIMEOUT         (5*HZ)
#define DEV_USER_PRE_UNREG_POLL_TIME    (HZ/10)

struct scst_user_dev
{
        struct rw_semaphore dev_rwsem;

        struct scst_cmd_lists cmd_lists;
        /* All 3 protected by cmd_lists.cmd_list_lock */
        struct list_head ready_cmd_list;
        struct list_head prio_ready_cmd_list;
        wait_queue_head_t prio_cmd_list_waitQ;

        /* All, including detach_cmd_count, protected by cmd_lists.cmd_list_lock */
        unsigned short blocking:1;
        unsigned short cleaning:1;
        unsigned short cleanup_done:1;
        unsigned short attach_cmd_active:1;
        unsigned short tm_cmd_active:1;
        unsigned short internal_reset_active:1;
        unsigned short pre_unreg_sess_active:1; /* just a small optimization */

        unsigned short tst:3;
        unsigned short queue_alg:4;
        unsigned short tas:1;
        unsigned short swp:1;
        unsigned short has_own_order_mgmt:1;

        unsigned short detach_cmd_count;

        int (*generic_parse)(struct scst_cmd *cmd,
                int (*get_block)(struct scst_cmd *cmd));

        int block;
        int def_block;

        struct sgv_pool *pool;

        uint8_t parse_type;
        uint8_t on_free_cmd_type;
        uint8_t memory_reuse_type;
        uint8_t prio_queue_type;
        uint8_t partial_transfers_type;
        uint32_t partial_len;

        struct scst_dev_type devtype;

        /* Both protected by cmd_lists.cmd_list_lock */
        unsigned int handle_counter;
        struct list_head ucmd_hash[1<<DEV_USER_CMD_HASH_ORDER];

        struct scst_device *sdev;

        int virt_id;
        struct list_head dev_list_entry;
        char name[SCST_MAX_NAME];

        /* Protected by cmd_lists.cmd_list_lock */
        struct list_head pre_unreg_sess_list;

        struct list_head cleanup_list_entry;
        struct completion cleanup_cmpl;
};

struct scst_user_pre_unreg_sess_obj
{
        struct scst_tgt_dev *tgt_dev;
        unsigned int active:1;
        unsigned int exit:1;
        struct list_head pre_unreg_sess_list_entry;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        struct work_struct pre_unreg_sess_work;
#else
        struct delayed_work pre_unreg_sess_work;
#endif
};

/* Most fields are unprotected, since only one thread at a time can access them */
struct scst_user_cmd
{
        struct scst_cmd *cmd;
        struct scst_user_dev *dev;

        atomic_t ucmd_ref;

        unsigned int buff_cached:1;
        unsigned int buf_dirty:1;
        unsigned int background_exec:1;
        unsigned int internal_reset_tm:1;
        unsigned int aborted:1;

        struct scst_user_cmd *buf_ucmd;

        int cur_data_page;
        int num_data_pages;
        int first_page_offset;
        unsigned long ubuff;
        struct page **data_pages;
        struct sgv_pool_obj *sgv;

        unsigned int state;

        struct list_head ready_cmd_list_entry;

        unsigned int h;
        struct list_head hash_list_entry;

        struct scst_user_get_cmd user_cmd;

        struct completion *cmpl;
        int result;
};

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask);
static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);

static int dev_user_parse(struct scst_cmd *cmd);
static int dev_user_exec(struct scst_cmd *cmd);
static void dev_user_on_free_cmd(struct scst_cmd *cmd);
static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_disk_done(struct scst_cmd *cmd);
static int dev_user_tape_done(struct scst_cmd *cmd);

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv);
static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv);

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);

static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags);
static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status);
static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
static int dev_user_register_dev(struct file *file,
        const struct scst_user_dev_desc *dev_desc);
static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt);
static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
static int dev_user_get_opt(struct file *file, void *arg);

static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
static long dev_user_ioctl(struct file *file, unsigned int cmd,
        unsigned long arg);
static int dev_user_release(struct inode *inode, struct file *file);

/** Data **/

static struct kmem_cache *user_cmd_cachep;

static DEFINE_MUTEX(dev_priv_mutex);

static struct file_operations dev_user_fops = {
        .poll           = dev_user_poll,
        .unlocked_ioctl = dev_user_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = dev_user_ioctl,
#endif
        .release        = dev_user_release,
};
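
/*
 * For orientation, a user-space consumer of this device typically runs an
 * event loop like the sketch below. This is only an illustration: the
 * device node name is an assumption here, and the actual SCST_USER_*
 * ioctl numbers and the "get next command" primitive are defined in
 * scst_user.h, not in this file.
 *
 *      int fd = open("/dev/scst_user", O_RDWR);        // assumed node name
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      for (;;) {
 *              poll(&pfd, 1, -1);      // dev_user_poll() reports ready cmds
 *              // ioctl(fd, <get-cmd ioctl from scst_user.h>, &get);
 *              // switch (get.subcode): SCST_USER_PARSE, SCST_USER_ALLOC_MEM,
 *              // SCST_USER_EXEC, SCST_USER_ON_FREE_CMD, ... then reply back.
 *      }
 */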

static struct class *dev_user_sysfs_class;

static spinlock_t dev_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(dev_list);

static spinlock_t cleanup_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(cleanup_list);
static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
static struct task_struct *cleanup_thread;

static inline void ucmd_get(struct scst_user_cmd *ucmd, int barrier)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        atomic_inc(&ucmd->ucmd_ref);
        if (barrier)
                smp_mb__after_atomic_inc();
}

static inline void ucmd_put(struct scst_user_cmd *ucmd)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        if (atomic_dec_and_test(&ucmd->ucmd_ref))
                dev_user_free_ucmd(ucmd);
}
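
/*
 * A sketch of the intended reference-counting discipline (illustration
 * only, using the helpers above): a ucmd starts with ucmd_ref == 1 from
 * dev_user_alloc_ucmd(), everything that pins it beyond the current code
 * path takes an extra reference, and the final put frees it:
 *
 *      ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);    // ucmd_ref == 1
 *      ucmd_get(ucmd, 0);      // e.g. mapped user pages pin it, ref == 2
 *      ucmd_put(ucmd);         // pages released, ucmd_ref == 1
 *      ucmd_put(ucmd);         // ucmd_ref == 0 -> dev_user_free_ucmd()
 */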

static inline int calc_num_pg(unsigned long buf, int len)
{
        len += buf & ~PAGE_MASK;
        return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
}

static inline int is_need_offs_page(unsigned long buf, int len)
{
        return ((buf & ~PAGE_MASK) != 0) &&
                ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
}
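
/*
 * Worked example for the two helpers above (assuming PAGE_SIZE == 4096,
 * i.e. PAGE_SHIFT == 12): for buf == 0x12345100 and len == 0x2100,
 *
 *      calc_num_pg():       len += 0x100 -> 0x2200;
 *                           (0x2200 >> 12) + (0x200 != 0) == 2 + 1 == 3 pages
 *      is_need_offs_page(): offset 0x100 != 0 and the first page
 *                           (0x12345000) differs from the last (0x12347000),
 *                           so an extra page is needed for the unaligned tail.
 */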

static void __dev_user_not_reg(void)
{
        PRINT_ERROR("%s", "Device not registered");
        return;
}

static inline int dev_user_check_reg(struct scst_user_dev *dev)
{
        if (dev == NULL) {
                __dev_user_not_reg();
                return -EINVAL;
        }
        return 0;
}

static inline int scst_user_cmd_hashfn(int h)
{
        return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
}
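
/*
 * Example: with DEV_USER_CMD_HASH_ORDER == 6 the hash has 1 << 6 == 64
 * buckets and a handle maps to a bucket by its low 6 bits, e.g.
 * scst_user_cmd_hashfn(0x1C7) == 0x1C7 & 0x3F == 7. Since handles come
 * from a simple counter (see cmnd_insert_hash() below), consecutive
 * commands spread evenly over the buckets.
 */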

static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
        unsigned int h)
{
        struct list_head *head;
        struct scst_user_cmd *ucmd;

        head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
        list_for_each_entry(ucmd, head, hash_list_entry) {
                if (ucmd->h == h) {
                        TRACE_DBG("Found ucmd %p", ucmd);
                        return ucmd;
                }
        }
        return NULL;
}

static void cmnd_insert_hash(struct scst_user_cmd *ucmd)
{
        struct list_head *head;
        struct scst_user_dev *dev = ucmd->dev;
        struct scst_user_cmd *u;
        unsigned long flags;

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
        do {
                ucmd->h = dev->handle_counter++;
                u = __ucmd_find_hash(dev, ucmd->h);
        } while (u != NULL);
        head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
        list_add_tail(&ucmd->hash_list_entry, head);
        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Inserted ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static inline void cmnd_remove_hash(struct scst_user_cmd *ucmd)
{
        unsigned long flags;
        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
        list_del(&ucmd->hash_list_entry);
        spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Freeing ucmd %p", ucmd);

        cmnd_remove_hash(ucmd);
        EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);

        kmem_cache_free(user_cmd_cachep, ucmd);

        TRACE_EXIT();
        return;
}

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;

        TRACE_ENTRY();

        /* *sg is supposed to be zeroed */

        TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
                ucmd->ubuff, ucmd->cur_data_page);

        if (ucmd->cur_data_page == 0) {
                TRACE_MEM("ucmd->first_page_offset %d",
                        ucmd->first_page_offset);
                sg->offset = ucmd->first_page_offset;
                ucmd_get(ucmd, 0);
        }

        if (ucmd->cur_data_page >= ucmd->num_data_pages)
                goto out;

        sg->page = ucmd->data_pages[ucmd->cur_data_page];
        sg->length = PAGE_SIZE - sg->offset;

        ucmd->cur_data_page++;

        TRACE_MEM("page=%p, length=%d", sg->page, sg->length);
        TRACE_BUFFER("Page data", page_address(sg->page), sg->length);

out:
        TRACE_EXIT();
        return sg->page;
}
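
/*
 * Illustration of how the sgv pool is expected to drive the allocator
 * above: it is called once per SG entry with *sg zeroed. The first call
 * sets sg->offset to the offset of ubuff within its page and takes an
 * extra ucmd reference; each call then hands out the next pinned user
 * page. E.g. with first_page_offset == 0x100:
 *
 *      call 0: sg->offset == 0x100, sg->length == PAGE_SIZE - 0x100
 *      call 1: sg->offset == 0,     sg->length == PAGE_SIZE
 *      ... until cur_data_page reaches num_data_pages (then returns NULL).
 */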

static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
                ucmd, ucmd->h, ucmd->ubuff);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
        ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;

        ucmd->state = UCMD_STATE_ON_CACHE_FREEING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
{
        int i;

        TRACE_ENTRY();

        TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
                ucmd->ubuff, ucmd->num_data_pages);

        for (i = 0; i < ucmd->num_data_pages; i++) {
                struct page *page = ucmd->data_pages[i];

                if (ucmd->buf_dirty)
                        SetPageDirty(page);

                page_cache_release(page);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;

        TRACE_EXIT();
        return;
}

static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        sBUG_ON(ucmd->data_pages == NULL);

        TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
                ucmd, ucmd->ubuff, ucmd->buff_cached);

        dev_user_unmap_buf(ucmd);

        if (ucmd->buff_cached)
                dev_user_on_cached_mem_free(ucmd);
        else
                ucmd_put(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;

        TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
                sg_count, ucmd);

        __dev_user_free_sg_entries(ucmd);

        return;
}

static inline int is_buff_cached(struct scst_user_cmd *ucmd)
{
        int mem_reuse_type = ucmd->dev->memory_reuse_type;

        if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
            ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
            ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE))) {
                return 1;
        } else
                return 0;
}

/*
 * Returns 0 on success, <0 on fatal failure and >0 if pages are needed.
 * Unmaps the buffer, if needed, in case of error.
 */
static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;
        struct scst_user_dev *dev = ucmd->dev;
        int gfp_mask, flags = 0;
        int bufflen = cmd->bufflen;
        int last_len = 0;

        TRACE_ENTRY();

        gfp_mask = __GFP_NOWARN;
        gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);

        if (cached_buff) {
                flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
                if (ucmd->ubuff == 0)
                        flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
        } else {
                TRACE_MEM("%s", "Not cached buff");
                flags |= SCST_POOL_ALLOC_NO_CACHED;
                if (ucmd->ubuff == 0) {
                        res = 1;
                        goto out;
                }
                bufflen += ucmd->first_page_offset;
                if (is_need_offs_page(ucmd->ubuff, cmd->bufflen))
                        last_len = bufflen & ~PAGE_MASK;
                else
                        last_len = cmd->bufflen & ~PAGE_MASK;
                if (last_len == 0)
                        last_len = PAGE_SIZE;
        }
        ucmd->buff_cached = cached_buff;

        cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &ucmd->sgv, ucmd);
        if (cmd->sg != NULL) {
                struct scst_user_cmd *buf_ucmd =
                        (struct scst_user_cmd*)sgv_get_priv(ucmd->sgv);

                TRACE_MEM("Buf ucmd %p", buf_ucmd);

                ucmd->ubuff = buf_ucmd->ubuff;
                ucmd->buf_ucmd = buf_ucmd;

                TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
                        "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff,
                        last_len, cmd->sg[cmd->sg_cnt-1].length);

                EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
                                   (ucmd != buf_ucmd));

                if (last_len != 0) {
                        /* We don't use clustering, so the assignment is safe */
                        cmd->sg[cmd->sg_cnt-1].length = last_len;
                }

                if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
                        static int ll;
                        if (ll < 10) {
                                PRINT_INFO("Unable to complete command due to "
                                        "SG IO count limitation (requested %d, "
                                        "available %d, tgt lim %d)", cmd->sg_cnt,
                                        cmd->tgt_dev->max_sg_cnt,
                                        cmd->tgt->sg_tablesize);
                                ll++;
                        }
                        cmd->sg = NULL;
                        /* sgv will be freed in dev_user_free_sgv() */
                        res = -1;
                }
        } else {
                TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
                        "sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
                        ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
                if (unlikely(cmd->sg_cnt == 0)) {
                        TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
                        sBUG_ON(ucmd->sgv != NULL);
                        res = -1;
                } else {
                        switch (ucmd->state & ~UCMD_STATE_MASK) {
                        case UCMD_STATE_BUF_ALLOCING:
                                res = 1;
                                break;
                        case UCMD_STATE_EXECING:
                                res = -1;
                                break;
                        default:
                                sBUG();
                                break;
                        }
                }
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(ucmd->cmd->data_buf_tgt_alloc)) {
                PRINT_ERROR("Target driver %s requested own memory "
                        "allocation", ucmd->cmd->tgtt->name);
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        ucmd->state = UCMD_STATE_BUF_ALLOCING;
        cmd->data_buf_alloced = 1;

        rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
        if (rc == 0)
                goto out;
        else if (rc < 0) {
                scst_set_busy(cmd);
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        if ((cmd->data_direction != SCST_DATA_WRITE) &&
            !scst_is_cmd_local(cmd)) {
                TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
        ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb)));
        ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
                (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;

        dev_user_add_to_ready(ucmd);

        res = SCST_CMD_STATE_STOP;

out:
        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask)
{
        struct scst_user_cmd *ucmd = NULL;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
        if (ucmd != NULL)
                memset(ucmd, 0, sizeof(*ucmd));
#else
        ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
#endif
        if (unlikely(ucmd == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
                        "user cmd (gfp_mask %x)", gfp_mask);
                goto out;
        }
        ucmd->dev = dev;
        atomic_set(&ucmd->ucmd_ref, 1);

        cmnd_insert_hash(ucmd);

        TRACE_MEM("ucmd %p allocated", ucmd);

out:
        TRACE_EXIT_HRES((unsigned long)ucmd);
        return ucmd;
}

static int dev_user_get_block(struct scst_cmd *cmd)
{
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are outstanding commands.
         */
        TRACE_EXIT_RES(dev->block);
        return dev->block;
}

static int dev_user_parse(struct scst_cmd *cmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_user_cmd *ucmd;
        int atomic = scst_cmd_atomic(cmd);
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        int gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        if (cmd->dh_priv == NULL) {
                ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
                if (unlikely(ucmd == NULL)) {
                        if (atomic) {
                                res = SCST_CMD_STATE_NEED_THREAD_CTX;
                                goto out;
                        } else {
                                scst_set_busy(cmd);
                                goto out_error;
                        }
                }
                ucmd->cmd = cmd;
                cmd->dh_priv = ucmd;
        } else {
                ucmd = (struct scst_user_cmd*)cmd->dh_priv;
                TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
        }

        TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);

        if (ucmd->state != UCMD_STATE_NEW)
                goto alloc;

        switch (dev->parse_type) {
        case SCST_USER_PARSE_STANDARD:
                TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
                        goto out_invalid;
                break;

        case SCST_USER_PARSE_EXCEPTION:
                TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
                        break;
                else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
                        TRACE_MEM("Restarting PARSE to thread context "
                                "(ucmd %p)", ucmd);
                        res = SCST_CMD_STATE_NEED_THREAD_CTX;
                        goto out;
                }
                /* else fall through */

        case SCST_USER_PARSE_CALL:
                TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
                        "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
                ucmd->user_cmd.cmd_h = ucmd->h;
                ucmd->user_cmd.subcode = SCST_USER_PARSE;
                ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
                memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
                        min(sizeof(ucmd->user_cmd.parse_cmd.cdb),
                            sizeof(cmd->cdb)));
                ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
                ucmd->user_cmd.parse_cmd.timeout = cmd->timeout;
                ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
                ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
                ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
                ucmd->user_cmd.parse_cmd.expected_values_set =
                                        cmd->expected_values_set;
                ucmd->user_cmd.parse_cmd.expected_data_direction =
                                        cmd->expected_data_direction;
                ucmd->user_cmd.parse_cmd.expected_transfer_len =
                                        cmd->expected_transfer_len;
                ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
                ucmd->state = UCMD_STATE_PARSING;
                dev_user_add_to_ready(ucmd);
                res = SCST_CMD_STATE_STOP;
                goto out;

        default:
                sBUG();
                goto out;
        }

alloc:
        if (cmd->data_direction != SCST_DATA_NONE)
                res = dev_user_alloc_space(ucmd);

out:
        TRACE_EXIT_RES(res);
        return res;

out_invalid:
        PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
                cmd->op_flags & SCST_INFO_INVALID);
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));

out_error:
        res = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;
}

static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
{
        struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
        unsigned long start = buf_ucmd->ubuff;
        int i;

        TRACE_ENTRY();

        if (start == 0)
                goto out;

        for (i = 0; i < buf_ucmd->num_data_pages; i++) {
                struct page *page;
                page = buf_ucmd->data_pages[i];
#ifdef ARCH_HAS_FLUSH_ANON_PAGE
                struct vm_area_struct *vma = find_vma(current->mm, start);
                if (vma != NULL)
                        flush_anon_page(vma, page, start);
#endif
                flush_dcache_page(page);
                start += PAGE_SIZE;
        }

out:
        TRACE_EXIT();
        return;
}

static int dev_user_exec(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;

        TRACE_ENTRY();

        TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
                "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
                cmd->bufflen, cmd->data_len, ucmd->ubuff);

        if (cmd->data_direction == SCST_DATA_WRITE)
                dev_user_flush_dcache(ucmd);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_EXEC;
        ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.exec_cmd.cdb),
                    sizeof(cmd->cdb)));
        ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
        ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
        ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
        if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
                ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
                        (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        }
        ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.exec_cmd.partial = 0;
        ucmd->user_cmd.exec_cmd.timeout = cmd->timeout;
        ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;

        ucmd->state = UCMD_STATE_EXECING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT();
        return SCST_EXEC_COMPLETED;
}

static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
{
        if (ucmd->sgv != NULL) {
                sgv_pool_free(ucmd->sgv);
                ucmd->sgv = NULL;
        } else if (ucmd->data_pages != NULL) {
                /* We mapped pages, but for some reason didn't allocate them */
                ucmd_get(ucmd, 0);
                __dev_user_free_sg_entries(ucmd);
        }
        return;
}

static void dev_user_on_free_cmd(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;

        TRACE_ENTRY();

        if (unlikely(ucmd == NULL))
                goto out;

        TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
                ucmd->buff_cached, ucmd->ubuff);

        ucmd->cmd = NULL;
        if ((cmd->data_direction == SCST_DATA_WRITE) && (ucmd->buf_ucmd != NULL))
                ucmd->buf_ucmd->buf_dirty = 1;

        if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
                ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
                /* The state assignment must be before freeing sgv! */
                dev_user_free_sgv(ucmd);
                ucmd_put(ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;

        ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
        ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
        ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
        ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
        ucmd->user_cmd.on_free_cmd.status = cmd->status;
        ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;

        ucmd->state = UCMD_STATE_ON_FREEING;

        dev_user_add_to_ready(ucmd);

out:
        TRACE_EXIT();
        return;
}

static void dev_user_set_block(struct scst_cmd *cmd, int block)
{
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are outstanding commands.
         */
        TRACE_DBG("dev %p, new block %d", dev, block);
        if (block != 0)
                dev->block = block;
        else
                dev->block = dev->def_block;
        return;
}

static int dev_user_disk_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_block_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_tape_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_tape_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
{
        struct scst_user_dev *dev = ucmd->dev;
        unsigned long flags;
        int do_wake;

        TRACE_ENTRY();

        do_wake = (in_interrupt() ||
                   (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
        if (ucmd->cmd)
                do_wake |= ucmd->cmd->preprocessing_only;

        EXTRACHECKS_BUG_ON(ucmd->state & UCMD_STATE_JAMMED_MASK);

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);

        /* Hopefully, the compiler will turn this into a single test/jmp */
        if (unlikely(dev->attach_cmd_active || dev->tm_cmd_active ||
                     dev->internal_reset_active || dev->pre_unreg_sess_active ||
                     (dev->detach_cmd_count != 0))) {
                switch (ucmd->state) {
                case UCMD_STATE_PARSING:
                case UCMD_STATE_BUF_ALLOCING:
                case UCMD_STATE_EXECING:
                        if (dev->pre_unreg_sess_active &&
                            !(dev->attach_cmd_active || dev->tm_cmd_active ||
                              dev->internal_reset_active ||
                              (dev->detach_cmd_count != 0))) {
                                struct scst_user_pre_unreg_sess_obj *p, *found = NULL;
                                list_for_each_entry(p, &dev->pre_unreg_sess_list,
                                        pre_unreg_sess_list_entry) {
                                        if (p->tgt_dev == ucmd->cmd->tgt_dev) {
                                                if (p->active)
                                                        found = p;
                                                break;
                                        }
                                }
                                if (found == NULL) {
                                        TRACE_MGMT_DBG("No pre unreg sess "
                                                "active (ucmd %p)", ucmd);
                                        break;
                                } else {
                                        TRACE_MGMT_DBG("Pre unreg sess %p "
                                                "active (ucmd %p)", found, ucmd);
                                }
                        }
                        TRACE(TRACE_MGMT, "Mgmt cmd active, returning BUSY for "
                                "ucmd %p", ucmd);
                        dev_user_unjam_cmd(ucmd, 1, &flags);
                        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
                        goto out;
                }
        }

        if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
            unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
            unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
                if (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE) {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to prio ready cmd "
                                "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->prio_ready_cmd_list);
                        wake_up(&dev->prio_cmd_list_waitQ);
                        do_wake = 0;
                } else {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to ready cmd "
                                "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->ready_cmd_list);
                        do_wake = 1;
                }
        } else if ((ucmd->cmd != NULL) &&
            unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
                TRACE_DBG("Adding ucmd %p to head ready cmd list", ucmd);
                list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        } else {
                TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
                list_add_tail(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        }

        if (do_wake) {
                TRACE_DBG("Waking up dev %p", dev);
                wake_up(&dev->cmd_lists.cmd_list_waitQ);
        }

        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

out:
        TRACE_EXIT();
        return;
}

static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
        int num_pg)
{
        int res = 0, rc;
        int i;

        TRACE_ENTRY();

        if (unlikely(ubuff == 0))
                goto out_nomem;

        sBUG_ON(ucmd->data_pages != NULL);

        ucmd->num_data_pages = num_pg;

        ucmd->data_pages = kzalloc(sizeof(*ucmd->data_pages)*ucmd->num_data_pages,
                GFP_KERNEL);
        if (ucmd->data_pages == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
                        "(num_data_pages=%d)", ucmd->num_data_pages);
                res = -ENOMEM;
                goto out_nomem;
        }

        TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d, "
                "first_page_offset %d, len %d)", ucmd, ubuff,
                ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
                ucmd->cmd->bufflen);

        down_read(&current->mm->mmap_sem);
        rc = get_user_pages(current, current->mm, ubuff, ucmd->num_data_pages,
                1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* get_user_pages() flushes dcache */

        if (rc < ucmd->num_data_pages)
                goto out_unmap;

        ucmd->ubuff = ubuff;
        ucmd->first_page_offset = (ubuff & ~PAGE_MASK);

out:
        TRACE_EXIT_RES(res);
        return res;

out_nomem:
        scst_set_busy(ucmd->cmd);
        /* fall through */

out_err:
        ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;

out_unmap:
        PRINT_ERROR("Failed to get %d user pages (rc %d)",
                ucmd->num_data_pages, rc);
        if (rc > 0) {
                for (i = 0; i < rc; i++)
                        page_cache_release(ucmd->data_pages[i]);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;
        res = -EFAULT;
        scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        goto out_err;
}

static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        TRACE_DBG("ucmd %p, pbuf %Lx", ucmd, reply->alloc_reply.pbuf);

        if (likely(reply->alloc_reply.pbuf != 0)) {
                int pages;
                if (ucmd->buff_cached) {
                        if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
                                PRINT_ERROR("Supplied pbuf %Lx isn't "
                                        "page aligned", reply->alloc_reply.pbuf);
                                goto out_hwerr;
                        }
                        pages = cmd->sg_cnt;
                } else
                        pages = calc_num_pg(reply->alloc_reply.pbuf, cmd->bufflen);
                res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
        } else {
                scst_set_busy(ucmd->cmd);
                ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        }

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_hwerr:
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_parse *preply =
                &reply->parse_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
                     (preply->data_direction != SCST_DATA_READ) &&
                     (preply->data_direction != SCST_DATA_NONE)))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
                     (preply->bufflen == 0)))
                goto out_inval;

        if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
                goto out_inval;

        TRACE_DBG("ucmd %p, queue_type %x, data_direction %x, bufflen %d, "
                "data_len %d, pbuf %Lx", ucmd, preply->queue_type,
                preply->data_direction, preply->bufflen, preply->data_len,
                reply->alloc_reply.pbuf);

        cmd->queue_type = preply->queue_type;
        cmd->data_direction = preply->data_direction;
        cmd->bufflen = preply->bufflen;
        cmd->data_len = preply->data_len;

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid parse_reply parameter(s)");
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON FREE ucmd %p", ucmd);

        dev_user_free_sgv(ucmd);
        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);

        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_exec *ereply =
                &reply->exec_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
                if (ucmd->background_exec) {
                        TRACE_DBG("Background ucmd %p finished", ucmd);
                        ucmd_put(ucmd);
                        goto out;
                }
                if (unlikely(ereply->resp_data_len > cmd->bufflen))
                        goto out_inval;
                if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
                             (ereply->resp_data_len != 0)))
                        goto out_inval;
        } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
                if (unlikely(ucmd->background_exec))
                        goto out_inval;
                if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
                             (cmd->resp_data_len != 0)))
                        goto out_inval;
                ucmd_get(ucmd, 1);
                ucmd->background_exec = 1;
                TRACE_DBG("Background ucmd %p", ucmd);
                goto out_compl;
        } else
                goto out_inval;

        TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
                ereply->status, ereply->resp_data_len);

        if (ereply->resp_data_len != 0) {
                if (ucmd->ubuff == 0) {
                        int pages, rc;
                        if (unlikely(ereply->pbuf == 0))
                                goto out_busy;
                        if (ucmd->buff_cached) {
                                if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
                                        PRINT_ERROR("Supplied pbuf %Lx isn't "
                                                "page aligned", ereply->pbuf);
                                        goto out_hwerr;
                                }
                                pages = cmd->sg_cnt;
                        } else
                                pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
                        rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
                        if ((rc != 0) || (ucmd->ubuff == 0))
                                goto out_compl;

                        rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
                        if (unlikely(rc != 0))
                                goto out_busy;
                } else
                        dev_user_flush_dcache(ucmd);
                cmd->may_need_dma_sync = 1;
                scst_set_resp_data_len(cmd, ereply->resp_data_len);
        } else if (cmd->resp_data_len != ereply->resp_data_len) {
                if (ucmd->ubuff == 0)
                        cmd->resp_data_len = ereply->resp_data_len;
                else
                        scst_set_resp_data_len(cmd, ereply->resp_data_len);
        }

        cmd->status = ereply->status;
        if (ereply->sense_len != 0) {
                /* copy_from_user() returns the number of bytes NOT copied */
                res = copy_from_user(cmd->sense_buffer,
                        (void*)(unsigned long)ereply->psense_buffer,
                        min(sizeof(cmd->sense_buffer),
                                (unsigned int)ereply->sense_len));
                if (res != 0) {
                        PRINT_ERROR("%s", "Unable to get sense data");
                        res = -EFAULT;
                        goto out_hwerr_res_set;
                }
        }

out_compl:
        cmd->completed = 1;
        cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
        /* !! At this point cmd can be already freed !! */

out:
        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid exec_reply parameter(s)");

out_hwerr:
        res = -EINVAL;

out_hwerr_res_set:
        if (ucmd->background_exec) {
                ucmd_put(ucmd);
                goto out;
        } else {
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                goto out_compl;
        }

out_busy:
        scst_set_busy(cmd);
        goto out_compl;
}

static int dev_user_process_reply(struct scst_user_dev *dev,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_cmd *ucmd;
        int state;

        TRACE_ENTRY();

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd = __ucmd_find_hash(dev, reply->cmd_h);
        if (ucmd == NULL) {
                TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
                res = -ESRCH;
                goto out_unlock;
        }

        if (ucmd->background_exec) {
                state = UCMD_STATE_EXECING;
                goto unlock_process;
        }

        if (unlikely(!(ucmd->state & UCMD_STATE_SENT_MASK))) {
                if (ucmd->state & UCMD_STATE_JAMMED_MASK) {
                        TRACE_MGMT_DBG("Reply on jammed ucmd %p, ignoring",
                                ucmd);
                } else {
                        TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
                                "state %x", ucmd, ucmd->state);
                        res = -EBUSY;
                }
                goto out_unlock;
        }

        if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
                goto out_wrong_state;

        if (unlikely(_IOC_NR(reply->subcode) !=
                        (ucmd->state & ~UCMD_STATE_SENT_MASK)))
                goto out_wrong_state;

        ucmd->state &= ~UCMD_STATE_SENT_MASK;
        state = ucmd->state;
        ucmd->state |= UCMD_STATE_RECV_MASK;

unlock_process:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        switch (state) {
        case UCMD_STATE_PARSING:
                res = dev_user_process_reply_parse(ucmd, reply);
                break;

        case UCMD_STATE_BUF_ALLOCING:
                res = dev_user_process_reply_alloc(ucmd, reply);
                break;

        case UCMD_STATE_EXECING:
                res = dev_user_process_reply_exec(ucmd, reply);
                break;

        case UCMD_STATE_ON_FREEING:
                res = dev_user_process_reply_on_free(ucmd);
                break;

        case UCMD_STATE_ON_CACHE_FREEING:
                res = dev_user_process_reply_on_cache_free(ucmd);
                break;

        case UCMD_STATE_TM_EXECING:
                res = dev_user_process_reply_tm_exec(ucmd, reply->result);
                break;

        case UCMD_STATE_ATTACH_SESS:
        case UCMD_STATE_DETACH_SESS:
                res = dev_user_process_reply_sess(ucmd, reply->result);
                break;

        default:
                sBUG();
                break;
        }
out:
        TRACE_EXIT_RES(res);
        return res;

out_wrong_state:
        PRINT_ERROR("Command's %p subcode %x doesn't match internal "
                "command's state %x or reply->subcode (%x) != ucmd->subcode "
                "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
                reply->subcode, ucmd->user_cmd.subcode);
        res = -EINVAL;
        dev_user_unjam_cmd(ucmd, 0, NULL);

out_unlock:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
        goto out;
}

static int dev_user_reply_cmd(struct file *file, unsigned long arg)
{
        int res = 0;
        struct scst_user_dev *dev;
        struct scst_user_reply_cmd *reply;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev*)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        reply = kzalloc(sizeof(*reply), GFP_KERNEL);
        if (reply == NULL) {
                res = -ENOMEM;
                goto out_up;
        }

        /* copy_from_user() returns the number of bytes NOT copied */
        res = copy_from_user(reply, (void*)arg, sizeof(*reply));
        if (res != 0) {
                res = -EFAULT;
                goto out_free;
        }

        TRACE_BUFFER("Reply", reply, sizeof(*reply));

        res = dev_user_process_reply(dev, reply);
        if (res < 0)
                goto out_free;

out_free:
        kfree(reply);

out_up:
        up_read(&dev->dev_rwsem);

out:
        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_scst_commands(struct scst_user_dev *dev)
{
        int res = 0;

        TRACE_ENTRY();

        while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
                struct scst_cmd *cmd = list_entry(
                        dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
                        cmd_list_entry);
                TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
                list_del(&cmd->cmd_list_entry);
                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);
                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                res++;
        }

        TRACE_EXIT_RES(res);
        return res;
}

/* Called under cmd_lists.cmd_list_lock and IRQ off */
struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
{
        struct scst_user_cmd *u;

again:
        u = NULL;
        if (!list_empty(cmd_list)) {
                u = list_entry(cmd_list->next, typeof(*u), ready_cmd_list_entry);
                TRACE_DBG("Found ready ucmd %p", u);
                list_del(&u->ready_cmd_list_entry);
                EXTRACHECKS_BUG_ON(u->state & UCMD_STATE_JAMMED_MASK);
                if (u->cmd != NULL) {
                        if (u->state == UCMD_STATE_EXECING) {
                                struct scst_user_dev *dev = u->dev;
                                int rc;
                                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
                                rc = scst_check_local_events(u->cmd);
                                if (unlikely(rc != 0)) {
                                        u->cmd->scst_cmd_done(u->cmd,
                                                SCST_CMD_STATE_DEFAULT);
                                        /*
                                         * !! At this point cmd & u can be !!
                                         * !! already freed                !!
                                         */
                                        spin_lock_irq(
                                                &dev->cmd_lists.cmd_list_lock);
                                        goto again;
                                }
                                /*
                                 * There is no real need to lock again here, but
                                 * let's do it for simplicity.
                                 */
                                spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
                        } else if (unlikely(test_bit(SCST_CMD_ABORTED,
                                        &u->cmd->cmd_flags))) {
                                switch (u->state) {
                                case UCMD_STATE_PARSING:
                                case UCMD_STATE_BUF_ALLOCING:
                                        TRACE_MGMT_DBG("Aborting ucmd %p", u);
                                        dev_user_unjam_cmd(u, 0, NULL);
                                        goto again;
                                case UCMD_STATE_EXECING:
                                        EXTRACHECKS_BUG_ON(1);
                                }
                        }
                }
                u->state |= UCMD_STATE_SENT_MASK;
        }
        return u;
}
1524
1525 static inline int test_cmd_lists(struct scst_user_dev *dev)
1526 {
1527         int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
1528                   !list_empty(&dev->ready_cmd_list) ||
1529                   !dev->blocking || dev->cleanup_done ||
1530                   signal_pending(current);
1531         return res;
1532 }
1533
1534 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1535 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
1536         struct scst_user_cmd **ucmd)
1537 {
1538         int res = 0;
1539         wait_queue_t wait;
1540
1541         TRACE_ENTRY();
1542
1543         init_waitqueue_entry(&wait, current);
1544
1545         while(1) {
1546                 if (!test_cmd_lists(dev)) {
1547                         add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
1548                                 &wait);
1549                         for (;;) {
1550                                 set_current_state(TASK_INTERRUPTIBLE);
1551                                 if (test_cmd_lists(dev))
1552                                         break;
1553                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1554                                 schedule();
1555                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1556                         }
1557                         set_current_state(TASK_RUNNING);
1558                         remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
1559                                 &wait);
1560                 }
1561
1562                 dev_user_process_scst_commands(dev);
1563
1564                 *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
1565                 if (*ucmd != NULL)
1566                         break;
1567
1568                 if (!dev->blocking || dev->cleanup_done) {
1569                         res = -EAGAIN;
1570                         TRACE_DBG("No ready commands, returning %d", res);
1571                         break;
1572                 }
1573
1574                 if (signal_pending(current)) {
1575                         res = -EINTR;
1576                         TRACE_DBG("Signal pending, returning %d", res);
1577                         break;
1578                 }
1579         }
1580
1581         TRACE_EXIT_RES(res);
1582         return res;
1583 }
1584
1585 static inline int test_prio_cmd_list(struct scst_user_dev *dev)
1586 {
1587         /*
1588          * The prio queue is always blocking, because poll() doesn't seem
1589          * to support different threads waiting with different event
1590          * masks. Only one thread is woken up on each event and if it
1591          * isn't interested in such events, another (interested) one
1592          * will not be woken up. Unclear if it's a bug or a feature.
1593          */
1594         int res = !list_empty(&dev->prio_ready_cmd_list) ||
1595                   dev->cleaning || dev->cleanup_done ||
1596                   signal_pending(current);
1597         return res;
1598 }
1599
1600 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1601 static int dev_user_get_next_prio_cmd(struct scst_user_dev *dev,
1602         struct scst_user_cmd **ucmd)
1603 {
1604         int res = 0;
1605         wait_queue_t wait;
1606
1607         TRACE_ENTRY();
1608
1609         init_waitqueue_entry(&wait, current);
1610
1611         while(1) {
1612                 if (!test_prio_cmd_list(dev)) {
1613                         add_wait_queue_exclusive(&dev->prio_cmd_list_waitQ,
1614                                 &wait);
1615                         for (;;) {
1616                                 set_current_state(TASK_INTERRUPTIBLE);
1617                                 if (test_prio_cmd_list(dev))
1618                                         break;
1619                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1620                                 schedule();
1621                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1622                         }
1623                         set_current_state(TASK_RUNNING);
1624                         remove_wait_queue(&dev->prio_cmd_list_waitQ, &wait);
1625                 }
1626
1627                 *ucmd = __dev_user_get_next_cmd(&dev->prio_ready_cmd_list);
1628                 if (*ucmd != NULL)
1629                         break;
1630
1631                 if (dev->cleaning || dev->cleanup_done) {
1632                         res = -EAGAIN;
1633                         TRACE_DBG("No ready commands, returning %d", res);
1634                         break;
1635                 }
1636
1637                 if (signal_pending(current)) {
1638                         res = -EINTR;
1639                         TRACE_DBG("Signal pending, returning %d", res);
1640                         break;
1641                 }
1642         }
1643
1644         TRACE_EXIT_RES(res);
1645         return res;
1646 }
1647
1648 static int dev_user_reply_get_cmd(struct file *file, unsigned long arg,
1649         int prio)
1650 {
1651         int res = 0;
1652         struct scst_user_dev *dev;
1653         struct scst_user_get_cmd *cmd;
1654         struct scst_user_reply_cmd *reply;
1655         struct scst_user_cmd *ucmd;
1656         uint64_t ureply;
1657
1658         TRACE_ENTRY();
1659
1660         mutex_lock(&dev_priv_mutex);
1661         dev = (struct scst_user_dev*)file->private_data;
1662         res = dev_user_check_reg(dev);
1663         if (res != 0) {
1664                 mutex_unlock(&dev_priv_mutex);
1665                 goto out;
1666         }
1667         down_read(&dev->dev_rwsem);
1668         mutex_unlock(&dev_priv_mutex);
1669
1670         res = copy_from_user(&ureply, (void*)arg, sizeof(ureply)) ? -EFAULT : 0;
1671         if (res != 0)
1672                 goto out_up;
1673
1674         TRACE_DBG("ureply %Ld", ureply);
1675
1676         cmd = kzalloc(max(sizeof(*cmd), sizeof(*reply)), GFP_KERNEL);
1677         if (cmd == NULL) {
1678                 res = -ENOMEM;
1679                 goto out_up;
1680         }
1681
1682         if (ureply != 0) {
1683                 unsigned long u = (unsigned long)ureply;
1684                 reply = (struct scst_user_reply_cmd*)cmd;
1685                 res = copy_from_user(reply, (void*)u, sizeof(*reply)) ? -EFAULT : 0;
1686                 if (res != 0)
1687                         goto out_free;
1688
1689                 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1690
1691                 res = dev_user_process_reply(dev, reply);
1692                 if (res < 0)
1693                         goto out_free;
1694         }
1695
1696         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1697         if (prio && (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE))
1698                 res = dev_user_get_next_prio_cmd(dev, &ucmd);
1699         else
1700                 res = dev_user_get_next_cmd(dev, &ucmd);
1701         if (res == 0) {
1702                 *cmd = ucmd->user_cmd;
1703                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1704                 TRACE_BUFFER("UCMD", cmd, sizeof(*cmd));
1705                 res = copy_to_user((void*)arg, cmd, sizeof(*cmd)) ? -EFAULT : 0;
1706         } else
1707                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1708
1709 out_free:
1710         kfree(cmd);
1711
1712 out_up:
1713         up_read(&dev->dev_rwsem);
1714
1715 out:
1716         TRACE_EXIT_RES(res);
1717         return res;
1718 }
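
/*
 * For illustration, a minimal user space loop driving the ioctl above
 * might look as follows (a sketch only: the device node path and
 * process_cmd() are hypothetical, and the preply field name is assumed
 * from scst_user.h; error handling omitted). On each iteration the
 * previous reply is passed in preply and the next command is returned
 * in the same buffer:
 *
 *	struct scst_user_get_cmd get;
 *	struct scst_user_reply_cmd reply;
 *	int fd = open("/dev/scst_user", O_RDWR);
 *
 *	get.preply = 0;
 *	while (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get) == 0) {
 *		process_cmd(&get, &reply);
 *		get.preply = (unsigned long)&reply;
 *	}
 */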
1719
1720 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1721         unsigned long arg)
1722 {
1723         long res;
1724
1725         TRACE_ENTRY();
1726
1727         switch (cmd) {
1728         case SCST_USER_REPLY_AND_GET_CMD:
1729                 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1730                 res = dev_user_reply_get_cmd(file, arg, 0);
1731                 break;
1732
1733         case SCST_USER_REPLY_CMD:
1734                 TRACE_DBG("%s", "REPLY_CMD");
1735                 res = dev_user_reply_cmd(file, arg);
1736                 break;
1737
1738         case SCST_USER_REPLY_AND_GET_PRIO_CMD:
1739                 TRACE_DBG("%s", "REPLY_AND_GET_PRIO_CMD");
1740                 res = dev_user_reply_get_cmd(file, arg, 1);
1741                 break;
1742
1743         case SCST_USER_REGISTER_DEVICE:
1744         {
1745                 struct scst_user_dev_desc *dev_desc;
1746                 TRACE_DBG("%s", "REGISTER_DEVICE");
1747                 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1748                 if (dev_desc == NULL) {
1749                         res = -ENOMEM;
1750                         goto out;
1751                 }
1752                 res = copy_from_user(dev_desc, (void*)arg, sizeof(*dev_desc)) ? -EFAULT : 0;
1753                 if (res != 0) {
1754                         kfree(dev_desc);
1755                         goto out;
1756                 }
1757                 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1758                 res = dev_user_register_dev(file, dev_desc);
1759                 kfree(dev_desc);
1760                 break;
1761         }
1762
1763         case SCST_USER_SET_OPTIONS:
1764         {
1765                 struct scst_user_opt opt;
1766                 TRACE_DBG("%s", "SET_OPTIONS");
1767                 res = copy_from_user(&opt, (void*)arg, sizeof(opt)) ? -EFAULT : 0;
1768                 if (res != 0)
1769                         goto out;
1770                 TRACE_BUFFER("opt", &opt, sizeof(opt));
1771                 res = dev_user_set_opt(file, &opt);
1772                 break;
1773         }
1774
1775         case SCST_USER_GET_OPTIONS:
1776                 TRACE_DBG("%s", "GET_OPTIONS");
1777                 res = dev_user_get_opt(file, (void*)arg);
1778                 break;
1779
1780         default:
1781                 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
1782                 res = -EINVAL;
1783                 goto out;
1784         }
1785
1786 out:
1787         TRACE_EXIT_RES(res);
1788         return res;
1789 }
1790
1791 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
1792 {
1793         int res = 0;
1794         struct scst_user_dev *dev;
1795
1796         TRACE_ENTRY();
1797
1798         mutex_lock(&dev_priv_mutex);
1799         dev = (struct scst_user_dev*)file->private_data;
1800         res = dev_user_check_reg(dev);
1801         if (res != 0) {
1802                 mutex_unlock(&dev_priv_mutex);
1803                 goto out;
1804         }
1805         down_read(&dev->dev_rwsem);
1806         mutex_unlock(&dev_priv_mutex);
1807
1808         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1809
1810         if (!list_empty(&dev->ready_cmd_list) ||
1811             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1812                 res |= POLLIN | POLLRDNORM;
1813                 goto out_unlock;
1814         }
1815
1816         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1817
1818         TRACE_DBG("Before poll_wait() (dev %p)", dev);
1819         poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
1820         TRACE_DBG("After poll_wait() (dev %p)", dev);
1821
1822         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1823
1824         if (!list_empty(&dev->ready_cmd_list) ||
1825             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1826                 res |= POLLIN | POLLRDNORM;
1827                 goto out_unlock;
1828         }
1829
1830 out_unlock:
1831         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1832
1833         up_read(&dev->dev_rwsem);
1834
1835 out:
1836         TRACE_EXIT_HRES(res);
1837         return res;
1838 }
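
/*
 * A handler serving several devices could (hypothetically) multiplex
 * them with poll() before fetching commands, one open descriptor per
 * registered device; a sketch, assuming the descriptors were opened
 * with O_NONBLOCK so that the get-command ioctl doesn't block:
 *
 *	struct pollfd pfd = { .fd = dev_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		fetch_and_process_cmds(dev_fd);
 *
 * fetch_and_process_cmds() stands for a loop like the one sketched
 * after dev_user_reply_get_cmd() above.
 */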
1839
1840 /*
1841  * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reacquire.
1842  */
1843 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
1844         unsigned long *flags)
1845 {
1846         int state = ucmd->state & ~UCMD_STATE_MASK;
1847         struct scst_user_dev *dev = ucmd->dev;
1848
1849         TRACE_ENTRY();
1850
1851         if (ucmd->state & UCMD_STATE_JAMMED_MASK)
1852                 goto out;
1853
1854         TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
1855                 ucmd->state);
1856
1857         ucmd->state = state | UCMD_STATE_JAMMED_MASK;
1858
1859         switch(state) {
1860         case UCMD_STATE_PARSING:
1861         case UCMD_STATE_BUF_ALLOCING:
1862                 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1863                         ucmd->aborted = 1;
1864                 else {
1865                         if (busy)
1866                                 scst_set_busy(ucmd->cmd);
1867                         else
1868                                 scst_set_cmd_error(ucmd->cmd,
1869                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1870                 }
1871                 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
1872                 list_add(&ucmd->cmd->cmd_list_entry,
1873                         &ucmd->cmd->cmd_lists->active_cmd_list);
1874                 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
1875                 break;
1876
1877         case UCMD_STATE_EXECING:
1878                 if (flags != NULL)
1879                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1880                 else
1881                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1882                 
1883                 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
1884
1885                 if (test_bit(SCST_CMD_ABORTED,  &ucmd->cmd->cmd_flags))
1886                         ucmd->aborted = 1;
1887                 else {
1888                         if (busy)
1889                                 scst_set_busy(ucmd->cmd);
1890                         else
1891                                 scst_set_cmd_error(ucmd->cmd,
1892                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1893                 }
1894
1895                 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT);
1896                 /* !! At this point cmd and ucmd can already be freed !! */
1897
1898                 if (flags != NULL)
1899                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1900                 else
1901                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1902                 break;
1903
1904         case UCMD_STATE_ON_FREEING:
1905         case UCMD_STATE_ON_CACHE_FREEING:
1906         case UCMD_STATE_TM_EXECING:
1907         case UCMD_STATE_ATTACH_SESS:
1908         case UCMD_STATE_DETACH_SESS:
1909         {
1910                 if (flags != NULL)
1911                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1912                 else
1913                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1914
1915                 switch(state) {
1916                 case UCMD_STATE_ON_FREEING:
1917                         dev_user_process_reply_on_free(ucmd);
1918                         break;
1919
1920                 case UCMD_STATE_ON_CACHE_FREEING:
1921                         dev_user_process_reply_on_cache_free(ucmd);
1922                         break;
1923
1924                 case UCMD_STATE_TM_EXECING:
1925                         dev_user_process_reply_tm_exec(ucmd, SCST_MGMT_STATUS_FAILED);
1926                         break;
1927
1928                 case UCMD_STATE_ATTACH_SESS:
1929                 case UCMD_STATE_DETACH_SESS:
1930                         dev_user_process_reply_sess(ucmd, -EFAULT);
1931                         break;
1932                 }
1933
1934                 if (flags != NULL)
1935                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1936                 else
1937                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1938                 break;
1939         }
1940
1941         default:
1942                 PRINT_ERROR("Wrong ucmd state %x", state);
1943                 sBUG();
1944                 break;
1945         }
1946
1947 out:
1948         TRACE_EXIT();
1949         return;
1950 }
1951
1952 static int __unjam_check_tgt_dev(struct scst_user_cmd *ucmd, int state,
1953         struct scst_tgt_dev *tgt_dev)
1954 {
1955         int res = 0;
1956
1957         if (ucmd->cmd == NULL)
1958                 goto out;
1959
1960         if (ucmd->cmd->tgt_dev != tgt_dev)
1961                 goto out;
1962
1963         switch(state & ~UCMD_STATE_MASK) {
1964         case UCMD_STATE_PARSING:
1965         case UCMD_STATE_BUF_ALLOCING:
1966         case UCMD_STATE_EXECING:
1967                 break;
1968         default:
1969                 goto out;
1970         }
1971
1972         res = 1;
1973 out:
1974         return res;
1975 }
1976
1977 static int __unjam_check_tm(struct scst_user_cmd *ucmd, int state)
1978 {
1979         int res = 0;
1980
1981         switch(state & ~UCMD_STATE_MASK) {
1982         case UCMD_STATE_PARSING:
1983         case UCMD_STATE_BUF_ALLOCING:
1984         case UCMD_STATE_EXECING:
1985                 if ((ucmd->cmd != NULL) &&
1986                     (!test_bit(SCST_CMD_ABORTED,
1987                                 &ucmd->cmd->cmd_flags)))
1988                         goto out;
1989                 break;
1990         default:
1991                 goto out;
1992         }
1993
1994         res = 1;
1995 out:
1996         return res;
1997 }
1998
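/*
 * Walk all outstanding ucmds of the device and unjam the matching ones:
 * every sent command (tm == 0 and tgt_dev == NULL), only aborted or
 * orphaned ones (tm != 0), or only those of the given session
 * (tgt_dev != NULL).
 */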
1999 static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
2000         struct scst_tgt_dev *tgt_dev)
2001 {
2002         int i;
2003         unsigned long flags;
2004         struct scst_user_cmd *ucmd;
2005
2006         TRACE_ENTRY();
2007
2008         TRACE_MGMT_DBG("Unjamming dev %p", dev);
2009
2010         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2011
2012 repeat:
2013         for(i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2014                 struct list_head *head = &dev->ucmd_hash[i];
2015                 list_for_each_entry(ucmd, head, hash_list_entry) {
2016                         TRACE_DBG("ALL: ucmd %p, state %x, scst_cmd %p",
2017                                 ucmd, ucmd->state, ucmd->cmd);
2018                         if (ucmd->state & UCMD_STATE_SENT_MASK) {
2019                                 int st = ucmd->state & ~UCMD_STATE_SENT_MASK;
2020                                 if (tgt_dev != NULL) {
2021                                         if (__unjam_check_tgt_dev(ucmd, st, 
2022                                                         tgt_dev) == 0)
2023                                                 continue;
2024                                 } else if (tm) {
2025                                         if (__unjam_check_tm(ucmd, st) == 0)
2026                                                 continue;
2027                                 }
2028                                 dev_user_unjam_cmd(ucmd, 0, &flags);
2029                                 goto repeat;
2030                         }
2031                 }
2032         }
2033
2034         if ((tgt_dev != NULL) || tm) {
2035                 list_for_each_entry(ucmd, &dev->ready_cmd_list,
2036                                 ready_cmd_list_entry) {
2037                         TRACE_DBG("READY: ucmd %p, state %x, scst_cmd %p",
2038                                 ucmd, ucmd->state, ucmd->cmd);
2039                         if (tgt_dev != NULL) {
2040                                 if (__unjam_check_tgt_dev(ucmd, ucmd->state,
2041                                                 tgt_dev) == 0)
2042                                         continue;
2043                         } else if (tm) {
2044                                 if (__unjam_check_tm(ucmd, ucmd->state) == 0)
2045                                         continue;
2046                         }
2047                         list_del(&ucmd->ready_cmd_list_entry);
2048                         dev_user_unjam_cmd(ucmd, 0, &flags);
2049                         goto repeat;
2050                 }
2051         }
2052
2053         if (dev_user_process_scst_commands(dev) != 0)
2054                 goto repeat;
2055
2056         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2057
2058         TRACE_EXIT();
2059         return;
2060 }
2061
2062 /**
2063  ** To deal with user space handler hangups we rely on the remote
2064  ** initiators, which are supposed to issue a task management command
2065  ** if a command doesn't respond for too long; on that event we can
2066  ** "unjam" the command. To prevent a TM command itself from stalling,
2067  ** we use a timer. To prevent too many queued TM commands, we enqueue
2068  ** at most 2 of them: the first with the requested TM function, the
2069  ** second with TARGET_RESET as the most comprehensive function.
2070  **
2071  ** The only exception here is the DETACH_SESS subcode, where no TM
2072  ** commands can be expected, so after a timeout we have to manually
2073  ** "unjam" all the commands on the device.
2074  **
2075  ** We also don't queue more than one ATTACH_SESS command and fail it after a timeout.
2076  **/
2077
2078 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
2079         int status)
2080 {
2081         int res = 0;
2082         unsigned long flags;
2083
2084         TRACE_ENTRY();
2085
2086         TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
2087                 ucmd->user_cmd.tm_cmd.fn, status);
2088
2089         ucmd->result = status;
2090
2091         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2092
2093         if (ucmd->internal_reset_tm) {
2094                 TRACE_MGMT_DBG("Internal TM ucmd %p finished", ucmd);
2095                 ucmd->dev->internal_reset_active = 0;
2096         } else {
2097                 TRACE_MGMT_DBG("TM ucmd %p finished", ucmd);
2098                 ucmd->dev->tm_cmd_active = 0;
2099         }
2100
2101         if (ucmd->cmpl != NULL)
2102                 complete_all(ucmd->cmpl);
2103
2104         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2105
2106         ucmd_put(ucmd);
2107
2108         TRACE_EXIT_RES(res);
2109         return res;
2110 }
2111
2112 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd, 
2113         struct scst_tgt_dev *tgt_dev)
2114 {
2115         int res, rc;
2116         struct scst_user_cmd *ucmd;
2117         struct scst_user_dev *dev = (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2118         struct scst_user_cmd *ucmd_to_abort = NULL;
2119
2120         TRACE_ENTRY();
2121
2122         /* We can't afford missing TM command due to memory shortage */
2123         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2124         ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL|__GFP_NOFAIL);
2125
2126         init_completion(ucmd->cmpl);
2127
2128         ucmd->user_cmd.cmd_h = ucmd->h;
2129         ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2130         ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2131         ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2132         ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
2133         ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
2134
2135         if (mcmd->cmd_to_abort != NULL) {
2136                 ucmd_to_abort = (struct scst_user_cmd*)mcmd->cmd_to_abort->dh_priv;
2137                 if (ucmd_to_abort != NULL)
2138                         ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2139         }
2140
2141         TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2142                 "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
2143                 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2144                 ucmd->user_cmd.tm_cmd.cmd_h_to_abort);
2145
2146         ucmd->state = UCMD_STATE_TM_EXECING;
2147
2148         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2149         if (dev->internal_reset_active) {
2150                 PRINT_ERROR("Loosing TM cmd %d, because there are other "
2151                         "unprocessed TM commands", mcmd->fn);
2152                 res = SCST_MGMT_STATUS_FAILED;
2153                 goto out_locked_free;
2154         } else if (dev->tm_cmd_active) {
2155                 /*
2156                  * We are going to miss some TM commands, so replace it
2157                  * by the hardest one.
2158                  */
2159                 PRINT_ERROR("Replacing TM cmd %d by TARGET_RESET, because "
2160                         "there is another unprocessed TM command", mcmd->fn);
2161                 ucmd->user_cmd.tm_cmd.fn = SCST_TARGET_RESET;
2162                 ucmd->internal_reset_tm = 1;
2163                 dev->internal_reset_active = 1;
2164         } else
2165                 dev->tm_cmd_active = 1;
2166         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2167
2168         ucmd_get(ucmd, 0);
2169         dev_user_add_to_ready(ucmd);
2170
2171         /*
2172          * Since the user space handler should not wait for affecting tasks to
2173          * complete it shall complete the TM request ASAP, otherwise the device
2174          * will be considered stalled.
2175          */
2176         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_TM_TIMEOUT);
2177         if (rc > 0)
2178                 res = ucmd->result;
2179         else {
2180                 PRINT_ERROR("Task management command %p timeout", ucmd);
2181                 res = SCST_MGMT_STATUS_FAILED;
2182         }
2183
2184         sBUG_ON(irqs_disabled());
2185
2186         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2187
2188 out_locked_free:
2189         kfree(ucmd->cmpl);
2190         ucmd->cmpl = NULL;
2191         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2192
2193         dev_user_unjam_dev(ucmd->dev, 1, NULL);
2194
2195         ucmd_put(ucmd);
2196
2197         TRACE_EXIT_RES(res);
2198         return res;
2199 }
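
/*
 * The user space handler must complete the TM command quickly (see the
 * comment before wait_for_completion_timeout() above). A hedged sketch
 * of the expected reply; the result field name is an assumption based
 * on how TM replies are consumed in dev_user_process_reply_tm_exec():
 *
 *	reply.cmd_h = get.cmd_h;
 *	reply.subcode = SCST_USER_TASK_MGMT;
 *	reply.result = SCST_MGMT_STATUS_SUCCESS;
 *	ioctl(fd, SCST_USER_REPLY_CMD, &reply);
 */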
2200
2201 static int dev_user_attach(struct scst_device *sdev)
2202 {
2203         int res = 0;
2204         struct scst_user_dev *dev = NULL, *d;
2205
2206         TRACE_ENTRY();
2207
2208         spin_lock(&dev_list_lock);
2209         list_for_each_entry(d, &dev_list, dev_list_entry) {
2210                 if (strcmp(d->name, sdev->virt_name) == 0) {
2211                         dev = d;
2212                         break;
2213                 }
2214         }
2215         spin_unlock(&dev_list_lock);
2216         if (dev == NULL) {
2217                 PRINT_ERROR("Device %s not found", sdev->virt_name);
2218                 res = -EINVAL;
2219                 goto out;
2220         }
2221
2222         sdev->p_cmd_lists = &dev->cmd_lists;
2223         sdev->dh_priv = dev;
2224         sdev->tst = dev->tst;
2225         sdev->queue_alg = dev->queue_alg;
2226         sdev->swp = dev->swp;
2227         sdev->tas = dev->tas;
2228         sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2229
2230         dev->sdev = sdev;
2231
2232         PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2233                 dev->name);
2234
2235 out:
2236         TRACE_EXIT_RES(res);
2237         return res;
2238 }
2239
2240 static void dev_user_detach(struct scst_device *sdev)
2241 {
2242         struct scst_user_dev *dev = (struct scst_user_dev*)sdev->dh_priv;
2243
2244         TRACE_ENTRY();
2245
2246         TRACE_DBG("virt_id %d", sdev->virt_id);
2247
2248         PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2249                 dev->name);
2250
2251         /* dev will be freed by the caller */
2252         sdev->dh_priv = NULL;
2253         dev->sdev = NULL;
2254         
2255         TRACE_EXIT();
2256         return;
2257 }
2258
2259 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2260 {
2261         int res = 0;
2262         unsigned long flags;
2263
2264         TRACE_ENTRY();
2265
2266         TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2267
2268         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2269
2270         if ((ucmd->state & ~UCMD_STATE_MASK) ==
2271                         UCMD_STATE_ATTACH_SESS) {
2272                 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2273                 ucmd->result = status;
2274                 ucmd->dev->attach_cmd_active = 0;
2275         } else if ((ucmd->state & ~UCMD_STATE_MASK) ==
2276                         UCMD_STATE_DETACH_SESS) {
2277                 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2278                 ucmd->dev->detach_cmd_count--;
2279         } else
2280                 sBUG();
2281
2282         if (ucmd->cmpl != NULL)
2283                 complete_all(ucmd->cmpl);
2284
2285         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2286
2287         ucmd_put(ucmd);
2288
2289         TRACE_EXIT_RES(res);
2290         return res;
2291 }
2292
2293 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2294 {
2295         struct scst_user_dev *dev =
2296                 (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2297         int res = 0, rc;
2298         struct scst_user_cmd *ucmd;
2299
2300         TRACE_ENTRY();
2301
2302         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2303         if (ucmd == NULL)
2304                 goto out_nomem;
2305
2306         ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL);
2307         if (ucmd->cmpl == NULL)
2308                 goto out_put_nomem;
2309
2310         init_completion(ucmd->cmpl);
2311
2312         ucmd->user_cmd.cmd_h = ucmd->h;
2313         ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2314         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2315         ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2316         ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2317         ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
2318         strncpy(ucmd->user_cmd.sess.initiator_name,
2319                 tgt_dev->sess->initiator_name,
2320                 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2321         ucmd->user_cmd.sess.initiator_name[
2322                 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2323
2324         TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %Lx, LUN %Lx, "
2325                 "threads_num %d, rd_only_flag %d, initiator %s)", ucmd, ucmd->h,
2326                 ucmd->user_cmd.sess.sess_h, ucmd->user_cmd.sess.lun,
2327                 ucmd->user_cmd.sess.threads_num, ucmd->user_cmd.sess.rd_only,
2328                 ucmd->user_cmd.sess.initiator_name);
2329
2330         ucmd->state = UCMD_STATE_ATTACH_SESS;
2331
2332         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2333         if (dev->attach_cmd_active) {
2334                 PRINT_ERROR("%s", "ATTACH_SESS command failed, because "
2335                         "there is another unprocessed ATTACH_SESS command");
2336                 res = -EBUSY;
2337                 goto out_locked_free;
2338         }
2339         dev->attach_cmd_active = 1;
2340         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2341
2342         ucmd_get(ucmd, 0);
2343         dev_user_add_to_ready(ucmd);
2344
2345         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2346         if (rc > 0)
2347                 res = ucmd->result;
2348         else {
2349                 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2350                 res = -EFAULT;
2351         }
2352
2353         sBUG_ON(irqs_disabled());
2354
2355         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2356 out_locked_free:
2357         kfree(ucmd->cmpl);
2358         ucmd->cmpl = NULL;
2359         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2360
2361         ucmd_put(ucmd);
2362
2363 out:
2364         TRACE_EXIT_RES(res);
2365         return res;
2366
2367 out_put_nomem:
2368         ucmd_put(ucmd);
2369
2370 out_nomem:
2371         res = -ENOMEM;
2372         goto out;
2373 }
2374
2375 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2376 static void dev_user_pre_unreg_sess_work_fn(void *p)
2377 #else
2378 static void dev_user_pre_unreg_sess_work_fn(struct work_struct *work)
2379 #endif
2380 {
2381 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2382         struct scst_user_pre_unreg_sess_obj *pd = (struct scst_user_pre_unreg_sess_obj*)p;
2383 #else
2384         struct scst_user_pre_unreg_sess_obj *pd = container_of(
2385                 (struct delayed_work*)work, struct scst_user_pre_unreg_sess_obj,
2386                 pre_unreg_sess_work);
2387 #endif
2388         struct scst_user_dev *dev =
2389                 (struct scst_user_dev*)pd->tgt_dev->dev->dh_priv;
2390
2391         TRACE_ENTRY();
2392
2393         TRACE_MGMT_DBG("Unreg sess: unjaming dev %p (tgt_dev %p)", dev,
2394                 pd->tgt_dev);
2395
2396         pd->active = 1;
2397
2398         dev_user_unjam_dev(dev, 0, pd->tgt_dev);
2399
2400         if (!pd->exit) {
2401                 TRACE_MGMT_DBG("Rescheduling pre_unreg_sess work %p (dev %p, "
2402                         "tgt_dev %p)", pd, dev, pd->tgt_dev);
2403                 schedule_delayed_work(&pd->pre_unreg_sess_work,
2404                         DEV_USER_PRE_UNREG_POLL_TIME);
2405         }
2406
2407         TRACE_EXIT();
2408         return;
2409 }
2410
2411 static void dev_user_pre_unreg_sess(struct scst_tgt_dev *tgt_dev)
2412 {
2413         struct scst_user_dev *dev =
2414                 (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2415         struct scst_user_pre_unreg_sess_obj *pd;
2416
2417         TRACE_ENTRY();
2418
2419         /* We can't afford missing DETACH command due to memory shortage */
2420         pd = kzalloc(sizeof(*pd), GFP_KERNEL|__GFP_NOFAIL);
2421
2422         pd->tgt_dev = tgt_dev;
2423 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2424         INIT_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn, pd);
2425 #else
2426         INIT_DELAYED_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn);
2427 #endif
2428
2429         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2430         dev->pre_unreg_sess_active = 1;
2431         list_add_tail(&pd->pre_unreg_sess_list_entry, &dev->pre_unreg_sess_list);
2432         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2433
2434         TRACE_MGMT_DBG("Scheduling pre_unreg_sess work %p (dev %p, tgt_dev %p)",
2435                 pd, dev, pd->tgt_dev);
2436
2437         schedule_delayed_work(&pd->pre_unreg_sess_work, DEV_USER_DETACH_TIMEOUT);
2438
2439         TRACE_EXIT();
2440         return;
2441 }
2442
2443 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2444 {
2445         struct scst_user_dev *dev =
2446                 (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2447         struct scst_user_cmd *ucmd;
2448         struct scst_user_pre_unreg_sess_obj *pd = NULL, *p;
2449
2450         TRACE_ENTRY();
2451
2452         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2453         list_for_each_entry(p, &dev->pre_unreg_sess_list,
2454                         pre_unreg_sess_list_entry) {
2455                 if (p->tgt_dev == tgt_dev) {
2456                         list_del(&p->pre_unreg_sess_list_entry);
2457                         if (list_empty(&dev->pre_unreg_sess_list))
2458                                 dev->pre_unreg_sess_active = 0;
2459                         pd = p;
2460                         break;
2461                 }
2462         }
2463         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2464
2465         if (pd != NULL) {
2466                 pd->exit = 1;
2467                 TRACE_MGMT_DBG("Canceling pre unreg work %p", pd);
2468                 cancel_delayed_work(&pd->pre_unreg_sess_work);
2469                 flush_scheduled_work();
2470                 kfree(pd);
2471         }
2472
2473         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2474         if (ucmd == NULL)
2475                 goto out;
2476
2477         TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %Lx)", ucmd,
2478                 ucmd->h, ucmd->user_cmd.sess.sess_h);
2479
2480         ucmd->user_cmd.cmd_h = ucmd->h;
2481         ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2482         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2483
2484         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2485         dev->detach_cmd_count++;
2486         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2487
2488         ucmd->state = UCMD_STATE_DETACH_SESS;
2489
2490         dev_user_add_to_ready(ucmd);
2491
2492 out:
2493         TRACE_EXIT();
2494         return;
2495 }
2496
2497 /* No locks are needed, but the activity must be suspended */
2498 static void dev_user_setup_functions(struct scst_user_dev *dev)
2499 {
2500         TRACE_ENTRY();
2501
2502         dev->devtype.parse = dev_user_parse;
2503         dev->devtype.dev_done = NULL;
2504
2505         if (dev->parse_type != SCST_USER_PARSE_CALL) {
2506                 switch(dev->devtype.type) {
2507                 case TYPE_DISK:
2508                         dev->generic_parse = scst_sbc_generic_parse;
2509                         dev->devtype.dev_done = dev_user_disk_done;
2510                         break;
2511
2512                 case TYPE_TAPE:
2513                         dev->generic_parse = scst_tape_generic_parse;
2514                         dev->devtype.dev_done = dev_user_tape_done;
2515                         break;
2516
2517                 case TYPE_MOD:
2518                         dev->generic_parse = scst_modisk_generic_parse;
2519                         dev->devtype.dev_done = dev_user_disk_done;
2520                         break;
2521
2522                 case TYPE_ROM:
2523                         dev->generic_parse = scst_cdrom_generic_parse;
2524                         dev->devtype.dev_done = dev_user_disk_done;
2525                         break;
2526
2527                 case TYPE_MEDIUM_CHANGER:
2528                         dev->generic_parse = scst_changer_generic_parse;
2529                         break;
2530
2531                 case TYPE_PROCESSOR:
2532                         dev->generic_parse = scst_processor_generic_parse;
2533                         break;
2534
2535                 case TYPE_RAID:
2536                         dev->generic_parse = scst_raid_generic_parse;
2537                         break;
2538
2539                 default:
2540                         PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2541                                 "for it", dev->devtype.type);
2542                         dev->parse_type = SCST_USER_PARSE_CALL;
2543                         break;
2544                 }
2545         } else {
2546                 dev->generic_parse = NULL;
2547                 dev->devtype.dev_done = NULL;
2548         }
2549
2550         TRACE_EXIT();
2551         return;
2552 }
2553
2554 static int dev_user_register_dev(struct file *file,
2555         const struct scst_user_dev_desc *dev_desc)
2556 {
2557         int res = -ENOMEM, i;
2558         struct scst_user_dev *dev, *d;
2559         int block;
2560
2561         TRACE_ENTRY();
2562
2563         if (dev_desc->version != DEV_USER_VERSION) {
2564                 PRINT_ERROR("Version mismatch (requested %d, required %d)",
2565                         dev_desc->version, DEV_USER_VERSION);
2566                 res = -EINVAL;
2567                 goto out;
2568         }
2569
2570         switch(dev_desc->type) {
2571         case TYPE_DISK:
2572         case TYPE_ROM:
2573         case TYPE_MOD:
2574                 if (dev_desc->block_size == 0) {
2575                         PRINT_ERROR("Wrong block size %d", dev_desc->block_size);
2576                         res = -EINVAL;
2577                         goto out;
2578                 }
2579                 block = scst_calc_block_shift(dev_desc->block_size);
2580                 if (block == -1) {
2581                         res = -EINVAL;
2582                         goto out;
2583                 }
2584                 break;
2585         default:
2586                 block = dev_desc->block_size;
2587                 break;
2588         }
2589
2590         if (!try_module_get(THIS_MODULE)) {
2591                 PRINT_ERROR("%s", "Fail to get module");
2592                 goto out;
2593         }
2594
2595         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2596         if (dev == NULL)
2597                 goto out_put;
2598
2599         init_rwsem(&dev->dev_rwsem);
2600         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2601         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2602         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2603         INIT_LIST_HEAD(&dev->ready_cmd_list);
2604         INIT_LIST_HEAD(&dev->prio_ready_cmd_list);
2605         init_waitqueue_head(&dev->prio_cmd_list_waitQ);
2606         if (file->f_flags & O_NONBLOCK) {
2607                 TRACE_DBG("%s", "Non-blocking operations");
2608                 dev->blocking = 0;
2609         } else
2610                 dev->blocking = 1;
2611         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2612                 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2613         INIT_LIST_HEAD(&dev->pre_unreg_sess_list);
2614
2615         strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2616         dev->name[sizeof(dev->name)-1] = '\0';
2617
2618         /*
2619          * We don't use a clustered pool, since that implies reordering
2620          * pages, which isn't possible with user space supplied buffers.
2621          * Although it's still possible to cluster pages at the tails of
2622          * each other, it doesn't seem worth the effort.
2623          */
2624         dev->pool = sgv_pool_create(dev->name, 0);
2625         if (dev->pool == NULL)
2626                 goto out_put;
2627         sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2628                 dev_user_free_sg_entries);
2629
2630         scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
2631                 dev->name);
2632         dev->devtype.type = dev_desc->type;
2633         dev->devtype.threads_num = -1;
2634         dev->devtype.parse_atomic = 1;
2635         dev->devtype.exec_atomic = 0; /* no point to make it 1 */
2636         dev->devtype.dev_done_atomic = 1;
2637         dev->devtype.no_proc = 1;
2638         dev->devtype.attach = dev_user_attach;
2639         dev->devtype.detach = dev_user_detach;
2640         dev->devtype.attach_tgt = dev_user_attach_tgt;
2641         dev->devtype.pre_unreg_sess = dev_user_pre_unreg_sess;
2642         dev->devtype.detach_tgt = dev_user_detach_tgt;
2643         dev->devtype.exec = dev_user_exec;
2644         dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2645         dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2646
2647         init_completion(&dev->cleanup_cmpl);
2648         dev->block = block;
2649         dev->def_block = dev->block;
2650
2651         res = __dev_user_set_opt(dev, &dev_desc->opt);
2652
2653         TRACE_MEM("dev %p, name %s", dev, dev->name);
2654
2655         spin_lock(&dev_list_lock);
2656
2657         list_for_each_entry(d, &dev_list, dev_list_entry) {
2658                 if (strcmp(d->name, dev->name) == 0) {
2659                         PRINT_ERROR("Device %s already exist",
2660                                 dev->name);
2661                         res = -EEXIST;
2662                         spin_unlock(&dev_list_lock);
2663                         goto out_free;
2664                 }
2665         }
2666
2667         list_add_tail(&dev->dev_list_entry, &dev_list);
2668
2669         spin_unlock(&dev_list_lock);
2670
2671         if (res != 0)
2672                 goto out_del_free;
2673
2674         res = scst_register_virtual_dev_driver(&dev->devtype);
2675         if (res < 0)
2676                 goto out_del_free;
2677
2678         dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2679         if (dev->virt_id < 0) {
2680                 res = dev->virt_id;
2681                 goto out_unreg_handler;
2682         }
2683
2684         mutex_lock(&dev_priv_mutex);
2685         if (file->private_data != NULL) {
2686                 mutex_unlock(&dev_priv_mutex);
2687                 PRINT_ERROR("%s", "Device already registered");
2688                 res = -EINVAL;
2689                 goto out_unreg_drv;
2690         }
2691         file->private_data = dev;
2692         mutex_unlock(&dev_priv_mutex);
2693
2694 out:
2695         TRACE_EXIT_RES(res);
2696         return res;
2697
2698 out_unreg_drv:
2699         scst_unregister_virtual_device(dev->virt_id);
2700
2701 out_unreg_handler:
2702         scst_unregister_virtual_dev_driver(&dev->devtype);
2703
2704 out_del_free:
2705         spin_lock(&dev_list_lock);
2706         list_del(&dev->dev_list_entry);
2707         spin_unlock(&dev_list_lock);
2708
2709 out_free:
2710         sgv_pool_destroy(dev->pool);
2711         kfree(dev);
2712         goto out_put;
2713
2714 out_put:
2715         module_put(THIS_MODULE);
2716         goto out;
2717 }
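
/*
 * Registration as seen from user space, for reference; a minimal sketch
 * using only the fields this function reads (the device node path is an
 * assumption, error handling omitted; desc.opt must also hold valid
 * options, see __dev_user_set_opt() below):
 *
 *	struct scst_user_dev_desc desc;
 *	int fd = open("/dev/scst_user", O_RDWR);
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.version = DEV_USER_VERSION;
 *	desc.type = TYPE_DISK;
 *	desc.block_size = 512;
 *	strncpy(desc.name, "disk1", sizeof(desc.name) - 1);
 *	ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc);
 *
 * Opening with O_NONBLOCK here would select non-blocking operation
 * (dev->blocking = 0), as checked above.
 */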
2718
2719 static int __dev_user_set_opt(struct scst_user_dev *dev,
2720         const struct scst_user_opt *opt)
2721 {
2722         int res = 0;
2723
2724         TRACE_ENTRY();
2725
2726         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2727                 "partial_transfers_type %x, partial_len %d", opt->parse_type,
2728                 opt->on_free_cmd_type, opt->memory_reuse_type,
2729                 opt->partial_transfers_type, opt->partial_len);
2730
2731         if ((opt->parse_type > SCST_USER_MAX_PARSE_OPT) ||
2732             (opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT) ||
2733             (opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT) ||
2734             (opt->prio_queue_type > SCST_USER_MAX_PRIO_QUEUE_OPT) ||
2735             (opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT)) {
2736                 PRINT_ERROR("%s", "Invalid option");
2737                 res = -EINVAL;
2738                 goto out;
2739         }
2740
2741         if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
2742              (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
2743             ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
2744              (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
2745             (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1)) {
2746                 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x, "
2747                         "tas %x, has_own_order_mgmt %x)", opt->tst,
2748                         opt->queue_alg, opt->swp, opt->tas, opt->has_own_order_mgmt);
2749                 res = -EINVAL;
2750                 goto out;
2751         }
2752
2753         if ((dev->prio_queue_type != opt->prio_queue_type) &&
2754             (opt->prio_queue_type == SCST_USER_PRIO_QUEUE_SINGLE)) {
2755                 struct scst_user_cmd *u, *t;
2756                 /* No need for lock, the activity is suspended */
2757                 list_for_each_entry_safe(u, t, &dev->prio_ready_cmd_list,
2758                                 ready_cmd_list_entry) {
2759                         list_move_tail(&u->ready_cmd_list_entry,
2760                                 &dev->ready_cmd_list);
2761                 }
2762         }
2763
2764         dev->prio_queue_type = opt->prio_queue_type;
2765         dev->parse_type = opt->parse_type;
2766         dev->on_free_cmd_type = opt->on_free_cmd_type;
2767         dev->memory_reuse_type = opt->memory_reuse_type;
2768         dev->partial_transfers_type = opt->partial_transfers_type;
2769         dev->partial_len = opt->partial_len;
2770
2771         dev->tst = opt->tst;
2772         dev->queue_alg = opt->queue_alg;
2773         dev->swp = opt->swp;
2774         dev->tas = opt->tas;
2775         dev->has_own_order_mgmt = opt->has_own_order_mgmt;
2776         if (dev->sdev != NULL) {
2777                 dev->sdev->tst = opt->tst;
2778                 dev->sdev->queue_alg = opt->queue_alg;
2779                 dev->sdev->swp = opt->swp;
2780                 dev->sdev->tas = opt->tas;
2781                 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
2782         }
2783
2784         dev_user_setup_functions(dev);
2785
2786 out:
2787         TRACE_EXIT_RES(res);
2788         return res;
2789 }
2790
2791 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
2792 {
2793         int res = 0;
2794         struct scst_user_dev *dev;
2795
2796         TRACE_ENTRY();
2797
2798         mutex_lock(&dev_priv_mutex);
2799         dev = (struct scst_user_dev*)file->private_data;
2800         res = dev_user_check_reg(dev);
2801         if (res != 0) {
2802                 mutex_unlock(&dev_priv_mutex);
2803                 goto out;
2804         }
2805         down_read(&dev->dev_rwsem);
2806         mutex_unlock(&dev_priv_mutex);
2807
2808         scst_suspend_activity();
2809         res = __dev_user_set_opt(dev, opt);
2810         scst_resume_activity();
2811
2812         up_read(&dev->dev_rwsem);
2813
2814 out:
2815         TRACE_EXIT_RES(res);
2816         return res;
2817 }
2818
2819 static int dev_user_get_opt(struct file *file, void *arg)
2820 {
2821         int res = 0;
2822         struct scst_user_dev *dev;
2823         struct scst_user_opt opt;
2824
2825         TRACE_ENTRY();
2826
2827         mutex_lock(&dev_priv_mutex);
2828         dev = (struct scst_user_dev*)file->private_data;
2829         res = dev_user_check_reg(dev);
2830         if (res != 0) {
2831                 mutex_unlock(&dev_priv_mutex);
2832                 goto out;
2833         }
2834         down_read(&dev->dev_rwsem);
2835         mutex_unlock(&dev_priv_mutex);
2836
2837         opt.parse_type = dev->parse_type;
2838         opt.on_free_cmd_type = dev->on_free_cmd_type;
2839         opt.memory_reuse_type = dev->memory_reuse_type;
2840         opt.prio_queue_type = dev->prio_queue_type;
2841         opt.partial_transfers_type = dev->partial_transfers_type;
2842         opt.partial_len = dev->partial_len;
2843         opt.tst = dev->tst;
2844         opt.queue_alg = dev->queue_alg;
2845         opt.tas = dev->tas;
2846         opt.swp = dev->swp;
2847         opt.has_own_order_mgmt = dev->has_own_order_mgmt;
2848
2849         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2850                 "partial_transfers_type %x, partial_len %d", opt.parse_type,
2851                 opt.on_free_cmd_type, opt.memory_reuse_type,
2852                 opt.partial_transfers_type, opt.partial_len);
2853
2854         res = copy_to_user(arg, &opt, sizeof(opt)) ? -EFAULT : 0;
2855
2856         up_read(&dev->dev_rwsem);
2857 out:
2858         TRACE_EXIT_RES(res);
2859         return res;
2860 }
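
/*
 * The two ioctls above pair into a natural read-modify-write cycle from
 * user space; a sketch (only option fields visible in this file are
 * touched):
 *
 *	struct scst_user_opt opt;
 *
 *	if (ioctl(fd, SCST_USER_GET_OPTIONS, &opt) == 0) {
 *		opt.parse_type = SCST_USER_PARSE_CALL;
 *		ioctl(fd, SCST_USER_SET_OPTIONS, &opt);
 *	}
 */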
2861
2862 static int dev_usr_parse(struct scst_cmd *cmd)
2863 {
2864         sBUG();
2865         return SCST_CMD_STATE_DEFAULT;
2866 }
2867
2868 /* Needed only for /proc support */
2869 #define USR_TYPE {                      \
2870         .name  = DEV_USER_NAME,         \
2871         .type  = -1,                    \
2872         .parse = dev_usr_parse,         \
2873 }
2874
2875 static struct scst_dev_type dev_user_devtype = USR_TYPE;
2876
2877 static int dev_user_release(struct inode *inode, struct file *file)
2878 {
2879         int res = 0;
2880         struct scst_user_dev *dev;
2881
2882         TRACE_ENTRY();
2883
2884         mutex_lock(&dev_priv_mutex);
2885         dev = (struct scst_user_dev*)file->private_data;
2886         if (dev == NULL) {
2887                 mutex_unlock(&dev_priv_mutex);
2888                 goto out;
2889         }
2890         file->private_data = NULL;
2891
2892         spin_lock(&dev_list_lock);
2893         list_del(&dev->dev_list_entry);
2894         spin_unlock(&dev_list_lock);
2895
2896         mutex_unlock(&dev_priv_mutex);
2897
2898         down_write(&dev->dev_rwsem);
2899
2900         TRACE_DBG("Releasing dev %p", dev);
2901
2902         spin_lock(&cleanup_lock);
2903         list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
2904         spin_unlock(&cleanup_lock);
2905
2906         wake_up(&cleanup_list_waitQ);
2907         wake_up(&dev->prio_cmd_list_waitQ);
2908         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2909
2910         scst_unregister_virtual_device(dev->virt_id);
2911         scst_unregister_virtual_dev_driver(&dev->devtype);
2912
2913         sgv_pool_destroy(dev->pool);
2914
2915         TRACE_DBG("Unregistering finished (dev %p)", dev);
2916
2917         dev->cleanup_done = 1;
2918         wake_up(&cleanup_list_waitQ);
2919         wake_up(&dev->prio_cmd_list_waitQ);
2920         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2921         wait_for_completion(&dev->cleanup_cmpl);
2922
2923         up_write(&dev->dev_rwsem); /* to make the debug check happy */
2924
2925         TRACE_DBG("Releasing completed (dev %p)", dev);
2926
2927         kfree(dev);
2928
2929         module_put(THIS_MODULE);
2930
2931 out:
2932         TRACE_EXIT_RES(res);
2933         return res;
2934 }
2935
2936 static void dev_user_process_cleanup(struct scst_user_dev *dev)
2937 {
2938         struct scst_user_cmd *ucmd;
2939         int rc;
2940
2941         TRACE_ENTRY();
2942
2943         dev->prio_queue_type = SCST_USER_PRIO_QUEUE_SINGLE;
2944         dev->cleaning = 1;
2945         dev->blocking = 1;
2946
2947         while(1) {
2948                 TRACE_DBG("Cleanuping dev %p", dev);
2949
2950                 dev_user_unjam_dev(dev, 0, NULL);
2951
2952                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2953                 rc = dev_user_get_next_prio_cmd(dev, &ucmd);
2954                 if (rc != 0)
2955                         rc = dev_user_get_next_cmd(dev, &ucmd);
2956                 if (rc == 0)
2957                         dev_user_unjam_cmd(ucmd, 1, NULL);
2958                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2959                 if ((rc == -EAGAIN) && dev->cleanup_done)
2960                         break;
2961         }
2962
2963 #ifdef EXTRACHECKS
2964 {
2965         int i;
2966         for(i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2967                 struct list_head *head = &dev->ucmd_hash[i];
2968                 struct scst_user_cmd *ucmd, *t;
2969                 list_for_each_entry_safe(ucmd, t, head, hash_list_entry) {
2970                         PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd,
2971                                 ucmd->state, atomic_read(&ucmd->ucmd_ref));
2972                         ucmd_put(ucmd);
2973                 }
2974         }
2975 }
2976 #endif
2977
2978         TRACE_DBG("Cleanuping done (dev %p)", dev);
2979         complete_all(&dev->cleanup_cmpl);
2980
2981         TRACE_EXIT();
2982         return;
2983 }
2984
2985 static inline int test_cleanup_list(void)
2986 {
2987         int res = !list_empty(&cleanup_list) ||
2988                   unlikely(kthread_should_stop());
2989         return res;
2990 }
2991
2992 static int dev_user_cleanup_thread(void *arg)
2993 {
2994         struct scst_user_dev *dev;
2995
2996         TRACE_ENTRY();
2997
2998         PRINT_INFO("Cleanup thread started, PID %d", current->pid);
2999
3000         current->flags |= PF_NOFREEZE;
3001
        spin_lock(&cleanup_lock);
        while (!kthread_should_stop()) {
                wait_queue_t wait;
                init_waitqueue_entry(&wait, current);

                if (!test_cleanup_list()) {
                        add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_cleanup_list())
                                        break;
                                spin_unlock(&cleanup_lock);
                                schedule();
                                spin_lock(&cleanup_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&cleanup_list_waitQ, &wait);
                }
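                /*
                 * Process the list one entry at a time: drop cleanup_lock
                 * while dev_user_process_cleanup() runs (it can sleep),
                 * then restart the walk from the head, since the list may
                 * have changed while the lock was released.
                 */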
restart:
                list_for_each_entry(dev, &cleanup_list, cleanup_list_entry) {
                        list_del(&dev->cleanup_list_entry);
                        spin_unlock(&cleanup_lock);
                        dev_user_process_cleanup(dev);
                        spin_lock(&cleanup_lock);
                        goto restart;
                }
        }
        spin_unlock(&cleanup_lock);

        /*
         * If kthread_should_stop() is true, we are guaranteed to be
         * in the module unload path, so cleanup_list must be empty.
         */
        sBUG_ON(!list_empty(&cleanup_list));

        PRINT_INFO("Cleanup thread PID %d finished", current->pid);

        TRACE_EXIT();
        return 0;
}

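/*
 * Module init: sets up, in order, the ucmd slab cache, the virtual device
 * driver registration with the SCST core, its standard /proc entries, the
 * sysfs class, the character device, its class device node and, finally,
 * the cleanup thread. The error labels below unwind in reverse order.
 */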
static int __init init_scst_user(void)
{
        int res = 0;
        struct class_device *class_member;

        TRACE_ENTRY();

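        /* Mirrors the compile-time #warning: refuse to load on HIGHMEM. */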
#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
        PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
                "Consider changing the VMSPLIT option or use a 64-bit "
                "configuration instead. See the README file for details.");
        res = -EINVAL;
        goto out;
#endif

        user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
        if (user_cmd_cachep == NULL) {
                res = -ENOMEM;
                goto out;
        }

        dev_user_devtype.module = THIS_MODULE;
        if (scst_register_virtual_dev_driver(&dev_user_devtype) < 0) {
                res = -ENODEV;
                goto out_cache;
        }

        res = scst_dev_handler_build_std_proc(&dev_user_devtype);
        if (res != 0)
                goto out_unreg;

        dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
        if (IS_ERR(dev_user_sysfs_class)) {
                printk(KERN_ERR "Unable to create sysfs class for SCST user "
                        "space handler\n");
                res = PTR_ERR(dev_user_sysfs_class);
                goto out_proc;
        }

        res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
        if (res) {
                printk(KERN_ERR "Unable to get major %d for the SCST user "
                       "space handler\n", DEV_USER_MAJOR);
                goto out_class;
        }

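        /*
         * Create the class device so that udev (or a static /dev setup)
         * can provide the /dev/scst_user node for user space handlers.
         */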
        class_member = class_device_create(dev_user_sysfs_class, NULL,
                MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
        if (IS_ERR(class_member)) {
                res = PTR_ERR(class_member);
                goto out_chrdev;
        }

        cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
                "scst_usr_cleanupd");
        if (IS_ERR(cleanup_thread)) {
                res = PTR_ERR(cleanup_thread);
                PRINT_ERROR("kthread_run() failed: %d", res);
                goto out_dev;
        }

out:
        TRACE_EXIT_RES(res);
        return res;

out_dev:
        class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));

out_chrdev:
        unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);

out_class:
        class_destroy(dev_user_sysfs_class);

out_proc:
        scst_dev_handler_destroy_std_proc(&dev_user_devtype);

out_unreg:
        scst_unregister_virtual_dev_driver(&dev_user_devtype);

out_cache:
        kmem_cache_destroy(user_cmd_cachep);
        goto out;
}

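/*
 * Module exit: stops the cleanup thread, then tears everything down in
 * the reverse order of init_scst_user().
 */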
static void __exit exit_scst_user(void)
{
        int rc;

        TRACE_ENTRY();

        rc = kthread_stop(cleanup_thread);
        if (rc < 0)
                TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);

        unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
        class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
        class_destroy(dev_user_sysfs_class);

        scst_dev_handler_destroy_std_proc(&dev_user_devtype);
        scst_unregister_virtual_dev_driver(&dev_user_devtype);

        kmem_cache_destroy(user_cmd_cachep);

        TRACE_EXIT();
        return;
}

module_init(init_scst_user);
module_exit(exit_scst_user);

MODULE_AUTHOR("Vladislav Bolkhovitin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Virtual user space device handler for SCST");
MODULE_VERSION(SCST_VERSION_STRING);
MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);
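
/*
 * Illustrative user space sketch (an assumption-based outline, not part of
 * this module): a handler opens the character device created above,
 * registers a virtual device, and then enters the reply-and-get-command
 * loop. Exact structure layouts and ioctl numbers are defined in
 * scst_user.h; consult it before relying on the names used here.
 *
 *      int fd = open("/dev/scst_user", O_RDWR);
 *      struct scst_user_dev_desc desc;
 *      memset(&desc, 0, sizeof(desc));
 *      // fill in the device name, SCSI type, block size, etc.
 *      ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc);
 *      for (;;) {
 *              // delivers the reply for the previous command (if any)
 *              // and returns the next SCSI command to serve
 *              ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get_reply);
 *              // ... process the command ...
 *      }
 */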