/*
 *  scst_user.c
 *
 *  Copyright (C) 2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 *  SCSI virtual user space device handler
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/poll.h>

#define LOG_PREFIX              DEV_USER_NAME

#include "scsi_tgt.h"
#include "scst_user.h"
#include "scst_dev_handler.h"

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
#warning HIGHMEM kernel configurations are not supported by this module, \
        because nowadays it is not worth the effort. Consider changing the \
        VMSPLIT option or using a 64-bit configuration instead. See the \
        README file for details.
#endif

#define DEV_USER_MAJOR                  237
#define DEV_USER_CMD_HASH_ORDER         6
#define DEV_USER_TM_TIMEOUT             (10*HZ)
#define DEV_USER_ATTACH_TIMEOUT         (5*HZ)
#define DEV_USER_DETACH_TIMEOUT         (5*HZ)
#define DEV_USER_PRE_UNREG_POLL_TIME    (HZ/10)

struct scst_user_dev
{
        struct rw_semaphore dev_rwsem;

        struct scst_cmd_lists cmd_lists;
        /* All 3 protected by cmd_lists.cmd_list_lock */
        struct list_head ready_cmd_list;
        struct list_head prio_ready_cmd_list;
        wait_queue_head_t prio_cmd_list_waitQ;

        /* All, including detach_cmd_count, protected by cmd_lists.cmd_list_lock */
        unsigned short blocking:1;
        unsigned short cleaning:1;
        unsigned short cleanup_done:1;
        unsigned short attach_cmd_active:1;
        unsigned short tm_cmd_active:1;
        unsigned short internal_reset_active:1;
        unsigned short pre_unreg_sess_active:1; /* just a small optimization */

        unsigned short tst:3;
        unsigned short queue_alg:4;
        unsigned short tas:1;
        unsigned short swp:1;
        unsigned short has_own_order_mgmt:1;

        unsigned short detach_cmd_count;

        int (*generic_parse)(struct scst_cmd *cmd,
                int (*get_block)(struct scst_cmd *cmd));

        int block;
        int def_block;

        struct sgv_pool *pool;

        uint8_t parse_type;
        uint8_t on_free_cmd_type;
        uint8_t memory_reuse_type;
        uint8_t prio_queue_type;
        uint8_t partial_transfers_type;
        uint32_t partial_len;

        struct scst_dev_type devtype;

        /* Both protected by cmd_lists.cmd_list_lock */
        unsigned int handle_counter;
        struct list_head ucmd_hash[1<<DEV_USER_CMD_HASH_ORDER];

        struct scst_device *sdev;

        int virt_id;
        struct list_head dev_list_entry;
        char name[SCST_MAX_NAME];

        /* Protected by cmd_lists.cmd_list_lock */
        struct list_head pre_unreg_sess_list;

        struct list_head cleanup_list_entry;
        struct completion cleanup_cmpl;
};

struct scst_user_pre_unreg_sess_obj
{
        struct scst_tgt_dev *tgt_dev;
        unsigned int active:1;
        unsigned int exit:1;
        struct list_head pre_unreg_sess_list_entry;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        struct work_struct pre_unreg_sess_work;
#else
        struct delayed_work pre_unreg_sess_work;
#endif
};

/* Most fields are unprotected, since only one thread at a time can access them */
struct scst_user_cmd
{
        struct scst_cmd *cmd;
        struct scst_user_dev *dev;

        atomic_t ucmd_ref;

        unsigned int buff_cached:1;
        unsigned int buf_dirty:1;
        unsigned int background_exec:1;
        unsigned int internal_reset_tm:1;
        unsigned int aborted:1;

        struct scst_user_cmd *buf_ucmd;

        int cur_data_page;
        int num_data_pages;
        int first_page_offset;
        unsigned long ubuff;
        struct page **data_pages;
        struct sgv_pool_obj *sgv;

        unsigned int state;

        struct list_head ready_cmd_list_entry;

        unsigned int h;
        struct list_head hash_list_entry;

        struct scst_user_get_cmd user_cmd;

        struct completion *cmpl;
        int result;
};
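
/*
 * A rough lifecycle sketch of a ucmd, inferred from the code below (not
 * authoritative): it is allocated and hashed by its handle 'h', cycles
 * through UCMD_STATE_* states while being bounced between the kernel and
 * the user space handler, and is freed when the last reference is put:
 *
 *   dev_user_alloc_ucmd()      ucmd_ref = 1, cmnd_insert_hash()
 *     -> UCMD_STATE_NEW        dev_user_parse() picks a parse flavor
 *     -> UCMD_STATE_PARSING / _BUF_ALLOCING / _EXECING
 *                              dev_user_add_to_ready(), user space replies
 *     -> UCMD_STATE_ON_FREEING / _ON_CACHE_FREEING
 *   ucmd_put()                 last ref -> dev_user_free_ucmd()
 */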

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask);
static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);

static int dev_user_parse(struct scst_cmd *cmd);
static int dev_user_exec(struct scst_cmd *cmd);
static void dev_user_on_free_cmd(struct scst_cmd *cmd);
static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_disk_done(struct scst_cmd *cmd);
static int dev_user_tape_done(struct scst_cmd *cmd);

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv);
static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv);

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);

static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags);
static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status);
static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
static int dev_user_register_dev(struct file *file,
        const struct scst_user_dev_desc *dev_desc);
static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt);
static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
static int dev_user_get_opt(struct file *file, void *arg);

static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
static long dev_user_ioctl(struct file *file, unsigned int cmd,
        unsigned long arg);
static int dev_user_release(struct inode *inode, struct file *file);

/** Data **/

static struct kmem_cache *user_cmd_cachep;

static DEFINE_MUTEX(dev_priv_mutex);

static struct file_operations dev_user_fops = {
        .poll           = dev_user_poll,
        .unlocked_ioctl = dev_user_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = dev_user_ioctl,
#endif
        .release        = dev_user_release,
};
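
/*
 * For orientation only: a pseudocode sketch of how a user space handler is
 * expected to drive this file_operations interface, assuming the ioctl
 * names declared in scst_user.h (SCST_USER_REGISTER_DEVICE,
 * SCST_USER_REPLY_CMD); the exact calls, structures and device node name
 * are defined there, not here, so treat this as an assumption:
 *
 *   fd = open("/dev/scst_user", O_RDWR);
 *   ioctl(fd, SCST_USER_REGISTER_DEVICE, &dev_desc);
 *   for (;;) {
 *           // fetch the next subcommand, handle it, send the reply
 *           ioctl(fd, SCST_USER_REPLY_CMD, &reply);
 *   }
 */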

static struct class *dev_user_sysfs_class;

static spinlock_t dev_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(dev_list);

static spinlock_t cleanup_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(cleanup_list);
static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
static struct task_struct *cleanup_thread;

static inline void ucmd_get(struct scst_user_cmd *ucmd, int barrier)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        atomic_inc(&ucmd->ucmd_ref);
        if (barrier)
                smp_mb__after_atomic_inc();
}

static inline void ucmd_put(struct scst_user_cmd *ucmd)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        if (atomic_dec_and_test(&ucmd->ucmd_ref))
                dev_user_free_ucmd(ucmd);
}

static inline int calc_num_pg(unsigned long buf, int len)
{
        len += buf & ~PAGE_MASK;
        return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
}
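
/*
 * Worked example for calc_num_pg(), assuming 4 KB pages (PAGE_SHIFT 12):
 * buf = 0x10000ff0, len = 0x2000. The in-page offset (buf & ~PAGE_MASK) is
 * 0xff0, so the span to cover is 0xff0 + 0x2000 = 0x2ff0 bytes, i.e. two
 * full pages plus a partial third one; the function returns
 * (0x2ff0 >> 12) + 1 = 3.
 */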

static inline int is_need_offs_page(unsigned long buf, int len)
{
        return ((buf & ~PAGE_MASK) != 0) &&
                ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
}
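
/*
 * In other words: is_need_offs_page() is true when the buffer starts at a
 * non-zero in-page offset *and* its last byte lands on a different page,
 * i.e. the unaligned start pushes the data across an extra page boundary.
 * With 4 KB pages, buf = 0x1ff0, len = 0x20 crosses into the next page
 * (true), while buf = 0x1ff0, len = 0x10 stays within one page (false).
 */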

static void __dev_user_not_reg(void)
{
        PRINT_ERROR("%s", "Device not registered");
        return;
}

static inline int dev_user_check_reg(struct scst_user_dev *dev)
{
        if (dev == NULL) {
                __dev_user_not_reg();
                return -EINVAL;
        }
        return 0;
}

static inline int scst_user_cmd_hashfn(int h)
{
        return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
}
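
/*
 * With DEV_USER_CMD_HASH_ORDER 6 the hash table has 64 buckets and the
 * hash is simply the low 6 bits of the handle; e.g. h = 0x1a7 maps to
 * bucket 0x1a7 & 0x3f = 0x27. Handles are allocated sequentially by
 * cmnd_insert_hash() below, so consecutive commands spread evenly.
 */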

static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
        unsigned int h)
{
        struct list_head *head;
        struct scst_user_cmd *ucmd;

        head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
        list_for_each_entry(ucmd, head, hash_list_entry) {
                if (ucmd->h == h) {
                        TRACE_DBG("Found ucmd %p", ucmd);
                        return ucmd;
                }
        }
        return NULL;
}

static void cmnd_insert_hash(struct scst_user_cmd *ucmd)
{
        struct list_head *head;
        struct scst_user_dev *dev = ucmd->dev;
        struct scst_user_cmd *u;
        unsigned long flags;

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
        do {
                ucmd->h = dev->handle_counter++;
                u = __ucmd_find_hash(dev, ucmd->h);
        } while(u != NULL);
        head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
        list_add_tail(&ucmd->hash_list_entry, head);
        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Inserted ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static inline void cmnd_remove_hash(struct scst_user_cmd *ucmd)
{
        unsigned long flags;
        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
        list_del(&ucmd->hash_list_entry);
        spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Freeing ucmd %p", ucmd);

        cmnd_remove_hash(ucmd);
        EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);

        kmem_cache_free(user_cmd_cachep, ucmd);

        TRACE_EXIT();
        return;
}

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;

        TRACE_ENTRY();

        /* *sg is expected to be zeroed */

        TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
                ucmd->ubuff, ucmd->cur_data_page);

        if (ucmd->cur_data_page == 0) {
                TRACE_MEM("ucmd->first_page_offset %d",
                        ucmd->first_page_offset);
                sg->offset = ucmd->first_page_offset;
                ucmd_get(ucmd, 0);
        }

        if (ucmd->cur_data_page >= ucmd->num_data_pages)
                goto out;

        sg->page = ucmd->data_pages[ucmd->cur_data_page];
        sg->length = PAGE_SIZE - sg->offset;

        ucmd->cur_data_page++;

        TRACE_MEM("page=%p, length=%d", sg->page, sg->length);
        TRACE_BUFFER("Page data", page_address(sg->page), sg->length);

out:
        TRACE_EXIT();
        return sg->page;
}

static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
                ucmd, ucmd->h, ucmd->ubuff);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
        ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;

        ucmd->state = UCMD_STATE_ON_CACHE_FREEING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
{
        int i;

        TRACE_ENTRY();

        TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
                ucmd->ubuff, ucmd->num_data_pages);

        for(i = 0; i < ucmd->num_data_pages; i++) {
                struct page *page = ucmd->data_pages[i];

                if (ucmd->buf_dirty)
                        SetPageDirty(page);

                page_cache_release(page);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;

        TRACE_EXIT();
        return;
}

static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        sBUG_ON(ucmd->data_pages == NULL);

        TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
                ucmd, ucmd->ubuff, ucmd->buff_cached);

        dev_user_unmap_buf(ucmd);

        if (ucmd->buff_cached)
                dev_user_on_cached_mem_free(ucmd);
        else
                ucmd_put(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;

        TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
                sg_count, ucmd);

        __dev_user_free_sg_entries(ucmd);

        return;
}

static inline int is_buff_cached(struct scst_user_cmd *ucmd)
{
        int mem_reuse_type = ucmd->dev->memory_reuse_type;

        if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
            ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
            ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE))) {
                return 1;
        } else
                return 0;
}
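
/*
 * Decision table for is_buff_cached(), i.e. whether the data buffer may be
 * kept in the SGV cache and reused across commands:
 *
 *   memory_reuse_type             READ cmd    WRITE cmd   other
 *   SCST_USER_MEM_REUSE_ALL       cached      cached      cached
 *   SCST_USER_MEM_REUSE_READ      cached      not         not
 *   SCST_USER_MEM_REUSE_WRITE     not         cached      not
 *   (anything else)               not         not         not
 */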

/*
 * Returns 0 on success, <0 on fatal failure, >0 if pages from user space
 * are still needed. Unmaps the buffer, if needed, in case of error.
 */
static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;
        struct scst_user_dev *dev = ucmd->dev;
        int gfp_mask, flags = 0;
        int bufflen = cmd->bufflen;
        int last_len = 0;

        TRACE_ENTRY();

        gfp_mask = __GFP_NOWARN;
        gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);

        if (cached_buff) {
                flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
                if (ucmd->ubuff == 0)
                        flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
        } else {
                TRACE_MEM("%s", "Not cached buff");
                flags |= SCST_POOL_ALLOC_NO_CACHED;
                if (ucmd->ubuff == 0) {
                        res = 1;
                        goto out;
                }
                bufflen += ucmd->first_page_offset;
                if (is_need_offs_page(ucmd->ubuff, cmd->bufflen))
                        last_len = bufflen & ~PAGE_MASK;
                else
                        last_len = cmd->bufflen & ~PAGE_MASK;
                if (last_len == 0)
                        last_len = PAGE_SIZE;
        }
        ucmd->buff_cached = cached_buff;

        cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &ucmd->sgv, ucmd);
        if (cmd->sg != NULL) {
                struct scst_user_cmd *buf_ucmd =
                        (struct scst_user_cmd*)sgv_get_priv(ucmd->sgv);

                TRACE_MEM("Buf ucmd %p", buf_ucmd);

                ucmd->ubuff = buf_ucmd->ubuff;
                ucmd->buf_ucmd = buf_ucmd;

                TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
                        "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff,
                        last_len, cmd->sg[cmd->sg_cnt-1].length);

                EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
                                   (ucmd != buf_ucmd));

                if (last_len != 0) {
                        /* We don't use clustering, so the assignment is safe */
                        cmd->sg[cmd->sg_cnt-1].length = last_len;
                }

                if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
                        static int ll;
                        if (ll < 10) {
                                PRINT_INFO("Unable to complete command due to "
                                        "SG IO count limitation (requested %d, "
                                        "available %d, tgt lim %d)", cmd->sg_cnt,
                                        cmd->tgt_dev->max_sg_cnt,
                                        cmd->tgt->sg_tablesize);
                                ll++;
                        }
                        cmd->sg = NULL;
                        /* sgv will be freed in dev_user_free_sgv() */
                        res = -1;
                }
        } else {
                TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
                        "sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
                        ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
                if (unlikely(cmd->sg_cnt == 0)) {
                        TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
                        sBUG_ON(ucmd->sgv != NULL);
                        res = -1;
                } else {
                        switch(ucmd->state & ~UCMD_STATE_MASK) {
                        case UCMD_STATE_BUF_ALLOCING:
                                res = 1;
                                break;
                        case UCMD_STATE_EXECING:
                                res = -1;
                                break;
                        default:
                                sBUG();
                                break;
                        }
                }
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}
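
/*
 * Worked example for the non-cached last_len logic above, assuming 4 KB
 * pages: ubuff = 0x1ff0 (first_page_offset 0xff0), cmd->bufflen = 0x20.
 * The buffer crosses a page boundary, so is_need_offs_page() is true and
 * the padded length is bufflen = 0x20 + 0xff0 = 0x1010; the final SG entry
 * is trimmed to last_len = 0x1010 & ~PAGE_MASK = 0x10 bytes, exactly the
 * tail that spills onto the second page.
 */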

static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(ucmd->cmd->data_buf_tgt_alloc)) {
                PRINT_ERROR("Target driver %s requested own memory "
                        "allocation", ucmd->cmd->tgtt->name);
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        ucmd->state = UCMD_STATE_BUF_ALLOCING;
        cmd->data_buf_alloced = 1;

        rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
        if (rc == 0)
                goto out;
        else if (rc < 0) {
                scst_set_busy(cmd);
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        if ((cmd->data_direction != SCST_DATA_WRITE) &&
            !scst_is_cmd_local(cmd)) {
                TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
        ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb)));
        ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
                (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;

        dev_user_add_to_ready(ucmd);

        res = SCST_CMD_STATE_STOP;

out:
        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask)
{
        struct scst_user_cmd *ucmd = NULL;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
        if (ucmd != NULL)
                memset(ucmd, 0, sizeof(*ucmd));
#else
        ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
#endif
        if (unlikely(ucmd == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
                        "user cmd (gfp_mask %x)", gfp_mask);
                goto out;
        }
        ucmd->dev = dev;
        atomic_set(&ucmd->ucmd_ref, 1);

        cmnd_insert_hash(ucmd);

        TRACE_MEM("ucmd %p allocated", ucmd);

out:
        TRACE_EXIT_HRES((unsigned long)ucmd);
        return ucmd;
}

static int dev_user_get_block(struct scst_cmd *cmd)
{
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are existing commands.
         */
        TRACE_EXIT_RES(dev->block);
        return dev->block;
}

static int dev_user_parse(struct scst_cmd *cmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_user_cmd *ucmd;
        int atomic = scst_cmd_atomic(cmd);
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        int gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        if (cmd->dh_priv == NULL) {
                ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
                if (unlikely(ucmd == NULL)) {
                        if (atomic) {
                                res = SCST_CMD_STATE_NEED_THREAD_CTX;
                                goto out;
                        } else {
                                scst_set_busy(cmd);
                                goto out_error;
                        }
                }
                ucmd->cmd = cmd;
                cmd->dh_priv = ucmd;
        } else {
                ucmd = (struct scst_user_cmd*)cmd->dh_priv;
                TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
        }

        TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);

        if (ucmd->state != UCMD_STATE_NEW)
                goto alloc;

        switch(dev->parse_type) {
        case SCST_USER_PARSE_STANDARD:
                TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
                        goto out_invalid;
                break;

        case SCST_USER_PARSE_EXCEPTION:
                TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
                        break;
                else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
                        TRACE_MEM("Restarting PARSE to thread context "
                                "(ucmd %p)", ucmd);
                        res = SCST_CMD_STATE_NEED_THREAD_CTX;
                        goto out;
                }
                /* else fall through */

        case SCST_USER_PARSE_CALL:
                TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
                        "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
                ucmd->user_cmd.cmd_h = ucmd->h;
                ucmd->user_cmd.subcode = SCST_USER_PARSE;
                ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
                memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
                        min(sizeof(ucmd->user_cmd.parse_cmd.cdb),
                            sizeof(cmd->cdb)));
                ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
                ucmd->user_cmd.parse_cmd.timeout = cmd->timeout;
                ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
                ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
                ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
                ucmd->user_cmd.parse_cmd.expected_values_set =
                                        cmd->expected_values_set;
                ucmd->user_cmd.parse_cmd.expected_data_direction =
                                        cmd->expected_data_direction;
                ucmd->user_cmd.parse_cmd.expected_transfer_len =
                                        cmd->expected_transfer_len;
                ucmd->state = UCMD_STATE_PARSING;
                dev_user_add_to_ready(ucmd);
                res = SCST_CMD_STATE_STOP;
                goto out;

        default:
                sBUG();
                goto out;
        }

alloc:
        if (cmd->data_direction != SCST_DATA_NONE)
                res = dev_user_alloc_space(ucmd);

out:
        TRACE_EXIT_RES(res);
        return res;

out_invalid:
        PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
                cmd->op_flags & SCST_INFO_INVALID);
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));

out_error:
        res = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;
}
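
/*
 * Summary of the three parse flavors handled above:
 *
 *   SCST_USER_PARSE_STANDARD  - the kernel-side generic_parse() decides
 *                               everything; user space is never asked.
 *   SCST_USER_PARSE_EXCEPTION - generic_parse() is tried first and user
 *                               space is consulted only if it fails or
 *                               flags the CDB as invalid.
 *   SCST_USER_PARSE_CALL      - every command is forwarded to user space
 *                               as an SCST_USER_PARSE subcommand.
 */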

static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
{
        struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
        unsigned long start = buf_ucmd->ubuff;
        int i;

        TRACE_ENTRY();

        if (start == 0)
                goto out;

        for(i = 0; i < buf_ucmd->num_data_pages; i++) {
                struct page *page;
                page = buf_ucmd->data_pages[i];
#ifdef ARCH_HAS_FLUSH_ANON_PAGE
                struct vm_area_struct *vma = find_vma(current->mm, start);
                if (vma != NULL)
                        flush_anon_page(vma, page, start);
#endif
                flush_dcache_page(page);
                start += PAGE_SIZE;
        }

out:
        TRACE_EXIT();
        return;
}

static int dev_user_exec(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;

        TRACE_ENTRY();

        TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
                "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
                cmd->bufflen, cmd->data_len, ucmd->ubuff);

        if (cmd->data_direction == SCST_DATA_WRITE)
                dev_user_flush_dcache(ucmd);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_EXEC;
        ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.exec_cmd.cdb),
                    sizeof(cmd->cdb)));
        ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
        ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
        ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
        if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
                ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
                        (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        }
        ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.exec_cmd.partial = 0;
        ucmd->user_cmd.exec_cmd.timeout = cmd->timeout;

        ucmd->state = UCMD_STATE_EXECING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT();
        return SCST_EXEC_COMPLETED;
}

static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
{
        if (ucmd->sgv != NULL) {
                sgv_pool_free(ucmd->sgv);
                ucmd->sgv = NULL;
        } else if (ucmd->data_pages != NULL) {
                /* We mapped pages, but for some reason didn't allocate them */
                ucmd_get(ucmd, 0);
                __dev_user_free_sg_entries(ucmd);
        }
        return;
}

static void dev_user_on_free_cmd(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;

        TRACE_ENTRY();

        if (unlikely(ucmd == NULL))
                goto out;

        TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
                ucmd->buff_cached, ucmd->ubuff);

        ucmd->cmd = NULL;
        if ((cmd->data_direction == SCST_DATA_WRITE) && (ucmd->buf_ucmd != NULL))
                ucmd->buf_ucmd->buf_dirty = 1;

        if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
                ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
                /* The state assignment must be before freeing sgv! */
                dev_user_free_sgv(ucmd);
                ucmd_put(ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;

        ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
        ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
        ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
        ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
        ucmd->user_cmd.on_free_cmd.status = cmd->status;
        ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;

        ucmd->state = UCMD_STATE_ON_FREEING;

        dev_user_add_to_ready(ucmd);

out:
        TRACE_EXIT();
        return;
}

static void dev_user_set_block(struct scst_cmd *cmd, int block)
{
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are existing commands.
         */
        TRACE_DBG("dev %p, new block %d", dev, block);
        if (block != 0)
                dev->block = block;
        else
                dev->block = dev->def_block;
        return;
}

static int dev_user_disk_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_block_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_tape_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_tape_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
{
        struct scst_user_dev *dev = ucmd->dev;
        unsigned long flags;
        int do_wake;

        TRACE_ENTRY();

        do_wake = (in_interrupt() ||
                   (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
        if (ucmd->cmd)
                do_wake |= ucmd->cmd->preprocessing_only;

        EXTRACHECKS_BUG_ON(ucmd->state & UCMD_STATE_JAMMED_MASK);

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);

        /* Hopefully, the compiler will turn this into a single test/jmp */
        if (unlikely(dev->attach_cmd_active || dev->tm_cmd_active ||
                     dev->internal_reset_active || dev->pre_unreg_sess_active ||
                     (dev->detach_cmd_count != 0))) {
                switch(ucmd->state) {
                case UCMD_STATE_PARSING:
                case UCMD_STATE_BUF_ALLOCING:
                case UCMD_STATE_EXECING:
                        if (dev->pre_unreg_sess_active &&
                            !(dev->attach_cmd_active || dev->tm_cmd_active ||
                              dev->internal_reset_active ||
                              (dev->detach_cmd_count != 0))) {
                                struct scst_user_pre_unreg_sess_obj *p, *found = NULL;
                                list_for_each_entry(p, &dev->pre_unreg_sess_list,
                                        pre_unreg_sess_list_entry) {
                                        if (p->tgt_dev == ucmd->cmd->tgt_dev) {
                                                if (p->active)
                                                        found = p;
                                                break;
                                        }
                                }
                                if (found == NULL) {
                                        TRACE_MGMT_DBG("No pre unreg sess "
                                                "active (ucmd %p)", ucmd);
                                        break;
                                } else {
                                        TRACE_MGMT_DBG("Pre unreg sess %p "
                                                "active (ucmd %p)", found, ucmd);
                                }
                        }
                        TRACE(TRACE_MGMT, "Mgmt cmd active, returning BUSY for "
                                "ucmd %p", ucmd);
                        dev_user_unjam_cmd(ucmd, 1, &flags);
                        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
                        goto out;
                }
        }

        if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
            unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
            unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
                if (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE) {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to prio ready cmd "
                                "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->prio_ready_cmd_list);
                        wake_up(&dev->prio_cmd_list_waitQ);
                        do_wake = 0;
                } else {
                        TRACE_MGMT_DBG("Adding mgmt ucmd %p to ready cmd "
                                "list", ucmd);
                        list_add_tail(&ucmd->ready_cmd_list_entry,
                                &dev->ready_cmd_list);
                        do_wake = 1;
                }
        } else if ((ucmd->cmd != NULL) &&
            unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
                TRACE_DBG("Adding ucmd %p to head ready cmd list", ucmd);
                list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        } else {
                TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
                list_add_tail(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        }

        if (do_wake) {
                TRACE_DBG("Waking up dev %p", dev);
                wake_up(&dev->cmd_lists.cmd_list_waitQ);
        }

        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

out:
        TRACE_EXIT();
        return;
}
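
/*
 * Queueing rules implemented by dev_user_add_to_ready(), in order:
 * management subcommands (TM exec, attach/detach session) go to the
 * separate prio queue when SCST_USER_PRIO_QUEUE_SEPARATE is configured,
 * otherwise to the common ready queue; HEAD_OF_QUEUE SCSI commands are
 * prepended to the ready queue; everything else is appended to its tail.
 */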

static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
        int num_pg)
{
        int res = 0, rc;
        int i;

        TRACE_ENTRY();

        if (unlikely(ubuff == 0))
                goto out_nomem;

        sBUG_ON(ucmd->data_pages != NULL);

        ucmd->num_data_pages = num_pg;

        ucmd->data_pages = kzalloc(sizeof(*ucmd->data_pages)*ucmd->num_data_pages,
                GFP_KERNEL);
        if (ucmd->data_pages == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
                        "(num_data_pages=%d)", ucmd->num_data_pages);
                res = -ENOMEM;
                goto out_nomem;
        }

        TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d, "
                "first_page_offset %d, len %d)", ucmd, ubuff,
                ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
                ucmd->cmd->bufflen);

        down_read(&current->mm->mmap_sem);
        rc = get_user_pages(current, current->mm, ubuff, ucmd->num_data_pages,
                1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* get_user_pages() flushes dcache */

        if (rc < ucmd->num_data_pages)
                goto out_unmap;

        ucmd->ubuff = ubuff;
        ucmd->first_page_offset = (ubuff & ~PAGE_MASK);

out:
        TRACE_EXIT_RES(res);
        return res;

out_nomem:
        scst_set_busy(ucmd->cmd);
        /* fall through */

out_err:
        ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;

out_unmap:
        PRINT_ERROR("Failed to get %d user pages (rc %d)",
                ucmd->num_data_pages, rc);
        if (rc > 0) {
                for(i = 0; i < rc; i++)
                        page_cache_release(ucmd->data_pages[i]);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;
        res = -EFAULT;
        scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        goto out_err;
}
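
/*
 * Note on the pinning protocol above: get_user_pages() takes a reference
 * on each returned page, so every successfully pinned page must later be
 * released with page_cache_release(), either in dev_user_unmap_buf() on
 * the normal path or in the partial-failure loop at out_unmap.
 */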

static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        TRACE_DBG("ucmd %p, pbuf %Lx", ucmd, reply->alloc_reply.pbuf);

        if (likely(reply->alloc_reply.pbuf != 0)) {
                int pages;
                if (ucmd->buff_cached) {
                        if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
                                PRINT_ERROR("Supplied pbuf %Lx isn't "
                                        "page aligned", reply->alloc_reply.pbuf);
                                goto out_hwerr;
                        }
                        pages = cmd->sg_cnt;
                } else
                        pages = calc_num_pg(reply->alloc_reply.pbuf, cmd->bufflen);
                res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
        } else {
                scst_set_busy(ucmd->cmd);
                ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        }

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_hwerr:
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_parse *preply =
                &reply->parse_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
                     (preply->data_direction != SCST_DATA_READ) &&
                     (preply->data_direction != SCST_DATA_NONE)))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
                     (preply->bufflen == 0)))
                goto out_inval;

        if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
                goto out_inval;

        TRACE_DBG("ucmd %p, queue_type %x, data_direction %x, bufflen %d, "
                "data_len %d, pbuf %Lx", ucmd, preply->queue_type,
                preply->data_direction, preply->bufflen, preply->data_len,
                reply->alloc_reply.pbuf);

        cmd->queue_type = preply->queue_type;
        cmd->data_direction = preply->data_direction;
        cmd->bufflen = preply->bufflen;
        cmd->data_len = preply->data_len;

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid parse_reply parameter(s)");
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON FREE ucmd %p", ucmd);

        dev_user_free_sgv(ucmd);
        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);

        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_exec *ereply =
                &reply->exec_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
                if (ucmd->background_exec) {
                        TRACE_DBG("Background ucmd %p finished", ucmd);
                        ucmd_put(ucmd);
                        goto out;
                }
                if (unlikely(ereply->resp_data_len > cmd->bufflen))
                        goto out_inval;
                if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
                             (ereply->resp_data_len != 0)))
                        goto out_inval;
        } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
                if (unlikely(ucmd->background_exec))
                        goto out_inval;
                if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
                             (cmd->resp_data_len != 0)))
                        goto out_inval;
                ucmd_get(ucmd, 1);
                ucmd->background_exec = 1;
                TRACE_DBG("Background ucmd %p", ucmd);
                goto out_compl;
        } else
                goto out_inval;

        TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
                ereply->status, ereply->resp_data_len);

        if (ereply->resp_data_len != 0) {
                if (ucmd->ubuff == 0) {
                        int pages, rc;
                        if (unlikely(ereply->pbuf == 0))
                                goto out_busy;
                        if (ucmd->buff_cached) {
                                if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
                                        PRINT_ERROR("Supplied pbuf %Lx isn't "
                                                "page aligned", ereply->pbuf);
                                        goto out_hwerr;
                                }
                                pages = cmd->sg_cnt;
                        } else
                                pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
                        rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
                        if ((rc != 0) || (ucmd->ubuff == 0))
                                goto out_compl;

                        rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
                        if (unlikely(rc != 0))
                                goto out_busy;
                } else
                        dev_user_flush_dcache(ucmd);
                cmd->may_need_dma_sync = 1;
                scst_set_resp_data_len(cmd, ereply->resp_data_len);
        } else if (cmd->resp_data_len != ereply->resp_data_len) {
                if (ucmd->ubuff == 0)
                        cmd->resp_data_len = ereply->resp_data_len;
                else
                        scst_set_resp_data_len(cmd, ereply->resp_data_len);
        }

        cmd->status = ereply->status;
        if (ereply->sense_len != 0) {
                res = copy_from_user(cmd->sense_buffer,
                        (void*)(unsigned long)ereply->psense_buffer,
                        min(sizeof(cmd->sense_buffer),
                                (unsigned int)ereply->sense_len));
                if (res < 0) {
                        PRINT_ERROR("%s", "Unable to get sense data");
                        goto out_hwerr_res_set;
                }
        }

out_compl:
        cmd->completed = 1;
        cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
        /* !! At this point cmd can be already freed !! */

out:
        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("%s", "Invalid exec_reply parameter(s)");

out_hwerr:
        res = -EINVAL;

out_hwerr_res_set:
        if (ucmd->background_exec) {
                ucmd_put(ucmd);
                goto out;
        } else {
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                goto out_compl;
        }

out_busy:
        scst_set_busy(cmd);
        goto out_compl;
}

static int dev_user_process_reply(struct scst_user_dev *dev,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_cmd *ucmd;
        int state;

        TRACE_ENTRY();

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd = __ucmd_find_hash(dev, reply->cmd_h);
        if (ucmd == NULL) {
                TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
                res = -ESRCH;
                goto out_unlock;
        }

        if (ucmd->background_exec) {
                state = UCMD_STATE_EXECING;
                goto unlock_process;
        }

        if (unlikely(!(ucmd->state & UCMD_STATE_SENT_MASK))) {
                if (ucmd->state & UCMD_STATE_JAMMED_MASK) {
                        TRACE_MGMT_DBG("Reply on jammed ucmd %p, ignoring",
                                ucmd);
                } else {
                        TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
                                "state %x", ucmd, ucmd->state);
                        res = -EBUSY;
                }
                goto out_unlock;
        }

        if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
                goto out_wrong_state;

        if (unlikely(_IOC_NR(reply->subcode) !=
                        (ucmd->state & ~UCMD_STATE_SENT_MASK)))
                goto out_wrong_state;

        ucmd->state &= ~UCMD_STATE_SENT_MASK;
        state = ucmd->state;
        ucmd->state |= UCMD_STATE_RECV_MASK;

unlock_process:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        switch(state) {
        case UCMD_STATE_PARSING:
                res = dev_user_process_reply_parse(ucmd, reply);
                break;

        case UCMD_STATE_BUF_ALLOCING:
                res = dev_user_process_reply_alloc(ucmd, reply);
                break;

        case UCMD_STATE_EXECING:
                res = dev_user_process_reply_exec(ucmd, reply);
                break;

        case UCMD_STATE_ON_FREEING:
                res = dev_user_process_reply_on_free(ucmd);
                break;

        case UCMD_STATE_ON_CACHE_FREEING:
                res = dev_user_process_reply_on_cache_free(ucmd);
                break;

        case UCMD_STATE_TM_EXECING:
                res = dev_user_process_reply_tm_exec(ucmd, reply->result);
                break;

        case UCMD_STATE_ATTACH_SESS:
        case UCMD_STATE_DETACH_SESS:
                res = dev_user_process_reply_sess(ucmd, reply->result);
                break;

        default:
                sBUG();
                break;
        }
out:
        TRACE_EXIT_RES(res);
        return res;

out_wrong_state:
        PRINT_ERROR("Command's %p subcode %x doesn't match internal "
                "command's state %x or reply->subcode (%x) != ucmd->subcode "
                "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
                reply->subcode, ucmd->user_cmd.subcode);
        res = -EINVAL;
        dev_user_unjam_cmd(ucmd, 0, NULL);

out_unlock:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
        goto out;
}
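
/*
 * The SENT/RECV handshake above in a nutshell: when a ucmd is handed to
 * user space its state carries UCMD_STATE_SENT_MASK; a reply is accepted
 * only if that bit is set and the reply's subcode matches the state, at
 * which point SENT is cleared and UCMD_STATE_RECV_MASK is set. This is
 * what rejects duplicate, stale or mismatched replies with -EBUSY/-EINVAL.
 */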
1402
1403 static int dev_user_reply_cmd(struct file *file, unsigned long arg)
1404 {
1405         int res = 0;
1406         struct scst_user_dev *dev;
1407         struct scst_user_reply_cmd *reply;
1408
1409         TRACE_ENTRY();
1410
1411         mutex_lock(&dev_priv_mutex);
1412         dev = (struct scst_user_dev*)file->private_data;
1413         res = dev_user_check_reg(dev);
1414         if (res != 0) {
1415                 mutex_unlock(&dev_priv_mutex);
1416                 goto out;
1417         }
1418         down_read(&dev->dev_rwsem);
1419         mutex_unlock(&dev_priv_mutex);
1420
1421         reply = kzalloc(sizeof(*reply), GFP_KERNEL);
1422         if (reply == NULL) {
1423                 res = -ENOMEM;
1424                 goto out_up;
1425         }
1426
1427         res = copy_from_user(reply, (void*)arg, sizeof(*reply));
1428         if (res < 0)
1429                 goto out_free;
1430
1431         TRACE_BUFFER("Reply", reply, sizeof(*reply));
1432
1433         res = dev_user_process_reply(dev, reply);
1434         if (res < 0)
1435                 goto out_free;
1436
1437 out_free:
1438         kfree(reply);
1439
1440 out_up:
1441         up_read(&dev->dev_rwsem);
1442
1443 out:
1444         TRACE_EXIT_RES(res);
1445         return res;
1446 }
1447
1448 static int dev_user_process_scst_commands(struct scst_user_dev *dev)
1449 {
1450         int res = 0;
1451
1452         TRACE_ENTRY();
1453
1454         while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
1455                 struct scst_cmd *cmd = list_entry(
1456                         dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
1457                         cmd_list_entry);
1458                 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
1459                 list_del(&cmd->cmd_list_entry);
1460                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1461                 scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);
1462                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1463                 res++;
1464         }
1465
1466         TRACE_EXIT_RES(res);
1467         return res;
1468 }
1469
1470 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1471 struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
1472 {
1473         struct scst_user_cmd *u;
1474
1475 again:
1476         u = NULL;
1477         if (!list_empty(cmd_list)) {
1478                 u = list_entry(cmd_list->next, typeof(*u), ready_cmd_list_entry);
1479                 TRACE_DBG("Found ready ucmd %p", u);
1480                 list_del(&u->ready_cmd_list_entry);
1481                 EXTRACHECKS_BUG_ON(u->state & UCMD_STATE_JAMMED_MASK);
1482                 if (u->cmd != NULL) {
1483                         if (u->state == UCMD_STATE_EXECING) {
1484                                 struct scst_user_dev *dev = u->dev;
1485                                 int rc;
1486                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1487                                 rc = scst_check_local_events(u->cmd);
1488                                 if (unlikely(rc != 0)) {
1489                                         u->cmd->scst_cmd_done(u->cmd,
1490                                                 SCST_CMD_STATE_DEFAULT);
1491                                         /* 
1492                                          * !! At this point cmd & u can be !!
1493                                          * !! already freed                !! 
1494                                          */
1495                                         spin_lock_irq(
1496                                                 &dev->cmd_lists.cmd_list_lock);
1497                                         goto again;
1498                                 }
1499                                 /*
1500                                  * There is no real need to lock again here, but
1501                                  * let's do it for simplicity.
1502                                  */
1503                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1504                         } else if (unlikely(test_bit(SCST_CMD_ABORTED,
1505                                         &u->cmd->cmd_flags))) {
1506                                 switch(u->state) {
1507                                 case UCMD_STATE_PARSING:
1508                                 case UCMD_STATE_BUF_ALLOCING:
1509                                         TRACE_MGMT_DBG("Aborting ucmd %p", u);
1510                                         dev_user_unjam_cmd(u, 0, NULL);
1511                                         goto again;
1512                                 case UCMD_STATE_EXECING:
1513                                         EXTRACHECKS_BUG_ON(1);
1514                                 }
1515                         }
1516                 }
1517                 u->state |= UCMD_STATE_SENT_MASK;
1518         }
1519         return u;
1520 }
1521
1522 static inline int test_cmd_lists(struct scst_user_dev *dev)
1523 {
1524         int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
1525                   !list_empty(&dev->ready_cmd_list) ||
1526                   !dev->blocking || dev->cleanup_done ||
1527                   signal_pending(current);
1528         return res;
1529 }
1530
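/*
 * Waits (interruptibly) until a ucmd is ready on the regular queue,
 * processing any pending active SCST commands while waiting. Returns 0
 * with *ucmd set, -EAGAIN in non-blocking or cleanup mode, or -EINTR.
 */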
1531 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1532 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
1533         struct scst_user_cmd **ucmd)
1534 {
1535         int res = 0;
1536         wait_queue_t wait;
1537
1538         TRACE_ENTRY();
1539
1540         init_waitqueue_entry(&wait, current);
1541
1542         while(1) {
1543                 if (!test_cmd_lists(dev)) {
1544                         add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
1545                                 &wait);
1546                         for (;;) {
1547                                 set_current_state(TASK_INTERRUPTIBLE);
1548                                 if (test_cmd_lists(dev))
1549                                         break;
1550                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1551                                 schedule();
1552                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1553                         }
1554                         set_current_state(TASK_RUNNING);
1555                         remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
1556                                 &wait);
1557                 }
1558
1559                 dev_user_process_scst_commands(dev);
1560
1561                 *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
1562                 if (*ucmd != NULL)
1563                         break;
1564
1565                 if (!dev->blocking || dev->cleanup_done) {
1566                         res = -EAGAIN;
1567                         TRACE_DBG("No ready commands, returning %d", res);
1568                         break;
1569                 }
1570
1571                 if (signal_pending(current)) {
1572                         res = -EINTR;
1573                         TRACE_DBG("Signal pending, returning %d", res);
1574                         break;
1575                 }
1576         }
1577
1578         TRACE_EXIT_RES(res);
1579         return res;
1580 }
1581
1582 static inline int test_prio_cmd_list(struct scst_user_dev *dev)
1583 {
        /*
         * The prio queue is always blocking, because poll() doesn't seem
         * to support different threads waiting with different event masks:
         * only one thread is woken up on each event, and if it isn't
         * interested in such events, another (interested) one will not be
         * woken up. Don't know if that's a bug or a feature.
         */
1591         int res = !list_empty(&dev->prio_ready_cmd_list) ||
1592                   dev->cleaning || dev->cleanup_done ||
1593                   signal_pending(current);
1594         return res;
1595 }
1596
1597 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1598 static int dev_user_get_next_prio_cmd(struct scst_user_dev *dev,
1599         struct scst_user_cmd **ucmd)
1600 {
1601         int res = 0;
1602         wait_queue_t wait;
1603
1604         TRACE_ENTRY();
1605
1606         init_waitqueue_entry(&wait, current);
1607
1608         while(1) {
1609                 if (!test_prio_cmd_list(dev)) {
1610                         add_wait_queue_exclusive(&dev->prio_cmd_list_waitQ,
1611                                 &wait);
1612                         for (;;) {
1613                                 set_current_state(TASK_INTERRUPTIBLE);
1614                                 if (test_prio_cmd_list(dev))
1615                                         break;
1616                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1617                                 schedule();
1618                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1619                         }
1620                         set_current_state(TASK_RUNNING);
1621                         remove_wait_queue(&dev->prio_cmd_list_waitQ, &wait);
1622                 }
1623
1624                 *ucmd = __dev_user_get_next_cmd(&dev->prio_ready_cmd_list);
1625                 if (*ucmd != NULL)
1626                         break;
1627
1628                 if (dev->cleaning || dev->cleanup_done) {
1629                         res = -EAGAIN;
1630                         TRACE_DBG("No ready commands, returning %d", res);
1631                         break;
1632                 }
1633
1634                 if (signal_pending(current)) {
1635                         res = -EINTR;
1636                         TRACE_DBG("Signal pending, returning %d", res);
1637                         break;
1638                 }
1639         }
1640
1641         TRACE_EXIT_RES(res);
1642         return res;
1643 }
1644
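/*
 * Implements the SCST_USER_REPLY_AND_GET_[PRIO_]CMD ioctls: the first
 * 64 bits of the user buffer at arg hold an optional pointer to a
 * struct scst_user_reply_cmd (0 means "nothing to reply yet"). If
 * present, the reply is processed first, then the call blocks (unless
 * the device was opened O_NONBLOCK) until the next ready command can be
 * copied back to the same buffer as a struct scst_user_get_cmd.
 *
 * A minimal sketch of the resulting user space handler loop, assuming a
 * device already registered via SCST_USER_REGISTER_DEVICE ("fd", "buf"
 * and "make_reply()" are illustrative names, not part of this API):
 *
 *      uint64_t *ureply = (uint64_t *)buf;
 *      *ureply = 0;                            // nothing to reply yet
 *      while (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, buf) == 0) {
 *              // buf now holds a struct scst_user_get_cmd; handle its
 *              // subcode (parse/alloc/exec/on_free/...), then point the
 *              // leading 64 bits at the prepared reply for the next call
 *              *ureply = (unsigned long)make_reply(buf);
 *      }
 */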
1645 static int dev_user_reply_get_cmd(struct file *file, unsigned long arg,
1646         int prio)
1647 {
1648         int res = 0;
1649         struct scst_user_dev *dev;
1650         struct scst_user_get_cmd *cmd;
1651         struct scst_user_reply_cmd *reply;
1652         struct scst_user_cmd *ucmd;
1653         uint64_t ureply;
1654
1655         TRACE_ENTRY();
1656
1657         mutex_lock(&dev_priv_mutex);
1658         dev = (struct scst_user_dev*)file->private_data;
1659         res = dev_user_check_reg(dev);
1660         if (res != 0) {
1661                 mutex_unlock(&dev_priv_mutex);
1662                 goto out;
1663         }
1664         down_read(&dev->dev_rwsem);
1665         mutex_unlock(&dev_priv_mutex);
1666
1667         res = copy_from_user(&ureply, (void*)arg, sizeof(ureply));
        if (res != 0) {
                res = -EFAULT;
                goto out_up;
        }
1670
1671         TRACE_DBG("ureply %Ld", ureply);
1672
1673         cmd = kzalloc(max(sizeof(*cmd), sizeof(*reply)), GFP_KERNEL);
1674         if (cmd == NULL) {
1675                 res = -ENOMEM;
1676                 goto out_up;
1677         }
1678
1679         if (ureply != 0) {
1680                 unsigned long u = (unsigned long)ureply;
1681                 reply = (struct scst_user_reply_cmd*)cmd;
1682                 res = copy_from_user(reply, (void*)u, sizeof(*reply));
                if (res != 0) {
                        res = -EFAULT;
                        goto out_free;
                }
1685
1686                 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1687
1688                 res = dev_user_process_reply(dev, reply);
1689                 if (res < 0)
1690                         goto out_free;
1691         }
1692
1693         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1694         if (prio && (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE))
1695                 res = dev_user_get_next_prio_cmd(dev, &ucmd);
1696         else
1697                 res = dev_user_get_next_cmd(dev, &ucmd);
1698         if (res == 0) {
1699                 *cmd = ucmd->user_cmd;
1700                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1701                 TRACE_BUFFER("UCMD", cmd, sizeof(*cmd));
                res = copy_to_user((void*)arg, cmd, sizeof(*cmd));
                if (res != 0)
                        res = -EFAULT;
1703         } else
1704                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1705
1706 out_free:
1707         kfree(cmd);
1708
1709 out_up:
1710         up_read(&dev->dev_rwsem);
1711
1712 out:
1713         TRACE_EXIT_RES(res);
1714         return res;
1715 }
1716
1717 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1718         unsigned long arg)
1719 {
1720         long res;
1721
1722         TRACE_ENTRY();
1723
1724         switch (cmd) {
1725         case SCST_USER_REPLY_AND_GET_CMD:
1726                 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1727                 res = dev_user_reply_get_cmd(file, arg, 0);
1728                 break;
1729
1730         case SCST_USER_REPLY_CMD:
1731                 TRACE_DBG("%s", "REPLY_CMD");
1732                 res = dev_user_reply_cmd(file, arg);
1733                 break;
1734
1735         case SCST_USER_REPLY_AND_GET_PRIO_CMD:
1736                 TRACE_DBG("%s", "REPLY_AND_GET_PRIO_CMD");
1737                 res = dev_user_reply_get_cmd(file, arg, 1);
1738                 break;
1739
1740         case SCST_USER_REGISTER_DEVICE:
1741         {
1742                 struct scst_user_dev_desc *dev_desc;
1743                 TRACE_DBG("%s", "REGISTER_DEVICE");
1744                 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1745                 if (dev_desc == NULL) {
1746                         res = -ENOMEM;
1747                         goto out;
1748                 }
1749                 res = copy_from_user(dev_desc, (void*)arg, sizeof(*dev_desc));
                if (res != 0) {
                        res = -EFAULT;
1751                         kfree(dev_desc);
1752                         goto out;
1753                 }
1754                 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1755                 res = dev_user_register_dev(file, dev_desc);
1756                 kfree(dev_desc);
1757                 break;
1758         }
1759
1760         case SCST_USER_SET_OPTIONS:
1761         {
1762                 struct scst_user_opt opt;
1763                 TRACE_DBG("%s", "SET_OPTIONS");
1764                 res = copy_from_user(&opt, (void*)arg, sizeof(opt));
                if (res != 0) {
                        res = -EFAULT;
                        goto out;
                }
1767                 TRACE_BUFFER("opt", &opt, sizeof(opt));
1768                 res = dev_user_set_opt(file, &opt);
1769                 break;
1770         }
1771
1772         case SCST_USER_GET_OPTIONS:
1773                 TRACE_DBG("%s", "GET_OPTIONS");
1774                 res = dev_user_get_opt(file, (void*)arg);
1775                 break;
1776
1777         default:
1778                 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
1779                 res = -EINVAL;
1780                 goto out;
1781         }
1782
1783 out:
1784         TRACE_EXIT_RES(res);
1785         return res;
1786 }
1787
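/*
 * poll()/select() support: reports POLLIN | POLLRDNORM as soon as there
 * is something for the handler to fetch on either the ready or the
 * active command list. The prio queue is deliberately not polled; see
 * the comment above test_prio_cmd_list().
 */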
1788 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
1789 {
1790         int res = 0;
1791         struct scst_user_dev *dev;
1792
1793         TRACE_ENTRY();
1794
1795         mutex_lock(&dev_priv_mutex);
1796         dev = (struct scst_user_dev*)file->private_data;
1797         res = dev_user_check_reg(dev);
1798         if (res != 0) {
1799                 mutex_unlock(&dev_priv_mutex);
1800                 goto out;
1801         }
1802         down_read(&dev->dev_rwsem);
1803         mutex_unlock(&dev_priv_mutex);
1804
1805         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1806
1807         if (!list_empty(&dev->ready_cmd_list) ||
1808             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1809                 res |= POLLIN | POLLRDNORM;
1810                 goto out_unlock;
1811         }
1812
1813         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1814
1815         TRACE_DBG("Before poll_wait() (dev %p)", dev);
1816         poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
1817         TRACE_DBG("After poll_wait() (dev %p)", dev);
1818
1819         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1820
1821         if (!list_empty(&dev->ready_cmd_list) ||
1822             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1823                 res |= POLLIN | POLLRDNORM;
1824                 goto out_unlock;
1825         }
1826
1827 out_unlock:
1828         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1829
1830         up_read(&dev->dev_rwsem);
1831
1832 out:
1833         TRACE_EXIT_HRES(res);
1834         return res;
1835 }
1836
1837 /*
 * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reacquire.
1839  */
1840 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
1841         unsigned long *flags)
1842 {
1843         int state = ucmd->state & ~UCMD_STATE_MASK;
1844         struct scst_user_dev *dev = ucmd->dev;
1845
1846         TRACE_ENTRY();
1847
1848         if (ucmd->state & UCMD_STATE_JAMMED_MASK)
1849                 goto out;
1850
1851         TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
1852                 ucmd->state);
1853
1854         ucmd->state = state | UCMD_STATE_JAMMED_MASK;
1855
1856         switch(state) {
1857         case UCMD_STATE_PARSING:
1858         case UCMD_STATE_BUF_ALLOCING:
1859                 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1860                         ucmd->aborted = 1;
1861                 else {
1862                         if (busy)
1863                                 scst_set_busy(ucmd->cmd);
1864                         else
1865                                 scst_set_cmd_error(ucmd->cmd,
1866                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1867                 }
1868                 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
1869                 list_add(&ucmd->cmd->cmd_list_entry,
1870                         &ucmd->cmd->cmd_lists->active_cmd_list);
1871                 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
1872                 break;
1873
1874         case UCMD_STATE_EXECING:
1875                 if (flags != NULL)
1876                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1877                 else
1878                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1879                 
1880                 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
1881
1882                 if (test_bit(SCST_CMD_ABORTED,  &ucmd->cmd->cmd_flags))
1883                         ucmd->aborted = 1;
1884                 else {
1885                         if (busy)
1886                                 scst_set_busy(ucmd->cmd);
1887                         else
1888                                 scst_set_cmd_error(ucmd->cmd,
1889                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1890                 }
1891
1892                 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT);
                /* !! At this point cmd and ucmd can be already freed !! */
1894
1895                 if (flags != NULL)
1896                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1897                 else
1898                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1899                 break;
1900
1901         case UCMD_STATE_ON_FREEING:
1902         case UCMD_STATE_ON_CACHE_FREEING:
1903         case UCMD_STATE_TM_EXECING:
1904         case UCMD_STATE_ATTACH_SESS:
1905         case UCMD_STATE_DETACH_SESS:
1906         {
1907                 if (flags != NULL)
1908                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1909                 else
1910                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1911
1912                 switch(state) {
1913                 case UCMD_STATE_ON_FREEING:
1914                         dev_user_process_reply_on_free(ucmd);
1915                         break;
1916
1917                 case UCMD_STATE_ON_CACHE_FREEING:
1918                         dev_user_process_reply_on_cache_free(ucmd);
1919                         break;
1920
1921                 case UCMD_STATE_TM_EXECING:
1922                         dev_user_process_reply_tm_exec(ucmd, SCST_MGMT_STATUS_FAILED);
1923                         break;
1924
1925                 case UCMD_STATE_ATTACH_SESS:
1926                 case UCMD_STATE_DETACH_SESS:
1927                         dev_user_process_reply_sess(ucmd, -EFAULT);
1928                         break;
1929                 }
1930
1931                 if (flags != NULL)
1932                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1933                 else
1934                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1935                 break;
1936         }
1937
1938         default:
1939                 PRINT_ERROR("Wrong ucmd state %x", state);
1940                 sBUG();
1941                 break;
1942         }
1943
1944 out:
1945         TRACE_EXIT();
1946         return;
1947 }
1948
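/*
 * Returns 1 if ucmd is bound to tgt_dev and is in a state (PARSING,
 * BUF_ALLOCING or EXECING) that must be unjammed when that tgt_dev goes
 * away, 0 otherwise.
 */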
1949 static int __unjam_check_tgt_dev(struct scst_user_cmd *ucmd, int state,
1950         struct scst_tgt_dev *tgt_dev)
1951 {
1952         int res = 0;
1953
1954         if (ucmd->cmd == NULL)
1955                 goto out;
1956
1957         if (ucmd->cmd->tgt_dev != tgt_dev)
1958                 goto out;
1959
1960         switch(state & ~UCMD_STATE_MASK) {
1961         case UCMD_STATE_PARSING:
1962         case UCMD_STATE_BUF_ALLOCING:
1963         case UCMD_STATE_EXECING:
1964                 break;
1965         default:
1966                 goto out;
1967         }
1968
1969         res = 1;
1970 out:
1971         return res;
1972 }
1973
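/*
 * Returns 1 if ucmd must be unjammed as part of task management
 * processing, i.e. it is in flight (PARSING, BUF_ALLOCING or EXECING)
 * and either has no SCST command attached or that command was aborted.
 */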
1974 static int __unjam_check_tm(struct scst_user_cmd *ucmd, int state)
1975 {
1976         int res = 0;
1977
1978         switch(state & ~UCMD_STATE_MASK) {
1979         case UCMD_STATE_PARSING:
1980         case UCMD_STATE_BUF_ALLOCING:
1981         case UCMD_STATE_EXECING:
1982                 if ((ucmd->cmd != NULL) &&
1983                     (!test_bit(SCST_CMD_ABORTED,
1984                                 &ucmd->cmd->cmd_flags)))
1985                         goto out;
1986                 break;
1987         default:
1988                 goto out;
1989         }
1990
1991         res = 1;
1992 out:
1993         return res;
1994 }
1995
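/*
 * Unjams all matching commands on the device: walks the ucmd hash for
 * commands already sent to user space and, for TM/tgt_dev triggered
 * calls, the ready list as well. Since dev_user_unjam_cmd() can drop
 * cmd_list_lock, the scan is restarted from scratch after every
 * unjammed command.
 */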
1996 static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
1997         struct scst_tgt_dev *tgt_dev)
1998 {
1999         int i;
2000         unsigned long flags;
2001         struct scst_user_cmd *ucmd;
2002
2003         TRACE_ENTRY();
2004
2005         TRACE_MGMT_DBG("Unjamming dev %p", dev);
2006
2007         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2008
2009 repeat:
2010         for(i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2011                 struct list_head *head = &dev->ucmd_hash[i];
2012                 list_for_each_entry(ucmd, head, hash_list_entry) {
2013                         TRACE_DBG("ALL: ucmd %p, state %x, scst_cmd %p",
2014                                 ucmd, ucmd->state, ucmd->cmd);
2015                         if (ucmd->state & UCMD_STATE_SENT_MASK) {
2016                                 int st = ucmd->state & ~UCMD_STATE_SENT_MASK;
2017                                 if (tgt_dev != NULL) {
2018                                         if (__unjam_check_tgt_dev(ucmd, st, 
2019                                                         tgt_dev) == 0)
2020                                                 continue;
2021                                 } else if (tm) {
2022                                         if (__unjam_check_tm(ucmd, st) == 0)
2023                                                 continue;
2024                                 }
2025                                 dev_user_unjam_cmd(ucmd, 0, &flags);
2026                                 goto repeat;
2027                         }
2028                 }
2029         }
2030
2031         if ((tgt_dev != NULL) || tm) {
2032                 list_for_each_entry(ucmd, &dev->ready_cmd_list,
2033                                 ready_cmd_list_entry) {
2034                         TRACE_DBG("READY: ucmd %p, state %x, scst_cmd %p",
2035                                 ucmd, ucmd->state, ucmd->cmd);
2036                         if (tgt_dev != NULL) {
2037                                 if (__unjam_check_tgt_dev(ucmd, ucmd->state,
2038                                                 tgt_dev) == 0)
2039                                         continue;
2040                         } else if (tm) {
2041                                 if (__unjam_check_tm(ucmd, ucmd->state) == 0)
2042                                         continue;
2043                         }
2044                         list_del(&ucmd->ready_cmd_list_entry);
2045                         dev_user_unjam_cmd(ucmd, 0, &flags);
2046                         goto repeat;
2047                 }
2048         }
2049
2050         if (dev_user_process_scst_commands(dev) != 0)
2051                 goto repeat;
2052
2053         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2054
2055         TRACE_EXIT();
2056         return;
2057 }
2058
/**
 ** In order to deal with user space handler hangups we rely on remote
 ** initiators, which are supposed to issue a task management command if
 ** a command doesn't respond for too long, so on that event we can
 ** "unjam" the command. In order to prevent the TM command itself from
 ** stalling, we use a timer. In order to prevent too many queued TM
 ** commands, we enqueue only 2 of them: the first one with the requested
 ** TM function, the second with TARGET_RESET as the most comprehensive
 ** function.
 **
 ** The only exception here is the DETACH_SESS subcode, where no TM
 ** commands can be expected, so after a timeout we need to manually
 ** "unjam" all the commands on the device.
 **
 ** We also don't queue more than 1 ATTACH_SESS command and fail it after
 ** a timeout.
 **/
2074
2075 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
2076         int status)
2077 {
2078         int res = 0;
2079         unsigned long flags;
2080
2081         TRACE_ENTRY();
2082
2083         TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
2084                 ucmd->user_cmd.tm_cmd.fn, status);
2085
2086         ucmd->result = status;
2087
2088         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2089
2090         if (ucmd->internal_reset_tm) {
2091                 TRACE_MGMT_DBG("Internal TM ucmd %p finished", ucmd);
2092                 ucmd->dev->internal_reset_active = 0;
2093         } else {
2094                 TRACE_MGMT_DBG("TM ucmd %p finished", ucmd);
2095                 ucmd->dev->tm_cmd_active = 0;
2096         }
2097
2098         if (ucmd->cmpl != NULL)
2099                 complete_all(ucmd->cmpl);
2100
2101         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2102
2103         ucmd_put(ucmd);
2104
2105         TRACE_EXIT_RES(res);
2106         return res;
2107 }
2108
2109 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd, 
2110         struct scst_tgt_dev *tgt_dev)
2111 {
2112         int res, rc;
2113         struct scst_user_cmd *ucmd;
2114         struct scst_user_dev *dev = (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2115         struct scst_user_cmd *ucmd_to_abort = NULL;
2116
2117         TRACE_ENTRY();
2118
        /* We can't afford to lose a TM command due to memory shortage */
2120         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2121         ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL|__GFP_NOFAIL);
2122
2123         init_completion(ucmd->cmpl);
2124
2125         ucmd->user_cmd.cmd_h = ucmd->h;
2126         ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2127         ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2128         ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2129
2130         if (mcmd->cmd_to_abort != NULL) {
2131                 ucmd_to_abort = (struct scst_user_cmd*)mcmd->cmd_to_abort->dh_priv;
2132                 if (ucmd_to_abort != NULL)
2133                         ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2134         }
2135
2136         TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2137                 "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
2138                 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2139                 ucmd->user_cmd.tm_cmd.cmd_h_to_abort);
2140
2141         ucmd->state = UCMD_STATE_TM_EXECING;
2142
2143         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2144         if (dev->internal_reset_active) {
2145                 PRINT_ERROR("Loosing TM cmd %d, because there are other "
2146                         "unprocessed TM commands", mcmd->fn);
2147                 res = SCST_MGMT_STATUS_FAILED;
2148                 goto out_locked_free;
2149         } else if (dev->tm_cmd_active) {
                /*
                 * We are going to miss some TM commands, so replace this
                 * one with the most comprehensive one.
                 */
2154                 PRINT_ERROR("Replacing TM cmd %d by TARGET_RESET, because "
2155                         "there is another unprocessed TM command", mcmd->fn);
2156                 ucmd->user_cmd.tm_cmd.fn = SCST_TARGET_RESET;
2157                 ucmd->internal_reset_tm = 1;
2158                 dev->internal_reset_active = 1;
2159         } else
2160                 dev->tm_cmd_active = 1;
2161         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2162
2163         ucmd_get(ucmd, 0);
2164         dev_user_add_to_ready(ucmd);
2165
        /*
         * Since the user space handler should not wait for the affected
         * tasks to complete, it shall complete the TM request ASAP,
         * otherwise the device will be considered stalled.
         */
2171         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_TM_TIMEOUT);
2172         if (rc > 0)
2173                 res = ucmd->result;
2174         else {
2175                 PRINT_ERROR("Task management command %p timeout", ucmd);
2176                 res = SCST_MGMT_STATUS_FAILED;
2177         }
2178
2179         sBUG_ON(irqs_disabled());
2180
2181         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2182
2183 out_locked_free:
2184         kfree(ucmd->cmpl);
2185         ucmd->cmpl = NULL;
2186         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2187
2188         dev_user_unjam_dev(ucmd->dev, 1, NULL);
2189
2190         ucmd_put(ucmd);
2191
2192         TRACE_EXIT();
2193         return res;
2194 }
2195
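/*
 * SCST attach() callback: looks up the user space device registered
 * under sdev->virt_name and wires the SCST device to it, propagating the
 * SCSI control mode page settings (TST, QUEUE ALGORITHM MODIFIER, SWP,
 * TAS) configured by the handler.
 */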
2196 static int dev_user_attach(struct scst_device *sdev)
2197 {
2198         int res = 0;
2199         struct scst_user_dev *dev = NULL, *d;
2200
2201         TRACE_ENTRY();
2202
2203         spin_lock(&dev_list_lock);
2204         list_for_each_entry(d, &dev_list, dev_list_entry) {
2205                 if (strcmp(d->name, sdev->virt_name) == 0) {
2206                         dev = d;
2207                         break;
2208                 }
2209         }
2210         spin_unlock(&dev_list_lock);
2211         if (dev == NULL) {
2212                 PRINT_ERROR("Device %s not found", sdev->virt_name);
2213                 res = -EINVAL;
2214                 goto out;
2215         }
2216
2217         sdev->p_cmd_lists = &dev->cmd_lists;
2218         sdev->dh_priv = dev;
2219         sdev->tst = dev->tst;
2220         sdev->queue_alg = dev->queue_alg;
2221         sdev->swp = dev->swp;
2222         sdev->tas = dev->tas;
2223         sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2224
2225         dev->sdev = sdev;
2226
2227         PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2228                 dev->name);
2229
2230 out:
2231         TRACE_EXIT();
2232         return res;
2233 }
2234
2235 static void dev_user_detach(struct scst_device *sdev)
2236 {
2237         struct scst_user_dev *dev = (struct scst_user_dev*)sdev->dh_priv;
2238
2239         TRACE_ENTRY();
2240
2241         TRACE_DBG("virt_id %d", sdev->virt_id);
2242
2243         PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2244                 dev->name);
2245
2246         /* dev will be freed by the caller */
2247         sdev->dh_priv = NULL;
2248         dev->sdev = NULL;
2249         
2250         TRACE_EXIT();
2251         return;
2252 }
2253
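/*
 * Completes an ATTACH_SESS/DETACH_SESS command on behalf of the handler
 * (or of the unjam path, with -EFAULT) and wakes up the waiter, if any.
 */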
2254 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2255 {
2256         int res = 0;
2257         unsigned long flags;
2258
2259         TRACE_ENTRY();
2260
2261         TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2262
2263         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2264
2265         if ((ucmd->state & ~UCMD_STATE_MASK) ==
2266                         UCMD_STATE_ATTACH_SESS) {
2267                 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2268                 ucmd->result = status;
2269                 ucmd->dev->attach_cmd_active = 0;
2270         } else if ((ucmd->state & ~UCMD_STATE_MASK) ==
2271                         UCMD_STATE_DETACH_SESS) {
2272                 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2273                 ucmd->dev->detach_cmd_count--;
2274         } else
2275                 sBUG();
2276
2277         if (ucmd->cmpl != NULL)
2278                 complete_all(ucmd->cmpl);
2279
2280         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2281
2282         ucmd_put(ucmd);
2283
2284         TRACE_EXIT_RES(res);
2285         return res;
2286 }
2287
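/*
 * Sends SCST_USER_ATTACH_SESS to the handler and waits up to
 * DEV_USER_ATTACH_TIMEOUT for its reply; only one ATTACH_SESS may be in
 * flight per device, a second one fails with -EBUSY.
 */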
2288 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2289 {
2290         struct scst_user_dev *dev =
2291                 (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2292         int res = 0, rc;
2293         struct scst_user_cmd *ucmd;
2294
2295         TRACE_ENTRY();
2296
2297         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2298         if (ucmd == NULL)
2299                 goto out_nomem;
2300
2301         ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL);
2302         if (ucmd->cmpl == NULL)
2303                 goto out_put_nomem;
2304
2305         init_completion(ucmd->cmpl);
2306
2307         ucmd->user_cmd.cmd_h = ucmd->h;
2308         ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2309         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2310         ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2311         ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2312         ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
2313         strncpy(ucmd->user_cmd.sess.initiator_name,
2314                 tgt_dev->sess->initiator_name,
2315                 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2316         ucmd->user_cmd.sess.initiator_name[
2317                 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2318
2319         TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %Lx, LUN %Lx, "
2320                 "threads_num %d, rd_only_flag %d, initiator %s)", ucmd, ucmd->h,
2321                 ucmd->user_cmd.sess.sess_h, ucmd->user_cmd.sess.lun,
2322                 ucmd->user_cmd.sess.threads_num, ucmd->user_cmd.sess.rd_only,
2323                 ucmd->user_cmd.sess.initiator_name);
2324
2325         ucmd->state = UCMD_STATE_ATTACH_SESS;
2326
2327         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2328         if (dev->attach_cmd_active) {
2329                 PRINT_ERROR("%s", "ATTACH_SESS command failed, because "
2330                         "there is another unprocessed ATTACH_SESS command");
2331                 res = -EBUSY;
2332                 goto out_locked_free;
2333         }
2334         dev->attach_cmd_active = 1;
2335         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2336
2337         ucmd_get(ucmd, 0);
2338         dev_user_add_to_ready(ucmd);
2339
2340         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2341         if (rc > 0)
2342                 res = ucmd->result;
2343         else {
2344                 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2345                 res = -EFAULT;
2346         }
2347
2348         sBUG_ON(irqs_disabled());
2349
2350         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2351 out_locked_free:
2352         kfree(ucmd->cmpl);
2353         ucmd->cmpl = NULL;
2354         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2355
2356         ucmd_put(ucmd);
2357
2358 out:
2359         TRACE_EXIT_RES(res);
2360         return res;
2361
2362 out_put_nomem:
2363         ucmd_put(ucmd);
2364
2365 out_nomem:
2366         res = -ENOMEM;
2367         goto out;
2368 }
2369
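/*
 * Delayed work that periodically unjams commands of a session being
 * unregistered, rescheduling itself every DEV_USER_PRE_UNREG_POLL_TIME
 * until dev_user_detach_tgt() sets pd->exit.
 */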
2370 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2371 static void dev_user_pre_unreg_sess_work_fn(void *p)
2372 #else
2373 static void dev_user_pre_unreg_sess_work_fn(struct work_struct *work)
2374 #endif
2375 {
2376 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2377         struct scst_user_pre_unreg_sess_obj *pd = (struct scst_user_pre_unreg_sess_obj*)p;
2378 #else
2379         struct scst_user_pre_unreg_sess_obj *pd = container_of(
2380                 (struct delayed_work*)work, struct scst_user_pre_unreg_sess_obj,
2381                 pre_unreg_sess_work);
2382 #endif
2383         struct scst_user_dev *dev =
2384                 (struct scst_user_dev*)pd->tgt_dev->dev->dh_priv;
2385
2386         TRACE_ENTRY();
2387
2388         TRACE_MGMT_DBG("Unreg sess: unjaming dev %p (tgt_dev %p)", dev,
2389                 pd->tgt_dev);
2390
2391         pd->active = 1;
2392
2393         dev_user_unjam_dev(dev, 0, pd->tgt_dev);
2394
2395         if (!pd->exit) {
2396                 TRACE_MGMT_DBG("Rescheduling pre_unreg_sess work %p (dev %p, "
2397                         "tgt_dev %p)", pd, dev, pd->tgt_dev);
2398                 schedule_delayed_work(&pd->pre_unreg_sess_work,
2399                         DEV_USER_PRE_UNREG_POLL_TIME);
2400         }
2401
2402         TRACE_EXIT();
2403         return;
2404 }
2405
2406 static void dev_user_pre_unreg_sess(struct scst_tgt_dev *tgt_dev)
2407 {
2408         struct scst_user_dev *dev =
2409                 (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2410         struct scst_user_pre_unreg_sess_obj *pd;
2411
2412         TRACE_ENTRY();
2413
        /* We can't afford to lose the DETACH command due to memory shortage */
2415         pd = kzalloc(sizeof(*pd), GFP_KERNEL|__GFP_NOFAIL);
2416
2417         pd->tgt_dev = tgt_dev;
2418 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2419         INIT_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn, pd);
2420 #else
2421         INIT_DELAYED_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn);
2422 #endif
2423
2424         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2425         dev->pre_unreg_sess_active = 1;
2426         list_add_tail(&pd->pre_unreg_sess_list_entry, &dev->pre_unreg_sess_list);
2427         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2428
2429         TRACE_MGMT_DBG("Scheduling pre_unreg_sess work %p (dev %p, tgt_dev %p)",
2430                 pd, dev, pd->tgt_dev);
2431
2432         schedule_delayed_work(&pd->pre_unreg_sess_work, DEV_USER_DETACH_TIMEOUT);
2433
2434         TRACE_EXIT();
2435         return;
2436 }
2437
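/*
 * Cancels the pre-unreg polling work for this tgt_dev, if any, and then
 * queues a SCST_USER_DETACH_SESS command to the handler. Unlike
 * ATTACH_SESS this is fire-and-forget: nobody waits for the reply (see
 * the "unjam" comment block above for why).
 */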
2438 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2439 {
2440         struct scst_user_dev *dev =
2441                 (struct scst_user_dev*)tgt_dev->dev->dh_priv;
2442         struct scst_user_cmd *ucmd;
2443         struct scst_user_pre_unreg_sess_obj *pd = NULL, *p;
2444
2445         TRACE_ENTRY();
2446
2447         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2448         list_for_each_entry(p, &dev->pre_unreg_sess_list,
2449                         pre_unreg_sess_list_entry) {
2450                 if (p->tgt_dev == tgt_dev) {
2451                         list_del(&p->pre_unreg_sess_list_entry);
2452                         if (list_empty(&dev->pre_unreg_sess_list))
2453                                 dev->pre_unreg_sess_active = 0;
2454                         pd = p;
2455                         break;
2456                 }
2457         }
2458         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2459
2460         if (pd != NULL) {
2461                 pd->exit = 1;
2462                 TRACE_MGMT_DBG("Canceling pre unreg work %p", pd);
2463                 cancel_delayed_work(&pd->pre_unreg_sess_work);
2464                 flush_scheduled_work();
2465                 kfree(pd);
2466         }
2467
2468         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2469         if (ucmd == NULL)
2470                 goto out;
2471
2472         TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %Lx)", ucmd,
2473                 ucmd->h, ucmd->user_cmd.sess.sess_h);
2474
2475         ucmd->user_cmd.cmd_h = ucmd->h;
2476         ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2477         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2478
2479         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2480         dev->detach_cmd_count++;
2481         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2482
2483         ucmd->state = UCMD_STATE_DETACH_SESS;
2484
2485         dev_user_add_to_ready(ucmd);
2486
2487 out:
2488         TRACE_EXIT();
2489         return;
2490 }
2491
2492 /* No locks are needed, but the activity must be suspended */
2493 static void dev_user_setup_functions(struct scst_user_dev *dev)
2494 {
2495         TRACE_ENTRY();
2496
2497         dev->devtype.parse = dev_user_parse;
2498         dev->devtype.dev_done = NULL;
2499
2500         if (dev->parse_type != SCST_USER_PARSE_CALL) {
2501                 switch(dev->devtype.type) {
2502                 case TYPE_DISK:
2503                         dev->generic_parse = scst_sbc_generic_parse;
2504                         dev->devtype.dev_done = dev_user_disk_done;
2505                         break;
2506
2507                 case TYPE_TAPE:
2508                         dev->generic_parse = scst_tape_generic_parse;
2509                         dev->devtype.dev_done = dev_user_tape_done;
2510                         break;
2511
2512                 case TYPE_MOD:
2513                         dev->generic_parse = scst_modisk_generic_parse;
2514                         dev->devtype.dev_done = dev_user_disk_done;
2515                         break;
2516
2517                 case TYPE_ROM:
2518                         dev->generic_parse = scst_cdrom_generic_parse;
2519                         dev->devtype.dev_done = dev_user_disk_done;
2520                         break;
2521
2522                 case TYPE_MEDIUM_CHANGER:
2523                         dev->generic_parse = scst_changer_generic_parse;
2524                         break;
2525
2526                 case TYPE_PROCESSOR:
2527                         dev->generic_parse = scst_processor_generic_parse;
2528                         break;
2529
2530                 case TYPE_RAID:
2531                         dev->generic_parse = scst_raid_generic_parse;
2532                         break;
2533
2534                 default:
2535                         PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2536                                 "for it", dev->devtype.type);
2537                         dev->parse_type = SCST_USER_PARSE_CALL;
2538                         break;
2539                 }
2540         } else {
2541                 dev->generic_parse = NULL;
2542                 dev->devtype.dev_done = NULL;
2543         }
2544
2545         TRACE_EXIT();
2546         return;
2547 }
2548
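/*
 * SCST_USER_REGISTER_DEVICE backend: validates the descriptor, allocates
 * and initializes the struct scst_user_dev (including its own SGV pool
 * and virtual dev handler), applies the initial options and registers
 * the virtual device with SCST. Fails with -EEXIST for duplicate names
 * and -EINVAL on a version mismatch with scst_user.h.
 */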
2549 static int dev_user_register_dev(struct file *file,
2550         const struct scst_user_dev_desc *dev_desc)
2551 {
2552         int res = -ENOMEM, i;
2553         struct scst_user_dev *dev, *d;
2554         int block;
2555
2556         TRACE_ENTRY();
2557
2558         if (dev_desc->version != DEV_USER_VERSION) {
2559                 PRINT_ERROR("Version mismatch (requested %d, required %d)",
2560                         dev_desc->version, DEV_USER_VERSION);
2561                 res = -EINVAL;
2562                 goto out;
2563         }
2564
2565         switch(dev_desc->type) {
2566         case TYPE_DISK:
2567         case TYPE_ROM:
2568         case TYPE_MOD:
2569                 if (dev_desc->block_size == 0) {
2570                         PRINT_ERROR("Wrong block size %d", dev_desc->block_size);
2571                         res = -EINVAL;
2572                         goto out;
2573                 }
2574                 block = scst_calc_block_shift(dev_desc->block_size);
2575                 if (block == -1) {
2576                         res = -EINVAL;
2577                         goto out;
2578                 }
2579                 break;
2580         default:
2581                 block = dev_desc->block_size;
2582                 break;
2583         }
2584
2585         if (!try_module_get(THIS_MODULE)) {
2586                 PRINT_ERROR("%s", "Fail to get module");
2587                 goto out;
2588         }
2589
2590         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2591         if (dev == NULL)
2592                 goto out_put;
2593
2594         init_rwsem(&dev->dev_rwsem);
2595         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2596         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2597         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2598         INIT_LIST_HEAD(&dev->ready_cmd_list);
2599         INIT_LIST_HEAD(&dev->prio_ready_cmd_list);
2600         init_waitqueue_head(&dev->prio_cmd_list_waitQ);
2601         if (file->f_flags & O_NONBLOCK) {
2602                 TRACE_DBG("%s", "Non-blocking operations");
2603                 dev->blocking = 0;
2604         } else
2605                 dev->blocking = 1;
2606         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2607                 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2608         INIT_LIST_HEAD(&dev->pre_unreg_sess_list);
2609
2610         strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2611         dev->name[sizeof(dev->name)-1] = '\0';
2612
        /*
         * We don't use a clustered pool, since it implies page reordering,
         * which isn't possible with user space supplied buffers. Although
         * it's still possible to cluster pages by the tail of each other,
         * it doesn't seem worth the effort.
         */
2619         dev->pool = sgv_pool_create(dev->name, 0);
2620         if (dev->pool == NULL)
2621                 goto out_put;
2622         sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2623                 dev_user_free_sg_entries);
2624
2625         scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
2626                 dev->name);
2627         dev->devtype.type = dev_desc->type;
2628         dev->devtype.threads_num = -1;
2629         dev->devtype.parse_atomic = 1;
2630         dev->devtype.exec_atomic = 0; /* no point to make it 1 */
2631         dev->devtype.dev_done_atomic = 1;
2632         dev->devtype.no_proc = 1;
2633         dev->devtype.attach = dev_user_attach;
2634         dev->devtype.detach = dev_user_detach;
2635         dev->devtype.attach_tgt = dev_user_attach_tgt;
2636         dev->devtype.pre_unreg_sess = dev_user_pre_unreg_sess;
2637         dev->devtype.detach_tgt = dev_user_detach_tgt;
2638         dev->devtype.exec = dev_user_exec;
2639         dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2640         dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2641
2642         init_completion(&dev->cleanup_cmpl);
2643         dev->block = block;
2644         dev->def_block = dev->block;
2645
2646         res = __dev_user_set_opt(dev, &dev_desc->opt);
2647
2648         TRACE_MEM("dev %p, name %s", dev, dev->name);
2649
2650         spin_lock(&dev_list_lock);
2651
2652         list_for_each_entry(d, &dev_list, dev_list_entry) {
2653                 if (strcmp(d->name, dev->name) == 0) {
2654                         PRINT_ERROR("Device %s already exist",
2655                                 dev->name);
2656                         res = -EEXIST;
2657                         spin_unlock(&dev_list_lock);
2658                         goto out_free;
2659                 }
2660         }
2661
2662         list_add_tail(&dev->dev_list_entry, &dev_list);
2663
2664         spin_unlock(&dev_list_lock);
2665
2666         if (res != 0)
2667                 goto out_del_free;
2668
2669         res = scst_register_virtual_dev_driver(&dev->devtype);
2670         if (res < 0)
2671                 goto out_del_free;
2672
2673         dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2674         if (dev->virt_id < 0) {
2675                 res = dev->virt_id;
2676                 goto out_unreg_handler;
2677         }
2678
2679         mutex_lock(&dev_priv_mutex);
2680         if (file->private_data != NULL) {
2681                 mutex_unlock(&dev_priv_mutex);
2682                 PRINT_ERROR("%s", "Device already registered");
2683                 res = -EINVAL;
2684                 goto out_unreg_drv;
2685         }
2686         file->private_data = dev;
2687         mutex_unlock(&dev_priv_mutex);
2688
2689 out:
2690         TRACE_EXIT_RES(res);
2691         return res;
2692
2693 out_unreg_drv:
2694         scst_unregister_virtual_device(dev->virt_id);
2695
2696 out_unreg_handler:
2697         scst_unregister_virtual_dev_driver(&dev->devtype);
2698
2699 out_del_free:
2700         spin_lock(&dev_list_lock);
2701         list_del(&dev->dev_list_entry);
2702         spin_unlock(&dev_list_lock);
2703
2704 out_free:
2705         sgv_pool_destroy(dev->pool);
2706         kfree(dev);
2707         goto out_put;
2708
2709 out_put:
2710         module_put(THIS_MODULE);
2711         goto out;
2712 }
2713
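/*
 * Validates and applies a new option set. Must be called with SCST
 * activity suspended, since it can move commands between the prio and
 * regular ready queues and switches the parse/dev_done callbacks via
 * dev_user_setup_functions().
 */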
2714 static int __dev_user_set_opt(struct scst_user_dev *dev,
2715         const struct scst_user_opt *opt)
2716 {
2717         int res = 0;
2718
2719         TRACE_ENTRY();
2720
2721         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2722                 "partial_transfers_type %x, partial_len %d", opt->parse_type,
2723                 opt->on_free_cmd_type, opt->memory_reuse_type,
2724                 opt->partial_transfers_type, opt->partial_len);
2725
2726         if ((opt->parse_type > SCST_USER_MAX_PARSE_OPT) ||
2727             (opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT) ||
2728             (opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT) ||
2729             (opt->prio_queue_type > SCST_USER_MAX_PRIO_QUEUE_OPT) ||
2730             (opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT)) {
2731                 PRINT_ERROR("%s", "Invalid option");
2732                 res = -EINVAL;
2733                 goto out;
2734         }
2735
2736         if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
2737              (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
2738             ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
2739              (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
2740             (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1)) {
2741                 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x, "
2742                         "tas %x, has_own_order_mgmt %x)", opt->tst,
2743                         opt->queue_alg, opt->swp, opt->tas, opt->has_own_order_mgmt);
2744                 res = -EINVAL;
2745                 goto out;
2746         }
2747
2748         if ((dev->prio_queue_type != opt->prio_queue_type) &&
2749             (opt->prio_queue_type == SCST_USER_PRIO_QUEUE_SINGLE)) {
2750                 struct scst_user_cmd *u, *t;
2751                 /* No need for lock, the activity is suspended */
2752                 list_for_each_entry_safe(u, t, &dev->prio_ready_cmd_list,
2753                                 ready_cmd_list_entry) {
2754                         list_move_tail(&u->ready_cmd_list_entry,
2755                                 &dev->ready_cmd_list);
2756                 }
2757         }
2758
2759         dev->prio_queue_type = opt->prio_queue_type;
2760         dev->parse_type = opt->parse_type;
2761         dev->on_free_cmd_type = opt->on_free_cmd_type;
2762         dev->memory_reuse_type = opt->memory_reuse_type;
2763         dev->partial_transfers_type = opt->partial_transfers_type;
2764         dev->partial_len = opt->partial_len;
2765
2766         dev->tst = opt->tst;
2767         dev->queue_alg = opt->queue_alg;
2768         dev->swp = opt->swp;
2769         dev->tas = opt->tas;
2770         dev->has_own_order_mgmt = opt->has_own_order_mgmt;
2771         if (dev->sdev != NULL) {
2772                 dev->sdev->tst = opt->tst;
2773                 dev->sdev->queue_alg = opt->queue_alg;
2774                 dev->sdev->swp = opt->swp;
2775                 dev->sdev->tas = opt->tas;
2776                 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
2777         }
2778
2779         dev_user_setup_functions(dev);
2780
2781 out:
2782         TRACE_EXIT_RES(res);
2783         return res;
2784 }
2785
2786 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
2787 {
2788         int res = 0;
2789         struct scst_user_dev *dev;
2790
2791         TRACE_ENTRY();
2792
2793         mutex_lock(&dev_priv_mutex);
2794         dev = (struct scst_user_dev*)file->private_data;
2795         res = dev_user_check_reg(dev);
2796         if (res != 0) {
2797                 mutex_unlock(&dev_priv_mutex);
2798                 goto out;
2799         }
2800         down_read(&dev->dev_rwsem);
2801         mutex_unlock(&dev_priv_mutex);
2802
2803         scst_suspend_activity();
2804         res = __dev_user_set_opt(dev, opt);
2805         scst_resume_activity();
2806
2807         up_read(&dev->dev_rwsem);
2808
2809 out:
2810         TRACE_EXIT_RES(res);
2811         return res;
2812 }
2813
2814 static int dev_user_get_opt(struct file *file, void *arg)
2815 {
2816         int res = 0;
2817         struct scst_user_dev *dev;
2818         struct scst_user_opt opt;
2819
2820         TRACE_ENTRY();
2821
2822         mutex_lock(&dev_priv_mutex);
2823         dev = (struct scst_user_dev*)file->private_data;
2824         res = dev_user_check_reg(dev);
2825         if (res != 0) {
2826                 mutex_unlock(&dev_priv_mutex);
2827                 goto out;
2828         }
2829         down_read(&dev->dev_rwsem);
2830         mutex_unlock(&dev_priv_mutex);
2831
2832         opt.parse_type = dev->parse_type;
2833         opt.on_free_cmd_type = dev->on_free_cmd_type;
2834         opt.memory_reuse_type = dev->memory_reuse_type;
2835         opt.prio_queue_type = dev->prio_queue_type;
2836         opt.partial_transfers_type = dev->partial_transfers_type;
2837         opt.partial_len = dev->partial_len;
2838         opt.tst = dev->tst;
2839         opt.queue_alg = dev->queue_alg;
2840         opt.tas = dev->tas;
2841         opt.swp = dev->swp;
2842         opt.has_own_order_mgmt = dev->has_own_order_mgmt;
2843
2844         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2845                 "partial_transfers_type %x, partial_len %d", opt.parse_type,
2846                 opt.on_free_cmd_type, opt.memory_reuse_type,
2847                 opt.partial_transfers_type, opt.partial_len);
2848
        res = copy_to_user(arg, &opt, sizeof(opt));
        if (res != 0)
                res = -EFAULT;
2850
2851         up_read(&dev->dev_rwsem);
2852 out:
2853         TRACE_EXIT_RES(res);
2854         return res;
2855 }
2856
2857 static int dev_usr_parse(struct scst_cmd *cmd)
2858 {
2859         sBUG();
2860         return SCST_CMD_STATE_DEFAULT;
2861 }
2862
2863 /* Needed only for /proc support */
2864 #define USR_TYPE {              \
2865   name:     DEV_USER_NAME,      \
2866   type:     -1,                 \
2867   parse:    dev_usr_parse,      \
2868 }
2869
2870 static struct scst_dev_type dev_user_devtype = USR_TYPE;
2871
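/*
 * close() entry point: detaches the device from the file, hands it over
 * to the cleanup thread and blocks until dev_user_process_cleanup() has
 * consumed every outstanding ucmd, then frees the device.
 */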
2872 static int dev_user_release(struct inode *inode, struct file *file)
2873 {
2874         int res = 0;
2875         struct scst_user_dev *dev;
2876
2877         TRACE_ENTRY();
2878
2879         mutex_lock(&dev_priv_mutex);
2880         dev = (struct scst_user_dev*)file->private_data;
2881         if (dev == NULL) {
2882                 mutex_unlock(&dev_priv_mutex);
2883                 goto out;
2884         }
2885         file->private_data = NULL;
2886
2887         spin_lock(&dev_list_lock);
2888         list_del(&dev->dev_list_entry);
2889         spin_unlock(&dev_list_lock);
2890
2891         mutex_unlock(&dev_priv_mutex);
2892
2893         down_write(&dev->dev_rwsem);
2894
2895         TRACE_DBG("Releasing dev %p", dev);
2896
2897         spin_lock(&cleanup_lock);
2898         list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
2899         spin_unlock(&cleanup_lock);
2900
2901         wake_up(&cleanup_list_waitQ);
2902         wake_up(&dev->prio_cmd_list_waitQ);
2903         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2904
2905         scst_unregister_virtual_device(dev->virt_id);
2906         scst_unregister_virtual_dev_driver(&dev->devtype);
2907
2908         sgv_pool_destroy(dev->pool);
2909
2910         TRACE_DBG("Unregistering finished (dev %p)", dev);
2911
2912         dev->cleanup_done = 1;
2913         wake_up(&cleanup_list_waitQ);
2914         wake_up(&dev->prio_cmd_list_waitQ);
2915         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2916         wait_for_completion(&dev->cleanup_cmpl);
2917
2918         up_write(&dev->dev_rwsem); /* to make the debug check happy */
2919
2920         TRACE_DBG("Releasing completed (dev %p)", dev);
2921
2922         kfree(dev);
2923
2924         module_put(THIS_MODULE);
2925
2926 out:
2927         TRACE_EXIT_RES(res);
2928         return res;
2929 }
2930
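/*
 * Runs in the cleanup thread: forces blocking single-queue mode and
 * keeps unjamming/draining the device until dev_user_get_next_cmd()
 * reports -EAGAIN with cleanup_done set, i.e. until every command has
 * been failed back to SCST.
 */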
2931 static void dev_user_process_cleanup(struct scst_user_dev *dev)
2932 {
2933         struct scst_user_cmd *ucmd;
2934         int rc;
2935
2936         TRACE_ENTRY();
2937
2938         dev->prio_queue_type = SCST_USER_PRIO_QUEUE_SINGLE;
2939         dev->cleaning = 1;
2940         dev->blocking = 1;
2941
2942         while(1) {
2943                 TRACE_DBG("Cleanuping dev %p", dev);
2944
2945                 dev_user_unjam_dev(dev, 0, NULL);
2946
2947                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2948                 rc = dev_user_get_next_prio_cmd(dev, &ucmd);
2949                 if (rc != 0)
2950                         rc = dev_user_get_next_cmd(dev, &ucmd);
2951                 if (rc == 0)
2952                         dev_user_unjam_cmd(ucmd, 1, NULL);
2953                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2954                 if ((rc == -EAGAIN) && dev->cleanup_done)
2955                         break;
2956         }
2957
2958 #ifdef EXTRACHECKS
2959 {
2960         int i;
2961         for(i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2962                 struct list_head *head = &dev->ucmd_hash[i];
2963                 struct scst_user_cmd *ucmd, *t;
2964                 list_for_each_entry_safe(ucmd, t, head, hash_list_entry) {
2965                         PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd,
2966                                 ucmd->state, atomic_read(&ucmd->ucmd_ref));
2967                         ucmd_put(ucmd);
2968                 }
2969         }
2970 }
2971 #endif
2972
2973         TRACE_DBG("Cleanuping done (dev %p)", dev);
2974         complete_all(&dev->cleanup_cmpl);
2975
2976         TRACE_EXIT();
2977         return;
2978 }
2979
2980 static inline int test_cleanup_list(void)
2981 {
2982         int res = !list_empty(&cleanup_list) ||
2983                   unlikely(kthread_should_stop());
2984         return res;
2985 }
2986
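/*
 * Kernel thread that owns device teardown: sleeps on cleanup_list_waitQ
 * and runs dev_user_process_cleanup() for every device queued by
 * dev_user_release().
 */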
static int dev_user_cleanup_thread(void *arg)
{
        struct scst_user_dev *dev;

        TRACE_ENTRY();

        current->flags |= PF_NOFREEZE;

        spin_lock(&cleanup_lock);
        while (!kthread_should_stop()) {
                wait_queue_t wait;
                init_waitqueue_entry(&wait, current);

                if (!test_cleanup_list()) {
                        add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (test_cleanup_list())
                                        break;
                                spin_unlock(&cleanup_lock);
                                schedule();
                                spin_lock(&cleanup_lock);
                        }
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&cleanup_list_waitQ, &wait);
                }
                /*
                 * cleanup_lock is dropped while a device is processed,
                 * so the list must be rescanned from the head afterwards.
                 */
restart:
                list_for_each_entry(dev, &cleanup_list, cleanup_list_entry) {
                        list_del(&dev->cleanup_list_entry);
                        spin_unlock(&cleanup_lock);
                        dev_user_process_cleanup(dev);
                        spin_lock(&cleanup_lock);
                        goto restart;
                }
        }
        spin_unlock(&cleanup_lock);

        /*
         * If kthread_should_stop() is true, we are guaranteed to be in
         * the module unload path, so cleanup_list must be empty.
         */
        sBUG_ON(!list_empty(&cleanup_list));

        TRACE_EXIT();
        return 0;
}

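/*
 * Module init: creates the ucmd slab cache, registers the virtual
 * device driver and its /proc entries, creates the sysfs class and
 * character device (static major DEV_USER_MAJOR) and starts the
 * cleanup thread.
 *
 * Note: if the device node is not created automatically by udev, it
 * can presumably be made by hand, e.g. "mknod /dev/scst_user c 237 0"
 * (assuming DEV_USER_NAME expands to "scst_user"; 237 and minor 0
 * match the MKDEV(DEV_USER_MAJOR, 0) in class_device_create() below).
 */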
static int __init init_scst_user(void)
{
        int res = 0;
        struct class_device *class_member;

        TRACE_ENTRY();

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
        PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
                "Consider changing the VMSPLIT option or using a 64-bit "
                "configuration instead. See README file for details.");
        res = -EINVAL;
        goto out;
#endif

        user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
        if (user_cmd_cachep == NULL) {
                res = -ENOMEM;
                goto out;
        }

        dev_user_devtype.module = THIS_MODULE;
        if (scst_register_virtual_dev_driver(&dev_user_devtype) < 0) {
                res = -ENODEV;
                goto out_cache;
        }

        res = scst_dev_handler_build_std_proc(&dev_user_devtype);
        if (res != 0)
                goto out_unreg;

        dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
        if (IS_ERR(dev_user_sysfs_class)) {
                printk(KERN_ERR "Unable to create sysfs class for SCST user "
                        "space handler\n");
                res = PTR_ERR(dev_user_sysfs_class);
                goto out_proc;
        }

        res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
        if (res) {
                printk(KERN_ERR "Unable to get major %d for SCST user space "
                        "handler\n", DEV_USER_MAJOR);
                goto out_class;
        }

        class_member = class_device_create(dev_user_sysfs_class, NULL,
                MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
        if (IS_ERR(class_member)) {
                res = PTR_ERR(class_member);
                goto out_chrdev;
        }

        cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
                "scst_usr_cleanupd");
        if (IS_ERR(cleanup_thread)) {
                res = PTR_ERR(cleanup_thread);
                PRINT_ERROR("kthread_run() failed: %d", res);
                goto out_dev;
        }

out:
        TRACE_EXIT_RES(res);
        return res;

out_dev:
        class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));

out_chrdev:
        unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);

out_class:
        class_destroy(dev_user_sysfs_class);

out_proc:
        scst_dev_handler_destroy_std_proc(&dev_user_devtype);

out_unreg:
        scst_unregister_virtual_dev_driver(&dev_user_devtype);

out_cache:
        kmem_cache_destroy(user_cmd_cachep);
        goto out;
}

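/*
 * Module exit: stops the cleanup thread, then tears down everything
 * set up in init_scst_user() in reverse order.
 */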
static void __exit exit_scst_user(void)
{
        int rc;

        TRACE_ENTRY();

        rc = kthread_stop(cleanup_thread);
        if (rc < 0) {
                TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
        }

        unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
        class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
        class_destroy(dev_user_sysfs_class);

        scst_dev_handler_destroy_std_proc(&dev_user_devtype);
        scst_unregister_virtual_dev_driver(&dev_user_devtype);

        kmem_cache_destroy(user_cmd_cachep);

        TRACE_EXIT();
        return;
}

module_init(init_scst_user);
module_exit(exit_scst_user);

MODULE_AUTHOR("Vladislav Bolkhovitin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Virtual user space device handler for SCST");
MODULE_VERSION(SCST_VERSION_STRING);
MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);