/*
 *  scst_user.c
 *
 *  Copyright (C) 2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *
 *  SCSI virtual user space device handler
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/poll.h>

#define LOG_PREFIX              DEV_USER_NAME

#include "scst.h"
#include "scst_user.h"
#include "scst_dev_handler.h"

#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
#warning "HIGHMEM kernel configurations are not supported by this module, \
        because nowadays it isn't worth the effort. Consider changing the \
        VMSPLIT option or using a 64-bit configuration instead. See the \
        README file for details."
#endif

#define DEV_USER_MAJOR                  237

#define DEV_USER_CMD_HASH_ORDER         6

#define DEV_USER_ATTACH_TIMEOUT         (5*HZ)

struct scst_user_dev {
        struct rw_semaphore dev_rwsem;

        struct scst_cmd_lists cmd_lists;

        /* Protected by cmd_lists.cmd_list_lock */
        struct list_head ready_cmd_list;

        /* Protected by dev_rwsem, or need no protection at all */
        unsigned int blocking:1;
        unsigned int cleanup_done:1;
        unsigned int cleaning:1;
        unsigned int tst:3;
        unsigned int queue_alg:4;
        unsigned int tas:1;
        unsigned int swp:1;
        unsigned int has_own_order_mgmt:1;

        int (*generic_parse)(struct scst_cmd *cmd,
                int (*get_block)(struct scst_cmd *cmd));

        int block;
        int def_block;

        struct sgv_pool *pool;

        uint8_t parse_type;
        uint8_t on_free_cmd_type;
        uint8_t memory_reuse_type;
        uint8_t partial_transfers_type;
        uint32_t partial_len;

        struct scst_dev_type devtype;

        /* Both protected by cmd_lists.cmd_list_lock */
        unsigned int handle_counter;
        struct list_head ucmd_hash[1 << DEV_USER_CMD_HASH_ORDER];

        struct scst_device *sdev;

        int virt_id;
        struct list_head dev_list_entry;
        char name[SCST_MAX_NAME];

        /* Protected by cleanup_lock */
        unsigned char in_cleanup_list:1;
        struct list_head cleanup_list_entry;
        /* ToDo: make it on-stack */
        struct completion cleanup_cmpl;
};

/* Most fields are unprotected, since only one thread at a time can access them */
struct scst_user_cmd {
        struct scst_cmd *cmd;
        struct scst_user_dev *dev;

        atomic_t ucmd_ref;

        unsigned int buff_cached:1;
        unsigned int buf_dirty:1;
        unsigned int background_exec:1;
        unsigned int aborted:1;

        struct scst_user_cmd *buf_ucmd;

        int cur_data_page;
        int num_data_pages;
        int first_page_offset;
        unsigned long ubuff;
        struct page **data_pages;
        struct sgv_pool_obj *sgv;

        /*
         * Special flags, which can be accessed asynchronously (hence "long").
         * Protected by cmd_lists.cmd_list_lock.
         */
        unsigned long sent_to_user:1;
        unsigned long jammed:1;
        unsigned long this_state_unjammed:1;
        unsigned long seen_by_user:1; /* here only as a small optimization */

        unsigned int state;

        struct list_head ready_cmd_list_entry;

        unsigned int h;
        struct list_head hash_list_entry;

        struct scst_user_get_cmd user_cmd;

        /* cmpl used only by ATTACH_SESS, mcmd used only by TM */
        union {
                struct completion *cmpl;
                struct scst_mgmt_cmd *mcmd;
        };
        int result;
};

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask);
static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);

static int dev_user_parse(struct scst_cmd *cmd);
static int dev_user_exec(struct scst_cmd *cmd);
static void dev_user_on_free_cmd(struct scst_cmd *cmd);
static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev);

static int dev_user_disk_done(struct scst_cmd *cmd);
static int dev_user_tape_done(struct scst_cmd *cmd);

static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv);
static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
                                     void *priv);

static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);

static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags);
static void dev_user_unjam_dev(struct scst_user_dev *dev);

static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd);
static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status);
static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
static int dev_user_register_dev(struct file *file,
        const struct scst_user_dev_desc *dev_desc);
static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt);
static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
static int dev_user_get_opt(struct file *file, void *arg);

static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
static long dev_user_ioctl(struct file *file, unsigned int cmd,
        unsigned long arg);
static int dev_user_release(struct inode *inode, struct file *file);

/** Data **/

static struct kmem_cache *user_cmd_cachep;

static DEFINE_MUTEX(dev_priv_mutex);

static struct file_operations dev_user_fops = {
        .poll           = dev_user_poll,
        .unlocked_ioctl = dev_user_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = dev_user_ioctl,
#endif
        .release        = dev_user_release,
};

static struct class *dev_user_sysfs_class;

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);

static DEFINE_SPINLOCK(cleanup_lock);
static LIST_HEAD(cleanup_list);
static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
static struct task_struct *cleanup_thread;

/*
 * Skip this command if result is not 0. Must be called under
 * cmd_lists.cmd_list_lock and IRQ off.
 */
static inline bool ucmd_get_check(struct scst_user_cmd *ucmd)
{
        int r = atomic_inc_return(&ucmd->ucmd_ref);
        int res;
        if (unlikely(r == 1)) {
                TRACE_DBG("ucmd %p is being destroyed", ucmd);
                atomic_dec(&ucmd->ucmd_ref);
                res = true;
                /*
                 * Necessary code is serialized by cmd_list_lock in
                 * cmd_remove_hash()
                 */
        } else {
                TRACE_DBG("ucmd %p, new ref_cnt %d", ucmd,
                        atomic_read(&ucmd->ucmd_ref));
                res = false;
        }
        return res;
}

static inline void __ucmd_get(struct scst_user_cmd *ucmd, bool barrier)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        atomic_inc(&ucmd->ucmd_ref);
        if (barrier)
                smp_mb__after_atomic_inc();
}

static inline void ucmd_get_ordered(struct scst_user_cmd *ucmd)
{
        __ucmd_get(ucmd, true);
}

static inline void ucmd_get(struct scst_user_cmd *ucmd)
{
        __ucmd_get(ucmd, false);
}

static inline void ucmd_put(struct scst_user_cmd *ucmd)
{
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));

        EXTRACHECKS_BUG_ON(atomic_read(&ucmd->ucmd_ref) == 0);

        if (atomic_dec_and_test(&ucmd->ucmd_ref))
                dev_user_free_ucmd(ucmd);
}

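/*
 * calc_num_pg() returns how many pages a user space buffer of len bytes
 * starting at buf spans: the buffer's offset into its first page is
 * added to len, which is then rounded up to a whole number of pages
 * (e.g. a 2-byte buffer straddling a page boundary spans 2 pages).
 */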
static inline int calc_num_pg(unsigned long buf, int len)
{
        len += buf & ~PAGE_MASK;
        return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
}

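/*
 * is_need_offs_page() reports whether the buffer starts at a non-zero
 * offset within its first page and also crosses a page boundary, so
 * that the unaligned head pushes the end of the buffer into an extra
 * page.
 */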
static inline int is_need_offs_page(unsigned long buf, int len)
{
        return ((buf & ~PAGE_MASK) != 0) &&
                ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
}

static void __dev_user_not_reg(void)
{
        PRINT_ERROR("%s", "Device not registered");
        return;
}

static inline int dev_user_check_reg(struct scst_user_dev *dev)
{
        if (dev == NULL) {
                __dev_user_not_reg();
                return -EINVAL;
        }
        return 0;
}

static inline int scst_user_cmd_hashfn(int h)
{
        return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
}

static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
        unsigned int h)
{
        struct list_head *head;
        struct scst_user_cmd *ucmd;

        head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
        list_for_each_entry(ucmd, head, hash_list_entry) {
                if (ucmd->h == h) {
                        TRACE_DBG("Found ucmd %p", ucmd);
                        return ucmd;
                }
        }
        return NULL;
}

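/*
 * cmd_insert_hash() assigns the ucmd a unique handle (ucmd->h), by which
 * user space refers to the command in its replies, and links it into the
 * device's hash; handle_counter is incremented until an unused handle is
 * found.
 */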
static void cmd_insert_hash(struct scst_user_cmd *ucmd)
{
        struct list_head *head;
        struct scst_user_dev *dev = ucmd->dev;
        struct scst_user_cmd *u;
        unsigned long flags;

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
        do {
                ucmd->h = dev->handle_counter++;
                u = __ucmd_find_hash(dev, ucmd->h);
        } while (u != NULL);
        head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
        list_add_tail(&ucmd->hash_list_entry, head);
        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Inserted ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
{
        unsigned long flags;

        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
        list_del(&ucmd->hash_list_entry);
        spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);

        TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
        return;
}

static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Freeing ucmd %p", ucmd);

        cmd_remove_hash(ucmd);
        EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);

        kmem_cache_free(user_cmd_cachep, ucmd);

        TRACE_EXIT();
        return;
}

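/*
 * sgv_pool allocation callback: instead of allocating a fresh page, it
 * hands out the next page of the already mapped user space buffer
 * (ucmd->data_pages). On the first call it also takes a ucmd reference,
 * which is dropped again when the SG entries are freed.
 */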
static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
        int offset = 0;

        TRACE_ENTRY();

        /* *sg is supposed to be zeroed */

        TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
                ucmd->ubuff, ucmd->cur_data_page);

        if (ucmd->cur_data_page == 0) {
                TRACE_MEM("ucmd->first_page_offset %d",
                        ucmd->first_page_offset);
                offset = ucmd->first_page_offset;
                ucmd_get(ucmd);
        }

        if (ucmd->cur_data_page >= ucmd->num_data_pages)
                goto out;

        sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
                PAGE_SIZE - offset, offset);
        ucmd->cur_data_page++;

        TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
                sg->offset);
        TRACE_BUFFER("Page data", sg_virt(sg), sg->length);

out:
        TRACE_EXIT();
        return sg_page(sg);
}

static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
                ucmd, ucmd->h, ucmd->ubuff);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
        ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;

        ucmd->state = UCMD_STATE_ON_CACHE_FREEING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
{
        int i;

        TRACE_ENTRY();

        TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
                ucmd->ubuff, ucmd->num_data_pages);

        for (i = 0; i < ucmd->num_data_pages; i++) {
                struct page *page = ucmd->data_pages[i];

                if (ucmd->buf_dirty)
                        SetPageDirty(page);

                page_cache_release(page);
        }

        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;

        TRACE_EXIT();
        return;
}

static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
{
        TRACE_ENTRY();

        sBUG_ON(ucmd->data_pages == NULL);

        TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
                ucmd, ucmd->ubuff, ucmd->buff_cached);

        dev_user_unmap_buf(ucmd);

        if (ucmd->buff_cached)
                dev_user_on_cached_mem_free(ucmd);
        else
                ucmd_put(ucmd);

        TRACE_EXIT();
        return;
}

static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;

        TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
                sg_count, ucmd);

        __dev_user_free_sg_entries(ucmd);

        return;
}

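/*
 * Returns 1 if the device's memory_reuse_type allows caching buffers for
 * this command's data direction (REUSE_ALL, or REUSE_READ/REUSE_WRITE
 * matching the direction), 0 otherwise.
 */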
static inline int is_buff_cached(struct scst_user_cmd *ucmd)
{
        int mem_reuse_type = ucmd->dev->memory_reuse_type;

        if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
            ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
            ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
             (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE)))
                return 1;
        else
                return 0;
}

/*
 * Returns 0 on success, <0 on fatal failure, >0 if pages are needed.
 * Unmaps the buffer, if needed, in case of error.
 */
static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;
        struct scst_user_dev *dev = ucmd->dev;
        int gfp_mask, flags = 0;
        int bufflen = cmd->bufflen;
        int last_len = 0;

        TRACE_ENTRY();

        gfp_mask = __GFP_NOWARN;
        gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);

        if (cached_buff) {
                flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
                if (ucmd->ubuff == 0)
                        flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
        } else {
                TRACE_MEM("%s", "Not cached buff");
                flags |= SCST_POOL_ALLOC_NO_CACHED;
                if (ucmd->ubuff == 0) {
                        res = 1;
                        goto out;
                }
                bufflen += ucmd->first_page_offset;
                if (is_need_offs_page(ucmd->ubuff, cmd->bufflen))
                        last_len = bufflen & ~PAGE_MASK;
                else
                        last_len = cmd->bufflen & ~PAGE_MASK;
                if (last_len == 0)
                        last_len = PAGE_SIZE;
        }
        ucmd->buff_cached = cached_buff;

        cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &ucmd->sgv, ucmd);
        if (cmd->sg != NULL) {
                struct scst_user_cmd *buf_ucmd =
                        (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);

                TRACE_MEM("Buf ucmd %p", buf_ucmd);

                ucmd->ubuff = buf_ucmd->ubuff;
                ucmd->buf_ucmd = buf_ucmd;

                EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
                                   (ucmd != buf_ucmd));

                if (last_len != 0) {
                        /* We don't use clustering, so the assignment is safe */
                        cmd->sg[cmd->sg_cnt-1].length = last_len;
                }

                TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
                        "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff,
                        last_len, cmd->sg[cmd->sg_cnt-1].length);

                if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
                        static int ll;
                        if (ll < 10) {
                                PRINT_INFO("Unable to complete command due to "
                                        "SG IO count limitation (requested %d, "
                                        "available %d, tgt lim %d)", cmd->sg_cnt,
                                        cmd->tgt_dev->max_sg_cnt,
                                        cmd->tgt->sg_tablesize);
                                ll++;
                        }
                        cmd->sg = NULL;
                        /* sgv will be freed in dev_user_free_sgv() */
                        res = -1;
                }
        } else {
                TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
                        "sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
                        ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
                if (unlikely(cmd->sg_cnt == 0)) {
                        TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
                        sBUG_ON(ucmd->sgv != NULL);
                        res = -1;
                } else {
                        switch (ucmd->state) {
                        case UCMD_STATE_BUF_ALLOCING:
                                res = 1;
                                break;
                        case UCMD_STATE_EXECING:
                                res = -1;
                                break;
                        default:
                                sBUG();
                                break;
                        }
                }
        }

out:
        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(ucmd->cmd->data_buf_tgt_alloc)) {
                PRINT_ERROR("Target driver %s requested own memory "
                        "allocation", ucmd->cmd->tgtt->name);
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        ucmd->state = UCMD_STATE_BUF_ALLOCING;
        cmd->data_buf_alloced = 1;

        rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
        if (rc == 0)
                goto out;
        else if (rc < 0) {
                scst_set_busy(cmd);
                res = SCST_CMD_STATE_PRE_XMIT_RESP;
                goto out;
        }

        if ((cmd->data_direction != SCST_DATA_WRITE) &&
            !scst_is_cmd_local(cmd)) {
                TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
                goto out;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
        ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb)));
        ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
                (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;

        dev_user_add_to_ready(ucmd);

        res = SCST_CMD_STATE_STOP;

out:
        TRACE_EXIT_RES(res);
        return res;
}

static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask)
{
        struct scst_user_cmd *ucmd = NULL;

        TRACE_ENTRY();

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
        ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
        if (ucmd != NULL)
                memset(ucmd, 0, sizeof(*ucmd));
#else
        ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
#endif
        if (unlikely(ucmd == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
                        "user cmd (gfp_mask %x)", gfp_mask);
                goto out;
        }
        ucmd->dev = dev;
        atomic_set(&ucmd->ucmd_ref, 1);

        cmd_insert_hash(ucmd);

        TRACE_MEM("ucmd %p allocated", ucmd);

out:
        TRACE_EXIT_HRES((unsigned long)ucmd);
        return ucmd;
}

static int dev_user_get_block(struct scst_cmd *cmd)
{
        struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are outstanding commands.
         */
        TRACE_EXIT_RES(dev->block);
        return dev->block;
}

static int dev_user_parse(struct scst_cmd *cmd)
{
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_user_cmd *ucmd;
        int atomic = scst_cmd_atomic(cmd);
        struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
        int gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;

        TRACE_ENTRY();

        if (cmd->dh_priv == NULL) {
                ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
                if (unlikely(ucmd == NULL)) {
                        if (atomic) {
                                res = SCST_CMD_STATE_NEED_THREAD_CTX;
                                goto out;
                        } else {
                                scst_set_busy(cmd);
                                goto out_error;
                        }
                }
                ucmd->cmd = cmd;
                cmd->dh_priv = ucmd;
        } else {
                ucmd = (struct scst_user_cmd *)cmd->dh_priv;
                TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
        }

        TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);

        if (ucmd->state != UCMD_STATE_NEW)
                goto alloc;

        switch (dev->parse_type) {
        case SCST_USER_PARSE_STANDARD:
                TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
                        goto out_invalid;
                break;

        case SCST_USER_PARSE_EXCEPTION:
                TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
                rc = dev->generic_parse(cmd, dev_user_get_block);
                if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
                        break;
                else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
                        TRACE_MEM("Restarting PARSE to thread context "
                                "(ucmd %p)", ucmd);
                        res = SCST_CMD_STATE_NEED_THREAD_CTX;
                        goto out;
                }
                /* else fall through */

        case SCST_USER_PARSE_CALL:
                TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
                        "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
                ucmd->user_cmd.cmd_h = ucmd->h;
                ucmd->user_cmd.subcode = SCST_USER_PARSE;
                ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
                memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
                        min(sizeof(ucmd->user_cmd.parse_cmd.cdb),
                            sizeof(cmd->cdb)));
                ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
                ucmd->user_cmd.parse_cmd.timeout = cmd->timeout / HZ;
                ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
                ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
                ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
                ucmd->user_cmd.parse_cmd.expected_values_set =
                                        cmd->expected_values_set;
                ucmd->user_cmd.parse_cmd.expected_data_direction =
                                        cmd->expected_data_direction;
                ucmd->user_cmd.parse_cmd.expected_transfer_len =
                                        cmd->expected_transfer_len;
                ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
                ucmd->state = UCMD_STATE_PARSING;
                dev_user_add_to_ready(ucmd);
                res = SCST_CMD_STATE_STOP;
                goto out;

        default:
                sBUG();
                goto out;
        }

alloc:
        if (cmd->data_direction != SCST_DATA_NONE)
                res = dev_user_alloc_space(ucmd);

out:
        TRACE_EXIT_RES(res);
        return res;

out_invalid:
        PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
                cmd->op_flags & SCST_INFO_INVALID);
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));

out_error:
        res = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;
}

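/*
 * Flush the CPU caches for every page of the data buffer, so that data
 * written via the kernel mapping is seen through the user space mapping
 * (and vice versa) on architectures with aliasing caches.
 */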
static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
{
        struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
        unsigned long start = buf_ucmd->ubuff;
        int i;

        TRACE_ENTRY();

        if (start == 0)
                goto out;

        for (i = 0; i < buf_ucmd->num_data_pages; i++) {
                struct page *page;
                page = buf_ucmd->data_pages[i];
#ifdef ARCH_HAS_FLUSH_ANON_PAGE
                struct vm_area_struct *vma = find_vma(current->mm, start);
                if (vma != NULL)
                        flush_anon_page(vma, page, start);
#endif
                flush_dcache_page(page);
                start += PAGE_SIZE;
        }

out:
        TRACE_EXIT();
        return;
}

static int dev_user_exec(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
        int res = SCST_EXEC_COMPLETED;

        TRACE_ENTRY();

#if 0 /* We set exec_atomic to 0 to let the SCST core know that we need a
       * thread context to complete the necessary actions, but all we are
       * going to do in this function is, in fact, atomic, so let's skip
       * this check.
       */
        if (scst_cmd_atomic(cmd)) {
                TRACE_DBG("%s", "User exec() can not be called in atomic "
                        "context, rescheduling to the thread");
                res = SCST_EXEC_NEED_THREAD;
                goto out;
        }
#endif

        TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
                "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
                cmd->bufflen, cmd->data_len, ucmd->ubuff);

        if (cmd->data_direction == SCST_DATA_WRITE)
                dev_user_flush_dcache(ucmd);

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_EXEC;
        ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
        memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
                min(sizeof(ucmd->user_cmd.exec_cmd.cdb),
                    sizeof(cmd->cdb)));
        ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
        ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
        ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
        ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
        if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
                ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
                        (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
        }
        ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
        ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
        ucmd->user_cmd.exec_cmd.partial = 0;
        ucmd->user_cmd.exec_cmd.timeout = cmd->timeout / HZ;
        ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;

        ucmd->state = UCMD_STATE_EXECING;

        dev_user_add_to_ready(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
{
        if (ucmd->sgv != NULL) {
                sgv_pool_free(ucmd->sgv);
                ucmd->sgv = NULL;
        } else if (ucmd->data_pages != NULL) {
                /* We mapped pages, but for some reason didn't allocate them */
                ucmd_get(ucmd);
                __dev_user_free_sg_entries(ucmd);
        }
        return;
}

static void dev_user_on_free_cmd(struct scst_cmd *cmd)
{
        struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;

        TRACE_ENTRY();

        if (unlikely(ucmd == NULL))
                goto out;

        TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
                ucmd->buff_cached, ucmd->ubuff);

        ucmd->cmd = NULL;
        if ((cmd->data_direction == SCST_DATA_WRITE) && (ucmd->buf_ucmd != NULL))
                ucmd->buf_ucmd->buf_dirty = 1;

        if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
                ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
                /* The state assignment must be before freeing sgv! */
                goto out_reply;
        }

        if (unlikely(!ucmd->seen_by_user)) {
                TRACE_MGMT_DBG("Not seen by user ucmd %p", ucmd);
                sBUG_ON((ucmd->sgv != NULL) || (ucmd->data_pages != NULL));
                goto out_reply;
        }

        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;

        ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
        ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
        ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
        ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
        ucmd->user_cmd.on_free_cmd.status = cmd->status;
        ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;

        ucmd->state = UCMD_STATE_ON_FREEING;

        dev_user_add_to_ready(ucmd);

out:
        TRACE_EXIT();
        return;

out_reply:
        dev_user_process_reply_on_free(ucmd);
        goto out;
}

static void dev_user_set_block(struct scst_cmd *cmd, int block)
{
        struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
        /*
         * No need for locks here, since *_detach() cannot be
         * called while there are outstanding commands.
         */
        TRACE_DBG("dev %p, new block %d", dev, block);
        if (block != 0)
                dev->block = block;
        else
                dev->block = dev->def_block;
        return;
}

static int dev_user_disk_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_block_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_tape_done(struct scst_cmd *cmd)
{
        int res = SCST_CMD_STATE_DEFAULT;

        TRACE_ENTRY();

        res = scst_tape_generic_dev_done(cmd, dev_user_set_block);

        TRACE_EXIT_RES(res);
        return res;
}

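/*
 * Queues the ucmd on the device's ready_cmd_list for user space to pick
 * up. PARSE/ALLOC, management and HEAD OF QUEUE commands go to the head
 * of the list; the waitqueue is woken for PARSE/ALLOC and management
 * insertions, when called from IRQ context, and for preprocessing-only
 * commands.
 */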
static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
{
        struct scst_user_dev *dev = ucmd->dev;
        unsigned long flags;
        int do_wake;

        TRACE_ENTRY();

        do_wake = (in_interrupt() ||
                   (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
        if (ucmd->cmd)
                do_wake |= ucmd->cmd->preprocessing_only;

        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);

        ucmd->this_state_unjammed = 0;

        if ((ucmd->state == UCMD_STATE_PARSING) ||
            (ucmd->state == UCMD_STATE_BUF_ALLOCING)) {
                /*
                 * If we don't put such commands at the head of the queue, then
                 * under high load we might delay threads waiting for memory
                 * allocations for too long and start losing NOPs, which would
                 * lead remote initiators to consider us unresponsive and stuck
                 * => broken connections, etc. If none of our commands completes
                 * within the NOP timeout to let the head commands go, then we
                 * are really overloaded and/or stuck.
                 */
                TRACE_DBG("Adding ucmd %p (state %d) to head of ready "
                        "cmd list", ucmd, ucmd->state);
                list_add(&ucmd->ready_cmd_list_entry,
                        &dev->ready_cmd_list);
                do_wake = 1;
        } else if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
                   unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
                   unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
                TRACE_MGMT_DBG("Adding mgmt ucmd %p (state %d) to head of "
                        "ready cmd list", ucmd, ucmd->state);
                list_add(&ucmd->ready_cmd_list_entry,
                        &dev->ready_cmd_list);
                do_wake = 1;
        } else if ((ucmd->cmd != NULL) &&
                   unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
                TRACE_DBG("Adding HQ ucmd %p to head of ready cmd list", ucmd);
                list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        } else {
                TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
                list_add_tail(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
        }

        if (do_wake) {
                TRACE_DBG("Waking up dev %p", dev);
                wake_up(&dev->cmd_lists.cmd_list_waitQ);
        }

        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);

        smp_mb();
        if (unlikely(dev->cleaning)) {
                spin_lock_irqsave(&cleanup_lock, flags);
                if (!dev->in_cleanup_list) {
                        TRACE_DBG("Adding dev %p to the cleanup list (ucmd %p)",
                                dev, ucmd);
                        list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
                        dev->in_cleanup_list = 1;
                        wake_up(&cleanup_list_waitQ);
                }
                spin_unlock_irqrestore(&cleanup_lock, flags);
        }

        TRACE_EXIT();
        return;
}

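/*
 * Pins num_pg pages of the user space buffer at ubuff with
 * get_user_pages() and stores them in ucmd->data_pages. On failure the
 * command gets BUSY or HARDWARE ERROR status and is moved to the
 * PRE_XMIT_RESP state.
 */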
static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
        int num_pg)
{
        int res = 0, rc;
        int i;

        TRACE_ENTRY();

        if (unlikely(ubuff == 0))
                goto out_nomem;

        sBUG_ON(ucmd->data_pages != NULL);

        ucmd->num_data_pages = num_pg;

        ucmd->data_pages = kzalloc(sizeof(*ucmd->data_pages)*ucmd->num_data_pages,
                GFP_KERNEL);
        if (ucmd->data_pages == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
                        "(num_data_pages=%d)", ucmd->num_data_pages);
                res = -ENOMEM;
                goto out_nomem;
        }

        TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d, "
                "first_page_offset %d, len %d)", ucmd, ubuff,
                ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
                ucmd->cmd->bufflen);

        down_read(&current->mm->mmap_sem);
        rc = get_user_pages(current, current->mm, ubuff, ucmd->num_data_pages,
                1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* get_user_pages() flushes dcache */

        if (rc < ucmd->num_data_pages)
                goto out_unmap;

        ucmd->ubuff = ubuff;
        ucmd->first_page_offset = (ubuff & ~PAGE_MASK);

out:
        TRACE_EXIT_RES(res);
        return res;

out_nomem:
        scst_set_busy(ucmd->cmd);
        /* fall through */

out_err:
        ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        goto out;

out_unmap:
        PRINT_ERROR("Failed to get %d user pages (rc %d)",
                ucmd->num_data_pages, rc);
        if (rc > 0) {
                for (i = 0; i < rc; i++)
                        page_cache_release(ucmd->data_pages[i]);
        }
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;
        res = -EFAULT;
        scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        goto out_err;
}

static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        TRACE_DBG("ucmd %p, pbuf %Lx", ucmd, reply->alloc_reply.pbuf);

        if (likely(reply->alloc_reply.pbuf != 0)) {
                int pages;
                if (ucmd->buff_cached) {
                        if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
                                PRINT_ERROR("Supplied pbuf %Lx isn't "
                                        "page aligned", reply->alloc_reply.pbuf);
                                goto out_hwerr;
                        }
                        pages = cmd->sg_cnt;
                } else
                        pages = calc_num_pg(reply->alloc_reply.pbuf, cmd->bufflen);
                res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
        } else {
                scst_set_busy(ucmd->cmd);
                ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        }

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_hwerr:
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_parse *preply =
                &reply->parse_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
                     (preply->data_direction != SCST_DATA_READ) &&
                     (preply->data_direction != SCST_DATA_NONE)))
                goto out_inval;

        if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
                     (preply->bufflen == 0)))
                goto out_inval;

        if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
                goto out_inval;

        TRACE_DBG("ucmd %p, queue_type %x, data_direction %x, bufflen %d, "
                "data_len %d, pbuf %Lx", ucmd, preply->queue_type,
                preply->data_direction, preply->bufflen, preply->data_len,
                reply->alloc_reply.pbuf);

        cmd->queue_type = preply->queue_type;
        cmd->data_direction = preply->data_direction;
        cmd->bufflen = preply->bufflen;
        cmd->data_len = preply->data_len;

out_process:
        scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);

        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("Invalid parse_reply parameters (LUN %lld, op %x, cmd %p)",
                (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
        PRINT_BUFFER("Invalid parse_reply", reply, sizeof(*reply));
        scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
        res = -EINVAL;
        goto out_process;
}

static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON FREE ucmd %p", ucmd);

        dev_user_free_sgv(ucmd);
        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
{
        int res = 0;

        TRACE_ENTRY();

        TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);

        ucmd_put(ucmd);

        TRACE_EXIT_RES(res);
        return res;
}

static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_scsi_cmd_reply_exec *ereply =
                &reply->exec_reply;
        struct scst_cmd *cmd = ucmd->cmd;

        TRACE_ENTRY();

        if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
                if (ucmd->background_exec) {
                        TRACE_DBG("Background ucmd %p finished", ucmd);
                        ucmd_put(ucmd);
                        goto out;
                }
                if (unlikely(ereply->resp_data_len > cmd->bufflen))
                        goto out_inval;
                if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
                             (ereply->resp_data_len != 0)))
                        goto out_inval;
        } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
                if (unlikely(ucmd->background_exec))
                        goto out_inval;
                if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
                             (cmd->resp_data_len != 0)))
                        goto out_inval;
                ucmd_get_ordered(ucmd);
                ucmd->background_exec = 1;
                TRACE_DBG("Background ucmd %p", ucmd);
                goto out_compl;
        } else
                goto out_inval;

        TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
                ereply->status, ereply->resp_data_len);

        if (ereply->resp_data_len != 0) {
                if (ucmd->ubuff == 0) {
                        int pages, rc;
                        if (unlikely(ereply->pbuf == 0))
                                goto out_busy;
                        if (ucmd->buff_cached) {
                                if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
                                        PRINT_ERROR("Supplied pbuf %Lx isn't "
                                                "page aligned", ereply->pbuf);
                                        goto out_hwerr;
                                }
                                pages = cmd->sg_cnt;
                        } else
                                pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
                        rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
                        if ((rc != 0) || (ucmd->ubuff == 0))
                                goto out_compl;

                        rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
                        if (unlikely(rc != 0))
                                goto out_busy;
                } else
                        dev_user_flush_dcache(ucmd);
                cmd->may_need_dma_sync = 1;
                scst_set_resp_data_len(cmd, ereply->resp_data_len);
        } else if (cmd->resp_data_len != ereply->resp_data_len) {
                if (ucmd->ubuff == 0)
                        cmd->resp_data_len = ereply->resp_data_len;
                else
                        scst_set_resp_data_len(cmd, ereply->resp_data_len);
        }

        cmd->status = ereply->status;
        if (ereply->sense_len != 0) {
                res = scst_alloc_sense(cmd, 0);
                if (res != 0)
                        goto out_compl;
                res = copy_from_user(cmd->sense,
                        (void *)(unsigned long)ereply->psense_buffer,
                        min((unsigned int)SCST_SENSE_BUFFERSIZE,
                                (unsigned int)ereply->sense_len));
                /* copy_from_user() returns the number of bytes NOT copied */
                if (res != 0) {
                        PRINT_ERROR("%s", "Unable to get sense data");
                        res = -EFAULT;
                        goto out_hwerr_res_set;
                }
        }

out_compl:
        cmd->completed = 1;
        cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
        /* !! At this point cmd can be already freed !! */

out:
        TRACE_EXIT_RES(res);
        return res;

out_inval:
        PRINT_ERROR("Invalid exec_reply parameters (LUN %lld, op %x, cmd %p)",
                (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
        PRINT_BUFFER("Invalid exec_reply", reply, sizeof(*reply));

out_hwerr:
        res = -EINVAL;

out_hwerr_res_set:
        if (ucmd->background_exec) {
                ucmd_put(ucmd);
                goto out;
        } else {
                scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
                goto out_compl;
        }

out_busy:
        scst_set_busy(cmd);
        goto out_compl;
}

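/*
 * Demultiplexes a reply from user space: looks up the ucmd by its
 * handle, checks that the reply matches the state the command was sent
 * to user space in, then dispatches to the state-specific handler.
 */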
static int dev_user_process_reply(struct scst_user_dev *dev,
        struct scst_user_reply_cmd *reply)
{
        int res = 0;
        struct scst_user_cmd *ucmd;
        int state;

        TRACE_ENTRY();

        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);

        ucmd = __ucmd_find_hash(dev, reply->cmd_h);
        if (unlikely(ucmd == NULL)) {
                TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
                res = -ESRCH;
                goto out_unlock;
        }

        if (unlikely(ucmd_get_check(ucmd))) {
                TRACE_MGMT_DBG("Found being destroyed cmd_h %d", reply->cmd_h);
                res = -ESRCH;
                goto out_unlock;
        }

        if (ucmd->background_exec) {
                state = UCMD_STATE_EXECING;
                goto unlock_process;
        }

        if (unlikely(ucmd->this_state_unjammed)) {
                TRACE_MGMT_DBG("Reply on unjammed ucmd %p, ignoring",
                        ucmd);
                goto out_unlock_put;
        }

        if (unlikely(!ucmd->sent_to_user)) {
                TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
                        "state %x", ucmd, ucmd->state);
                res = -EINVAL;
                goto out_unlock_put;
        }

        if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
                goto out_wrong_state;

        if (unlikely(_IOC_NR(reply->subcode) != ucmd->state))
                goto out_wrong_state;

        state = ucmd->state;
        ucmd->sent_to_user = 0;

unlock_process:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);

        switch (state) {
        case UCMD_STATE_PARSING:
                res = dev_user_process_reply_parse(ucmd, reply);
                break;

        case UCMD_STATE_BUF_ALLOCING:
                res = dev_user_process_reply_alloc(ucmd, reply);
                break;

        case UCMD_STATE_EXECING:
                res = dev_user_process_reply_exec(ucmd, reply);
                break;

        case UCMD_STATE_ON_FREEING:
                res = dev_user_process_reply_on_free(ucmd);
                break;

        case UCMD_STATE_ON_CACHE_FREEING:
                res = dev_user_process_reply_on_cache_free(ucmd);
                break;

        case UCMD_STATE_TM_EXECING:
                res = dev_user_process_reply_tm_exec(ucmd, reply->result);
                break;

        case UCMD_STATE_ATTACH_SESS:
        case UCMD_STATE_DETACH_SESS:
                res = dev_user_process_reply_sess(ucmd, reply->result);
                break;

        default:
                sBUG();
                break;
        }

out_put:
        ucmd_put(ucmd);

out:
        TRACE_EXIT_RES(res);
        return res;

out_wrong_state:
        PRINT_ERROR("Command's %p subcode %x doesn't match internal "
                "command's state %x or reply->subcode (%x) != ucmd->subcode "
                "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
                reply->subcode, ucmd->user_cmd.subcode);
        res = -EINVAL;
        dev_user_unjam_cmd(ucmd, 0, NULL);

out_unlock_put:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
        goto out_put;

out_unlock:
        spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
        goto out;
}

static int dev_user_reply_cmd(struct file *file, unsigned long arg)
{
        int res = 0;
        struct scst_user_dev *dev;
        struct scst_user_reply_cmd *reply;

        TRACE_ENTRY();

        mutex_lock(&dev_priv_mutex);
        dev = (struct scst_user_dev *)file->private_data;
        res = dev_user_check_reg(dev);
        if (res != 0) {
                mutex_unlock(&dev_priv_mutex);
                goto out;
        }
        down_read(&dev->dev_rwsem);
        mutex_unlock(&dev_priv_mutex);

        reply = kzalloc(sizeof(*reply), GFP_KERNEL);
        if (reply == NULL) {
                res = -ENOMEM;
                goto out_up;
        }

        res = copy_from_user(reply, (void *)arg, sizeof(*reply));
        /* copy_from_user() returns the number of bytes NOT copied */
        if (res != 0) {
                res = -EFAULT;
                goto out_free;
        }

        TRACE_BUFFER("Reply", reply, sizeof(*reply));

        res = dev_user_process_reply(dev, reply);
        if (res < 0)
                goto out_free;

out_free:
        kfree(reply);

out_up:
        up_read(&dev->dev_rwsem);

out:
        TRACE_EXIT_RES(res);
        return res;
}

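/*
 * Drains the device's active_cmd_list, processing each command directly
 * in the caller's context; the list lock is dropped around each command
 * and re-acquired afterwards. Returns the number of commands processed.
 */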
1502 static int dev_user_process_scst_commands(struct scst_user_dev *dev)
1503 {
1504         int res = 0;
1505
1506         TRACE_ENTRY();
1507
1508         while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
1509                 struct scst_cmd *cmd = list_entry(
1510                         dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
1511                         cmd_list_entry);
1512                 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
1513                 list_del(&cmd->cmd_list_entry);
1514                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1515                 scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT |
1516                                                  SCST_CONTEXT_PROCESSABLE);
1517                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1518                 res++;
1519         }
1520
1521         TRACE_EXIT_RES(res);
1522         return res;
1523 }
1524
1525 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1526 struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
1527 {
1528         struct scst_user_cmd *u;
1529
1530 again:
1531         u = NULL;
1532         if (!list_empty(cmd_list)) {
1533                 u = list_entry(cmd_list->next, typeof(*u), ready_cmd_list_entry);
1534
1535                 TRACE_DBG("Found ready ucmd %p", u);
1536                 list_del(&u->ready_cmd_list_entry);
1537
1538                 EXTRACHECKS_BUG_ON(u->this_state_unjammed);
1539
1540                 if (u->cmd != NULL) {
1541                         if (u->state == UCMD_STATE_EXECING) {
1542                                 struct scst_user_dev *dev = u->dev;
1543                                 int rc;
1544
1545                                 EXTRACHECKS_BUG_ON(u->jammed);
1546
1547                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1548
1549                                 rc = scst_check_local_events(u->cmd);
1550                                 if (unlikely(rc != 0)) {
1551                                         u->cmd->scst_cmd_done(u->cmd,
1552                                                 SCST_CMD_STATE_DEFAULT);
1553                                         /*
1554                                          * !! At this point cmd & u can be !!
1555                                          * !! already freed                !!
1556                                          */
1557                                         spin_lock_irq(
1558                                                 &dev->cmd_lists.cmd_list_lock);
1559                                         goto again;
1560                                 }
1561
1562                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1563                         } else if (unlikely(test_bit(SCST_CMD_ABORTED,
1564                                         &u->cmd->cmd_flags))) {
1565                                 switch (u->state) {
1566                                 case UCMD_STATE_PARSING:
1567                                 case UCMD_STATE_BUF_ALLOCING:
1568                                         TRACE_MGMT_DBG("Aborting ucmd %p", u);
1569                                         dev_user_unjam_cmd(u, 0, NULL);
1570                                         goto again;
1571                                 case UCMD_STATE_EXECING:
1572                                         EXTRACHECKS_BUG_ON(1);
1573                                 }
1574                         }
1575                 }
1576                 u->sent_to_user = 1;
1577                 u->seen_by_user = 1;
1578         }
1579         return u;
1580 }
1581
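     /*
      * Wakeup condition for dev_user_get_next_cmd(): there is queued work,
      * the device is non-blocking or already cleaned up, or a signal is
      * pending.
      */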
1582 static inline int test_cmd_lists(struct scst_user_dev *dev)
1583 {
1584         int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
1585                   !list_empty(&dev->ready_cmd_list) ||
1586                   !dev->blocking || dev->cleanup_done ||
1587                   signal_pending(current);
1588         return res;
1589 }
1590
1591 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1592 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
1593         struct scst_user_cmd **ucmd)
1594 {
1595         int res = 0;
1596         wait_queue_t wait;
1597
1598         TRACE_ENTRY();
1599
1600         init_waitqueue_entry(&wait, current);
1601
1602         while (1) {
1603                 if (!test_cmd_lists(dev)) {
1604                         add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
1605                                 &wait);
1606                         for (;;) {
1607                                 set_current_state(TASK_INTERRUPTIBLE);
1608                                 if (test_cmd_lists(dev))
1609                                         break;
1610                                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1611                                 schedule();
1612                                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1613                         }
1614                         set_current_state(TASK_RUNNING);
1615                         remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
1616                                 &wait);
1617                 }
1618
1619                 dev_user_process_scst_commands(dev);
1620
1621                 *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
1622                 if (*ucmd != NULL)
1623                         break;
1624
1625                 if (!dev->blocking || dev->cleanup_done) {
1626                         res = -EAGAIN;
1627                         TRACE_DBG("No ready commands, returning %d", res);
1628                         break;
1629                 }
1630
1631                 if (signal_pending(current)) {
1632                         res = -EINTR;
1633                         TRACE_DBG("Signal pending, returning %d", res);
1634                         break;
1635                 }
1636         }
1637
1638         TRACE_EXIT_RES(res);
1639         return res;
1640 }
1641
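     /*
      * SCST_USER_REPLY_AND_GET_CMD handler: optionally processes a reply
      * (the first 64 bits of the user buffer hold its address, 0 if there is
      * none), then waits for and returns the next command for user space.
      */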
1642 static int dev_user_reply_get_cmd(struct file *file, unsigned long arg)
1643 {
1644         int res = 0;
1645         struct scst_user_dev *dev;
1646         struct scst_user_get_cmd *cmd;
1647         struct scst_user_reply_cmd *reply;
1648         struct scst_user_cmd *ucmd;
1649         uint64_t ureply;
1650
1651         TRACE_ENTRY();
1652
1653         mutex_lock(&dev_priv_mutex);
1654         dev = (struct scst_user_dev *)file->private_data;
1655         res = dev_user_check_reg(dev);
1656         if (res != 0) {
1657                 mutex_unlock(&dev_priv_mutex);
1658                 goto out;
1659         }
1660         down_read(&dev->dev_rwsem);
1661         mutex_unlock(&dev_priv_mutex);
1662
1663         res = copy_from_user(&ureply, (void *)arg, sizeof(ureply));
1664         if (res != 0) {
1665                 res = -EFAULT;
                     goto out_up;
             }
1666
1667         TRACE_DBG("ureply %llu", (unsigned long long)ureply);
1668
1669         cmd = kzalloc(max(sizeof(*cmd), sizeof(*reply)), GFP_KERNEL);
1670         if (cmd == NULL) {
1671                 res = -ENOMEM;
1672                 goto out_up;
1673         }
1674
1675         if (ureply != 0) {
1676                 unsigned long u = (unsigned long)ureply;
1677                 reply = (struct scst_user_reply_cmd *)cmd;
1678                 res = copy_from_user(reply, (void *)u, sizeof(*reply));
1679                 if (res != 0) {
1680                         res = -EFAULT;
                             goto out_free;
                     }
1681
1682                 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1683
1684                 res = dev_user_process_reply(dev, reply);
1685                 if (res < 0)
1686                         goto out_free;
1687         }
1688
1689         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1690         res = dev_user_get_next_cmd(dev, &ucmd);
1691         if (res == 0) {
1692                 *cmd = ucmd->user_cmd;
1693                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1694                 TRACE_BUFFER("UCMD", cmd, sizeof(*cmd));
1695                 if (copy_to_user((void *)arg, cmd, sizeof(*cmd)) != 0)
                             res = -EFAULT;
1696         } else
1697                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1698
1699 out_free:
1700         kfree(cmd);
1701
1702 out_up:
1703         up_read(&dev->dev_rwsem);
1704
1705 out:
1706         TRACE_EXIT_RES(res);
1707         return res;
1708 }
1709
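     /* Single entry point dispatching all SCST_USER_* ioctl commands. */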
1710 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1711         unsigned long arg)
1712 {
1713         long res;
1714
1715         TRACE_ENTRY();
1716
1717         switch (cmd) {
1718         case SCST_USER_REPLY_AND_GET_CMD:
1719                 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1720                 res = dev_user_reply_get_cmd(file, arg);
1721                 break;
1722
1723         case SCST_USER_REPLY_CMD:
1724                 TRACE_DBG("%s", "REPLY_CMD");
1725                 res = dev_user_reply_cmd(file, arg);
1726                 break;
1727
1728         case SCST_USER_REGISTER_DEVICE:
1729         {
1730                 struct scst_user_dev_desc *dev_desc;
1731                 TRACE_DBG("%s", "REGISTER_DEVICE");
1732                 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1733                 if (dev_desc == NULL) {
1734                         res = -ENOMEM;
1735                         goto out;
1736                 }
1737                 res = copy_from_user(dev_desc, (void *)arg, sizeof(*dev_desc));
1738                 if (res != 0) {
                             res = -EFAULT;
1739                         kfree(dev_desc);
1740                         goto out;
1741                 }
1742                 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1743                 dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
1744                 res = dev_user_register_dev(file, dev_desc);
1745                 kfree(dev_desc);
1746                 break;
1747         }
1748
1749         case SCST_USER_SET_OPTIONS:
1750         {
1751                 struct scst_user_opt opt;
1752                 TRACE_DBG("%s", "SET_OPTIONS");
1753                 res = copy_from_user(&opt, (void *)arg, sizeof(opt));
1754                 if (res != 0) {
1755                         res = -EFAULT;
                             goto out;
                     }
1756                 TRACE_BUFFER("opt", &opt, sizeof(opt));
1757                 res = dev_user_set_opt(file, &opt);
1758                 break;
1759         }
1760
1761         case SCST_USER_GET_OPTIONS:
1762                 TRACE_DBG("%s", "GET_OPTIONS");
1763                 res = dev_user_get_opt(file, (void *)arg);
1764                 break;
1765
1766         default:
1767                 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
1768                 res = -EINVAL;
1769                 goto out;
1770         }
1771
1772 out:
1773         TRACE_EXIT_RES(res);
1774         return res;
1775 }
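
     /*
      * Illustrative user space loop for the two ioctls above (a sketch
      * only; the exact structure layout and field names are defined in
      * scst_user.h, and error handling is omitted):
      *
      *      set the reply address in the command buffer to 0;
      *      for (;;) {
      *              if (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &buf) != 0)
      *                      break;
      *              process the received command, build a scst_user_reply_cmd;
      *              store that reply's address in the first 64 bits of buf;
      *      }
      */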
1776
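     /*
      * poll() support: reports POLLIN | POLLRDNORM while there are commands
      * waiting to be delivered to user space.
      */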
1777 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
1778 {
1779         int res = 0;
1780         struct scst_user_dev *dev;
1781
1782         TRACE_ENTRY();
1783
1784         mutex_lock(&dev_priv_mutex);
1785         dev = (struct scst_user_dev *)file->private_data;
1786         res = dev_user_check_reg(dev);
1787         if (res != 0) {
1788                 mutex_unlock(&dev_priv_mutex);
1789                 goto out;
1790         }
1791         down_read(&dev->dev_rwsem);
1792         mutex_unlock(&dev_priv_mutex);
1793
1794         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1795
1796         if (!list_empty(&dev->ready_cmd_list) ||
1797             !list_empty(&dev->cmd_lists.active_cmd_list)) {
1798                 res |= POLLIN | POLLRDNORM;
1799                 goto out_unlock;
1800         }
1801
1802         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1803
1804         TRACE_DBG("Before poll_wait() (dev %p)", dev);
1805         poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
1806         TRACE_DBG("After poll_wait() (dev %p)", dev);
1807
1808         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1809
1810         if (!list_empty(&dev->ready_cmd_list) ||
1811             !list_empty(&dev->cmd_lists.active_cmd_list))
1812                 res |= POLLIN | POLLRDNORM;
1815
1816 out_unlock:
1817         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1818
1819         up_read(&dev->dev_rwsem);
1820
1821 out:
1822         TRACE_EXIT_HRES(res);
1823         return res;
1824 }
1825
1826 /*
1827  * Called under cmd_lists.cmd_list_lock; may drop it inside, then reacquires it.
1828  */
1829 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
1830         unsigned long *flags)
1831 {
1832         int state = ucmd->state;
1833         struct scst_user_dev *dev = ucmd->dev;
1834
1835         TRACE_ENTRY();
1836
1837         if (ucmd->this_state_unjammed)
1838                 goto out;
1839
1840         TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
1841                 state);
1842
1843         ucmd->jammed = 1;
1844         ucmd->this_state_unjammed = 1;
1845         ucmd->sent_to_user = 0;
1846
1847         switch (state) {
1848         case UCMD_STATE_PARSING:
1849         case UCMD_STATE_BUF_ALLOCING:
1850                 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1851                         ucmd->aborted = 1;
1852                 else {
1853                         if (busy)
1854                                 scst_set_busy(ucmd->cmd);
1855                         else
1856                                 scst_set_cmd_error(ucmd->cmd,
1857                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1858                 }
1859
1860                 ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
1861
1862                 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
1863                 list_add(&ucmd->cmd->cmd_list_entry,
1864                         &ucmd->cmd->cmd_lists->active_cmd_list);
1865                 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
1866                 break;
1867
1868         case UCMD_STATE_EXECING:
1869                 if (flags != NULL)
1870                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1871                 else
1872                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1873
1874                 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
1875
1876                 if (test_bit(SCST_CMD_ABORTED,  &ucmd->cmd->cmd_flags))
1877                         ucmd->aborted = 1;
1878                 else {
1879                         if (busy)
1880                                 scst_set_busy(ucmd->cmd);
1881                         else
1882                                 scst_set_cmd_error(ucmd->cmd,
1883                                         SCST_LOAD_SENSE(scst_sense_hardw_error));
1884                 }
1885
1886                 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT);
1887                 /* !! At this point cmd and ucmd can be already freed !! */
1888
1889                 if (flags != NULL)
1890                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1891                 else
1892                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1893                 break;
1894
1895         case UCMD_STATE_ON_FREEING:
1896         case UCMD_STATE_ON_CACHE_FREEING:
1897         case UCMD_STATE_TM_EXECING:
1898         case UCMD_STATE_ATTACH_SESS:
1899         case UCMD_STATE_DETACH_SESS:
1900         {
1901                 if (flags != NULL)
1902                         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1903                 else
1904                         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1905
1906                 switch (state) {
1907                 case UCMD_STATE_ON_FREEING:
1908                         dev_user_process_reply_on_free(ucmd);
1909                         break;
1910
1911                 case UCMD_STATE_ON_CACHE_FREEING:
1912                         dev_user_process_reply_on_cache_free(ucmd);
1913                         break;
1914
1915                 case UCMD_STATE_TM_EXECING:
1916                         dev_user_process_reply_tm_exec(ucmd, SCST_MGMT_STATUS_FAILED);
1917                         break;
1918
1919                 case UCMD_STATE_ATTACH_SESS:
1920                 case UCMD_STATE_DETACH_SESS:
1921                         dev_user_process_reply_sess(ucmd, -EFAULT);
1922                         break;
1923                 }
1924
1925                 if (flags != NULL)
1926                         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1927                 else
1928                         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1929                 break;
1930         }
1931
1932         default:
1933                 PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
1934                 sBUG();
1935                 break;
1936         }
1937
1938 out:
1939         TRACE_EXIT();
1940         return;
1941 }
1942
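     /*
      * Unjams every command of the device that was already sent to user
      * space, restarting the hash scan each time the list lock may have been
      * dropped inside dev_user_unjam_cmd().
      */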
1943 static void dev_user_unjam_dev(struct scst_user_dev *dev)
1944 {
1945         int i;
1946         unsigned long flags;
1947         struct scst_user_cmd *ucmd;
1948
1949         TRACE_ENTRY();
1950
1951         TRACE_MGMT_DBG("Unjamming dev %p", dev);
1952
1953         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
1954
1955 repeat:
1956         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
1957                 struct list_head *head = &dev->ucmd_hash[i];
1958                 bool repeat = false;
1959
1960                 list_for_each_entry(ucmd, head, hash_list_entry) {
1961                         if (ucmd_get_check(ucmd))
1962                                 continue;
1963
1964                         TRACE_DBG("ucmd %p, state %x, scst_cmd %p",
1965                                 ucmd, ucmd->state, ucmd->cmd);
1966
1967                         if (ucmd->sent_to_user) {
1968                                 dev_user_unjam_cmd(ucmd, 0, &flags);
1969                                 repeat = true;
1970                         }
1971
1972                         ucmd_put(ucmd);
1973
1974                         if (repeat)
1975                                 goto repeat;
1976                 }
1977         }
1978
1979         if (dev_user_process_scst_commands(dev) != 0)
1980                 goto repeat;
1981
1982         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
1983
1984         TRACE_EXIT();
1985         return;
1986 }
1987
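     /*
      * Completes the outstanding management command with the status reported
      * by user space (or forced by the unjam code).
      */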
1988 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
1989         int status)
1990 {
1991         int res = 0;
1992
1993         TRACE_ENTRY();
1994
1995         TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
1996                 ucmd->user_cmd.tm_cmd.fn, status);
1997
1998         if (status == SCST_MGMT_STATUS_TASK_NOT_EXIST) {
1999                 /*
2000                  * It is possible that user space saw the TM cmd before the
2001                  * cmd to abort, or will never see it at all, because the
2002                  * latter was aborted on the way there. So it is safe to
2003                  * return success instead: if the TM cmd got this far, the
2004                  * cmd to abort apparently did exist.
2005                  */
2006                 status = SCST_MGMT_STATUS_SUCCESS;
2007         }
2008
2009         scst_async_mcmd_completed(ucmd->mcmd, status);
2010
2011         ucmd_put(ucmd);
2012
2013         TRACE_EXIT_RES(res);
2014         return res;
2015 }
2016
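     /*
      * Aborts the aborted-flagged commands still sitting on the ready list
      * that user space has not seen yet.
      */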
2017 static void dev_user_abort_ready_commands(struct scst_user_dev *dev)
2018 {
2019         struct scst_user_cmd *ucmd;
2020         unsigned long flags;
2021
2022         TRACE_ENTRY();
2023
2024         spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2025 again:
2026         list_for_each_entry(ucmd, &dev->ready_cmd_list, ready_cmd_list_entry) {
2027                 if ((ucmd->cmd != NULL) && !ucmd->seen_by_user &&
2028                     test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags)) {
2029                         switch (ucmd->state) {
2030                         case UCMD_STATE_PARSING:
2031                         case UCMD_STATE_BUF_ALLOCING:
2032                         case UCMD_STATE_EXECING:
2033                                 TRACE_MGMT_DBG("Aborting ready ucmd %p", ucmd);
2034                                 list_del(&ucmd->ready_cmd_list_entry);
2035                                 dev_user_unjam_cmd(ucmd, 0, &flags);
2036                                 goto again;
2037                         }
2038                 }
2039         }
2040
2041         spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2042
2043         TRACE_EXIT();
2044         return;
2045 }
2046
2047 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2048         struct scst_tgt_dev *tgt_dev)
2049 {
2050         struct scst_user_cmd *ucmd;
2051         struct scst_user_dev *dev = (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2052         struct scst_user_cmd *ucmd_to_abort = NULL;
2053
2054         TRACE_ENTRY();
2055
2056         /*
2057          * In the approach used here we don't do anything with hung devices,
2058          * i.e. devices that stopped responding and/or have stuck commands.
2059          * We forcibly abort such commands only if they have not yet been
2060          * sent to user space, or if the device is being unloaded, e.g.
2061          * because its handler program got killed. This is because it's
2062          * pretty hard to distinguish between a stuck device and a
2063          * temporarily overloaded one. There are several reasons for that:
2064          *
2065          * 1. Some commands need a lot of time to complete (several
2066          *    hours), so to an impatient user they will always look stuck.
2067          *
2068          * 2. If we forcibly abort just one command, i.e. abort it before
2069          *    it has actually completed in user space, we have to take the
2070          *    whole device offline until we are sure that no previously
2071          *    aborted commands will get executed. Otherwise we risk data
2072          *    corruption: a command that was aborted and reported as
2073          *    completed could actually get executed *after* new commands
2074          *    sent after the forced abort. Many journaling file systems
2075          *    and databases rely on the required command ordering being
2076          *    provided via queue draining, and not taking the whole device
2077          *    offline after a forced abort would break them. This makes the
2078          *    decision whether a command is stuck or not very costly.
2079          *
2080          * So we leave the policy for deciding whether a device is stuck
2081          * to user space and simply let all commands live until they are
2082          * completed or their devices get closed/killed. This approach is
2083          * mostly fine, but it can affect management operations that need
2084          * activity suspended via scst_suspend_activity(), such as device
2085          * or target registration/removal. During normal operation such
2086          * operations should be rare. Plus, where possible,
2087          * scst_suspend_activity() returns EBUSY after a timeout so that
2088          * its caller doesn't get stuck forever either.
2089          *
2090          * ToDo: reimplement this in the SCST core, so that stuck commands
2091          * affect only the related devices.
2092          */
2093
2094         dev_user_abort_ready_commands(dev);
2095
2096         /* We can't afford to miss a TM command due to memory shortage */
2097         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2098
2099         ucmd->user_cmd.cmd_h = ucmd->h;
2100         ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2101         ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2102         ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2103         ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
2104         ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
2105
2106         if (mcmd->cmd_to_abort != NULL) {
2107                 ucmd_to_abort = (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
2108                 if (ucmd_to_abort != NULL)
2109                         ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2110         }
2111
2112         TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2113                 "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
2114                 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2115                 ucmd->user_cmd.tm_cmd.cmd_h_to_abort);
2116
2117         ucmd->mcmd = mcmd;
2118         ucmd->state = UCMD_STATE_TM_EXECING;
2119
2120         scst_prepare_async_mcmd(mcmd);
2121
2122         dev_user_add_to_ready(ucmd);
2123
2124         TRACE_EXIT();
2125         return SCST_DEV_TM_NOT_COMPLETED;
2126 }
2127
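     /* Binds a just created SCST virtual device to its scst_user_dev by name. */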
2128 static int dev_user_attach(struct scst_device *sdev)
2129 {
2130         int res = 0;
2131         struct scst_user_dev *dev = NULL, *d;
2132
2133         TRACE_ENTRY();
2134
2135         spin_lock(&dev_list_lock);
2136         list_for_each_entry(d, &dev_list, dev_list_entry) {
2137                 if (strcmp(d->name, sdev->virt_name) == 0) {
2138                         dev = d;
2139                         break;
2140                 }
2141         }
2142         spin_unlock(&dev_list_lock);
2143         if (dev == NULL) {
2144                 PRINT_ERROR("Device %s not found", sdev->virt_name);
2145                 res = -EINVAL;
2146                 goto out;
2147         }
2148
2149         sdev->p_cmd_lists = &dev->cmd_lists;
2150         sdev->dh_priv = dev;
2151         sdev->tst = dev->tst;
2152         sdev->queue_alg = dev->queue_alg;
2153         sdev->swp = dev->swp;
2154         sdev->tas = dev->tas;
2155         sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2156
2157         dev->sdev = sdev;
2158
2159         PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2160                 dev->name);
2161
2162 out:
2163         TRACE_EXIT();
2164         return res;
2165 }
2166
2167 static void dev_user_detach(struct scst_device *sdev)
2168 {
2169         struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
2170
2171         TRACE_ENTRY();
2172
2173         TRACE_DBG("virt_id %d", sdev->virt_id);
2174
2175         PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2176                 dev->name);
2177
2178         /* dev will be freed by the caller */
2179         sdev->dh_priv = NULL;
2180         dev->sdev = NULL;
2181
2182         TRACE_EXIT();
2183         return;
2184 }
2185
2186 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2187 {
2188         int res = 0;
2189         unsigned long flags;
2190
2191         TRACE_ENTRY();
2192
2193         TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2194
2195         spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2196
2197         if (ucmd->state == UCMD_STATE_ATTACH_SESS) {
2198                 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2199                 ucmd->result = status;
2200         } else if (ucmd->state == UCMD_STATE_DETACH_SESS) {
2201                 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2202         } else
2203                 sBUG();
2204
2205         if (ucmd->cmpl != NULL)
2206                 complete_all(ucmd->cmpl);
2207
2208         spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2209
2210         ucmd_put(ucmd);
2211
2212         TRACE_EXIT_RES(res);
2213         return res;
2214 }
2215
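     /*
      * Sends SCST_USER_ATTACH_SESS to user space and synchronously waits up
      * to DEV_USER_ATTACH_TIMEOUT for its answer.
      */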
2216 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2217 {
2218         struct scst_user_dev *dev =
2219                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2220         int res = 0, rc;
2221         struct scst_user_cmd *ucmd;
2222
2223         TRACE_ENTRY();
2224
2225         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2226         if (ucmd == NULL)
2227                 goto out_nomem;
2228
2229         ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL);
2230         if (ucmd->cmpl == NULL)
2231                 goto out_put_nomem;
2232
2233         init_completion(ucmd->cmpl);
2234
2235         ucmd->user_cmd.cmd_h = ucmd->h;
2236         ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2237         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2238         ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2239         ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2240         ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
2241         strncpy(ucmd->user_cmd.sess.initiator_name,
2242                 tgt_dev->sess->initiator_name,
2243                 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2244         ucmd->user_cmd.sess.initiator_name[
2245                 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2246
2247         TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %Lx, LUN %Lx, "
2248                 "threads_num %d, rd_only_flag %d, initiator %s)", ucmd, ucmd->h,
2249                 ucmd->user_cmd.sess.sess_h, ucmd->user_cmd.sess.lun,
2250                 ucmd->user_cmd.sess.threads_num, ucmd->user_cmd.sess.rd_only,
2251                 ucmd->user_cmd.sess.initiator_name);
2252
2253         ucmd->state = UCMD_STATE_ATTACH_SESS;
2254
2255         ucmd_get(ucmd);
2256
2257         dev_user_add_to_ready(ucmd);
2258
2259         rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2260         if (rc > 0)
2261                 res = ucmd->result;
2262         else {
2263                 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2264                 res = -EFAULT;
2265         }
2266
2267         sBUG_ON(irqs_disabled());
2268
2269         spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2270         kfree(ucmd->cmpl);
2271         ucmd->cmpl = NULL;
2272         spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2273
2274         ucmd_put(ucmd);
2275
2276 out:
2277         TRACE_EXIT_RES(res);
2278         return res;
2279
2280 out_put_nomem:
2281         ucmd_put(ucmd);
2282
2283 out_nomem:
2284         res = -ENOMEM;
2285         goto out;
2286 }
2287
2288 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2289 {
2290         struct scst_user_dev *dev =
2291                 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2292         struct scst_user_cmd *ucmd;
2293
2294         TRACE_ENTRY();
2295
2296         /*
2297          * We can't afford to miss the DETACH_SESS command due to memory
2298          * shortage, because that might leak memory in the user space handler.
2299          */
2300         ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2301         if (ucmd == NULL)
2302                 goto out;
2303
2304         TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %Lx)", ucmd,
2305                 ucmd->h, ucmd->user_cmd.sess.sess_h);
2306
2307         ucmd->user_cmd.cmd_h = ucmd->h;
2308         ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2309         ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2310
2311         ucmd->state = UCMD_STATE_DETACH_SESS;
2312
2313         dev_user_add_to_ready(ucmd);
2314
2315 out:
2316         TRACE_EXIT();
2317         return;
2318 }
2319
2320 /* No locks are needed, but the activity must be suspended */
2321 static void dev_user_setup_functions(struct scst_user_dev *dev)
2322 {
2323         TRACE_ENTRY();
2324
2325         dev->devtype.parse = dev_user_parse;
2326         dev->devtype.dev_done = NULL;
2327
2328         if (dev->parse_type != SCST_USER_PARSE_CALL) {
2329                 switch (dev->devtype.type) {
2330                 case TYPE_DISK:
2331                         dev->generic_parse = scst_sbc_generic_parse;
2332                         dev->devtype.dev_done = dev_user_disk_done;
2333                         break;
2334
2335                 case TYPE_TAPE:
2336                         dev->generic_parse = scst_tape_generic_parse;
2337                         dev->devtype.dev_done = dev_user_tape_done;
2338                         break;
2339
2340                 case TYPE_MOD:
2341                         dev->generic_parse = scst_modisk_generic_parse;
2342                         dev->devtype.dev_done = dev_user_disk_done;
2343                         break;
2344
2345                 case TYPE_ROM:
2346                         dev->generic_parse = scst_cdrom_generic_parse;
2347                         dev->devtype.dev_done = dev_user_disk_done;
2348                         break;
2349
2350                 case TYPE_MEDIUM_CHANGER:
2351                         dev->generic_parse = scst_changer_generic_parse;
2352                         break;
2353
2354                 case TYPE_PROCESSOR:
2355                         dev->generic_parse = scst_processor_generic_parse;
2356                         break;
2357
2358                 case TYPE_RAID:
2359                         dev->generic_parse = scst_raid_generic_parse;
2360                         break;
2361
2362                 default:
2363                         PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2364                                 "for it", dev->devtype.type);
2365                         dev->parse_type = SCST_USER_PARSE_CALL;
2366                         break;
2367                 }
2368         } else {
2369                 dev->generic_parse = NULL;
2370                 dev->devtype.dev_done = NULL;
2371         }
2372
2373         TRACE_EXIT();
2374         return;
2375 }
2376
2377 static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
2378 {
2379         char ver[sizeof(DEV_USER_VERSION)+1];
2380         int res;
2381
2382         res = copy_from_user(ver, (void *)(unsigned long)dev_desc->version_str,
2383                                 sizeof(ver));
2384         if (res != 0) {
2385                 PRINT_ERROR("%s", "Unable to get version string");
                     res = -EFAULT;
2386                 goto out;
2387         }
2388         ver[sizeof(ver)-1] = '\0';
2389
2390         if (strcmp(ver, DEV_USER_VERSION) != 0) {
2391                 /* ->name already 0-terminated in dev_user_ioctl() */
2392                 PRINT_ERROR("Incorrect version of user device %s (%s)",
2393                         dev_desc->name, ver);
2394                 res = -EINVAL;
2395                 goto out;
2396         }
2397
2398 out:
2399         return res;
2400 }
2401
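     /*
      * SCST_USER_REGISTER_DEVICE handler: validates the descriptor,
      * allocates and initializes the scst_user_dev, registers it with the
      * SCST core and attaches it to the file descriptor.
      */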
2402 static int dev_user_register_dev(struct file *file,
2403         const struct scst_user_dev_desc *dev_desc)
2404 {
2405         int res = -ENOMEM, i;
2406         struct scst_user_dev *dev, *d;
2407         int block;
2408
2409         TRACE_ENTRY();
2410
2411         res = dev_user_check_version(dev_desc);
2412         if (res != 0)
2413                 goto out;
2414
2415         switch (dev_desc->type) {
2416         case TYPE_DISK:
2417         case TYPE_ROM:
2418         case TYPE_MOD:
2419                 if (dev_desc->block_size == 0) {
2420                         PRINT_ERROR("Wrong block size %d", dev_desc->block_size);
2421                         res = -EINVAL;
2422                         goto out;
2423                 }
2424                 block = scst_calc_block_shift(dev_desc->block_size);
2425                 if (block == -1) {
2426                         res = -EINVAL;
2427                         goto out;
2428                 }
2429                 break;
2430         default:
2431                 block = dev_desc->block_size;
2432                 break;
2433         }
2434
2435         if (!try_module_get(THIS_MODULE)) {
2436                 PRINT_ERROR("%s", "Failed to get module reference");
                     res = -EBUSY;
2437                 goto out;
2438         }
2439
2440         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2441         if (dev == NULL) {
                     res = -ENOMEM;
2442                 goto out_put;
             }
2443
2444         init_rwsem(&dev->dev_rwsem);
2445         spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2446         INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2447         init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2448         INIT_LIST_HEAD(&dev->ready_cmd_list);
2449         if (file->f_flags & O_NONBLOCK) {
2450                 TRACE_DBG("%s", "Non-blocking operations");
2451                 dev->blocking = 0;
2452         } else
2453                 dev->blocking = 1;
2454         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2455                 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2456
2457         strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2458         dev->name[sizeof(dev->name)-1] = '\0';
2459
2460         /*
2461          * We don't use a clustered pool, since clustering implies reordering
2462          * of pages, which isn't possible with user-space-supplied buffers.
2463          * Although it's still possible to cluster pages that adjoin each
2464          * other, it doesn't seem worth the effort.
2465          */
2466         dev->pool = sgv_pool_create(dev->name, 0);
2467         if (dev->pool == NULL) {
                     res = -ENOMEM;
                     kfree(dev);
2468                 goto out_put;
             }
2469         sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2470                 dev_user_free_sg_entries);
2471
2472         scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
2473                 dev->name);
2474         dev->devtype.type = dev_desc->type;
2475         dev->devtype.threads_num = -1;
2476         dev->devtype.parse_atomic = 1;
2477         dev->devtype.exec_atomic = 0; /* no point to make it 1 */
2478         dev->devtype.dev_done_atomic = 1;
2479         dev->devtype.no_proc = 1;
2480         dev->devtype.attach = dev_user_attach;
2481         dev->devtype.detach = dev_user_detach;
2482         dev->devtype.attach_tgt = dev_user_attach_tgt;
2483         dev->devtype.detach_tgt = dev_user_detach_tgt;
2484         dev->devtype.exec = dev_user_exec;
2485         dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2486         dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2487
2488         init_completion(&dev->cleanup_cmpl);
2489         dev->block = block;
2490         dev->def_block = block;
2491
2492         res = __dev_user_set_opt(dev, &dev_desc->opt);
2493
2494         TRACE_MEM("dev %p, name %s", dev, dev->name);
2495
2496         spin_lock(&dev_list_lock);
2497
2498         list_for_each_entry(d, &dev_list, dev_list_entry) {
2499                 if (strcmp(d->name, dev->name) == 0) {
2500                         PRINT_ERROR("Device %s already exists",
2501                                 dev->name);
2502                         res = -EEXIST;
2503                         spin_unlock(&dev_list_lock);
2504                         goto out_free;
2505                 }
2506         }
2507
2508         list_add_tail(&dev->dev_list_entry, &dev_list);
2509
2510         spin_unlock(&dev_list_lock);
2511
2512         if (res != 0)
2513                 goto out_del_free;
2514
2515         res = scst_register_virtual_dev_driver(&dev->devtype);
2516         if (res < 0)
2517                 goto out_del_free;
2518
2519         dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2520         if (dev->virt_id < 0) {
2521                 res = dev->virt_id;
2522                 goto out_unreg_handler;
2523         }
2524
2525         mutex_lock(&dev_priv_mutex);
2526         if (file->private_data != NULL) {
2527                 mutex_unlock(&dev_priv_mutex);
2528                 PRINT_ERROR("%s", "Device already registered");
2529                 res = -EINVAL;
2530                 goto out_unreg_drv;
2531         }
2532         file->private_data = dev;
2533         mutex_unlock(&dev_priv_mutex);
2534
2535 out:
2536         TRACE_EXIT_RES(res);
2537         return res;
2538
2539 out_unreg_drv:
2540         scst_unregister_virtual_device(dev->virt_id);
2541
2542 out_unreg_handler:
2543         scst_unregister_virtual_dev_driver(&dev->devtype);
2544
2545 out_del_free:
2546         spin_lock(&dev_list_lock);
2547         list_del(&dev->dev_list_entry);
2548         spin_unlock(&dev_list_lock);
2549
2550 out_free:
2551         sgv_pool_destroy(dev->pool);
2552         kfree(dev);
2553         goto out_put;
2554
2555 out_put:
2556         module_put(THIS_MODULE);
2557         goto out;
2558 }
2559
2560 static int __dev_user_set_opt(struct scst_user_dev *dev,
2561         const struct scst_user_opt *opt)
2562 {
2563         int res = 0;
2564
2565         TRACE_ENTRY();
2566
2567         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2568                 "partial_transfers_type %x, partial_len %d", opt->parse_type,
2569                 opt->on_free_cmd_type, opt->memory_reuse_type,
2570                 opt->partial_transfers_type, opt->partial_len);
2571
2572         if ((opt->parse_type > SCST_USER_MAX_PARSE_OPT) ||
2573             (opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT) ||
2574             (opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT) ||
2575             (opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT)) {
2576                 PRINT_ERROR("%s", "Invalid option");
2577                 res = -EINVAL;
2578                 goto out;
2579         }
2580
2581         if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
2582              (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
2583             ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
2584              (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
2585             (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1)) {
2586                 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x, "
2587                         "tas %x, has_own_order_mgmt %x)", opt->tst,
2588                         opt->queue_alg, opt->swp, opt->tas, opt->has_own_order_mgmt);
2589                 res = -EINVAL;
2590                 goto out;
2591         }
2592
2593         dev->parse_type = opt->parse_type;
2594         dev->on_free_cmd_type = opt->on_free_cmd_type;
2595         dev->memory_reuse_type = opt->memory_reuse_type;
2596         dev->partial_transfers_type = opt->partial_transfers_type;
2597         dev->partial_len = opt->partial_len;
2598
2599         dev->tst = opt->tst;
2600         dev->queue_alg = opt->queue_alg;
2601         dev->swp = opt->swp;
2602         dev->tas = opt->tas;
2603         dev->has_own_order_mgmt = opt->has_own_order_mgmt;
2604         if (dev->sdev != NULL) {
2605                 dev->sdev->tst = opt->tst;
2606                 dev->sdev->queue_alg = opt->queue_alg;
2607                 dev->sdev->swp = opt->swp;
2608                 dev->sdev->tas = opt->tas;
2609                 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
2610         }
2611
2612         dev_user_setup_functions(dev);
2613
2614 out:
2615         TRACE_EXIT_RES(res);
2616         return res;
2617 }
2618
2619 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
2620 {
2621         int res = 0;
2622         struct scst_user_dev *dev;
2623
2624         TRACE_ENTRY();
2625
2626         mutex_lock(&dev_priv_mutex);
2627         dev = (struct scst_user_dev *)file->private_data;
2628         res = dev_user_check_reg(dev);
2629         if (res != 0) {
2630                 mutex_unlock(&dev_priv_mutex);
2631                 goto out;
2632         }
2633         down_write(&dev->dev_rwsem);
2634         mutex_unlock(&dev_priv_mutex);
2635
2636         res = scst_suspend_activity(true);
2637         if (res != 0)
2638                 goto out;
2639
2640         res = __dev_user_set_opt(dev, opt);
2641
2642         scst_resume_activity();
2643
2644         up_write(&dev->dev_rwsem);
2645
2646 out:
2647         TRACE_EXIT_RES(res);
2648         return res;
2649 }
2650
2651 static int dev_user_get_opt(struct file *file, void *arg)
2652 {
2653         int res = 0;
2654         struct scst_user_dev *dev;
2655         struct scst_user_opt opt;
2656
2657         TRACE_ENTRY();
2658
2659         mutex_lock(&dev_priv_mutex);
2660         dev = (struct scst_user_dev *)file->private_data;
2661         res = dev_user_check_reg(dev);
2662         if (res != 0) {
2663                 mutex_unlock(&dev_priv_mutex);
2664                 goto out;
2665         }
2666         down_read(&dev->dev_rwsem);
2667         mutex_unlock(&dev_priv_mutex);
2668
2669         opt.parse_type = dev->parse_type;
2670         opt.on_free_cmd_type = dev->on_free_cmd_type;
2671         opt.memory_reuse_type = dev->memory_reuse_type;
2672         opt.partial_transfers_type = dev->partial_transfers_type;
2673         opt.partial_len = dev->partial_len;
2674         opt.tst = dev->tst;
2675         opt.queue_alg = dev->queue_alg;
2676         opt.tas = dev->tas;
2677         opt.swp = dev->swp;
2678         opt.has_own_order_mgmt = dev->has_own_order_mgmt;
2679
2680         TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2681                 "partial_transfers_type %x, partial_len %d", opt.parse_type,
2682                 opt.on_free_cmd_type, opt.memory_reuse_type,
2683                 opt.partial_transfers_type, opt.partial_len);
2684
2685         if (copy_to_user(arg, &opt, sizeof(opt)) != 0)
                     res = -EFAULT;
2686
2687         up_read(&dev->dev_rwsem);
2688 out:
2689         TRACE_EXIT_RES(res);
2690         return res;
2691 }
2692
2693 static int dev_usr_parse(struct scst_cmd *cmd)
2694 {
2695         sBUG();
2696         return SCST_CMD_STATE_DEFAULT;
2697 }
2698
2699 /* Needed only for /proc support */
2700 #define USR_TYPE {                      \
2701         .name =         DEV_USER_NAME,  \
2702         .type =         -1,             \
2703         .parse =        dev_usr_parse,  \
2704 }
2705
2706 static struct scst_dev_type dev_user_devtype = USR_TYPE;
2707
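     /*
      * Close of the control file descriptor. The device is handed to the
      * cleanup thread twice: first to unjam the commands still in flight,
      * then, after unregistration with cleanup_done set, to let the thread
      * complete cleanup_cmpl.
      */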
2708 static int dev_user_release(struct inode *inode, struct file *file)
2709 {
2710         int res = 0;
2711         struct scst_user_dev *dev;
2712
2713         TRACE_ENTRY();
2714
2715         mutex_lock(&dev_priv_mutex);
2716         dev = (struct scst_user_dev *)file->private_data;
2717         if (dev == NULL) {
2718                 mutex_unlock(&dev_priv_mutex);
2719                 goto out;
2720         }
2721         file->private_data = NULL;
2722
2723         TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
2724
2725         spin_lock(&dev_list_lock);
2726         list_del(&dev->dev_list_entry);
2727         spin_unlock(&dev_list_lock);
2728
2729         mutex_unlock(&dev_priv_mutex);
2730
2731         down_write(&dev->dev_rwsem);
2732
2733         spin_lock_irq(&cleanup_lock);
2734
2735         dev->cleaning = 1;
2736         smp_mb(); /* pairs with dev_user_add_to_ready() */
2737
2738         sBUG_ON(dev->in_cleanup_list);
2739         list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
2740         dev->in_cleanup_list = 1;
2741
2742         spin_unlock_irq(&cleanup_lock);
2743
2744         wake_up(&cleanup_list_waitQ);
2745         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2746
2747         scst_unregister_virtual_device(dev->virt_id);
2748         scst_unregister_virtual_dev_driver(&dev->devtype);
2749
2750         sgv_pool_destroy(dev->pool);
2751
2752         TRACE_DBG("Unregistering finished (dev %p)", dev);
2753
2754         dev->cleanup_done = 1;
2755         smp_mb(); /* just in case */
2756
2757         spin_lock_irq(&cleanup_lock);
2758         if (!dev->in_cleanup_list) {
2759                 list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
2760                 dev->in_cleanup_list = 1;
2761         }
2762         spin_unlock_irq(&cleanup_lock);
2763
2764         wake_up(&cleanup_list_waitQ);
2765         wake_up(&dev->cmd_lists.cmd_list_waitQ);
2766
2767         wait_for_completion(&dev->cleanup_cmpl);
2768
2769         up_write(&dev->dev_rwsem); /* to make the debug check happy */
2770
2771         TRACE_DBG("Releasing completed (dev %p)", dev);
2772
2773         kfree(dev);
2774
2775         module_put(THIS_MODULE);
2776
2777 out:
2778         TRACE_EXIT_RES(res);
2779         return res;
2780 }
2781
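     /*
      * Runs in the cleanup thread: switches the device to non-blocking mode
      * and unjams commands until dev_user_get_next_cmd() returns -EAGAIN
      * with cleanup_done set, then signals cleanup_cmpl.
      */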
2782 static void dev_user_process_cleanup(struct scst_user_dev *dev)
2783 {
2784         struct scst_user_cmd *ucmd;
2785         int rc;
2786
2787         TRACE_ENTRY();
2788
2789         dev->blocking = 0;
2790
2791         while (1) {
2792                 TRACE_DBG("Cleaning up dev %p", dev);
2793
2794                 dev_user_unjam_dev(dev);
2795
2796                 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2797                 smp_mb(); /* just in case; pairs with the cleanup_done
2798                            * assignment in dev_user_release().
2799                            */
2800                 rc = dev_user_get_next_cmd(dev, &ucmd);
2801                 if (rc == 0)
2802                         dev_user_unjam_cmd(ucmd, 1, NULL);
2803                 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2804
2805                 if (rc == -EAGAIN) {
2806                         if (dev->cleanup_done)
2807                                 break;
2808                         else {
2809                                 TRACE_DBG("No more commands (dev %p)", dev);
2810                                 goto out;
2811                         }
2812                 }
2813         }
2814
2815 #ifdef EXTRACHECKS
2816 {
2817         int i;
2818         for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2819                 struct list_head *head = &dev->ucmd_hash[i];
2820                 struct scst_user_cmd *ucmd;
2821 again:
2822                 list_for_each_entry(ucmd, head, hash_list_entry) {
2823                         PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd,
2824                                 ucmd->state, atomic_read(&ucmd->ucmd_ref));
2825                         ucmd_put(ucmd);
2826                         goto again;
2827                 }
2828         }
2829 }
2830 #endif
2831
2832         TRACE_DBG("Cleanup done (dev %p)", dev);
2833         complete_all(&dev->cleanup_cmpl);
2834
2835 out:
2836         TRACE_EXIT();
2837         return;
2838 }
2839
2840 static inline int test_cleanup_list(void)
2841 {
2842         int res = !list_empty(&cleanup_list) ||
2843                   unlikely(kthread_should_stop());
2844         return res;
2845 }
2846
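     /* Kernel thread draining cleanup_list until the module gets unloaded. */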
2847 static int dev_user_cleanup_thread(void *arg)
2848 {
2849         struct scst_user_dev *dev;
2850
2851         TRACE_ENTRY();
2852
2853         PRINT_INFO("Cleanup thread started, PID %d", current->pid);
2854
2855         current->flags |= PF_NOFREEZE;
2856
2857         spin_lock_irq(&cleanup_lock);
2858         while (!kthread_should_stop()) {
2859                 wait_queue_t wait;
2860                 init_waitqueue_entry(&wait, current);
2861
2862                 if (!test_cleanup_list()) {
2863                         add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
2864                         for (;;) {
2865                                 set_current_state(TASK_INTERRUPTIBLE);
2866                                 if (test_cleanup_list())
2867                                         break;
2868                                 spin_unlock_irq(&cleanup_lock);
2869                                 schedule();
2870                                 spin_lock_irq(&cleanup_lock);
2871                         }
2872                         set_current_state(TASK_RUNNING);
2873                         remove_wait_queue(&cleanup_list_waitQ, &wait);
2874                 }
2875
2876                 while (!list_empty(&cleanup_list)) {
2877                         dev = list_entry(cleanup_list.next, typeof(*dev),
2878                                 cleanup_list_entry);
2879                         list_del(&dev->cleanup_list_entry);
2880                         dev->in_cleanup_list = 0;
2881                         spin_unlock_irq(&cleanup_lock);
2882
2883                         dev_user_process_cleanup(dev);
2884
2885                         spin_lock_irq(&cleanup_lock);
2886                 }
2887         }
2888         spin_unlock_irq(&cleanup_lock);
2889
2890         /*
2891          * If kthread_should_stop() is true, we are guaranteed to be in the
2892          * middle of module unload, so cleanup_list must be empty.
2893          */
2894         sBUG_ON(!list_empty(&cleanup_list));
2895
2896         PRINT_INFO("Cleanup thread PID %d finished", current->pid);
2897
2898         TRACE_EXIT();
2899         return 0;
2900 }
2901
2902 static int __init init_scst_user(void)
2903 {
2904         int res = 0;
2905         struct class_device *class_member;
2906
2907         TRACE_ENTRY();
2908
2909 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
2910         PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
2911                 "Consider change VMSPLIT option or use 64-bit "
2912                 "configuration instead. See README file for details.");
2913         res = -EINVAL;
2914         goto out;
2915 #endif
2916
2917         user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
2918         if (user_cmd_cachep == NULL) {
2919                 res = -ENOMEM;
2920                 goto out;
2921         }
2922
2923         dev_user_devtype.module = THIS_MODULE;
2924
2925         res = scst_register_virtual_dev_driver(&dev_user_devtype);
2926         if (res < 0)
2927                 goto out_cache;
2928
2929         res = scst_dev_handler_build_std_proc(&dev_user_devtype);
2930         if (res != 0)
2931                 goto out_unreg;
2932
2933         dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
2934         if (IS_ERR(dev_user_sysfs_class)) {
2935                 PRINT_ERROR("%s", "Unable to create sysfs class for the SCST "
2936                         "user space handler");
2937                 res = PTR_ERR(dev_user_sysfs_class);
2938                 goto out_proc;
2939         }
2940
2941         res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
2942         if (res) {
2943                 PRINT_ERROR("Unable to get major %d for the SCST user space "
                             "handler", DEV_USER_MAJOR);
2944                 goto out_class;
2945         }
2946
2947         class_member = class_device_create(dev_user_sysfs_class, NULL,
2948                 MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
2949         if (IS_ERR(class_member)) {
2950                 res = PTR_ERR(class_member);
2951                 goto out_chrdev;
2952         }
2953
2954         cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
2955                 "scst_usr_cleanupd");
2956         if (IS_ERR(cleanup_thread)) {
2957                 res = PTR_ERR(cleanup_thread);
2958                 PRINT_ERROR("kthread_run() failed: %d", res);
2959                 goto out_dev;
2960         }
2961
2962 out:
2963         TRACE_EXIT_RES(res);
2964         return res;
2965
2966 out_dev:
2967         class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
2968
2969 out_chrdev:
2970         unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
2971
2972 out_class:
2973         class_destroy(dev_user_sysfs_class);
2974
2975 out_proc:
2976         scst_dev_handler_destroy_std_proc(&dev_user_devtype);
2977
2978 out_unreg:
2979         scst_unregister_virtual_dev_driver(&dev_user_devtype);
2980
2981 out_cache:
2982         kmem_cache_destroy(user_cmd_cachep);
2983         goto out;
2984 }
2985
2986 static void __exit exit_scst_user(void)
2987 {
2988         int rc;
2989
2990         TRACE_ENTRY();
2991
2992         rc = kthread_stop(cleanup_thread);
2993         if (rc < 0)
2994                 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
2995
2996         unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
2997         class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
2998         class_destroy(dev_user_sysfs_class);
2999
3000         scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3001         scst_unregister_virtual_dev_driver(&dev_user_devtype);
3002
3003         kmem_cache_destroy(user_cmd_cachep);
3004
3005         TRACE_EXIT();
3006         return;
3007 }
3008
3009 module_init(init_scst_user);
3010 module_exit(exit_scst_user);
3011
3012 MODULE_AUTHOR("Vladislav Bolkhovitin");
3013 MODULE_LICENSE("GPL");
3014 MODULE_DESCRIPTION("Virtual user space device handler for SCST");
3015 MODULE_VERSION(SCST_VERSION_STRING);
3016 MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);