4 * Copyright (C) 2007 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2007 - 2008 CMS Distribution Limited
7 * SCSI virtual user space device handler
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation, version 2
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #include <linux/kthread.h>
21 #include <linux/delay.h>
22 #include <linux/poll.h>
23 #include <linux/stddef.h>
25 #define LOG_PREFIX DEV_USER_NAME
28 #include "scst_user.h"
29 #include "scst_dev_handler.h"
31 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
32 #warning "HIGHMEM kernel configurations are not supported by this module,\
33 because nowadays it isn't worth the effort. Consider changing the\
34 VMSPLIT option or using a 64-bit configuration instead. See README file\
38 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
39 #define COMPLETION_INITIALIZER_ONSTACK(work) \
40 ({ init_completion(&work); work; })
43 * Lockdep needs to run a non-constant initializer for on-stack
44 * completions - so we use the _ONSTACK() variant for those that
45 * are on the kernel stack:
48 # define DECLARE_COMPLETION_ONSTACK(work) \
49 struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
51 # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
56 #define DEV_USER_MAJOR 237
57 #define DEV_USER_CMD_HASH_ORDER 6
58 #define DEV_USER_ATTACH_TIMEOUT (5*HZ)
60 struct scst_user_dev {
61 struct rw_semaphore dev_rwsem;
63 struct scst_cmd_lists cmd_lists;
65 /* Protected by cmd_lists.cmd_list_lock */
66 struct list_head ready_cmd_list;
69 /* Protected by dev_rwsem, or need no protection at all */
69 unsigned int blocking:1;
70 unsigned int cleanup_done:1;
72 unsigned int queue_alg:4;
75 unsigned int d_sense:1;
76 unsigned int has_own_order_mgmt:1;
78 int (*generic_parse)(struct scst_cmd *cmd,
79 int (*get_block)(struct scst_cmd *cmd));
84 struct scst_mem_lim udev_mem_lim;
85 struct sgv_pool *pool;
86 struct sgv_pool *pool_clust;
89 uint8_t on_free_cmd_type;
90 uint8_t memory_reuse_type;
91 uint8_t partial_transfers_type;
94 struct scst_dev_type devtype;
96 /* Both protected by cmd_lists.cmd_list_lock */
97 unsigned int handle_counter;
98 struct list_head ucmd_hash[1 << DEV_USER_CMD_HASH_ORDER];
100 struct scst_device *sdev;
103 struct list_head dev_list_entry;
104 char name[SCST_MAX_NAME];
106 struct list_head cleanup_list_entry;
107 struct completion cleanup_cmpl;
110 /* Most fields are unprotected, since only one thread at a time can access them */
111 struct scst_user_cmd {
112 struct scst_cmd *cmd;
113 struct scst_user_dev *dev;
117 unsigned int buff_cached:1;
118 unsigned int buf_dirty:1;
119 unsigned int background_exec:1;
120 unsigned int aborted:1;
122 struct scst_user_cmd *buf_ucmd;
126 int first_page_offset;
128 struct page **data_pages;
129 struct sgv_pool_obj *sgv;
132 * Special flags, which can be accessed asynchronously (hence "long").
133 * Protected by cmd_lists.cmd_list_lock.
135 unsigned long sent_to_user:1;
136 unsigned long jammed:1;
137 unsigned long this_state_unjammed:1;
138 unsigned long seen_by_user:1; /* here only as a small optimization */
142 struct list_head ready_cmd_list_entry;
145 struct list_head hash_list_entry;
147 int user_cmd_payload_len;
148 struct scst_user_get_cmd user_cmd;
150 /* cmpl used only by ATTACH_SESS, mcmd used only by TM */
152 struct completion *cmpl;
153 struct scst_mgmt_cmd *mcmd;
158 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
160 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);
162 static int dev_user_parse(struct scst_cmd *cmd);
163 static int dev_user_exec(struct scst_cmd *cmd);
164 static void dev_user_on_free_cmd(struct scst_cmd *cmd);
165 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
166 struct scst_tgt_dev *tgt_dev);
168 static int dev_user_disk_done(struct scst_cmd *cmd);
169 static int dev_user_tape_done(struct scst_cmd *cmd);
171 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
172 gfp_t gfp_mask, void *priv);
173 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
176 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
178 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
179 unsigned long *flags);
181 static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd);
182 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
184 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
185 static int dev_user_register_dev(struct file *file,
186 const struct scst_user_dev_desc *dev_desc);
187 static int dev_user_unregister_dev(struct file *file);
188 static int dev_user_flush_cache(struct file *file);
189 static int dev_user_capacity_changed(struct file *file);
190 static int __dev_user_set_opt(struct scst_user_dev *dev,
191 const struct scst_user_opt *opt);
192 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
193 static int dev_user_get_opt(struct file *file, void __user *arg);
195 static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
196 static long dev_user_ioctl(struct file *file, unsigned int cmd,
198 static int dev_user_release(struct inode *inode, struct file *file);
202 static struct kmem_cache *user_cmd_cachep;
203 static struct kmem_cache *user_get_cmd_cachep;
205 static DEFINE_MUTEX(dev_priv_mutex);
207 static const struct file_operations dev_user_fops = {
208 .poll = dev_user_poll,
209 .unlocked_ioctl = dev_user_ioctl,
211 .compat_ioctl = dev_user_ioctl,
213 .release = dev_user_release,
216 static struct class *dev_user_sysfs_class;
218 static DEFINE_SPINLOCK(dev_list_lock);
219 static LIST_HEAD(dev_list);
221 static DEFINE_SPINLOCK(cleanup_lock);
222 static LIST_HEAD(cleanup_list);
223 static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
224 static struct task_struct *cleanup_thread;
227 * Skip this command if the result is not 0. Must be called under
228 * cmd_lists.cmd_list_lock and IRQ off.
230 static inline bool ucmd_get_check(struct scst_user_cmd *ucmd)
232 int r = atomic_inc_return(&ucmd->ucmd_ref);
234 if (unlikely(r == 1)) {
235 TRACE_DBG("ucmd %p is being destroyed", ucmd);
236 atomic_dec(&ucmd->ucmd_ref);
239 * Necessary code is serialized by cmd_list_lock in
243 TRACE_DBG("ucmd %p, new ref_cnt %d", ucmd,
244 atomic_read(&ucmd->ucmd_ref));
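/*
 * __ucmd_get() with barrier == true (i.e. ucmd_get_ordered()) issues
 * smp_mb__after_atomic_inc() after taking the reference, so that flag
 * writes done after the get (e.g. background_exec in
 * dev_user_process_reply_exec()) can't be reordered before it.
 * ucmd_get() is the plain variant without the barrier.
 */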
250 static inline void __ucmd_get(struct scst_user_cmd *ucmd, bool barrier)
252 TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
253 atomic_inc(&ucmd->ucmd_ref);
255 smp_mb__after_atomic_inc();
258 static inline void ucmd_get_ordered(struct scst_user_cmd *ucmd)
260 __ucmd_get(ucmd, true);
263 static inline void ucmd_get(struct scst_user_cmd *ucmd)
265 __ucmd_get(ucmd, false);
268 /* Must not be called under cmd_list_lock!! */
269 static inline void ucmd_put(struct scst_user_cmd *ucmd)
271 TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
273 EXTRACHECKS_BUG_ON(atomic_read(&ucmd->ucmd_ref) == 0);
275 if (atomic_dec_and_test(&ucmd->ucmd_ref))
276 dev_user_free_ucmd(ucmd);
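/*
 * Number of pages needed to map a user buffer of len bytes starting at the
 * (possibly unaligned) address buf. For example, with 4K pages a buffer at
 * buf = 0x1ff0 with len = 0x30 covers two pages, even though len itself is
 * far smaller than a page.
 */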
279 static inline int calc_num_pg(unsigned long buf, int len)
281 len += buf & ~PAGE_MASK;
282 return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
285 static void __dev_user_not_reg(void)
287 PRINT_ERROR("%s", "Device not registered");
291 static inline int dev_user_check_reg(struct scst_user_dev *dev)
294 __dev_user_not_reg();
300 static inline int scst_user_cmd_hashfn(int h)
302 return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
305 static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
308 struct list_head *head;
309 struct scst_user_cmd *ucmd;
311 head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
312 list_for_each_entry(ucmd, head, hash_list_entry) {
314 TRACE_DBG("Found ucmd %p", ucmd);
321 static void cmd_insert_hash(struct scst_user_cmd *ucmd)
323 struct list_head *head;
324 struct scst_user_dev *dev = ucmd->dev;
325 struct scst_user_cmd *u;
328 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
330 ucmd->h = dev->handle_counter++;
331 u = __ucmd_find_hash(dev, ucmd->h);
333 head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
334 list_add_tail(&ucmd->hash_list_entry, head);
335 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
337 TRACE_DBG("Inserted ucmd %p, h=%d (dev %s)", ucmd, ucmd->h, dev->name);
341 static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
345 spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
346 list_del(&ucmd->hash_list_entry);
347 spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
349 TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
353 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
357 TRACE_MEM("Freeing ucmd %p", ucmd);
359 cmd_remove_hash(ucmd);
360 EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);
362 kmem_cache_free(user_cmd_cachep, ucmd);
368 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
369 gfp_t gfp_mask, void *priv)
371 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
376 /* *sg is supposed to be zeroed */
378 TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
379 ucmd->ubuff, ucmd->cur_data_page);
381 if (ucmd->cur_data_page == 0) {
382 TRACE_MEM("ucmd->first_page_offset %d",
383 ucmd->first_page_offset);
384 offset = ucmd->first_page_offset;
388 if (ucmd->cur_data_page >= ucmd->num_data_pages)
391 sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
392 PAGE_SIZE - offset, offset);
393 ucmd->cur_data_page++;
395 TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
397 TRACE_BUFFER("Page data", sg_virt(sg), sg->length);
404 static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
408 TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
409 ucmd, ucmd->h, ucmd->ubuff);
411 ucmd->user_cmd_payload_len =
412 offsetof(struct scst_user_get_cmd, on_cached_mem_free) +
413 sizeof(ucmd->user_cmd.on_cached_mem_free);
414 ucmd->user_cmd.cmd_h = ucmd->h;
415 ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
416 ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;
418 ucmd->state = UCMD_STATE_ON_CACHE_FREEING;
420 dev_user_add_to_ready(ucmd);
426 static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
432 TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
433 ucmd->ubuff, ucmd->num_data_pages);
435 for (i = 0; i < ucmd->num_data_pages; i++) {
436 struct page *page = ucmd->data_pages[i];
441 page_cache_release(page);
444 kfree(ucmd->data_pages);
445 ucmd->data_pages = NULL;
451 static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
455 sBUG_ON(ucmd->data_pages == NULL);
457 TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
458 ucmd, ucmd->ubuff, ucmd->buff_cached);
460 dev_user_unmap_buf(ucmd);
462 if (ucmd->buff_cached)
463 dev_user_on_cached_mem_free(ucmd);
471 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
474 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
476 TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
479 __dev_user_free_sg_entries(ucmd);
484 static inline int is_buff_cached(struct scst_user_cmd *ucmd)
486 int mem_reuse_type = ucmd->dev->memory_reuse_type;
488 if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
489 ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
490 (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
491 ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
492 (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE)))
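/*
 * True when buf is not page aligned and the buffer also crosses a page
 * boundary, i.e. its first and last bytes live on different pages.
 */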
498 static inline int is_need_offs_page(unsigned long buf, int len)
500 return ((buf & ~PAGE_MASK) != 0) &&
501 ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
505 * Returns 0 on success, <0 on fatal failure, >0 if pages are needed.
506 * Unmaps the buffer, if needed, in case of error.
508 static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
511 struct scst_cmd *cmd = ucmd->cmd;
512 struct scst_user_dev *dev = ucmd->dev;
513 struct sgv_pool *pool;
516 int bufflen, orig_bufflen;
518 int out_sg_pages = 0;
522 gfp_mask = __GFP_NOWARN;
523 gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
525 if (cmd->data_direction != SCST_DATA_BIDI) {
526 orig_bufflen = cmd->bufflen;
527 pool = (struct sgv_pool *)cmd->tgt_dev->dh_priv;
529 /* Make in_sg->offset 0 */
530 int len = cmd->bufflen + ucmd->first_page_offset;
531 out_sg_pages = (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
532 orig_bufflen = (out_sg_pages << PAGE_SHIFT) + cmd->in_bufflen;
535 bufflen = orig_bufflen;
537 EXTRACHECKS_BUG_ON(bufflen == 0);
540 flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
541 if (ucmd->ubuff == 0)
542 flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
544 TRACE_MEM("%s", "Not cached buff");
545 flags |= SCST_POOL_ALLOC_NO_CACHED;
546 if (ucmd->ubuff == 0) {
550 bufflen += ucmd->first_page_offset;
551 if (is_need_offs_page(ucmd->ubuff, orig_bufflen))
552 last_len = bufflen & ~PAGE_MASK;
554 last_len = orig_bufflen & ~PAGE_MASK;
556 last_len = PAGE_SIZE;
558 ucmd->buff_cached = cached_buff;
560 cmd->sg = sgv_pool_alloc(pool, bufflen, gfp_mask, flags, &cmd->sg_cnt,
561 &ucmd->sgv, &dev->udev_mem_lim, ucmd);
562 if (cmd->sg != NULL) {
563 struct scst_user_cmd *buf_ucmd =
564 (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
566 TRACE_MEM("Buf ucmd %p (cmd->sg_cnt %d, last seg len %d, "
567 "last_len %d, bufflen %d)", buf_ucmd, cmd->sg_cnt,
568 cmd->sg[cmd->sg_cnt-1].length, last_len, bufflen);
570 ucmd->ubuff = buf_ucmd->ubuff;
571 ucmd->buf_ucmd = buf_ucmd;
573 EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
577 cmd->sg[cmd->sg_cnt-1].length &= PAGE_MASK;
578 cmd->sg[cmd->sg_cnt-1].length += last_len;
581 TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
582 "last seg len %d)", ucmd, cached_buff, ucmd->ubuff,
583 cmd->sg[cmd->sg_cnt-1].length);
585 if (cmd->data_direction == SCST_DATA_BIDI) {
586 cmd->in_sg = &cmd->sg[out_sg_pages];
587 cmd->in_sg_cnt = cmd->sg_cnt - out_sg_pages;
588 cmd->sg_cnt = out_sg_pages;
589 TRACE_MEM("cmd %p, in_sg %p, in_sg_cnt %d, sg_cnt %d",
590 cmd, cmd->in_sg, cmd->in_sg_cnt, cmd->sg_cnt);
593 if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
596 PRINT_INFO("Unable to complete command due to "
597 "SG IO count limitation (requested %d, "
598 "available %d, tgt lim %d)",
599 cmd->sg_cnt, cmd->tgt_dev->max_sg_cnt,
600 cmd->tgt->sg_tablesize);
604 /* sgv will be freed in dev_user_free_sgv() */
608 TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
609 "sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
610 ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
611 if (unlikely(cmd->sg_cnt == 0)) {
612 TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
613 sBUG_ON(ucmd->sgv != NULL);
616 switch (ucmd->state) {
617 case UCMD_STATE_BUF_ALLOCING:
620 case UCMD_STATE_EXECING:
635 static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
637 int rc, res = SCST_CMD_STATE_DEFAULT;
638 struct scst_cmd *cmd = ucmd->cmd;
642 ucmd->state = UCMD_STATE_BUF_ALLOCING;
643 cmd->dh_data_buf_alloced = 1;
645 rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
650 res = scst_get_cmd_abnormal_done_state(cmd);
654 if ((cmd->data_direction != SCST_DATA_WRITE) &&
655 !scst_is_cmd_local(cmd)) {
656 TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
660 ucmd->user_cmd_payload_len =
661 offsetof(struct scst_user_get_cmd, alloc_cmd) +
662 sizeof(ucmd->user_cmd.alloc_cmd);
663 ucmd->user_cmd.cmd_h = ucmd->h;
664 ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
665 ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
666 memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb, cmd->cdb_len);
667 ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
668 ucmd->user_cmd.alloc_cmd.ext_cdb_len = cmd->ext_cdb_len;
669 ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
670 (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
671 ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
672 ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
673 ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;
675 dev_user_add_to_ready(ucmd);
677 res = SCST_CMD_STATE_STOP;
684 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
687 struct scst_user_cmd *ucmd = NULL;
691 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
692 ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
694 memset(ucmd, 0, sizeof(*ucmd));
696 ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
698 if (unlikely(ucmd == NULL)) {
699 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
700 "user cmd (gfp_mask %x)", gfp_mask);
704 atomic_set(&ucmd->ucmd_ref, 1);
706 cmd_insert_hash(ucmd);
708 TRACE_MEM("ucmd %p allocated", ucmd);
711 TRACE_EXIT_HRES((unsigned long)ucmd);
715 static int dev_user_get_block(struct scst_cmd *cmd)
717 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
719 * No need for locks here, since *_detach() cannot be
720 * called while there are existing commands.
722 TRACE_EXIT_RES(dev->block);
726 static int dev_user_parse(struct scst_cmd *cmd)
728 int rc, res = SCST_CMD_STATE_DEFAULT;
729 struct scst_user_cmd *ucmd;
730 int atomic = scst_cmd_atomic(cmd);
731 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
732 gfp_t gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
736 if (cmd->dh_priv == NULL) {
737 ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
738 if (unlikely(ucmd == NULL)) {
740 res = SCST_CMD_STATE_NEED_THREAD_CTX;
750 ucmd = (struct scst_user_cmd *)cmd->dh_priv;
751 TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
754 TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);
756 if (ucmd->state != UCMD_STATE_NEW)
759 switch (dev->parse_type) {
760 case SCST_USER_PARSE_STANDARD:
761 TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
762 rc = dev->generic_parse(cmd, dev_user_get_block);
763 if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
767 case SCST_USER_PARSE_EXCEPTION:
768 TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
769 rc = dev->generic_parse(cmd, dev_user_get_block);
770 if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
772 else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
773 TRACE_MEM("Restarting PARSE to thread context "
775 res = SCST_CMD_STATE_NEED_THREAD_CTX;
778 /* else fall through */
780 case SCST_USER_PARSE_CALL:
781 TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
782 "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
783 ucmd->user_cmd_payload_len =
784 offsetof(struct scst_user_get_cmd, parse_cmd) +
785 sizeof(ucmd->user_cmd.parse_cmd);
786 ucmd->user_cmd.cmd_h = ucmd->h;
787 ucmd->user_cmd.subcode = SCST_USER_PARSE;
788 ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
789 memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb, cmd->cdb_len);
790 ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
791 ucmd->user_cmd.parse_cmd.ext_cdb_len = cmd->ext_cdb_len;
792 ucmd->user_cmd.parse_cmd.timeout = cmd->timeout / HZ;
793 ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
794 ucmd->user_cmd.parse_cmd.in_bufflen = cmd->in_bufflen;
795 ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
796 ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
797 ucmd->user_cmd.parse_cmd.expected_values_set =
798 cmd->expected_values_set;
799 ucmd->user_cmd.parse_cmd.expected_data_direction =
800 cmd->expected_data_direction;
801 ucmd->user_cmd.parse_cmd.expected_transfer_len =
802 cmd->expected_transfer_len;
803 ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
804 ucmd->state = UCMD_STATE_PARSING;
805 dev_user_add_to_ready(ucmd);
806 res = SCST_CMD_STATE_STOP;
815 if (cmd->bufflen == 0) {
817 * According to SPC, bufflen 0 for data transfer commands isn't
818 * an error, so we need to fix the transfer direction.
820 cmd->data_direction = SCST_DATA_NONE;
823 if (cmd->data_direction != SCST_DATA_NONE)
824 res = dev_user_alloc_space(ucmd);
831 PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
832 cmd->op_flags & SCST_INFO_INVALID);
833 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
836 res = scst_get_cmd_abnormal_done_state(cmd);
840 static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
842 struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
843 unsigned long start = buf_ucmd->ubuff;
844 int i, bufflen = ucmd->cmd->bufflen;
852 * Possibly, flushing all the pages from ucmd->cmd->sg could be
853 * faster, since it should be cache hot, while ucmd->buf_ucmd and
854 * buf_ucmd->data_pages are cache cold. On the other hand,
855 * sizeof(buf_ucmd->data_pages[0]) is considerably smaller than
856 * sizeof(ucmd->cmd->sg[0]), so on big buffers going over the
857 * data_pages array can lead to fewer cache misses. So, real numbers are
861 for (i = 0; (bufflen > 0) && (i < buf_ucmd->num_data_pages); i++) {
863 page = buf_ucmd->data_pages[i];
864 #ifdef ARCH_HAS_FLUSH_ANON_PAGE
865 struct vm_area_struct *vma = find_vma(current->mm, start);
867 flush_anon_page(vma, page, start);
869 flush_dcache_page(page);
871 bufflen -= PAGE_SIZE;
879 static int dev_user_exec(struct scst_cmd *cmd)
881 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
882 int res = SCST_EXEC_COMPLETED;
886 #if 0 /* We set exec_atomic to 0 to let the SCST core know that we need a thread
887 * context to complete the necessary actions, but all we are going to
888 * do in this function is, in fact, atomic, so let's skip this check.
890 if (scst_cmd_atomic(cmd)) {
891 TRACE_DBG("%s", "User exec() cannot be called in atomic "
892 "context, rescheduling to the thread");
893 res = SCST_EXEC_NEED_THREAD;
898 TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
899 "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
900 cmd->bufflen, cmd->data_len, ucmd->ubuff);
902 if (cmd->data_direction == SCST_DATA_WRITE)
903 dev_user_flush_dcache(ucmd);
905 BUILD_BUG_ON(sizeof(ucmd->user_cmd.exec_cmd.cdb) != sizeof(cmd->cdb));
907 ucmd->user_cmd_payload_len =
908 offsetof(struct scst_user_get_cmd, exec_cmd) +
909 sizeof(ucmd->user_cmd.exec_cmd);
910 ucmd->user_cmd.cmd_h = ucmd->h;
911 ucmd->user_cmd.subcode = SCST_USER_EXEC;
912 ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
913 memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb, cmd->cdb_len);
914 ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
915 ucmd->user_cmd.exec_cmd.ext_cdb_len = cmd->ext_cdb_len;
916 ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
917 ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
918 ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
919 if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
920 ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
921 (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
923 ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
924 ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
925 ucmd->user_cmd.exec_cmd.partial = 0;
926 ucmd->user_cmd.exec_cmd.timeout = cmd->timeout / HZ;
927 ucmd->user_cmd.exec_cmd.p_in_buf = ucmd->ubuff +
928 (cmd->sg_cnt << PAGE_SHIFT);
929 ucmd->user_cmd.exec_cmd.in_bufflen = cmd->in_bufflen;
930 ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;
932 ucmd->state = UCMD_STATE_EXECING;
934 dev_user_add_to_ready(ucmd);
940 static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
942 if (ucmd->sgv != NULL) {
943 sgv_pool_free(ucmd->sgv, &ucmd->dev->udev_mem_lim);
945 } else if (ucmd->data_pages != NULL) {
946 /* We mapped pages, but for some reason didn't allocate them */
948 __dev_user_free_sg_entries(ucmd);
953 static void dev_user_on_free_cmd(struct scst_cmd *cmd)
955 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
959 if (unlikely(ucmd == NULL))
962 TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
963 ucmd->buff_cached, ucmd->ubuff);
966 if (cmd->data_direction == SCST_DATA_WRITE && ucmd->buf_ucmd != NULL)
967 ucmd->buf_ucmd->buf_dirty = 1;
969 if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
970 ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
971 /* The state assignment must be before freeing sgv! */
975 if (unlikely(!ucmd->seen_by_user)) {
976 TRACE_MGMT_DBG("Not seen by user ucmd %p", ucmd);
980 ucmd->user_cmd_payload_len =
981 offsetof(struct scst_user_get_cmd, on_free_cmd) +
982 sizeof(ucmd->user_cmd.on_free_cmd);
983 ucmd->user_cmd.cmd_h = ucmd->h;
984 ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;
985 ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
986 ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
987 ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
988 ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
989 ucmd->user_cmd.on_free_cmd.status = cmd->status;
990 ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;
992 ucmd->state = UCMD_STATE_ON_FREEING;
994 dev_user_add_to_ready(ucmd);
1001 dev_user_process_reply_on_free(ucmd);
1005 static void dev_user_set_block(struct scst_cmd *cmd, int block)
1007 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
1009 * No need for locks here, since *_detach() can not be
1010 * called, when there are existing commands.
1012 TRACE_DBG("dev %p, new block %d", dev, block);
1016 dev->block = dev->def_block;
1020 static int dev_user_disk_done(struct scst_cmd *cmd)
1022 int res = SCST_CMD_STATE_DEFAULT;
1026 res = scst_block_generic_dev_done(cmd, dev_user_set_block);
1028 TRACE_EXIT_RES(res);
1032 static int dev_user_tape_done(struct scst_cmd *cmd)
1034 int res = SCST_CMD_STATE_DEFAULT;
1038 res = scst_tape_generic_dev_done(cmd, dev_user_set_block);
1040 TRACE_EXIT_RES(res);
1044 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
1046 struct scst_user_dev *dev = ucmd->dev;
1047 unsigned long flags;
1052 do_wake = (in_interrupt() ||
1053 (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
1055 do_wake |= ucmd->cmd->preprocessing_only;
1057 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
1059 ucmd->this_state_unjammed = 0;
1061 if ((ucmd->state == UCMD_STATE_PARSING) ||
1062 (ucmd->state == UCMD_STATE_BUF_ALLOCING)) {
1064 * If we don't put such commands at the head of the queue, then under
1065 * high load we might delay threads waiting for memory
1066 * allocations for too long and start losing NOPs, which
1067 * would lead remote initiators to consider us
1068 * unresponsive and stuck => broken connections, etc. If none
1069 * of our commands complete within the NOP timeout to allow the head
1070 * commands to go, then we are really overloaded and/or stuck.
1072 TRACE_DBG("Adding ucmd %p (state %d) to head of ready "
1073 "cmd list", ucmd, ucmd->state);
1074 list_add(&ucmd->ready_cmd_list_entry,
1075 &dev->ready_cmd_list);
1077 } else if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
1078 unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
1079 unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
1080 TRACE_MGMT_DBG("Adding mgmt ucmd %p (state %d) to head of "
1081 "ready cmd list", ucmd, ucmd->state);
1082 list_add(&ucmd->ready_cmd_list_entry,
1083 &dev->ready_cmd_list);
1085 } else if ((ucmd->cmd != NULL) &&
1086 unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
1087 TRACE_DBG("Adding HQ ucmd %p to head of ready cmd list", ucmd);
1088 list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
1090 TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
1091 list_add_tail(&ucmd->ready_cmd_list_entry,
1092 &dev->ready_cmd_list);
1096 TRACE_DBG("Waking up dev %p", dev);
1097 wake_up(&dev->cmd_lists.cmd_list_waitQ);
1100 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
1106 static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
1111 struct task_struct *tsk = current;
1115 if (unlikely(ubuff == 0))
1118 sBUG_ON(ucmd->data_pages != NULL);
1120 ucmd->num_data_pages = num_pg;
1123 kmalloc(sizeof(*ucmd->data_pages) * ucmd->num_data_pages,
1125 if (ucmd->data_pages == NULL) {
1126 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
1127 "(num_data_pages=%d)", ucmd->num_data_pages);
1132 TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d,"
1133 " first_page_offset %d, len %d)", ucmd, ubuff,
1134 ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
1135 ucmd->cmd->bufflen);
1137 down_read(&tsk->mm->mmap_sem);
1138 rc = get_user_pages(tsk, tsk->mm, ubuff, ucmd->num_data_pages,
1139 1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
1140 up_read(&tsk->mm->mmap_sem);
1142 /* get_user_pages() flushes dcache */
1144 if (rc < ucmd->num_data_pages)
1147 ucmd->ubuff = ubuff;
1148 ucmd->first_page_offset = (ubuff & ~PAGE_MASK);
1151 TRACE_EXIT_RES(res);
1155 scst_set_busy(ucmd->cmd);
1159 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1163 PRINT_ERROR("Failed to get %d user pages (rc %d)",
1164 ucmd->num_data_pages, rc);
1166 for (i = 0; i < rc; i++)
1167 page_cache_release(ucmd->data_pages[i]);
1169 kfree(ucmd->data_pages);
1170 ucmd->data_pages = NULL;
1172 scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1176 static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
1177 struct scst_user_reply_cmd *reply)
1180 struct scst_cmd *cmd = ucmd->cmd;
1184 TRACE_DBG("ucmd %p, pbuf %llx", ucmd, reply->alloc_reply.pbuf);
1186 if (likely(reply->alloc_reply.pbuf != 0)) {
1188 if (ucmd->buff_cached) {
1189 if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
1190 PRINT_ERROR("Supplied pbuf %llx isn't "
1192 reply->alloc_reply.pbuf);
1195 pages = cmd->sg_cnt;
1197 pages = calc_num_pg(reply->alloc_reply.pbuf,
1199 res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
1201 scst_set_busy(ucmd->cmd);
1202 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1206 scst_process_active_cmd(cmd, false);
1208 TRACE_EXIT_RES(res);
1212 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1213 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1218 static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
1219 struct scst_user_reply_cmd *reply)
1222 struct scst_user_scsi_cmd_reply_parse *preply =
1223 &reply->parse_reply;
1224 struct scst_cmd *cmd = ucmd->cmd;
1228 if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
1231 if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
1232 (preply->data_direction != SCST_DATA_READ) &&
1233 (preply->data_direction != SCST_DATA_NONE)))
1236 if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
1237 (preply->bufflen == 0)))
1240 if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
1243 TRACE_DBG("ucmd %p, queue_type %x, data_direction, %x, bufflen %d, "
1244 "data_len %d, pbuf %llx", ucmd, preply->queue_type,
1245 preply->data_direction, preply->bufflen, preply->data_len,
1246 reply->alloc_reply.pbuf);
1248 cmd->queue_type = preply->queue_type;
1249 cmd->data_direction = preply->data_direction;
1250 cmd->bufflen = preply->bufflen;
1251 cmd->data_len = preply->data_len;
1254 scst_process_active_cmd(cmd, false);
1256 TRACE_EXIT_RES(res);
1260 PRINT_ERROR("Invalid parse_reply parameters (LUN %lld, op %x, cmd %p)",
1261 (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
1262 PRINT_BUFFER("Invalid parse_reply", reply, sizeof(*reply));
1263 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1264 scst_set_cmd_abnormal_done_state(cmd);
1269 static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
1275 TRACE_DBG("ON FREE ucmd %p", ucmd);
1277 dev_user_free_sgv(ucmd);
1280 TRACE_EXIT_RES(res);
1284 static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
1290 TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);
1294 TRACE_EXIT_RES(res);
1298 static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
1299 struct scst_user_reply_cmd *reply)
1302 struct scst_user_scsi_cmd_reply_exec *ereply =
1304 struct scst_cmd *cmd = ucmd->cmd;
1308 if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
1309 if (ucmd->background_exec) {
1310 TRACE_DBG("Background ucmd %p finished", ucmd);
1314 if (unlikely(ereply->resp_data_len > cmd->bufflen))
1316 if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
1317 (ereply->resp_data_len != 0)))
1319 } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
1320 if (unlikely(ucmd->background_exec))
1322 if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
1323 (cmd->resp_data_len != 0)))
1326 * The background_exec assignment must come after the ucmd get.
1327 * Otherwise, due to reordering, in dev_user_process_reply()
1328 * it is possible that ucmd is destroyed before it is "got" here.
1330 ucmd_get_ordered(ucmd);
1331 ucmd->background_exec = 1;
1332 TRACE_DBG("Background ucmd %p", ucmd);
1337 TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
1338 ereply->status, ereply->resp_data_len);
1340 if (ereply->resp_data_len != 0) {
1341 if (ucmd->ubuff == 0) {
1343 if (unlikely(ereply->pbuf == 0))
1345 if (ucmd->buff_cached) {
1346 if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
1347 PRINT_ERROR("Supplied pbuf %llx isn't "
1348 "page aligned", ereply->pbuf);
1351 pages = cmd->sg_cnt;
1353 pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
1354 rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
1355 if ((rc != 0) || (ucmd->ubuff == 0))
1358 rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
1359 if (unlikely(rc != 0))
1362 dev_user_flush_dcache(ucmd);
1363 cmd->may_need_dma_sync = 1;
1364 scst_set_resp_data_len(cmd, ereply->resp_data_len);
1365 } else if (cmd->resp_data_len != ereply->resp_data_len) {
1366 if (ucmd->ubuff == 0)
1367 cmd->resp_data_len = ereply->resp_data_len;
1369 scst_set_resp_data_len(cmd, ereply->resp_data_len);
1372 cmd->status = ereply->status;
1373 if (ereply->sense_len != 0) {
1374 res = scst_alloc_sense(cmd, 0);
1377 res = copy_from_user(cmd->sense,
1378 (void __user *)(unsigned long)ereply->psense_buffer,
1379 min((int)cmd->sense_bufflen, (int)ereply->sense_len));
1381 PRINT_ERROR("%s", "Unable to get sense data");
1382 goto out_hwerr_res_set;
1388 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_DIRECT);
1389 /* !! At this point cmd can be already freed !! */
1392 TRACE_EXIT_RES(res);
1396 PRINT_ERROR("Invalid exec_reply parameters (LUN %lld, op %x, cmd %p)",
1397 (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
1398 PRINT_BUFFER("Invalid exec_reply", reply, sizeof(*reply));
1404 if (ucmd->background_exec) {
1408 scst_set_cmd_error(cmd,
1409 SCST_LOAD_SENSE(scst_sense_hardw_error));
1418 static int dev_user_process_reply(struct scst_user_dev *dev,
1419 struct scst_user_reply_cmd *reply)
1422 struct scst_user_cmd *ucmd;
1427 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1429 ucmd = __ucmd_find_hash(dev, reply->cmd_h);
1430 if (unlikely(ucmd == NULL)) {
1431 TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
1436 if (unlikely(ucmd_get_check(ucmd))) {
1437 TRACE_MGMT_DBG("Found being destroyed cmd_h %d", reply->cmd_h);
1442 /* To sync. with dev_user_process_reply_exec(). See comment there. */
1444 if (ucmd->background_exec) {
1445 state = UCMD_STATE_EXECING;
1446 goto unlock_process;
1449 if (unlikely(ucmd->this_state_unjammed)) {
1450 TRACE_MGMT_DBG("Reply on unjammed ucmd %p, ignoring",
1452 goto out_unlock_put;
1455 if (unlikely(!ucmd->sent_to_user)) {
1456 TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
1457 "state %x", ucmd, ucmd->state);
1459 goto out_unlock_put;
1462 if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
1463 goto out_wrong_state;
1465 if (unlikely(_IOC_NR(reply->subcode) != ucmd->state))
1466 goto out_wrong_state;
1468 state = ucmd->state;
1469 ucmd->sent_to_user = 0;
1472 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1475 case UCMD_STATE_PARSING:
1476 res = dev_user_process_reply_parse(ucmd, reply);
1479 case UCMD_STATE_BUF_ALLOCING:
1480 res = dev_user_process_reply_alloc(ucmd, reply);
1483 case UCMD_STATE_EXECING:
1484 res = dev_user_process_reply_exec(ucmd, reply);
1487 case UCMD_STATE_ON_FREEING:
1488 res = dev_user_process_reply_on_free(ucmd);
1491 case UCMD_STATE_ON_CACHE_FREEING:
1492 res = dev_user_process_reply_on_cache_free(ucmd);
1495 case UCMD_STATE_TM_EXECING:
1496 res = dev_user_process_reply_tm_exec(ucmd, reply->result);
1499 case UCMD_STATE_ATTACH_SESS:
1500 case UCMD_STATE_DETACH_SESS:
1501 res = dev_user_process_reply_sess(ucmd, reply->result);
1513 TRACE_EXIT_RES(res);
1517 PRINT_ERROR("Command's %p subcode %x doesn't match internal "
1518 "command's state %x or reply->subcode (%x) != ucmd->subcode "
1519 "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
1520 reply->subcode, ucmd->user_cmd.subcode);
1522 dev_user_unjam_cmd(ucmd, 0, NULL);
1525 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1529 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1533 static int dev_user_reply_cmd(struct file *file, void __user *arg)
1536 struct scst_user_dev *dev;
1537 struct scst_user_reply_cmd reply;
1541 mutex_lock(&dev_priv_mutex);
1542 dev = (struct scst_user_dev *)file->private_data;
1543 res = dev_user_check_reg(dev);
1544 if (unlikely(res != 0)) {
1545 mutex_unlock(&dev_priv_mutex);
1548 down_read(&dev->dev_rwsem);
1549 mutex_unlock(&dev_priv_mutex);
1551 res = copy_from_user(&reply, arg, sizeof(reply));
1552 if (unlikely(res < 0))
1555 TRACE_DBG("Reply for dev %s", dev->name);
1557 TRACE_BUFFER("Reply", &reply, sizeof(reply));
1559 res = dev_user_process_reply(dev, &reply);
1560 if (unlikely(res < 0))
1564 up_read(&dev->dev_rwsem);
1567 TRACE_EXIT_RES(res);
1571 static int dev_user_get_ext_cdb(struct file *file, void __user *arg)
1574 struct scst_user_dev *dev;
1575 struct scst_user_cmd *ucmd;
1576 struct scst_cmd *cmd;
1577 struct scst_user_get_ext_cdb get;
1581 mutex_lock(&dev_priv_mutex);
1582 dev = (struct scst_user_dev *)file->private_data;
1583 res = dev_user_check_reg(dev);
1584 if (unlikely(res != 0)) {
1585 mutex_unlock(&dev_priv_mutex);
1588 down_read(&dev->dev_rwsem);
1589 mutex_unlock(&dev_priv_mutex);
1591 res = copy_from_user(&get, arg, sizeof(get));
1592 if (unlikely(res < 0))
1595 TRACE_MGMT_DBG("Get ext cdb for dev %s", dev->name);
1597 TRACE_BUFFER("Get ext cdb", &get, sizeof(get));
1599 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1601 ucmd = __ucmd_find_hash(dev, get.cmd_h);
1602 if (unlikely(ucmd == NULL)) {
1603 TRACE_MGMT_DBG("cmd_h %d not found", get.cmd_h);
1608 if (unlikely(ucmd_get_check(ucmd))) {
1609 TRACE_MGMT_DBG("Found being destroyed cmd_h %d", get.cmd_h);
1614 if ((ucmd->cmd != NULL) && (ucmd->state <= UCMD_STATE_EXECING) &&
1615 (ucmd->sent_to_user || ucmd->background_exec)) {
1619 TRACE_MGMT_DBG("Invalid ucmd state %d for cmd_h %d",
1620 ucmd->state, get.cmd_h);
1625 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1630 if (cmd->ext_cdb == NULL)
1633 TRACE_BUFFER("EXT CDB", cmd->ext_cdb, cmd->ext_cdb_len);
1634 res = copy_to_user((void __user *)(unsigned long)get.ext_cdb_buffer,
1635 cmd->ext_cdb, cmd->ext_cdb_len);
1644 up_read(&dev->dev_rwsem);
1647 TRACE_EXIT_RES(res);
1651 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1655 static int dev_user_process_scst_commands(struct scst_user_dev *dev)
1656 __releases(&dev->cmd_lists.cmd_list_lock)
1657 __acquires(&dev->cmd_lists.cmd_list_lock)
1663 while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
1664 struct scst_cmd *cmd = list_entry(
1665 dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
1667 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
1668 list_del(&cmd->cmd_list_entry);
1669 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1670 scst_process_active_cmd(cmd, false);
1671 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1675 TRACE_EXIT_RES(res);
1679 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1680 static struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
1681 __releases(&dev->cmd_lists.cmd_list_lock)
1682 __acquires(&dev->cmd_lists.cmd_list_lock)
1684 struct scst_user_cmd *u;
1688 if (!list_empty(cmd_list)) {
1689 u = list_entry(cmd_list->next, typeof(*u),
1690 ready_cmd_list_entry);
1692 TRACE_DBG("Found ready ucmd %p", u);
1693 list_del(&u->ready_cmd_list_entry);
1695 EXTRACHECKS_BUG_ON(u->this_state_unjammed);
1697 if (u->cmd != NULL) {
1698 if (u->state == UCMD_STATE_EXECING) {
1699 struct scst_user_dev *dev = u->dev;
1702 EXTRACHECKS_BUG_ON(u->jammed);
1704 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1706 rc = scst_check_local_events(u->cmd);
1707 if (unlikely(rc != 0)) {
1708 u->cmd->scst_cmd_done(u->cmd,
1709 SCST_CMD_STATE_DEFAULT,
1710 SCST_CONTEXT_DIRECT);
1712 * !! At this point cmd & u can be !!
1713 * !! already freed !!
1716 &dev->cmd_lists.cmd_list_lock);
1720 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1721 } else if (unlikely(test_bit(SCST_CMD_ABORTED,
1722 &u->cmd->cmd_flags))) {
1724 case UCMD_STATE_PARSING:
1725 case UCMD_STATE_BUF_ALLOCING:
1726 TRACE_MGMT_DBG("Aborting ucmd %p", u);
1727 dev_user_unjam_cmd(u, 0, NULL);
1729 case UCMD_STATE_EXECING:
1730 EXTRACHECKS_BUG_ON(1);
1734 u->sent_to_user = 1;
1735 u->seen_by_user = 1;
1740 static inline int test_cmd_lists(struct scst_user_dev *dev)
1742 int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
1743 !list_empty(&dev->ready_cmd_list) ||
1744 !dev->blocking || dev->cleanup_done ||
1745 signal_pending(current);
1749 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1750 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
1751 struct scst_user_cmd **ucmd)
1758 init_waitqueue_entry(&wait, current);
1761 if (!test_cmd_lists(dev)) {
1762 add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
1765 set_current_state(TASK_INTERRUPTIBLE);
1766 if (test_cmd_lists(dev))
1768 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1770 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1772 set_current_state(TASK_RUNNING);
1773 remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
1777 dev_user_process_scst_commands(dev);
1779 *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
1783 if (!dev->blocking || dev->cleanup_done) {
1785 TRACE_DBG("No ready commands, returning %d", res);
1789 if (signal_pending(current)) {
1791 TRACE_DBG("Signal pending, returning %d", res);
1796 TRACE_EXIT_RES(res);
1800 static int dev_user_reply_get_cmd(struct file *file, void __user *arg)
1803 struct scst_user_dev *dev;
1804 struct scst_user_get_cmd *cmd;
1805 struct scst_user_reply_cmd *reply;
1806 struct scst_user_cmd *ucmd;
1811 mutex_lock(&dev_priv_mutex);
1812 dev = (struct scst_user_dev *)file->private_data;
1813 res = dev_user_check_reg(dev);
1814 if (unlikely(res != 0)) {
1815 mutex_unlock(&dev_priv_mutex);
1818 down_read(&dev->dev_rwsem);
1819 mutex_unlock(&dev_priv_mutex);
1821 /* get_user() can't be used with 64-bit values on x86_32 */
1822 res = copy_from_user(&ureply, (uint64_t __user *)
1823 &((struct scst_user_get_cmd __user *)arg)->preply,
1825 if (unlikely(res < 0))
1828 TRACE_DBG("ureply %lld (dev %s)", (long long unsigned int)ureply,
1831 cmd = kmem_cache_alloc(user_get_cmd_cachep, GFP_KERNEL);
1832 if (unlikely(cmd == NULL)) {
1838 unsigned long u = (unsigned long)ureply;
1839 reply = (struct scst_user_reply_cmd *)cmd;
1840 res = copy_from_user(reply, (void __user *)u, sizeof(*reply));
1841 if (unlikely(res < 0))
1844 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1846 res = dev_user_process_reply(dev, reply);
1847 if (unlikely(res < 0))
1851 kmem_cache_free(user_get_cmd_cachep, cmd);
1853 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1855 res = dev_user_get_next_cmd(dev, &ucmd);
1859 * A misbehaving user space handler can cause ucmd to be destroyed
1860 * immediately after we release the lock, which could lead to
1861 * copying dead data to the user space and thus to a
1862 * leak of sensitive information.
1864 if (unlikely(ucmd_get_check(ucmd))) {
1865 /* Oops, this ucmd is already being destroyed. Retry. */
1868 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1870 EXTRACHECKS_BUG_ON(ucmd->user_cmd_payload_len == 0);
1872 len = ucmd->user_cmd_payload_len;
1873 TRACE_DBG("ucmd %p (user_cmd %p), payload_len %d (len %d)",
1874 ucmd, &ucmd->user_cmd, ucmd->user_cmd_payload_len, len);
1875 TRACE_BUFFER("UCMD", &ucmd->user_cmd, len);
1876 res = copy_to_user(arg, &ucmd->user_cmd, len);
1877 if (unlikely(res != 0)) {
1878 /* Requeue ucmd back */
1879 TRACE_DBG("Copy to user failed (%d), requeuing ucmd %p "
1880 "back to head of ready cmd list", res, ucmd);
1881 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1882 list_add(&ucmd->ready_cmd_list_entry,
1883 &dev->ready_cmd_list);
1884 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1886 #ifdef CONFIG_SCST_EXTRACHECKS
1888 ucmd->user_cmd_payload_len = 0;
1892 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1895 up_read(&dev->dev_rwsem);
1898 TRACE_EXIT_RES(res);
1902 kmem_cache_free(user_get_cmd_cachep, cmd);
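/*
 * For reference, a minimal sketch (illustrative only) of the user space
 * side of the SCST_USER_REPLY_AND_GET_CMD loop. It assumes the device
 * file has already been opened (fd) and the device registered via
 * SCST_USER_REGISTER_DEVICE; handle_cmd() is a hypothetical helper that
 * dispatches on get.subcode and fills in the reply:
 *
 *	struct scst_user_get_cmd get;
 *	struct scst_user_reply_cmd reply;
 *
 *	memset(&get, 0, sizeof(get));
 *	get.preply = 0;				(nothing to reply on the first call)
 *	for (;;) {
 *		if (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get) != 0)
 *			break;			(EINTR/EAGAIN handling omitted)
 *		handle_cmd(&get, &reply);
 *		get.preply = (unsigned long)&reply;
 *	}
 */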
1906 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1914 case SCST_USER_REPLY_AND_GET_CMD:
1915 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1916 res = dev_user_reply_get_cmd(file, (void __user *)arg);
1919 case SCST_USER_REPLY_CMD:
1920 TRACE_DBG("%s", "REPLY_CMD");
1921 res = dev_user_reply_cmd(file, (void __user *)arg);
1924 case SCST_USER_GET_EXTENDED_CDB:
1925 TRACE_DBG("%s", "GET_EXTENDED_CDB");
1926 res = dev_user_get_ext_cdb(file, (void __user *)arg);
1929 case SCST_USER_REGISTER_DEVICE:
1931 struct scst_user_dev_desc *dev_desc;
1932 TRACE_DBG("%s", "REGISTER_DEVICE");
1933 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1934 if (dev_desc == NULL) {
1938 res = copy_from_user(dev_desc, (void __user *)arg,
1944 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1945 dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
1946 res = dev_user_register_dev(file, dev_desc);
1951 case SCST_USER_UNREGISTER_DEVICE:
1952 TRACE_DBG("%s", "UNREGISTER_DEVICE");
1953 res = dev_user_unregister_dev(file);
1956 case SCST_USER_FLUSH_CACHE:
1957 TRACE_DBG("%s", "FLUSH_CACHE");
1958 res = dev_user_flush_cache(file);
1961 case SCST_USER_SET_OPTIONS:
1963 struct scst_user_opt opt;
1964 TRACE_DBG("%s", "SET_OPTIONS");
1965 res = copy_from_user(&opt, (void __user *)arg, sizeof(opt));
1968 TRACE_BUFFER("opt", &opt, sizeof(opt));
1969 res = dev_user_set_opt(file, &opt);
1973 case SCST_USER_GET_OPTIONS:
1974 TRACE_DBG("%s", "GET_OPTIONS");
1975 res = dev_user_get_opt(file, (void __user *)arg);
1978 case SCST_USER_DEVICE_CAPACITY_CHANGED:
1979 TRACE_DBG("%s", "CAPACITY_CHANGED");
1980 res = dev_user_capacity_changed(file);
1984 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
1990 TRACE_EXIT_RES(res);
1994 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
1997 struct scst_user_dev *dev;
2001 mutex_lock(&dev_priv_mutex);
2002 dev = (struct scst_user_dev *)file->private_data;
2003 res = dev_user_check_reg(dev);
2004 if (unlikely(res != 0)) {
2005 mutex_unlock(&dev_priv_mutex);
2008 down_read(&dev->dev_rwsem);
2009 mutex_unlock(&dev_priv_mutex);
2011 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2013 if (!list_empty(&dev->ready_cmd_list) ||
2014 !list_empty(&dev->cmd_lists.active_cmd_list)) {
2015 res |= POLLIN | POLLRDNORM;
2019 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2021 TRACE_DBG("Before poll_wait() (dev %s)", dev->name);
2022 poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
2023 TRACE_DBG("After poll_wait() (dev %s)", dev->name);
2025 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2027 if (!list_empty(&dev->ready_cmd_list) ||
2028 !list_empty(&dev->cmd_lists.active_cmd_list)) {
2029 res |= POLLIN | POLLRDNORM;
2034 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2036 up_read(&dev->dev_rwsem);
2039 TRACE_EXIT_HRES(res);
2044 * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reacquire.
2046 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
2047 unsigned long *flags)
2048 __releases(&dev->cmd_lists.cmd_list_lock)
2049 __acquires(&dev->cmd_lists.cmd_list_lock)
2051 int state = ucmd->state;
2052 struct scst_user_dev *dev = ucmd->dev;
2056 if (ucmd->this_state_unjammed)
2059 TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
2063 ucmd->this_state_unjammed = 1;
2064 ucmd->sent_to_user = 0;
2067 case UCMD_STATE_PARSING:
2068 case UCMD_STATE_BUF_ALLOCING:
2069 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
2073 scst_set_busy(ucmd->cmd);
2075 scst_set_cmd_error(ucmd->cmd,
2076 SCST_LOAD_SENSE(scst_sense_hardw_error));
2078 scst_set_cmd_abnormal_done_state(ucmd->cmd);
2080 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
2081 list_add(&ucmd->cmd->cmd_list_entry,
2082 &ucmd->cmd->cmd_lists->active_cmd_list);
2083 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
2086 case UCMD_STATE_EXECING:
2088 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock,
2091 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2093 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
2095 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
2099 scst_set_busy(ucmd->cmd);
2101 scst_set_cmd_error(ucmd->cmd,
2102 SCST_LOAD_SENSE(scst_sense_hardw_error));
2105 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT,
2106 SCST_CONTEXT_DIRECT);
2107 /* !! At this point cmd and ucmd can be already freed !! */
2110 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock,
2113 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2116 case UCMD_STATE_ON_FREEING:
2117 case UCMD_STATE_ON_CACHE_FREEING:
2118 case UCMD_STATE_TM_EXECING:
2119 case UCMD_STATE_ATTACH_SESS:
2120 case UCMD_STATE_DETACH_SESS:
2123 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock,
2126 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2129 case UCMD_STATE_ON_FREEING:
2130 dev_user_process_reply_on_free(ucmd);
2133 case UCMD_STATE_ON_CACHE_FREEING:
2134 dev_user_process_reply_on_cache_free(ucmd);
2137 case UCMD_STATE_TM_EXECING:
2138 dev_user_process_reply_tm_exec(ucmd,
2139 SCST_MGMT_STATUS_FAILED);
2142 case UCMD_STATE_ATTACH_SESS:
2143 case UCMD_STATE_DETACH_SESS:
2144 dev_user_process_reply_sess(ucmd, -EFAULT);
2149 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock,
2152 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2157 PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
2167 static void dev_user_unjam_dev(struct scst_user_dev *dev)
2168 __releases(&dev->cmd_lists.cmd_list_lock)
2169 __acquires(&dev->cmd_lists.cmd_list_lock)
2172 struct scst_user_cmd *ucmd;
2176 TRACE_MGMT_DBG("Unjamming dev %p", dev);
2178 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2181 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2182 struct list_head *head = &dev->ucmd_hash[i];
2184 list_for_each_entry(ucmd, head, hash_list_entry) {
2185 if (!ucmd->sent_to_user)
2188 if (ucmd_get_check(ucmd))
2191 TRACE_MGMT_DBG("ucmd %p, state %x, scst_cmd %p", ucmd,
2192 ucmd->state, ucmd->cmd);
2194 dev_user_unjam_cmd(ucmd, 0, NULL);
2196 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2198 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2204 if (dev_user_process_scst_commands(dev) != 0)
2207 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2213 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
2220 TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
2221 ucmd->user_cmd.tm_cmd.fn, status);
2223 if (status == SCST_MGMT_STATUS_TASK_NOT_EXIST) {
2225 * It is possible that user space saw the TM cmd before the cmd
2226 * to abort, or will never see it at all, because it was
2227 * aborted on the way there. So, it is safe to return
2228 * success instead, because, if the TM cmd is present at this
2229 * point, then the cmd to abort apparently does exist.
2231 status = SCST_MGMT_STATUS_SUCCESS;
2234 scst_async_mcmd_completed(ucmd->mcmd, status);
2238 TRACE_EXIT_RES(res);
2242 static void dev_user_abort_ready_commands(struct scst_user_dev *dev)
2244 struct scst_user_cmd *ucmd;
2245 unsigned long flags;
2249 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2251 list_for_each_entry(ucmd, &dev->ready_cmd_list, ready_cmd_list_entry) {
2252 if ((ucmd->cmd != NULL) && !ucmd->seen_by_user &&
2253 test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags)) {
2254 switch (ucmd->state) {
2255 case UCMD_STATE_PARSING:
2256 case UCMD_STATE_BUF_ALLOCING:
2257 case UCMD_STATE_EXECING:
2258 TRACE_MGMT_DBG("Aborting ready ucmd %p", ucmd);
2259 list_del(&ucmd->ready_cmd_list_entry);
2260 dev_user_unjam_cmd(ucmd, 0, &flags);
2266 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2272 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2273 struct scst_tgt_dev *tgt_dev)
2275 struct scst_user_cmd *ucmd;
2276 struct scst_user_dev *dev =
2277 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2278 struct scst_user_cmd *ucmd_to_abort = NULL;
2283 * In the approach used here we don't do anything with hung devices, which
2284 * stopped responding and/or have stuck commands. We forcibly abort such
2285 * commands only if they have not yet been sent to the user space or if the
2286 * device is getting unloaded, e.g. if its handler program gets killed. This is
2287 * because it's pretty hard to distinguish between stuck and temporarily
2288 * overloaded states of the device. There are several reasons for that:
2290 * 1. Some commands need a lot of time to complete (several hours),
2291 * so for an impatient user such command(s) will always look as
2294 * 2. If we forcibly abort just one command, i.e. abort it before it has
2295 * actually completed in the user space, we will have to put the whole
2296 * device offline until we are sure that no more previously aborted
2297 * commands will get executed. Otherwise, we risk data corruption, where a
2298 * command that was aborted and reported as completed
2299 * actually gets executed *after* new commands sent
2300 * after the forced abort was done. Many journaling file systems and
2301 * databases use the "provide the required command order via queue draining"
2302 * approach, and not putting the whole device offline after the forced
2303 * abort would break it. This makes deciding whether a command is stuck
2304 * or not very costly.
2306 * So, we leave the policy decision whether a device is stuck or not to
2307 * the user space and simply let all commands live until they are
2308 * completed or their devices get closed/killed. This approach is very
2309 * much OK, but it can affect management commands, which need activity
2310 * suspension via the scst_suspend_activity() function, such as device or
2311 * target registration/removal. But during normal life such commands
2312 * should be rare. Plus, when possible, scst_suspend_activity() will
2313 * return EBUSY status after a timeout to let the caller avoid getting stuck
2316 * But, anyway, ToDo: we should reimplement that in the SCST core, so
2317 * stuck commands would affect only the related devices.
2320 dev_user_abort_ready_commands(dev);
2322 /* We can't afford to miss the TM command due to memory shortage */
2323 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2325 ucmd->user_cmd_payload_len =
2326 offsetof(struct scst_user_get_cmd, tm_cmd) +
2327 sizeof(ucmd->user_cmd.tm_cmd);
2328 ucmd->user_cmd.cmd_h = ucmd->h;
2329 ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2330 ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2331 ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2332 ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
2333 ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
2335 if (mcmd->cmd_to_abort != NULL) {
2337 (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
2338 if (ucmd_to_abort != NULL)
2339 ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2342 TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2343 "ucmd_to_abort %p, cmd_h_to_abort %d, mcmd %p)", ucmd, ucmd->h,
2344 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2345 ucmd->user_cmd.tm_cmd.cmd_h_to_abort, mcmd);
2348 ucmd->state = UCMD_STATE_TM_EXECING;
2350 scst_prepare_async_mcmd(mcmd);
2352 dev_user_add_to_ready(ucmd);
2355 return SCST_DEV_TM_NOT_COMPLETED;
2358 static int dev_user_attach(struct scst_device *sdev)
2361 struct scst_user_dev *dev = NULL, *d;
2365 spin_lock(&dev_list_lock);
2366 list_for_each_entry(d, &dev_list, dev_list_entry) {
2367 if (strcmp(d->name, sdev->virt_name) == 0) {
2372 spin_unlock(&dev_list_lock);
2374 PRINT_ERROR("Device %s not found", sdev->virt_name);
2379 sdev->p_cmd_lists = &dev->cmd_lists;
2380 sdev->dh_priv = dev;
2381 sdev->tst = dev->tst;
2382 sdev->queue_alg = dev->queue_alg;
2383 sdev->swp = dev->swp;
2384 sdev->tas = dev->tas;
2385 sdev->d_sense = dev->d_sense;
2386 sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2390 PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2398 static void dev_user_detach(struct scst_device *sdev)
2400 struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
2404 TRACE_DBG("virt_id %d", sdev->virt_id);
2406 PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2409 /* dev will be freed by the caller */
2410 sdev->dh_priv = NULL;
2417 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2420 unsigned long flags;
2424 TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2426 spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2428 if (ucmd->state == UCMD_STATE_ATTACH_SESS) {
2429 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2430 ucmd->result = status;
2431 } else if (ucmd->state == UCMD_STATE_DETACH_SESS) {
2432 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2436 if (ucmd->cmpl != NULL)
2437 complete_all(ucmd->cmpl);
2439 spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2443 TRACE_EXIT_RES(res);
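/*
 * Called when a session is attached to the device: selects the SGV pool
 * for this tgt_dev, sends SCST_USER_ATTACH_SESS to the user space handler
 * and waits up to DEV_USER_ATTACH_TIMEOUT for its reply.
 */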
2447 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2449 struct scst_user_dev *dev =
2450 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2452 struct scst_user_cmd *ucmd;
2453 DECLARE_COMPLETION_ONSTACK(cmpl);
2458 * We can't replace tgt_dev->pool, because it can be used to allocate
2459 * memory for SCST local commands, like REPORT LUNS, where there is no
2460 * corresponding ucmd. Otherwise we will crash in dev_user_alloc_sg().
2462 if (test_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags))
2463 tgt_dev->dh_priv = dev->pool_clust;
2465 tgt_dev->dh_priv = dev->pool;
2467 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2473 ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
2474 sizeof(ucmd->user_cmd.sess);
2475 ucmd->user_cmd.cmd_h = ucmd->h;
2476 ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2477 ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2478 ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2479 ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2480 ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
2481 strncpy(ucmd->user_cmd.sess.initiator_name,
2482 tgt_dev->sess->initiator_name,
2483 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2484 ucmd->user_cmd.sess.initiator_name[
2485 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2486 if (tgt_dev->sess->tgt->default_group_name != NULL) {
2487 strncpy(ucmd->user_cmd.sess.target_name,
2488 &tgt_dev->sess->tgt->default_group_name[sizeof(SCST_DEFAULT_ACG_NAME)],
2489 sizeof(ucmd->user_cmd.sess.target_name)-1);
2490 ucmd->user_cmd.sess.target_name[
2491 sizeof(ucmd->user_cmd.sess.target_name)-1] = '\0';
2494 TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %llx, LUN %llx, "
2495 "threads_num %d, rd_only_flag %d, initiator %s, target %s)",
2496 ucmd, ucmd->h, ucmd->user_cmd.sess.sess_h,
2497 ucmd->user_cmd.sess.lun, ucmd->user_cmd.sess.threads_num,
2498 ucmd->user_cmd.sess.rd_only, ucmd->user_cmd.sess.initiator_name,
2499 ucmd->user_cmd.sess.target_name);
2501 ucmd->state = UCMD_STATE_ATTACH_SESS;
2503 ucmd->cmpl = &cmpl;
2505 dev_user_add_to_ready(ucmd);
2507 rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2511 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2515 sBUG_ON(irqs_disabled());
2517 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2518 ucmd->cmpl = NULL;
2519 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2524 TRACE_EXIT_RES(res);
2532 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2534 struct scst_user_dev *dev =
2535 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2536 struct scst_user_cmd *ucmd;
2541 * We can't afford to lose this DETACH_SESS notification due to memory
2542 * shortage, because that might lead to a memory leak in the user space handler.
2544 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2548 TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %llx)", ucmd,
2549 ucmd->h, ucmd->user_cmd.sess.sess_h);
2551 ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
2552 sizeof(ucmd->user_cmd.sess);
2553 ucmd->user_cmd.cmd_h = ucmd->h;
2554 ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2555 ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2557 ucmd->state = UCMD_STATE_DETACH_SESS;
2559 dev_user_add_to_ready(ucmd);
2566 /* No locks are needed, but the activity must be suspended */
2567 static void dev_user_setup_functions(struct scst_user_dev *dev)
2571 dev->devtype.parse = dev_user_parse;
2572 dev->devtype.dev_done = NULL;
2574 if (dev->parse_type != SCST_USER_PARSE_CALL) {
2575 switch (dev->devtype.type) {
2576 case TYPE_DISK:
2577 dev->generic_parse = scst_sbc_generic_parse;
2578 dev->devtype.dev_done = dev_user_disk_done;
2579 break;
2581 case TYPE_TAPE:
2582 dev->generic_parse = scst_tape_generic_parse;
2583 dev->devtype.dev_done = dev_user_tape_done;
2584 break;
2586 case TYPE_MOD:
2587 dev->generic_parse = scst_modisk_generic_parse;
2588 dev->devtype.dev_done = dev_user_disk_done;
2589 break;
2591 case TYPE_ROM:
2592 dev->generic_parse = scst_cdrom_generic_parse;
2593 dev->devtype.dev_done = dev_user_disk_done;
2594 break;
2596 case TYPE_MEDIUM_CHANGER:
2597 dev->generic_parse = scst_changer_generic_parse;
2598 break;
2600 case TYPE_PROCESSOR:
2601 dev->generic_parse = scst_processor_generic_parse;
2602 break;
2604 case TYPE_RAID:
2605 dev->generic_parse = scst_raid_generic_parse;
2606 break;
2608 default:
2609 PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2610 "for it", dev->devtype.type);
2611 dev->parse_type = SCST_USER_PARSE_CALL;
2612 break;
2613 }
2614 } else {
2615 dev->generic_parse = NULL;
2616 dev->devtype.dev_done = NULL;
2617 }
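/*
 * Verifies that the user space handler was built against the same
 * scst_user.h interface version (DEV_USER_VERSION) as this module.
 */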
2623 static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
2625 char ver[sizeof(DEV_USER_VERSION)+1];
2628 res = copy_from_user(ver,
2629 (void __user *)(unsigned long)dev_desc->version_str,
2632 PRINT_ERROR("%s", "Unable to get version string");
2635 ver[sizeof(ver)-1] = '\0';
2637 if (strcmp(ver, DEV_USER_VERSION) != 0) {
2638 /* ->name already 0-terminated in dev_user_ioctl() */
2639 PRINT_ERROR("Incorrect version of user device %s (%s)",
2640 dev_desc->name, ver);
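/*
 * Registers a new virtual user space device described by dev_desc: checks
 * the interface version, creates the clustered and non-clustered SGV pools,
 * fills in the scst_dev_type callbacks and registers both the device
 * handler and the virtual device with the SCST core. On success the device
 * is bound to the control file via file->private_data.
 */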
2649 static int dev_user_register_dev(struct file *file,
2650 const struct scst_user_dev_desc *dev_desc)
2652 int res = -ENOMEM, i;
2653 struct scst_user_dev *dev, *d;
2658 res = dev_user_check_version(dev_desc);
2662 switch (dev_desc->type) {
2666 if (dev_desc->block_size == 0) {
2667 PRINT_ERROR("Wrong block size %d",
2668 dev_desc->block_size);
2672 block = scst_calc_block_shift(dev_desc->block_size);
2679 block = dev_desc->block_size;
2683 if (!try_module_get(THIS_MODULE)) {
2684 PRINT_ERROR("%s", "Fail to get module");
2688 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2692 init_rwsem(&dev->dev_rwsem);
2693 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2694 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2695 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2696 INIT_LIST_HEAD(&dev->ready_cmd_list);
2697 if (file->f_flags & O_NONBLOCK) {
2698 TRACE_DBG("%s", "Non-blocking operations");
2699 dev->blocking = 0;
2700 } else
2701 dev->blocking = 1;
2702 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2703 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2705 strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2706 dev->name[sizeof(dev->name)-1] = '\0';
2708 scst_init_mem_lim(&dev->udev_mem_lim);
2710 dev->pool = sgv_pool_create(dev->name, sgv_no_clustering);
2711 if (dev->pool == NULL)
2713 sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2714 dev_user_free_sg_entries);
2716 scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s-clust",
2717 dev->name);
2718 dev->pool_clust = sgv_pool_create(dev->devtype.name,
2719 sgv_tail_clustering);
2720 if (dev->pool_clust == NULL)
2722 sgv_pool_set_allocator(dev->pool_clust, dev_user_alloc_pages,
2723 dev_user_free_sg_entries);
2725 scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
2726 dev->name);
2727 dev->devtype.type = dev_desc->type;
2728 dev->devtype.threads_num = -1;
2729 dev->devtype.parse_atomic = 1;
2730 dev->devtype.exec_atomic = 0; /* no point to make it 1 */
2731 dev->devtype.dev_done_atomic = 1;
2732 dev->devtype.no_proc = 1;
2733 dev->devtype.attach = dev_user_attach;
2734 dev->devtype.detach = dev_user_detach;
2735 dev->devtype.attach_tgt = dev_user_attach_tgt;
2736 dev->devtype.detach_tgt = dev_user_detach_tgt;
2737 dev->devtype.exec = dev_user_exec;
2738 dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2739 dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2741 init_completion(&dev->cleanup_cmpl);
2743 dev->def_block = block;
2745 res = __dev_user_set_opt(dev, &dev_desc->opt);
2747 TRACE_MEM("dev %p, name %s", dev, dev->name);
2749 spin_lock(&dev_list_lock);
2751 list_for_each_entry(d, &dev_list, dev_list_entry) {
2752 if (strcmp(d->name, dev->name) == 0) {
2753 PRINT_ERROR("Device %s already exist",
2756 spin_unlock(&dev_list_lock);
2761 list_add_tail(&dev->dev_list_entry, &dev_list);
2763 spin_unlock(&dev_list_lock);
2768 res = scst_register_virtual_dev_driver(&dev->devtype);
2772 dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2773 if (dev->virt_id < 0) {
2775 goto out_unreg_handler;
2778 mutex_lock(&dev_priv_mutex);
2779 if (file->private_data != NULL) {
2780 mutex_unlock(&dev_priv_mutex);
2781 PRINT_ERROR("%s", "Device already registered");
2785 file->private_data = dev;
2786 mutex_unlock(&dev_priv_mutex);
2789 TRACE_EXIT_RES(res);
2793 scst_unregister_virtual_device(dev->virt_id);
2796 scst_unregister_virtual_dev_driver(&dev->devtype);
2799 spin_lock(&dev_list_lock);
2800 list_del(&dev->dev_list_entry);
2801 spin_unlock(&dev_list_lock);
2804 sgv_pool_destroy(dev->pool_clust);
2807 sgv_pool_destroy(dev->pool);
2813 module_put(THIS_MODULE);
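/*
 * For reference only: a hedged user space sketch of driving the
 * registration path above. The /dev/scst_user node name, the
 * SCST_USER_REGISTER_DEVICE ioctl and the SCST_USER_PARSE_STANDARD and
 * SCST_USER_MEM_REUSE_ALL option values are assumed from scst_user.h and
 * the README; the kernel code above only relies on name, type, block_size,
 * version_str and opt being filled in.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include "scst_user.h"
 *
 *	int register_vdev(const char *name)
 *	{
 *		struct scst_user_dev_desc desc;
 *		int fd = open("/dev/scst_user", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&desc, 0, sizeof(desc));
 *		desc.version_str = (unsigned long)DEV_USER_VERSION;
 *		strncpy(desc.name, name, sizeof(desc.name) - 1);
 *		desc.type = TYPE_DISK;
 *		desc.block_size = 512;
 *		desc.opt.parse_type = SCST_USER_PARSE_STANDARD;
 *		desc.opt.memory_reuse_type = SCST_USER_MEM_REUSE_ALL;
 *
 *		if (ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc) != 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 *
 * Closing the returned fd later goes through dev_user_release() below and
 * hands the device over to the cleanup thread.
 */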
2817 static int dev_user_unregister_dev(struct file *file)
2820 struct scst_user_dev *dev;
2824 mutex_lock(&dev_priv_mutex);
2825 dev = (struct scst_user_dev *)file->private_data;
2826 res = dev_user_check_reg(dev);
2828 mutex_unlock(&dev_priv_mutex);
2831 down_read(&dev->dev_rwsem);
2832 mutex_unlock(&dev_priv_mutex);
2834 res = scst_suspend_activity(true);
2838 up_read(&dev->dev_rwsem);
2840 dev_user_release(NULL, file);
2842 scst_resume_activity();
2845 TRACE_EXIT_RES(res);
2849 up_read(&dev->dev_rwsem);
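/*
 * Flushes (frees the cached entries of) both SGV pools of the device;
 * activity is suspended for the duration so no command can allocate from
 * the pools concurrently.
 */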
2853 static int dev_user_flush_cache(struct file *file)
2856 struct scst_user_dev *dev;
2860 mutex_lock(&dev_priv_mutex);
2861 dev = (struct scst_user_dev *)file->private_data;
2862 res = dev_user_check_reg(dev);
2864 mutex_unlock(&dev_priv_mutex);
2867 down_read(&dev->dev_rwsem);
2868 mutex_unlock(&dev_priv_mutex);
2870 res = scst_suspend_activity(true);
2874 sgv_pool_flush(dev->pool);
2875 sgv_pool_flush(dev->pool_clust);
2877 scst_resume_activity();
2880 up_read(&dev->dev_rwsem);
2883 TRACE_EXIT_RES(res);
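/*
 * Propagates a capacity change reported by the user space handler to the
 * SCST core via scst_capacity_data_changed().
 */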
2887 static int dev_user_capacity_changed(struct file *file)
2890 struct scst_user_dev *dev;
2894 mutex_lock(&dev_priv_mutex);
2895 dev = (struct scst_user_dev *)file->private_data;
2896 res = dev_user_check_reg(dev);
2898 mutex_unlock(&dev_priv_mutex);
2901 down_read(&dev->dev_rwsem);
2902 mutex_unlock(&dev_priv_mutex);
2904 scst_capacity_data_changed(dev->sdev);
2906 up_read(&dev->dev_rwsem);
2909 TRACE_EXIT_RES(res);
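/*
 * Validates and applies the handler and SCSI mode options to the device,
 * mirrors the SCSI ones into the attached scst_device (if any) and
 * re-selects the parse/dev_done callbacks accordingly.
 */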
2914 static int __dev_user_set_opt(struct scst_user_dev *dev,
2915 const struct scst_user_opt *opt)
2921 TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
2922 "memory_reuse_type %x, partial_transfers_type %x, "
2923 "partial_len %d", dev->name, opt->parse_type,
2924 opt->on_free_cmd_type, opt->memory_reuse_type,
2925 opt->partial_transfers_type, opt->partial_len);
2927 if (opt->parse_type > SCST_USER_MAX_PARSE_OPT ||
2928 opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT ||
2929 opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT ||
2930 opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT) {
2931 PRINT_ERROR("%s", "Invalid option");
2936 if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
2937 (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
2938 ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
2939 (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
2940 (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1) ||
2941 (opt->d_sense > 1)) {
2942 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x,"
2943 " tas %x, d_sense %d, has_own_order_mgmt %x)", opt->tst,
2944 opt->queue_alg, opt->swp, opt->tas, opt->d_sense,
2945 opt->has_own_order_mgmt);
2950 dev->parse_type = opt->parse_type;
2951 dev->on_free_cmd_type = opt->on_free_cmd_type;
2952 dev->memory_reuse_type = opt->memory_reuse_type;
2953 dev->partial_transfers_type = opt->partial_transfers_type;
2954 dev->partial_len = opt->partial_len;
2956 dev->tst = opt->tst;
2957 dev->queue_alg = opt->queue_alg;
2958 dev->swp = opt->swp;
2959 dev->tas = opt->tas;
2961 dev->d_sense = opt->d_sense;
2962 dev->has_own_order_mgmt = opt->has_own_order_mgmt;
2963 if (dev->sdev != NULL) {
2964 dev->sdev->tst = opt->tst;
2965 dev->sdev->queue_alg = opt->queue_alg;
2966 dev->sdev->swp = opt->swp;
2967 dev->sdev->tas = opt->tas;
2968 dev->sdev->d_sense = opt->d_sense;
2969 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
2972 dev_user_setup_functions(dev);
2975 TRACE_EXIT_RES(res);
2979 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
2982 struct scst_user_dev *dev;
2986 mutex_lock(&dev_priv_mutex);
2987 dev = (struct scst_user_dev *)file->private_data;
2988 res = dev_user_check_reg(dev);
2990 mutex_unlock(&dev_priv_mutex);
2993 down_read(&dev->dev_rwsem);
2994 mutex_unlock(&dev_priv_mutex);
2996 res = scst_suspend_activity(true);
3000 res = __dev_user_set_opt(dev, opt);
3002 scst_resume_activity();
3004 up_read(&dev->dev_rwsem);
3007 TRACE_EXIT_RES(res);
3011 static int dev_user_get_opt(struct file *file, void __user *arg)
3014 struct scst_user_dev *dev;
3015 struct scst_user_opt opt;
3019 mutex_lock(&dev_priv_mutex);
3020 dev = (struct scst_user_dev *)file->private_data;
3021 res = dev_user_check_reg(dev);
3023 mutex_unlock(&dev_priv_mutex);
3026 down_read(&dev->dev_rwsem);
3027 mutex_unlock(&dev_priv_mutex);
3029 opt.parse_type = dev->parse_type;
3030 opt.on_free_cmd_type = dev->on_free_cmd_type;
3031 opt.memory_reuse_type = dev->memory_reuse_type;
3032 opt.partial_transfers_type = dev->partial_transfers_type;
3033 opt.partial_len = dev->partial_len;
3034 opt.tst = dev->tst;
3035 opt.queue_alg = dev->queue_alg;
3036 opt.swp = dev->swp;
3037 opt.tas = dev->tas;
3038 opt.d_sense = dev->d_sense;
3039 opt.has_own_order_mgmt = dev->has_own_order_mgmt;
3041 TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
3042 "memory_reuse_type %x, partial_transfers_type %x, "
3043 "partial_len %d", dev->name, opt.parse_type,
3044 opt.on_free_cmd_type, opt.memory_reuse_type,
3045 opt.partial_transfers_type, opt.partial_len);
3047 res = copy_to_user(arg, &opt, sizeof(opt));
3049 up_read(&dev->dev_rwsem);
3051 TRACE_EXIT_RES(res);
3055 static int dev_usr_parse(struct scst_cmd *cmd)
3058 return SCST_CMD_STATE_DEFAULT;
3061 /* Needed only for /proc support */
3062 #define USR_TYPE { \
3063 .name = DEV_USER_NAME, \
3065 .parse = dev_usr_parse, \
3068 static struct scst_dev_type dev_user_devtype = USR_TYPE;
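/*
 * Called on the last close of the control file: unlinks the device from
 * dev_list, unregisters it from the SCST core, queues it for the cleanup
 * thread and waits until all of its outstanding commands have been
 * released.
 */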
3070 static int dev_user_release(struct inode *inode, struct file *file)
3073 struct scst_user_dev *dev;
3077 mutex_lock(&dev_priv_mutex);
3078 dev = (struct scst_user_dev *)file->private_data;
3080 mutex_unlock(&dev_priv_mutex);
3083 file->private_data = NULL;
3085 TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
3087 spin_lock(&dev_list_lock);
3088 list_del(&dev->dev_list_entry);
3089 spin_unlock(&dev_list_lock);
3092 wake_up_all(&dev->cmd_lists.cmd_list_waitQ);
3094 down_write(&dev->dev_rwsem);
3095 mutex_unlock(&dev_priv_mutex);
3097 spin_lock(&cleanup_lock);
3098 list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
3099 spin_unlock(&cleanup_lock);
3101 wake_up(&cleanup_list_waitQ);
3103 scst_unregister_virtual_device(dev->virt_id);
3104 scst_unregister_virtual_dev_driver(&dev->devtype);
3106 sgv_pool_destroy(dev->pool_clust);
3107 sgv_pool_destroy(dev->pool);
3109 TRACE_DBG("Unregistering finished (dev %p)", dev);
3111 dev->cleanup_done = 1;
3113 wake_up(&cleanup_list_waitQ);
3114 wake_up(&dev->cmd_lists.cmd_list_waitQ);
3116 wait_for_completion(&dev->cleanup_cmpl);
3118 up_write(&dev->dev_rwsem); /* to make the debug check happy */
3120 TRACE_DBG("Releasing completed (dev %p)", dev);
3124 module_put(THIS_MODULE);
3127 TRACE_EXIT_RES(res);
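/*
 * Runs in the cleanup thread: unjams the device and keeps releasing its
 * remaining commands until none are left and cleanup_done has been set,
 * then signals cleanup_cmpl so dev_user_release() can return.
 */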
3131 static int dev_user_process_cleanup(struct scst_user_dev *dev)
3133 struct scst_user_cmd *ucmd;
3138 sBUG_ON(dev->blocking);
3139 wake_up_all(&dev->cmd_lists.cmd_list_waitQ); /* just in case */
3142 TRACE_DBG("Cleanuping dev %p", dev);
3144 dev_user_unjam_dev(dev);
3146 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
3148 rc = dev_user_get_next_cmd(dev, &ucmd);
3150 dev_user_unjam_cmd(ucmd, 1, NULL);
3152 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
3154 if (rc == -EAGAIN) {
3155 if (dev->cleanup_done)
3158 TRACE_DBG("No more commands (dev %p)", dev);
3164 #ifdef CONFIG_SCST_EXTRACHECKS
3167 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
3168 struct list_head *head = &dev->ucmd_hash[i];
3169 struct scst_user_cmd *ucmd2;
3171 list_for_each_entry(ucmd2, head, hash_list_entry) {
3172 PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd2,
3173 ucmd2->state, atomic_read(&ucmd2->ucmd_ref));
3181 TRACE_DBG("Cleanuping done (dev %p)", dev);
3182 complete_all(&dev->cleanup_cmpl);
3186 TRACE_EXIT_RES(res);
3190 static inline int test_cleanup_list(void)
3192 int res = !list_empty(&cleanup_list) ||
3193 unlikely(kthread_should_stop());
3197 static int dev_user_cleanup_thread(void *arg)
3201 PRINT_INFO("Cleanup thread started, PID %d", current->pid);
3203 current->flags |= PF_NOFREEZE;
3205 spin_lock(&cleanup_lock);
3206 while (!kthread_should_stop()) {
3208 init_waitqueue_entry(&wait, current);
3210 if (!test_cleanup_list()) {
3211 add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
3212 for (;;) {
3213 set_current_state(TASK_INTERRUPTIBLE);
3214 if (test_cleanup_list())
3215 break;
3216 spin_unlock(&cleanup_lock);
3217 schedule();
3218 spin_lock(&cleanup_lock);
3219 }
3220 set_current_state(TASK_RUNNING);
3221 remove_wait_queue(&cleanup_list_waitQ, &wait);
3225 * We have to poll devices, because commands can go from SCST
3226 * core on cmd_list_waitQ and we have no practical way to
3227 * interrupt that wait from here.
3231 struct scst_user_dev *dev;
3234 while (!list_empty(&cleanup_list)) {
3237 dev = list_entry(cleanup_list.next,
3238 typeof(*dev), cleanup_list_entry);
3239 list_del(&dev->cleanup_list_entry);
3241 spin_unlock(&cleanup_lock);
3242 rc = dev_user_process_cleanup(dev);
3243 spin_lock(&cleanup_lock);
3246 list_add_tail(&dev->cleanup_list_entry,
3250 if (list_empty(&cl_devs))
3253 spin_unlock(&cleanup_lock);
3255 spin_lock(&cleanup_lock);
3257 while (!list_empty(&cl_devs)) {
3258 dev = list_entry(cl_devs.next, typeof(*dev),
3259 cleanup_list_entry);
3260 list_move_tail(&dev->cleanup_list_entry,
3265 spin_unlock(&cleanup_lock);
3268 * If kthread_should_stop() is true, we are guaranteed to be
3269 * in the module unload path, so cleanup_list must be empty.
3271 sBUG_ON(!list_empty(&cleanup_list));
3273 PRINT_INFO("Cleanup thread PID %d finished", current->pid);
3279 static int __init init_scst_user(void)
3282 struct max_get_reply {
3283 union {
3284 struct scst_user_get_cmd g;
3285 struct scst_user_reply_cmd r;
3286 };
3287 };
3288 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3289 struct class_device *class_member;
3296 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
3297 PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
3298 "Consider changing VMSPLIT option or use a 64-bit "
3299 "configuration instead. See README file for details.");
3304 user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
3305 if (user_cmd_cachep == NULL) {
3310 user_get_cmd_cachep = KMEM_CACHE(max_get_reply, SCST_SLAB_FLAGS);
3311 if (user_get_cmd_cachep == NULL) {
3316 dev_user_devtype.module = THIS_MODULE;
3318 res = scst_register_virtual_dev_driver(&dev_user_devtype);
3322 res = scst_dev_handler_build_std_proc(&dev_user_devtype);
3326 dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
3327 if (IS_ERR(dev_user_sysfs_class)) {
3328 PRINT_ERROR("%s", "Unable create sysfs class for SCST user "
3330 res = PTR_ERR(dev_user_sysfs_class);
3334 res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
3336 PRINT_ERROR("Unable to get major %d for SCSI tapes",
3341 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3342 class_member = class_device_create(dev_user_sysfs_class, NULL,
3343 MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
3344 if (IS_ERR(class_member)) {
3345 res = PTR_ERR(class_member);
3349 dev = device_create(dev_user_sysfs_class, NULL,
3350 MKDEV(DEV_USER_MAJOR, 0),
3351 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3361 cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
3362 "scst_usr_cleanupd");
3363 if (IS_ERR(cleanup_thread)) {
3364 res = PTR_ERR(cleanup_thread);
3365 PRINT_ERROR("kthread_create() failed: %d", res);
3370 TRACE_EXIT_RES(res);
3374 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3375 class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3377 device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3381 unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3384 class_destroy(dev_user_sysfs_class);
3387 scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3390 scst_unregister_virtual_dev_driver(&dev_user_devtype);
3393 kmem_cache_destroy(user_get_cmd_cachep);
3396 kmem_cache_destroy(user_cmd_cachep);
3400 static void __exit exit_scst_user(void)
3406 rc = kthread_stop(cleanup_thread);
3408 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
3410 unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3411 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3412 class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3414 device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3416 class_destroy(dev_user_sysfs_class);
3418 scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3419 scst_unregister_virtual_dev_driver(&dev_user_devtype);
3421 kmem_cache_destroy(user_get_cmd_cachep);
3422 kmem_cache_destroy(user_cmd_cachep);
3428 module_init(init_scst_user);
3429 module_exit(exit_scst_user);
3431 MODULE_AUTHOR("Vladislav Bolkhovitin");
3432 MODULE_LICENSE("GPL");
3433 MODULE_DESCRIPTION("Virtual user space device handler for SCST");
3434 MODULE_VERSION(SCST_VERSION_STRING);
3435 MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);