4 * Copyright (C) 2007 - 2009 Vladislav Bolkhovitin <vst@vlnb.net>
5 * Copyright (C) 2007 - 2009 ID7 Ltd.
7 * SCSI virtual user space device handler
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation, version 2
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #include <linux/kthread.h>
21 #include <linux/delay.h>
22 #include <linux/poll.h>
23 #include <linux/stddef.h>
25 #define LOG_PREFIX DEV_USER_NAME
28 #include "scst_user.h"
29 #include "scst_dev_handler.h"
31 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
32 #warning "HIGHMEM kernel configurations are not supported by this module,\
33 because nowadays it isn't worth the effort. Consider changing the\
34 VMSPLIT option or using a 64-bit configuration instead. See the README file\
38 #define DEV_USER_CMD_HASH_ORDER 6
39 #define DEV_USER_ATTACH_TIMEOUT (5*HZ)
41 struct scst_user_dev {
42 struct rw_semaphore dev_rwsem;
44 struct scst_cmd_lists cmd_lists;
46 /* Protected by cmd_lists.cmd_list_lock */
47 struct list_head ready_cmd_list;
50 /* Protected by dev_rwsem, or need no protection at all */
50 unsigned int blocking:1;
51 unsigned int cleanup_done:1;
53 unsigned int queue_alg:4;
56 unsigned int d_sense:1;
57 unsigned int has_own_order_mgmt:1;
59 int (*generic_parse)(struct scst_cmd *cmd,
60 int (*get_block)(struct scst_cmd *cmd));
65 struct scst_mem_lim udev_mem_lim;
66 struct sgv_pool *pool;
67 struct sgv_pool *pool_clust;
70 uint8_t on_free_cmd_type;
71 uint8_t memory_reuse_type;
72 uint8_t partial_transfers_type;
75 struct scst_dev_type devtype;
77 /* Both protected by cmd_lists.cmd_list_lock */
78 unsigned int handle_counter;
79 struct list_head ucmd_hash[1 << DEV_USER_CMD_HASH_ORDER];
81 struct scst_device *sdev;
84 struct list_head dev_list_entry;
85 char name[SCST_MAX_NAME];
87 struct list_head cleanup_list_entry;
88 struct completion cleanup_cmpl;
91 /* Most fields are unprotected, since only one thread at a time can access them */
92 struct scst_user_cmd {
94 struct scst_user_dev *dev;
98 unsigned int buff_cached:1;
99 unsigned int buf_dirty:1;
100 unsigned int background_exec:1;
101 unsigned int aborted:1;
103 struct scst_user_cmd *buf_ucmd;
107 int first_page_offset;
109 struct page **data_pages;
110 struct sgv_pool_obj *sgv;
113 * Special flags, which can be accessed asynchronously (hence "long").
114 * Protected by cmd_lists.cmd_list_lock.
116 unsigned long sent_to_user:1;
117 unsigned long jammed:1;
118 unsigned long this_state_unjammed:1;
119 unsigned long seen_by_user:1; /* here only as a small optimization */
123 struct list_head ready_cmd_list_entry;
126 struct list_head hash_list_entry;
128 int user_cmd_payload_len;
129 struct scst_user_get_cmd user_cmd;
131 /* cmpl used only by ATTACH_SESS, mcmd used only by TM */
133 struct completion *cmpl;
134 struct scst_mgmt_cmd *mcmd;
139 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
141 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);
143 static int dev_user_parse(struct scst_cmd *cmd);
144 static int dev_user_exec(struct scst_cmd *cmd);
145 static void dev_user_on_free_cmd(struct scst_cmd *cmd);
146 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
147 struct scst_tgt_dev *tgt_dev);
149 static int dev_user_disk_done(struct scst_cmd *cmd);
150 static int dev_user_tape_done(struct scst_cmd *cmd);
152 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
153 gfp_t gfp_mask, void *priv);
154 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
157 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
159 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
160 unsigned long *flags);
162 static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd);
163 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
165 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
166 static int dev_user_register_dev(struct file *file,
167 const struct scst_user_dev_desc *dev_desc);
168 static int dev_user_unregister_dev(struct file *file);
169 static int dev_user_flush_cache(struct file *file);
170 static int dev_user_capacity_changed(struct file *file);
171 static int dev_user_prealloc_buffer(struct file *file, void __user *arg);
172 static int __dev_user_set_opt(struct scst_user_dev *dev,
173 const struct scst_user_opt *opt);
174 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
175 static int dev_user_get_opt(struct file *file, void __user *arg);
177 static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
178 static long dev_user_ioctl(struct file *file, unsigned int cmd,
180 static int dev_user_release(struct inode *inode, struct file *file);
181 static int dev_user_exit_dev(struct scst_user_dev *dev);
183 #ifdef CONFIG_SCST_PROC
185 static int dev_user_read_proc(struct seq_file *seq,
186 struct scst_dev_type *dev_type);
188 #else /* CONFIG_SCST_PROC */
190 static ssize_t dev_user_sysfs_commands_show(struct kobject *kobj,
191 struct kobj_attribute *attr, char *buf);
193 static struct kobj_attribute dev_user_commands_attr =
194 __ATTR(commands, S_IRUGO, dev_user_sysfs_commands_show, NULL);
196 static const struct attribute *dev_user_dev_attrs[] = {
197 &dev_user_commands_attr.attr,
201 #endif /* CONFIG_SCST_PROC */
203 static int dev_usr_parse(struct scst_cmd *cmd);
207 static struct kmem_cache *user_cmd_cachep;
208 static struct kmem_cache *user_get_cmd_cachep;
210 static DEFINE_MUTEX(dev_priv_mutex);
212 static const struct file_operations dev_user_fops = {
213 .poll = dev_user_poll,
214 .unlocked_ioctl = dev_user_ioctl,
216 .compat_ioctl = dev_user_ioctl,
218 .release = dev_user_release,
221 static struct scst_dev_type dev_user_devtype = {
222 .name = DEV_USER_NAME,
224 .parse = dev_usr_parse,
225 #ifdef CONFIG_SCST_PROC
226 .read_proc = dev_user_read_proc,
228 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
229 .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
230 .trace_flags = &trace_flag,
234 static int dev_user_major;
236 static struct class *dev_user_sysfs_class;
238 static DEFINE_SPINLOCK(dev_list_lock);
239 static LIST_HEAD(dev_list);
241 static DEFINE_SPINLOCK(cleanup_lock);
242 static LIST_HEAD(cleanup_list);
243 static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
244 static struct task_struct *cleanup_thread;
247 * Skip this command if the result is not 0. Must be called under
248 * cmd_lists.cmd_list_lock and IRQ off.
250 static inline bool ucmd_get_check(struct scst_user_cmd *ucmd)
252 int r = atomic_inc_return(&ucmd->ucmd_ref);
254 if (unlikely(r == 1)) {
255 TRACE_DBG("ucmd %p is being destroyed", ucmd);
256 atomic_dec(&ucmd->ucmd_ref);
259 * Necessary code is serialized by cmd_list_lock in
263 TRACE_DBG("ucmd %p, new ref_cnt %d", ucmd,
264 atomic_read(&ucmd->ucmd_ref));
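/*
 * Illustrative note (added, not from the original source): the ucmd reference
 * counting follows the usual get/put pattern. dev_user_alloc_ucmd() starts
 * the counter at 1, ucmd_get()/ucmd_get_ordered() take extra references, and
 * ucmd_put() drops one and frees the ucmd via dev_user_free_ucmd() when the
 * counter reaches zero. ucmd_get_check() is the lookup-side helper: if its
 * increment observes a 0 -> 1 transition, the ucmd is already being torn
 * down, so it drops the reference again and the caller must skip that ucmd.
 * A typical caller, sketched from the lookups in this file, looks like:
 *
 *	spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
 *	ucmd = __ucmd_find_hash(dev, cmd_h);
 *	if ((ucmd != NULL) && !ucmd_get_check(ucmd)) {
 *		spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
 *		... use ucmd ...
 *		ucmd_put(ucmd);		(never under cmd_list_lock)
 *	} else
 *		spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
 */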
270 static inline void __ucmd_get(struct scst_user_cmd *ucmd, bool barrier)
272 TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
273 atomic_inc(&ucmd->ucmd_ref);
275 smp_mb__after_atomic_inc();
278 static inline void ucmd_get_ordered(struct scst_user_cmd *ucmd)
280 __ucmd_get(ucmd, true);
283 static inline void ucmd_get(struct scst_user_cmd *ucmd)
285 __ucmd_get(ucmd, false);
288 /* Must not be called under cmd_list_lock!! */
289 static inline void ucmd_put(struct scst_user_cmd *ucmd)
291 TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
293 EXTRACHECKS_BUG_ON(atomic_read(&ucmd->ucmd_ref) == 0);
295 if (atomic_dec_and_test(&ucmd->ucmd_ref))
296 dev_user_free_ucmd(ucmd);
299 static inline int calc_num_pg(unsigned long buf, int len)
301 len += buf & ~PAGE_MASK;
302 return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
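/*
 * Worked example (added for clarity, assuming PAGE_SIZE == 4096): for
 * buf == 0x10000800 and len == 0x1000, the page offset 0x800 is added to
 * len (giving 0x1800), i.e. one full page plus a remainder, so the result
 * is 2 pages, which is exactly what [0x10000800, 0x10001800) spans.
 */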
305 static void __dev_user_not_reg(void)
307 TRACE(TRACE_MGMT_MINOR, "%s", "Device not registered");
311 static inline int dev_user_check_reg(struct scst_user_dev *dev)
314 __dev_user_not_reg();
320 static inline int scst_user_cmd_hashfn(int h)
322 return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
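/*
 * Note (added): the hash is simply the low DEV_USER_CMD_HASH_ORDER bits of
 * the command handle, i.e. 64 buckets. Since handles come from the
 * monotonically increasing handle_counter, consecutive commands land in
 * consecutive buckets, keeping the per-bucket lists short.
 */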
325 static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
328 struct list_head *head;
329 struct scst_user_cmd *ucmd;
331 head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
332 list_for_each_entry(ucmd, head, hash_list_entry) {
334 TRACE_DBG("Found ucmd %p", ucmd);
341 static void cmd_insert_hash(struct scst_user_cmd *ucmd)
343 struct list_head *head;
344 struct scst_user_dev *dev = ucmd->dev;
345 struct scst_user_cmd *u;
348 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
350 ucmd->h = dev->handle_counter++;
351 u = __ucmd_find_hash(dev, ucmd->h);
353 head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
354 list_add_tail(&ucmd->hash_list_entry, head);
355 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
357 TRACE_DBG("Inserted ucmd %p, h=%d (dev %s)", ucmd, ucmd->h, dev->name);
361 static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
365 spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
366 list_del(&ucmd->hash_list_entry);
367 spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
369 TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
373 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
377 TRACE_MEM("Freeing ucmd %p", ucmd);
379 cmd_remove_hash(ucmd);
380 EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);
382 kmem_cache_free(user_cmd_cachep, ucmd);
388 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
389 gfp_t gfp_mask, void *priv)
391 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
396 /* *sg is supposed to be zeroed */
398 TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
399 ucmd->ubuff, ucmd->cur_data_page);
401 if (ucmd->cur_data_page == 0) {
402 TRACE_MEM("ucmd->first_page_offset %d",
403 ucmd->first_page_offset);
404 offset = ucmd->first_page_offset;
408 if (ucmd->cur_data_page >= ucmd->num_data_pages)
411 sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
412 PAGE_SIZE - offset, offset);
413 ucmd->cur_data_page++;
415 TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
417 TRACE_BUFFER("Page data", sg_virt(sg), sg->length);
424 static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
428 TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
429 ucmd, ucmd->h, ucmd->ubuff);
431 ucmd->user_cmd_payload_len =
432 offsetof(struct scst_user_get_cmd, on_cached_mem_free) +
433 sizeof(ucmd->user_cmd.on_cached_mem_free);
434 ucmd->user_cmd.cmd_h = ucmd->h;
435 ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
436 ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;
438 ucmd->state = UCMD_STATE_ON_CACHE_FREEING;
440 dev_user_add_to_ready(ucmd);
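/*
 * Note (added): this is the cached-memory half of the user space protocol.
 * Buffers allocated from the SGV cache keep referencing the handler's pages
 * after the command completes; only when the SGV object is eventually freed
 * does the handler receive SCST_USER_ON_CACHED_MEM_FREE with the original
 * pbuf, at which point it may release that memory on its side.
 */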
446 static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
452 TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
453 ucmd->ubuff, ucmd->num_data_pages);
455 for (i = 0; i < ucmd->num_data_pages; i++) {
456 struct page *page = ucmd->data_pages[i];
461 page_cache_release(page);
464 kfree(ucmd->data_pages);
465 ucmd->data_pages = NULL;
471 static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
475 sBUG_ON(ucmd->data_pages == NULL);
477 TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
478 ucmd, ucmd->ubuff, ucmd->buff_cached);
480 dev_user_unmap_buf(ucmd);
482 if (ucmd->buff_cached)
483 dev_user_on_cached_mem_free(ucmd);
491 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
494 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
496 TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
499 __dev_user_free_sg_entries(ucmd);
504 static inline int is_buff_cached(struct scst_user_cmd *ucmd)
506 int mem_reuse_type = ucmd->dev->memory_reuse_type;
508 if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
509 ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
510 (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
511 ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
512 (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE)))
518 static inline int is_need_offs_page(unsigned long buf, int len)
520 return ((buf & ~PAGE_MASK) != 0) &&
521 ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
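/*
 * Worked example (added, assuming PAGE_SIZE == 4096): buf == 0x1f00,
 * len == 0x200 starts mid-page and ends at 0x20ff on the next page, so an
 * extra "offset" page is needed; with len == 0x80 the range stays within
 * one page and the function returns false.
 */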
525 * Returns 0 on success, <0 on fatal failure, >0 if pages are needed.
526 * Unmaps the buffer, if needed, in case of error.
528 static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
531 struct scst_cmd *cmd = ucmd->cmd;
532 struct scst_user_dev *dev = ucmd->dev;
533 struct sgv_pool *pool;
536 int bufflen, orig_bufflen;
538 int out_sg_pages = 0;
542 gfp_mask = __GFP_NOWARN;
543 gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
545 if (cmd->data_direction != SCST_DATA_BIDI) {
546 orig_bufflen = cmd->bufflen;
547 pool = (struct sgv_pool *)cmd->tgt_dev->dh_priv;
549 /* Make in_sg->offset 0 */
550 int len = cmd->bufflen + ucmd->first_page_offset;
551 out_sg_pages = (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
552 orig_bufflen = (out_sg_pages << PAGE_SHIFT) + cmd->in_bufflen;
555 bufflen = orig_bufflen;
557 EXTRACHECKS_BUG_ON(bufflen == 0);
560 flags |= SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
561 if (ucmd->ubuff == 0)
562 flags |= SGV_POOL_NO_ALLOC_ON_CACHE_MISS;
564 TRACE_MEM("%s", "Not cached buff");
565 flags |= SGV_POOL_ALLOC_NO_CACHED;
566 if (ucmd->ubuff == 0) {
570 bufflen += ucmd->first_page_offset;
571 if (is_need_offs_page(ucmd->ubuff, orig_bufflen))
572 last_len = bufflen & ~PAGE_MASK;
574 last_len = orig_bufflen & ~PAGE_MASK;
576 last_len = PAGE_SIZE;
578 ucmd->buff_cached = cached_buff;
580 cmd->sg = sgv_pool_alloc(pool, bufflen, gfp_mask, flags, &cmd->sg_cnt,
581 &ucmd->sgv, &dev->udev_mem_lim, ucmd);
582 if (cmd->sg != NULL) {
583 struct scst_user_cmd *buf_ucmd =
584 (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
586 TRACE_MEM("Buf ucmd %p (cmd->sg_cnt %d, last seg len %d, "
587 "last_len %d, bufflen %d)", buf_ucmd, cmd->sg_cnt,
588 cmd->sg[cmd->sg_cnt-1].length, last_len, bufflen);
590 ucmd->ubuff = buf_ucmd->ubuff;
591 ucmd->buf_ucmd = buf_ucmd;
593 EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
597 cmd->sg[cmd->sg_cnt-1].length &= PAGE_MASK;
598 cmd->sg[cmd->sg_cnt-1].length += last_len;
601 TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
602 "last seg len %d)", ucmd, cached_buff, ucmd->ubuff,
603 cmd->sg[cmd->sg_cnt-1].length);
605 if (cmd->data_direction == SCST_DATA_BIDI) {
606 cmd->in_sg = &cmd->sg[out_sg_pages];
607 cmd->in_sg_cnt = cmd->sg_cnt - out_sg_pages;
608 cmd->sg_cnt = out_sg_pages;
609 TRACE_MEM("cmd %p, in_sg %p, in_sg_cnt %d, sg_cnt %d",
610 cmd, cmd->in_sg, cmd->in_sg_cnt, cmd->sg_cnt);
613 if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
616 PRINT_INFO("Unable to complete command due to "
617 "SG IO count limitation (requested %d, "
618 "available %d, tgt lim %d)",
619 cmd->sg_cnt, cmd->tgt_dev->max_sg_cnt,
620 cmd->tgt->sg_tablesize);
624 /* sgv will be freed in dev_user_free_sgv() */
628 TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached, %d, "
629 "sg_cnt %d, ubuff %lx, sgv %p", ucmd, ucmd->h,
630 ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
631 if (unlikely(cmd->sg_cnt == 0)) {
632 TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
633 sBUG_ON(ucmd->sgv != NULL);
636 switch (ucmd->state) {
637 case UCMD_STATE_BUF_ALLOCING:
640 case UCMD_STATE_EXECING:
655 static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
657 int rc, res = SCST_CMD_STATE_DEFAULT;
658 struct scst_cmd *cmd = ucmd->cmd;
662 ucmd->state = UCMD_STATE_BUF_ALLOCING;
663 cmd->dh_data_buf_alloced = 1;
665 rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
670 res = scst_get_cmd_abnormal_done_state(cmd);
674 if (!(cmd->data_direction & SCST_DATA_WRITE) &&
675 !scst_is_cmd_local(cmd)) {
676 TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
680 ucmd->user_cmd_payload_len =
681 offsetof(struct scst_user_get_cmd, alloc_cmd) +
682 sizeof(ucmd->user_cmd.alloc_cmd);
683 ucmd->user_cmd.cmd_h = ucmd->h;
684 ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
685 ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
686 memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb, cmd->cdb_len);
687 ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
688 ucmd->user_cmd.alloc_cmd.ext_cdb_len = cmd->ext_cdb_len;
689 ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
690 (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
691 ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
692 ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
693 ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;
695 dev_user_add_to_ready(ucmd);
697 res = SCST_CMD_STATE_STOP;
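/*
 * Note (added): this is the kernel side of the SCST_USER_ALLOC_MEM
 * handshake. The command is parked with SCST_CMD_STATE_STOP, the handler
 * picks it up via SCST_USER_REPLY_AND_GET_CMD, allocates a buffer and
 * returns its address in alloc_reply.pbuf; dev_user_process_reply_alloc()
 * then pins those pages with dev_user_map_buf() and restarts the command.
 */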
704 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
707 struct scst_user_cmd *ucmd = NULL;
711 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
712 ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
714 memset(ucmd, 0, sizeof(*ucmd));
716 ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
718 if (unlikely(ucmd == NULL)) {
719 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
720 "user cmd (gfp_mask %x)", gfp_mask);
724 atomic_set(&ucmd->ucmd_ref, 1);
726 cmd_insert_hash(ucmd);
728 TRACE_MEM("ucmd %p allocated", ucmd);
731 TRACE_EXIT_HRES((unsigned long)ucmd);
735 static int dev_user_get_block(struct scst_cmd *cmd)
737 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
739 * No need for locks here, since *_detach() cannot be
740 * called while there are outstanding commands.
742 TRACE_EXIT_RES(dev->block);
746 static int dev_user_parse(struct scst_cmd *cmd)
748 int rc, res = SCST_CMD_STATE_DEFAULT;
749 struct scst_user_cmd *ucmd;
750 int atomic = scst_cmd_atomic(cmd);
751 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
752 gfp_t gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
756 if (cmd->dh_priv == NULL) {
757 ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
758 if (unlikely(ucmd == NULL)) {
760 res = SCST_CMD_STATE_NEED_THREAD_CTX;
770 ucmd = (struct scst_user_cmd *)cmd->dh_priv;
771 TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
774 TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);
776 if (ucmd->state != UCMD_STATE_NEW)
779 switch (dev->parse_type) {
780 case SCST_USER_PARSE_STANDARD:
781 TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
782 rc = dev->generic_parse(cmd, dev_user_get_block);
783 if ((rc != 0) || (cmd->op_flags & SCST_INFO_NOT_FOUND))
787 case SCST_USER_PARSE_EXCEPTION:
788 TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
789 rc = dev->generic_parse(cmd, dev_user_get_block);
790 if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_NOT_FOUND)))
792 else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
793 TRACE_MEM("Restarting PARSE to thread context "
795 res = SCST_CMD_STATE_NEED_THREAD_CTX;
798 /* else fall through */
800 case SCST_USER_PARSE_CALL:
801 TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
802 "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
803 ucmd->user_cmd_payload_len =
804 offsetof(struct scst_user_get_cmd, parse_cmd) +
805 sizeof(ucmd->user_cmd.parse_cmd);
806 ucmd->user_cmd.cmd_h = ucmd->h;
807 ucmd->user_cmd.subcode = SCST_USER_PARSE;
808 ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
809 memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb, cmd->cdb_len);
810 ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
811 ucmd->user_cmd.parse_cmd.ext_cdb_len = cmd->ext_cdb_len;
812 ucmd->user_cmd.parse_cmd.timeout = cmd->timeout / HZ;
813 ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
814 ucmd->user_cmd.parse_cmd.in_bufflen = cmd->in_bufflen;
815 ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
816 ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
817 ucmd->user_cmd.parse_cmd.expected_values_set =
818 cmd->expected_values_set;
819 ucmd->user_cmd.parse_cmd.expected_data_direction =
820 cmd->expected_data_direction;
821 ucmd->user_cmd.parse_cmd.expected_transfer_len =
822 cmd->expected_transfer_len;
823 ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
824 ucmd->state = UCMD_STATE_PARSING;
825 dev_user_add_to_ready(ucmd);
826 res = SCST_CMD_STATE_STOP;
835 if (cmd->bufflen == 0) {
837 * According to SPC, a bufflen of 0 for data transfer commands isn't
838 * an error, so we need to fix the transfer direction.
840 cmd->data_direction = SCST_DATA_NONE;
843 if (cmd->data_direction != SCST_DATA_NONE)
844 res = dev_user_alloc_space(ucmd);
851 PRINT_ERROR("PARSE failed (ucmd %p, rc %d)", ucmd, rc);
852 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
855 res = scst_get_cmd_abnormal_done_state(cmd);
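/*
 * Note (added): for SCST_USER_PARSE_CALL (and for the exception path above)
 * parsing is delegated to user space as an SCST_USER_PARSE request; the
 * handler's parse_reply supplies queue_type, data_direction, bufflen and
 * data_len, which dev_user_process_reply_parse() validates before the
 * command is restarted.
 */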
859 static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
861 struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
862 unsigned long start = buf_ucmd->ubuff;
863 int i, bufflen = ucmd->cmd->bufflen;
871 * Possibly, flushing all the pages from ucmd->cmd->sg could be
872 * faster, since they should be cache hot, while ucmd->buf_ucmd and
873 * buf_ucmd->data_pages are cache cold. But, on the other hand,
874 * sizeof(buf_ucmd->data_pages[0]) is considerably smaller than
875 * sizeof(ucmd->cmd->sg[0]), so on big buffers going over the
876 * data_pages array can lead to fewer cache misses. So, real numbers are
880 for (i = 0; (bufflen > 0) && (i < buf_ucmd->num_data_pages); i++) {
882 page = buf_ucmd->data_pages[i];
883 #ifdef ARCH_HAS_FLUSH_ANON_PAGE
884 struct vm_area_struct *vma = find_vma(current->mm, start);
886 flush_anon_page(vma, page, start);
888 flush_dcache_page(page);
890 bufflen -= PAGE_SIZE;
898 static int dev_user_exec(struct scst_cmd *cmd)
900 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
901 int res = SCST_EXEC_COMPLETED;
905 #if 0 /* We set exec_atomic to 0 to let the SCST core know that we need a thread
906 * context to complete the necessary actions, but all we are going to
907 * do in this function is, in fact, atomic, so let's skip this check.
909 if (scst_cmd_atomic(cmd)) {
910 TRACE_DBG("%s", "User exec() can not be called in atomic "
911 "context, rescheduling to the thread");
912 res = SCST_EXEC_NEED_THREAD;
917 TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
918 "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
919 cmd->bufflen, cmd->data_len, ucmd->ubuff);
921 if (cmd->data_direction & SCST_DATA_WRITE)
922 dev_user_flush_dcache(ucmd);
924 BUILD_BUG_ON(sizeof(ucmd->user_cmd.exec_cmd.cdb) != sizeof(cmd->cdb));
926 ucmd->user_cmd_payload_len =
927 offsetof(struct scst_user_get_cmd, exec_cmd) +
928 sizeof(ucmd->user_cmd.exec_cmd);
929 ucmd->user_cmd.cmd_h = ucmd->h;
930 ucmd->user_cmd.subcode = SCST_USER_EXEC;
931 ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
932 memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb, cmd->cdb_len);
933 ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
934 ucmd->user_cmd.exec_cmd.ext_cdb_len = cmd->ext_cdb_len;
935 ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
936 ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
937 ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
938 if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
939 ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
940 (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
942 ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
943 ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
944 ucmd->user_cmd.exec_cmd.partial = 0;
945 ucmd->user_cmd.exec_cmd.timeout = cmd->timeout / HZ;
946 ucmd->user_cmd.exec_cmd.p_in_buf = ucmd->ubuff +
947 (cmd->sg_cnt << PAGE_SHIFT);
948 ucmd->user_cmd.exec_cmd.in_bufflen = cmd->in_bufflen;
949 ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;
951 ucmd->state = UCMD_STATE_EXECING;
953 dev_user_add_to_ready(ucmd);
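/*
 * Note (added): SCST_USER_EXEC always completes asynchronously here; the
 * status, sense and resp_data_len come back in the exec reply and are
 * applied in dev_user_process_reply_exec(). For bidirectional commands
 * p_in_buf points just past the WRITE pages of the same mapped buffer,
 * as set up a few lines above.
 */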
959 static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
961 if (ucmd->sgv != NULL) {
962 sgv_pool_free(ucmd->sgv, &ucmd->dev->udev_mem_lim);
964 } else if (ucmd->data_pages != NULL) {
965 /* We mapped pages, but for some reason didn't allocate them */
967 __dev_user_free_sg_entries(ucmd);
972 static void dev_user_on_free_cmd(struct scst_cmd *cmd)
974 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
978 if (unlikely(ucmd == NULL))
981 TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
982 ucmd->buff_cached, ucmd->ubuff);
985 if ((cmd->data_direction & SCST_DATA_WRITE) && ucmd->buf_ucmd != NULL)
986 ucmd->buf_ucmd->buf_dirty = 1;
988 if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
989 ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
990 /* The state assignment must be before freeing sgv! */
994 if (unlikely(!ucmd->seen_by_user)) {
995 TRACE_MGMT_DBG("Not seen by user ucmd %p", ucmd);
999 ucmd->user_cmd_payload_len =
1000 offsetof(struct scst_user_get_cmd, on_free_cmd) +
1001 sizeof(ucmd->user_cmd.on_free_cmd);
1002 ucmd->user_cmd.cmd_h = ucmd->h;
1003 ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;
1004 ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
1005 ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
1006 ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
1007 ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
1008 ucmd->user_cmd.on_free_cmd.status = cmd->status;
1009 ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;
1011 ucmd->state = UCMD_STATE_ON_FREEING;
1013 dev_user_add_to_ready(ucmd);
1020 dev_user_process_reply_on_free(ucmd);
1024 static void dev_user_set_block(struct scst_cmd *cmd, int block)
1026 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
1028 * No need for locks here, since *_detach() cannot be
1029 * called while there are outstanding commands.
1031 TRACE_DBG("dev %p, new block %d", dev, block);
1035 dev->block = dev->def_block;
1039 static int dev_user_disk_done(struct scst_cmd *cmd)
1041 int res = SCST_CMD_STATE_DEFAULT;
1045 res = scst_block_generic_dev_done(cmd, dev_user_set_block);
1047 TRACE_EXIT_RES(res);
1051 static int dev_user_tape_done(struct scst_cmd *cmd)
1053 int res = SCST_CMD_STATE_DEFAULT;
1057 res = scst_tape_generic_dev_done(cmd, dev_user_set_block);
1059 TRACE_EXIT_RES(res);
1063 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
1065 struct scst_user_dev *dev = ucmd->dev;
1066 unsigned long flags;
1067 int do_wake = in_interrupt();
1072 do_wake |= ucmd->cmd->preprocessing_only;
1074 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
1076 ucmd->this_state_unjammed = 0;
1078 if ((ucmd->state == UCMD_STATE_PARSING) ||
1079 (ucmd->state == UCMD_STATE_BUF_ALLOCING)) {
1081 * If we don't put such commands at the queue head, then under
1082 * high load we might delay threads waiting for memory
1083 * allocations for too long and start losing NOPs, which
1084 * would lead remote initiators to consider us
1085 * unresponsive and stuck => broken connections, etc. If none
1086 * of our commands completes within the NOP timeout to let the head
1087 * commands go, then we are really overloaded and/or stuck.
1089 TRACE_DBG("Adding ucmd %p (state %d) to head of ready "
1090 "cmd list", ucmd, ucmd->state);
1091 list_add(&ucmd->ready_cmd_list_entry,
1092 &dev->ready_cmd_list);
1093 } else if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
1094 unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
1095 unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
1096 TRACE_MGMT_DBG("Adding mgmt ucmd %p (state %d) to head of "
1097 "ready cmd list", ucmd, ucmd->state);
1098 list_add(&ucmd->ready_cmd_list_entry,
1099 &dev->ready_cmd_list);
1102 if ((ucmd->cmd != NULL) &&
1103 unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
1104 TRACE_DBG("Adding HQ ucmd %p to head of ready cmd list",
1106 list_add(&ucmd->ready_cmd_list_entry,
1107 &dev->ready_cmd_list);
1109 TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
1110 list_add_tail(&ucmd->ready_cmd_list_entry,
1111 &dev->ready_cmd_list);
1113 do_wake |= ((ucmd->state == UCMD_STATE_ON_CACHE_FREEING) ||
1114 (ucmd->state == UCMD_STATE_ON_FREEING));
1118 TRACE_DBG("Waking up dev %p", dev);
1119 wake_up(&dev->cmd_lists.cmd_list_waitQ);
1122 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
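/*
 * Summary (added): ucmds in PARSING/BUF_ALLOCING, the management states
 * (TM_EXECING, ATTACH_SESS, DETACH_SESS) and HEAD_OF_QUEUE commands are
 * added to the head of ready_cmd_list, everything else to the tail. The
 * waitqueue is woken immediately when called from interrupt context, for
 * preprocessing_only commands and for the ON_FREEING/ON_CACHE_FREEING
 * notifications.
 */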
1128 static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
1133 struct task_struct *tsk = current;
1137 if (unlikely(ubuff == 0))
1140 sBUG_ON(ucmd->data_pages != NULL);
1142 ucmd->num_data_pages = num_pg;
1145 kmalloc(sizeof(*ucmd->data_pages) * ucmd->num_data_pages,
1147 if (ucmd->data_pages == NULL) {
1148 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
1149 "(num_data_pages=%d)", ucmd->num_data_pages);
1154 TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d,"
1155 " first_page_offset %d, len %d)", ucmd, ubuff,
1156 ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
1157 (ucmd->cmd != NULL) ? ucmd->cmd->bufflen : -1);
1159 down_read(&tsk->mm->mmap_sem);
1160 rc = get_user_pages(tsk, tsk->mm, ubuff, ucmd->num_data_pages,
1161 1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
1162 up_read(&tsk->mm->mmap_sem);
1164 /* get_user_pages() flushes dcache */
1166 if (rc < ucmd->num_data_pages)
1169 ucmd->ubuff = ubuff;
1170 ucmd->first_page_offset = (ubuff & ~PAGE_MASK);
1173 TRACE_EXIT_RES(res);
1177 if (ucmd->cmd != NULL)
1178 scst_set_busy(ucmd->cmd);
1182 if (ucmd->cmd != NULL)
1183 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1187 PRINT_ERROR("Failed to get %d user pages (rc %d)",
1188 ucmd->num_data_pages, rc);
1190 for (i = 0; i < rc; i++)
1191 page_cache_release(ucmd->data_pages[i]);
1193 kfree(ucmd->data_pages);
1194 ucmd->data_pages = NULL;
1196 if (ucmd->cmd != NULL)
1197 scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
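/*
 * Note (added): dev_user_map_buf() pins the handler's buffer with
 * get_user_pages() and remembers the sub-page offset in first_page_offset,
 * so that dev_user_alloc_pages() can shorten the first SG entry
 * accordingly; the pages are released again in dev_user_unmap_buf() via
 * page_cache_release().
 */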
1201 static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
1202 struct scst_user_reply_cmd *reply)
1205 struct scst_cmd *cmd = ucmd->cmd;
1209 TRACE_DBG("ucmd %p, pbuf %llx", ucmd, reply->alloc_reply.pbuf);
1211 if (likely(reply->alloc_reply.pbuf != 0)) {
1213 if (ucmd->buff_cached) {
1214 if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
1215 PRINT_ERROR("Supplied pbuf %llx isn't "
1217 reply->alloc_reply.pbuf);
1220 pages = cmd->sg_cnt;
1222 pages = calc_num_pg(reply->alloc_reply.pbuf,
1224 res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
1226 scst_set_busy(ucmd->cmd);
1227 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1231 scst_post_parse_process_active_cmd(cmd, false);
1233 TRACE_EXIT_RES(res);
1237 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1238 scst_set_cmd_abnormal_done_state(ucmd->cmd);
1243 static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
1244 struct scst_user_reply_cmd *reply)
1247 struct scst_user_scsi_cmd_reply_parse *preply =
1248 &reply->parse_reply;
1249 struct scst_cmd *cmd = ucmd->cmd;
1253 if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
1256 if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
1257 (preply->data_direction != SCST_DATA_READ) &&
1258 (preply->data_direction != SCST_DATA_BIDI) &&
1259 (preply->data_direction != SCST_DATA_NONE)))
1262 if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
1263 (preply->bufflen == 0)))
1266 if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
1269 TRACE_DBG("ucmd %p, queue_type %x, data_direction, %x, bufflen %d, "
1270 "data_len %d, pbuf %llx", ucmd, preply->queue_type,
1271 preply->data_direction, preply->bufflen, preply->data_len,
1272 reply->alloc_reply.pbuf);
1274 cmd->queue_type = preply->queue_type;
1275 cmd->data_direction = preply->data_direction;
1276 cmd->bufflen = preply->bufflen;
1277 cmd->data_len = preply->data_len;
1278 if (preply->write_medium)
1279 cmd->op_flags |= SCST_WRITE_MEDIUM;
1282 scst_post_parse_process_active_cmd(cmd, false);
1284 TRACE_EXIT_RES(res);
1288 PRINT_ERROR("Invalid parse_reply parameters (LUN %lld, op %x, cmd %p)",
1289 (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
1290 PRINT_BUFFER("Invalid parse_reply", reply, sizeof(*reply));
1291 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1292 scst_set_cmd_abnormal_done_state(cmd);
1297 static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
1303 TRACE_DBG("ON FREE ucmd %p", ucmd);
1305 dev_user_free_sgv(ucmd);
1308 TRACE_EXIT_RES(res);
1312 static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
1318 TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);
1322 TRACE_EXIT_RES(res);
1326 static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
1327 struct scst_user_reply_cmd *reply)
1330 struct scst_user_scsi_cmd_reply_exec *ereply =
1332 struct scst_cmd *cmd = ucmd->cmd;
1336 if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
1337 if (ucmd->background_exec) {
1338 TRACE_DBG("Background ucmd %p finished", ucmd);
1342 if (unlikely(ereply->resp_data_len > cmd->bufflen))
1344 if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
1345 (ereply->resp_data_len != 0)))
1347 } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
1348 if (unlikely(ucmd->background_exec))
1350 if (unlikely((cmd->data_direction & SCST_DATA_READ) ||
1351 (cmd->resp_data_len != 0)))
1354 * The background_exec assignment must come after the ucmd get.
1355 * Otherwise, due to reordering, in dev_user_process_reply()
1356 * it is possible that the ucmd is destroyed before it is "got" here.
1358 ucmd_get_ordered(ucmd);
1359 ucmd->background_exec = 1;
1360 TRACE_DBG("Background ucmd %p", ucmd);
1365 TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
1366 ereply->status, ereply->resp_data_len);
1368 if (ereply->resp_data_len != 0) {
1369 if (ucmd->ubuff == 0) {
1371 if (unlikely(ereply->pbuf == 0))
1373 if (ucmd->buff_cached) {
1374 if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
1375 PRINT_ERROR("Supplied pbuf %llx isn't "
1376 "page aligned", ereply->pbuf);
1379 pages = cmd->sg_cnt;
1381 pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
1382 rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
1383 if ((rc != 0) || (ucmd->ubuff == 0))
1386 rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
1387 if (unlikely(rc != 0))
1390 dev_user_flush_dcache(ucmd);
1391 cmd->may_need_dma_sync = 1;
1392 scst_set_resp_data_len(cmd, ereply->resp_data_len);
1393 } else if (cmd->resp_data_len != ereply->resp_data_len) {
1394 if (ucmd->ubuff == 0)
1395 cmd->resp_data_len = ereply->resp_data_len;
1397 scst_set_resp_data_len(cmd, ereply->resp_data_len);
1400 cmd->status = ereply->status;
1401 if (ereply->sense_len != 0) {
1404 res = scst_alloc_sense(cmd, 0);
1408 sense_len = min((int)cmd->sense_buflen, (int)ereply->sense_len);
1410 res = copy_from_user(cmd->sense,
1411 (void __user *)(unsigned long)ereply->psense_buffer,
1414 PRINT_ERROR("%s", "Unable to get sense data");
1415 goto out_hwerr_res_set;
1417 cmd->sense_valid_len = sense_len;
1422 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_DIRECT);
1423 /* !! At this point cmd can be already freed !! */
1426 TRACE_EXIT_RES(res);
1430 PRINT_ERROR("Invalid exec_reply parameters (LUN %lld, op %x, cmd %p)",
1431 (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
1432 PRINT_BUFFER("Invalid exec_reply", reply, sizeof(*reply));
1438 if (ucmd->background_exec) {
1442 scst_set_cmd_error(cmd,
1443 SCST_LOAD_SENSE(scst_sense_hardw_error));
1452 static int dev_user_process_reply(struct scst_user_dev *dev,
1453 struct scst_user_reply_cmd *reply)
1456 struct scst_user_cmd *ucmd;
1461 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1463 ucmd = __ucmd_find_hash(dev, reply->cmd_h);
1464 if (unlikely(ucmd == NULL)) {
1465 TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
1470 if (unlikely(ucmd_get_check(ucmd))) {
1471 TRACE_MGMT_DBG("Found being destroyed cmd_h %d", reply->cmd_h);
1476 /* To sync. with dev_user_process_reply_exec(). See comment there. */
1478 if (ucmd->background_exec) {
1479 state = UCMD_STATE_EXECING;
1480 goto unlock_process;
1483 if (unlikely(ucmd->this_state_unjammed)) {
1484 TRACE_MGMT_DBG("Reply on unjammed ucmd %p, ignoring",
1486 goto out_unlock_put;
1489 if (unlikely(!ucmd->sent_to_user)) {
1490 TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
1491 "state %x", ucmd, ucmd->state);
1493 goto out_unlock_put;
1496 if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
1497 goto out_wrong_state;
1499 if (unlikely(_IOC_NR(reply->subcode) != ucmd->state))
1500 goto out_wrong_state;
1502 state = ucmd->state;
1503 ucmd->sent_to_user = 0;
1506 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1509 case UCMD_STATE_PARSING:
1510 res = dev_user_process_reply_parse(ucmd, reply);
1513 case UCMD_STATE_BUF_ALLOCING:
1514 res = dev_user_process_reply_alloc(ucmd, reply);
1517 case UCMD_STATE_EXECING:
1518 res = dev_user_process_reply_exec(ucmd, reply);
1521 case UCMD_STATE_ON_FREEING:
1522 res = dev_user_process_reply_on_free(ucmd);
1525 case UCMD_STATE_ON_CACHE_FREEING:
1526 res = dev_user_process_reply_on_cache_free(ucmd);
1529 case UCMD_STATE_TM_EXECING:
1530 res = dev_user_process_reply_tm_exec(ucmd, reply->result);
1533 case UCMD_STATE_ATTACH_SESS:
1534 case UCMD_STATE_DETACH_SESS:
1535 res = dev_user_process_reply_sess(ucmd, reply->result);
1547 TRACE_EXIT_RES(res);
1551 PRINT_ERROR("Command's %p subcode %x doesn't match internal "
1552 "command's state %x or reply->subcode (%x) != ucmd->subcode "
1553 "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
1554 reply->subcode, ucmd->user_cmd.subcode);
1556 dev_user_unjam_cmd(ucmd, 0, NULL);
1559 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1563 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1567 static int dev_user_reply_cmd(struct file *file, void __user *arg)
1570 struct scst_user_dev *dev;
1571 struct scst_user_reply_cmd reply;
1575 mutex_lock(&dev_priv_mutex);
1576 dev = (struct scst_user_dev *)file->private_data;
1577 res = dev_user_check_reg(dev);
1578 if (unlikely(res != 0)) {
1579 mutex_unlock(&dev_priv_mutex);
1582 down_read(&dev->dev_rwsem);
1583 mutex_unlock(&dev_priv_mutex);
1585 res = copy_from_user(&reply, arg, sizeof(reply));
1586 if (unlikely(res < 0))
1589 TRACE_DBG("Reply for dev %s", dev->name);
1591 TRACE_BUFFER("Reply", &reply, sizeof(reply));
1593 res = dev_user_process_reply(dev, &reply);
1594 if (unlikely(res < 0))
1598 up_read(&dev->dev_rwsem);
1601 TRACE_EXIT_RES(res);
1605 static int dev_user_get_ext_cdb(struct file *file, void __user *arg)
1608 struct scst_user_dev *dev;
1609 struct scst_user_cmd *ucmd;
1610 struct scst_cmd *cmd = NULL;
1611 struct scst_user_get_ext_cdb get;
1615 mutex_lock(&dev_priv_mutex);
1616 dev = (struct scst_user_dev *)file->private_data;
1617 res = dev_user_check_reg(dev);
1618 if (unlikely(res != 0)) {
1619 mutex_unlock(&dev_priv_mutex);
1622 down_read(&dev->dev_rwsem);
1623 mutex_unlock(&dev_priv_mutex);
1625 res = copy_from_user(&get, arg, sizeof(get));
1626 if (unlikely(res < 0))
1629 TRACE_MGMT_DBG("Get ext cdb for dev %s", dev->name);
1631 TRACE_BUFFER("Get ext cdb", &get, sizeof(get));
1633 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1635 ucmd = __ucmd_find_hash(dev, get.cmd_h);
1636 if (unlikely(ucmd == NULL)) {
1637 TRACE_MGMT_DBG("cmd_h %d not found", get.cmd_h);
1642 if (unlikely(ucmd_get_check(ucmd))) {
1643 TRACE_MGMT_DBG("Found being destroyed cmd_h %d", get.cmd_h);
1648 if ((ucmd->cmd != NULL) && (ucmd->state <= UCMD_STATE_EXECING) &&
1649 (ucmd->sent_to_user || ucmd->background_exec)) {
1653 TRACE_MGMT_DBG("Invalid ucmd state %d for cmd_h %d",
1654 ucmd->state, get.cmd_h);
1659 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1664 if (cmd->ext_cdb == NULL)
1667 TRACE_BUFFER("EXT CDB", cmd->ext_cdb, cmd->ext_cdb_len);
1668 res = copy_to_user((void __user *)(unsigned long)get.ext_cdb_buffer,
1669 cmd->ext_cdb, cmd->ext_cdb_len);
1678 up_read(&dev->dev_rwsem);
1681 TRACE_EXIT_RES(res);
1685 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1689 static int dev_user_process_scst_commands(struct scst_user_dev *dev)
1690 __releases(&dev->cmd_lists.cmd_list_lock)
1691 __acquires(&dev->cmd_lists.cmd_list_lock)
1697 while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
1698 struct scst_cmd *cmd = list_entry(
1699 dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
1701 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
1702 list_del(&cmd->cmd_list_entry);
1703 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1704 scst_process_active_cmd(cmd, false);
1705 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1709 TRACE_EXIT_RES(res);
1713 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1714 static struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
1715 __releases(&dev->cmd_lists.cmd_list_lock)
1716 __acquires(&dev->cmd_lists.cmd_list_lock)
1718 struct scst_user_cmd *u;
1722 if (!list_empty(cmd_list)) {
1723 u = list_entry(cmd_list->next, typeof(*u),
1724 ready_cmd_list_entry);
1726 TRACE_DBG("Found ready ucmd %p", u);
1727 list_del(&u->ready_cmd_list_entry);
1729 EXTRACHECKS_BUG_ON(u->this_state_unjammed);
1731 if (u->cmd != NULL) {
1732 if (u->state == UCMD_STATE_EXECING) {
1733 struct scst_user_dev *dev = u->dev;
1736 EXTRACHECKS_BUG_ON(u->jammed);
1738 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1740 rc = scst_check_local_events(u->cmd);
1741 if (unlikely(rc != 0)) {
1742 u->cmd->scst_cmd_done(u->cmd,
1743 SCST_CMD_STATE_DEFAULT,
1744 SCST_CONTEXT_DIRECT);
1746 * !! At this point cmd & u can be !!
1747 * !! already freed !!
1750 &dev->cmd_lists.cmd_list_lock);
1754 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1755 } else if (unlikely(test_bit(SCST_CMD_ABORTED,
1756 &u->cmd->cmd_flags))) {
1758 case UCMD_STATE_PARSING:
1759 case UCMD_STATE_BUF_ALLOCING:
1760 TRACE_MGMT_DBG("Aborting ucmd %p", u);
1761 dev_user_unjam_cmd(u, 0, NULL);
1763 case UCMD_STATE_EXECING:
1764 EXTRACHECKS_BUG_ON(1);
1768 u->sent_to_user = 1;
1769 u->seen_by_user = 1;
1774 static inline int test_cmd_lists(struct scst_user_dev *dev)
1776 int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
1777 !list_empty(&dev->ready_cmd_list) ||
1778 !dev->blocking || dev->cleanup_done ||
1779 signal_pending(current);
1783 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1784 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
1785 struct scst_user_cmd **ucmd)
1792 init_waitqueue_entry(&wait, current);
1795 if (!test_cmd_lists(dev)) {
1796 add_wait_queue_exclusive_head(
1797 &dev->cmd_lists.cmd_list_waitQ,
1800 set_current_state(TASK_INTERRUPTIBLE);
1801 if (test_cmd_lists(dev))
1803 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1805 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1807 set_current_state(TASK_RUNNING);
1808 remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
1812 dev_user_process_scst_commands(dev);
1814 *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
1818 if (!dev->blocking || dev->cleanup_done) {
1820 TRACE_DBG("No ready commands, returning %d", res);
1824 if (signal_pending(current)) {
1826 TRACE_DBG("Signal pending, returning %d", res);
1831 TRACE_EXIT_RES(res);
1835 static int dev_user_reply_get_cmd(struct file *file, void __user *arg)
1838 struct scst_user_dev *dev;
1839 struct scst_user_get_cmd *cmd;
1840 struct scst_user_reply_cmd *reply;
1841 struct scst_user_cmd *ucmd;
1846 mutex_lock(&dev_priv_mutex);
1847 dev = (struct scst_user_dev *)file->private_data;
1848 res = dev_user_check_reg(dev);
1849 if (unlikely(res != 0)) {
1850 mutex_unlock(&dev_priv_mutex);
1853 down_read(&dev->dev_rwsem);
1854 mutex_unlock(&dev_priv_mutex);
1856 /* get_user() can't be used with 64-bit values on x86_32 */
1857 res = copy_from_user(&ureply, (uint64_t __user *)
1858 &((struct scst_user_get_cmd __user *)arg)->preply,
1860 if (unlikely(res < 0))
1863 TRACE_DBG("ureply %lld (dev %s)", (long long unsigned int)ureply,
1866 cmd = kmem_cache_alloc(user_get_cmd_cachep, GFP_KERNEL);
1867 if (unlikely(cmd == NULL)) {
1873 unsigned long u = (unsigned long)ureply;
1874 reply = (struct scst_user_reply_cmd *)cmd;
1875 res = copy_from_user(reply, (void __user *)u, sizeof(*reply));
1876 if (unlikely(res < 0))
1879 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1881 res = dev_user_process_reply(dev, reply);
1882 if (unlikely(res < 0))
1886 kmem_cache_free(user_get_cmd_cachep, cmd);
1888 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1890 res = dev_user_get_next_cmd(dev, &ucmd);
1894 * A misbehaving user space handler can cause the ucmd to die
1895 * immediately after we release the lock, which can lead to
1896 * copying stale data to user space and, in turn, to a
1897 * leak of sensitive information.
1899 if (unlikely(ucmd_get_check(ucmd))) {
1900 /* Oops, this ucmd is already being destroyed. Retry. */
1903 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1905 EXTRACHECKS_BUG_ON(ucmd->user_cmd_payload_len == 0);
1907 len = ucmd->user_cmd_payload_len;
1908 TRACE_DBG("ucmd %p (user_cmd %p), payload_len %d (len %d)",
1909 ucmd, &ucmd->user_cmd, ucmd->user_cmd_payload_len, len);
1910 TRACE_BUFFER("UCMD", &ucmd->user_cmd, len);
1911 res = copy_to_user(arg, &ucmd->user_cmd, len);
1912 if (unlikely(res != 0)) {
1913 /* Requeue ucmd back */
1914 TRACE_DBG("Copy to user failed (%d), requeuing ucmd %p "
1915 "back to head of ready cmd list", res, ucmd);
1916 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1917 list_add(&ucmd->ready_cmd_list_entry,
1918 &dev->ready_cmd_list);
1919 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1921 #ifdef CONFIG_SCST_EXTRACHECKS
1923 ucmd->user_cmd_payload_len = 0;
1927 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1930 up_read(&dev->dev_rwsem);
1933 TRACE_EXIT_RES(res);
1937 kmem_cache_free(user_get_cmd_cachep, cmd);
1941 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1949 case SCST_USER_REPLY_AND_GET_CMD:
1950 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1951 res = dev_user_reply_get_cmd(file, (void __user *)arg);
1954 case SCST_USER_REPLY_CMD:
1955 TRACE_DBG("%s", "REPLY_CMD");
1956 res = dev_user_reply_cmd(file, (void __user *)arg);
1959 case SCST_USER_GET_EXTENDED_CDB:
1960 TRACE_DBG("%s", "GET_EXTENDED_CDB");
1961 res = dev_user_get_ext_cdb(file, (void __user *)arg);
1964 case SCST_USER_REGISTER_DEVICE:
1966 struct scst_user_dev_desc *dev_desc;
1967 TRACE_DBG("%s", "REGISTER_DEVICE");
1968 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1969 if (dev_desc == NULL) {
1973 res = copy_from_user(dev_desc, (void __user *)arg,
1979 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1980 dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
1981 dev_desc->sgv_name[sizeof(dev_desc->sgv_name)-1] = '\0';
1982 res = dev_user_register_dev(file, dev_desc);
1987 case SCST_USER_UNREGISTER_DEVICE:
1988 TRACE_DBG("%s", "UNREGISTER_DEVICE");
1989 res = dev_user_unregister_dev(file);
1992 case SCST_USER_FLUSH_CACHE:
1993 TRACE_DBG("%s", "FLUSH_CACHE");
1994 res = dev_user_flush_cache(file);
1997 case SCST_USER_SET_OPTIONS:
1999 struct scst_user_opt opt;
2000 TRACE_DBG("%s", "SET_OPTIONS");
2001 res = copy_from_user(&opt, (void __user *)arg, sizeof(opt));
2004 TRACE_BUFFER("opt", &opt, sizeof(opt));
2005 res = dev_user_set_opt(file, &opt);
2009 case SCST_USER_GET_OPTIONS:
2010 TRACE_DBG("%s", "GET_OPTIONS");
2011 res = dev_user_get_opt(file, (void __user *)arg);
2014 case SCST_USER_DEVICE_CAPACITY_CHANGED:
2015 TRACE_DBG("%s", "CAPACITY_CHANGED");
2016 res = dev_user_capacity_changed(file);
2019 case SCST_USER_PREALLOC_BUFFER:
2020 TRACE_DBG("%s", "PREALLOC_BUFFER");
2021 res = dev_user_prealloc_buffer(file, (void __user *)arg);
2025 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
2031 TRACE_EXIT_RES(res);
2035 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
2038 struct scst_user_dev *dev;
2042 mutex_lock(&dev_priv_mutex);
2043 dev = (struct scst_user_dev *)file->private_data;
2044 res = dev_user_check_reg(dev);
2045 if (unlikely(res != 0)) {
2046 mutex_unlock(&dev_priv_mutex);
2049 down_read(&dev->dev_rwsem);
2050 mutex_unlock(&dev_priv_mutex);
2052 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2054 if (!list_empty(&dev->ready_cmd_list) ||
2055 !list_empty(&dev->cmd_lists.active_cmd_list)) {
2056 res |= POLLIN | POLLRDNORM;
2060 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2062 TRACE_DBG("Before poll_wait() (dev %s)", dev->name);
2063 poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
2064 TRACE_DBG("After poll_wait() (dev %s)", dev->name);
2066 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2068 if (!list_empty(&dev->ready_cmd_list) ||
2069 !list_empty(&dev->cmd_lists.active_cmd_list)) {
2070 res |= POLLIN | POLLRDNORM;
2075 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2077 up_read(&dev->dev_rwsem);
2080 TRACE_EXIT_HRES(res);
2085 * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reacquire.
2087 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
2088 unsigned long *flags)
2089 __releases(&dev->cmd_lists.cmd_list_lock)
2090 __acquires(&dev->cmd_lists.cmd_list_lock)
2092 int state = ucmd->state;
2093 struct scst_user_dev *dev = ucmd->dev;
2097 if (ucmd->this_state_unjammed)
2100 TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
2104 ucmd->this_state_unjammed = 1;
2105 ucmd->sent_to_user = 0;
2108 case UCMD_STATE_PARSING:
2109 case UCMD_STATE_BUF_ALLOCING:
2110 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
2114 scst_set_busy(ucmd->cmd);
2116 scst_set_cmd_error(ucmd->cmd,
2117 SCST_LOAD_SENSE(scst_sense_hardw_error));
2119 scst_set_cmd_abnormal_done_state(ucmd->cmd);
2121 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
2122 list_add(&ucmd->cmd->cmd_list_entry,
2123 &ucmd->cmd->cmd_lists->active_cmd_list);
2124 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
2127 case UCMD_STATE_EXECING:
2129 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock,
2132 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2134 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
2136 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
2140 scst_set_busy(ucmd->cmd);
2142 scst_set_cmd_error(ucmd->cmd,
2143 SCST_LOAD_SENSE(scst_sense_hardw_error));
2146 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT,
2147 SCST_CONTEXT_DIRECT);
2148 /* !! At this point cmd and ucmd can be already freed !! */
2151 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock,
2154 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2157 case UCMD_STATE_ON_FREEING:
2158 case UCMD_STATE_ON_CACHE_FREEING:
2159 case UCMD_STATE_TM_EXECING:
2160 case UCMD_STATE_ATTACH_SESS:
2161 case UCMD_STATE_DETACH_SESS:
2163 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock,
2166 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2169 case UCMD_STATE_ON_FREEING:
2170 dev_user_process_reply_on_free(ucmd);
2173 case UCMD_STATE_ON_CACHE_FREEING:
2174 dev_user_process_reply_on_cache_free(ucmd);
2177 case UCMD_STATE_TM_EXECING:
2178 dev_user_process_reply_tm_exec(ucmd,
2179 SCST_MGMT_STATUS_FAILED);
2182 case UCMD_STATE_ATTACH_SESS:
2183 case UCMD_STATE_DETACH_SESS:
2184 dev_user_process_reply_sess(ucmd, -EFAULT);
2189 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock,
2192 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2196 PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
2206 static int dev_user_unjam_dev(struct scst_user_dev *dev)
2207 __releases(&dev->cmd_lists.cmd_list_lock)
2208 __acquires(&dev->cmd_lists.cmd_list_lock)
2211 struct scst_user_cmd *ucmd;
2215 TRACE_MGMT_DBG("Unjamming dev %p", dev);
2217 sgv_pool_flush(dev->pool);
2218 sgv_pool_flush(dev->pool_clust);
2220 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2223 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2224 struct list_head *head = &dev->ucmd_hash[i];
2226 list_for_each_entry(ucmd, head, hash_list_entry) {
2229 if (!ucmd->sent_to_user)
2232 if (ucmd_get_check(ucmd))
2235 TRACE_MGMT_DBG("ucmd %p, state %x, scst_cmd %p", ucmd,
2236 ucmd->state, ucmd->cmd);
2238 dev_user_unjam_cmd(ucmd, 0, NULL);
2240 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2242 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2248 if (dev_user_process_scst_commands(dev) != 0)
2251 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2253 TRACE_EXIT_RES(res);
2257 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
2264 TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
2265 ucmd->user_cmd.tm_cmd.fn, status);
2267 if (status == SCST_MGMT_STATUS_TASK_NOT_EXIST) {
2269 * It is possible that user space saw the TM cmd before the cmd
2270 * to abort, or will never see it at all, because it was
2271 * aborted on the way there. So, it is safe to return
2272 * success instead, because, if the TM cmd got to this
2273 * point, then the cmd to abort apparently did exist.
2275 status = SCST_MGMT_STATUS_SUCCESS;
2278 scst_async_mcmd_completed(ucmd->mcmd, status);
2282 TRACE_EXIT_RES(res);
2286 static void dev_user_abort_ready_commands(struct scst_user_dev *dev)
2288 struct scst_user_cmd *ucmd;
2289 unsigned long flags;
2293 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2295 list_for_each_entry(ucmd, &dev->ready_cmd_list, ready_cmd_list_entry) {
2296 if ((ucmd->cmd != NULL) && !ucmd->seen_by_user &&
2297 test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags)) {
2298 switch (ucmd->state) {
2299 case UCMD_STATE_PARSING:
2300 case UCMD_STATE_BUF_ALLOCING:
2301 case UCMD_STATE_EXECING:
2302 TRACE_MGMT_DBG("Aborting ready ucmd %p", ucmd);
2303 list_del(&ucmd->ready_cmd_list_entry);
2304 dev_user_unjam_cmd(ucmd, 0, &flags);
2310 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2316 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2317 struct scst_tgt_dev *tgt_dev)
2319 struct scst_user_cmd *ucmd;
2320 struct scst_user_dev *dev =
2321 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2322 struct scst_user_cmd *ucmd_to_abort = NULL;
2327 * In the approach used here we don't do anything with hung devices that
2328 * have stopped responding and/or have stuck commands. We forcibly abort such
2329 * commands only if they have not yet been sent to user space or if the device
2330 * is getting unloaded, e.g. if its handler program gets killed. This is
2331 * because it's pretty hard to distinguish between stuck and temporarily
2332 * overloaded states of the device. There are several reasons for that:
2334 * 1. Some commands need a lot of time to complete (several hours),
2335 * so to an impatient user such command(s) will always look as
2338 * 2. If we forcibly abort, i.e. abort before it has actually completed
2339 * in user space, just one command, we will have to put the whole
2340 * device offline until we are sure that no previously aborted
2341 * commands will get executed. Otherwise, we would open a possibility
2342 * of data corruption, where a command aborted and reported as completed
2343 * actually gets executed *after* new commands sent
2344 * once the forced abort was done. Many journaling file systems and
2345 * databases use the "provide the required command order via queue draining"
2346 * approach, and not putting the whole device offline after the forced
2347 * abort would break it. This makes deciding whether a command is stuck
2348 * or not cost a lot.
2350 * So, we leave the policy of deciding whether a device is stuck to
2351 * user space and simply let all commands live until they are
2352 * completed or their devices get closed/killed. This approach is very
2353 * much OK, but can affect management commands, which need activity
2354 * suspending via the scst_suspend_activity() function, such as device or
2355 * target registration/removal. But during normal operation such commands
2356 * should be rare. Plus, when possible, scst_suspend_activity() will
2357 * return an EBUSY status after a timeout to allow the caller not to get stuck
2360 * But, anyway, ToDo: we should reimplement that in the SCST core, so
2361 * stuck commands would affect only the related devices.
2364 dev_user_abort_ready_commands(dev);
2366 /* We can't afford to miss a TM command due to memory shortage */
2367 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2369 ucmd->user_cmd_payload_len =
2370 offsetof(struct scst_user_get_cmd, tm_cmd) +
2371 sizeof(ucmd->user_cmd.tm_cmd);
2372 ucmd->user_cmd.cmd_h = ucmd->h;
2373 ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2374 ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2375 ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2376 ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
2377 ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
2379 if (mcmd->cmd_to_abort != NULL) {
2381 (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
2382 if (ucmd_to_abort != NULL)
2383 ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2386 TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2387 "ucmd_to_abort %p, cmd_h_to_abort %d, mcmd %p)", ucmd, ucmd->h,
2388 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2389 ucmd->user_cmd.tm_cmd.cmd_h_to_abort, mcmd);
2392 ucmd->state = UCMD_STATE_TM_EXECING;
2394 scst_prepare_async_mcmd(mcmd);
2396 dev_user_add_to_ready(ucmd);
2399 return SCST_DEV_TM_NOT_COMPLETED;
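/*
 * Note (added): SCST_DEV_TM_NOT_COMPLETED tells the core that the TM
 * function will finish asynchronously: the SCST_USER_TASK_MGMT request
 * queued above is answered by the handler, and
 * dev_user_process_reply_tm_exec() then calls scst_async_mcmd_completed()
 * with the reported status.
 */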
2402 static int dev_user_attach(struct scst_device *sdev)
2405 struct scst_user_dev *dev = NULL, *d;
2409 spin_lock(&dev_list_lock);
2410 list_for_each_entry(d, &dev_list, dev_list_entry) {
2411 if (strcmp(d->name, sdev->virt_name) == 0) {
2416 spin_unlock(&dev_list_lock);
2418 PRINT_ERROR("Device %s not found", sdev->virt_name);
2423 sdev->p_cmd_lists = &dev->cmd_lists;
2424 sdev->dh_priv = dev;
2425 sdev->tst = dev->tst;
2426 sdev->queue_alg = dev->queue_alg;
2427 sdev->swp = dev->swp;
2428 sdev->tas = dev->tas;
2429 sdev->d_sense = dev->d_sense;
2430 sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2434 PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2442 static void dev_user_detach(struct scst_device *sdev)
2444 struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
2448 TRACE_DBG("virt_id %d", sdev->virt_id);
2450 PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2453 /* dev will be freed by the caller */
2454 sdev->dh_priv = NULL;
2461 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2464 unsigned long flags;
2468 TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2470 spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2472 if (ucmd->state == UCMD_STATE_ATTACH_SESS) {
2473 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2474 ucmd->result = status;
2475 } else if (ucmd->state == UCMD_STATE_DETACH_SESS) {
2476 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2480 if (ucmd->cmpl != NULL)
2481 complete_all(ucmd->cmpl);
2483 spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2487 TRACE_EXIT_RES(res);
2491 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2493 struct scst_user_dev *dev =
2494 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2496 struct scst_user_cmd *ucmd;
2497 DECLARE_COMPLETION_ONSTACK(cmpl);
2502 * We can't replace tgt_dev->pool, because it can be used to allocate
2503 * memory for SCST local commands, like REPORT LUNS, where there is no
2504 * corresponding ucmd. Otherwise we will crash in dev_user_alloc_sg().
2506 if (test_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags))
2507 tgt_dev->dh_priv = dev->pool_clust;
2509 tgt_dev->dh_priv = dev->pool;
2511 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2517 ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
2518 sizeof(ucmd->user_cmd.sess);
2519 ucmd->user_cmd.cmd_h = ucmd->h;
2520 ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2521 ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2522 ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2523 ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2524 ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only;
2525 strncpy(ucmd->user_cmd.sess.initiator_name,
2526 tgt_dev->sess->initiator_name,
2527 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2528 ucmd->user_cmd.sess.initiator_name[
2529 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2530 strncpy(ucmd->user_cmd.sess.target_name,
2531 tgt_dev->sess->tgt->tgt_name,
2532 sizeof(ucmd->user_cmd.sess.target_name)-1);
2533 ucmd->user_cmd.sess.target_name[
2534 sizeof(ucmd->user_cmd.sess.target_name)-1] = '\0';
2536 TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %llx, LUN %llx, "
2537 "threads_num %d, rd_only %d, initiator %s, target %s)",
2538 ucmd, ucmd->h, ucmd->user_cmd.sess.sess_h,
2539 ucmd->user_cmd.sess.lun, ucmd->user_cmd.sess.threads_num,
2540 ucmd->user_cmd.sess.rd_only, ucmd->user_cmd.sess.initiator_name,
2541 ucmd->user_cmd.sess.target_name);
2543 ucmd->state = UCMD_STATE_ATTACH_SESS;
2547 dev_user_add_to_ready(ucmd);
2549 rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2553 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2557 sBUG_ON(irqs_disabled());
2559 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2561 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2566 TRACE_EXIT_RES(res);
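/*
 * Illustrative user space sketch (not part of the original driver):
 * dev_user_attach_tgt() above waits at most DEV_USER_ATTACH_TIMEOUT (5
 * seconds) for the handler's answer, so ATTACH_SESS must be served promptly.
 * The SCST_USER_REPLY_CMD ioctl and the reply layout are assumptions taken
 * from scst_user.h.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "scst_user.h"

static int handle_attach_sess(int fd, const struct scst_user_get_cmd *cmd)
{
	struct scst_user_reply_cmd reply;

	/* the payload mirrors what the kernel filled in above */
	printf("ATTACH_SESS sess_h 0x%llx LUN 0x%llx threads %d ro %d from %s\n",
	       (unsigned long long)cmd->sess.sess_h,
	       (unsigned long long)cmd->sess.lun,
	       (int)cmd->sess.threads_num, (int)cmd->sess.rd_only,
	       cmd->sess.initiator_name);

	memset(&reply, 0, sizeof(reply));
	reply.cmd_h = cmd->cmd_h;
	reply.subcode = SCST_USER_ATTACH_SESS;
	reply.result = 0;		/* 0 => session accepted */

	return ioctl(fd, SCST_USER_REPLY_CMD, &reply);
}
#endif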
2574 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2576 struct scst_user_dev *dev =
2577 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2578 struct scst_user_cmd *ucmd;
2583 * We can't afford to lose the DETACH_SESS command due to memory
2584 * shortage, because that might lead to a memory leak in the user space handler.
2586 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2590 ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
2591 sizeof(ucmd->user_cmd.sess);
2593 ucmd->user_cmd.cmd_h = ucmd->h;
2594 ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2595 ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2596 TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %llx)", ucmd,
2597 ucmd->h, ucmd->user_cmd.sess.sess_h);
2599 ucmd->state = UCMD_STATE_DETACH_SESS;
2601 dev_user_add_to_ready(ucmd);
2608 /* No locks are needed, but the activity must be suspended */
2609 static void dev_user_setup_functions(struct scst_user_dev *dev)
2613 dev->devtype.parse = dev_user_parse;
2614 dev->devtype.dev_done = NULL;
2616 if (dev->parse_type != SCST_USER_PARSE_CALL) {
2617 switch (dev->devtype.type) {
2619 dev->generic_parse = scst_sbc_generic_parse;
2620 dev->devtype.dev_done = dev_user_disk_done;
2624 dev->generic_parse = scst_tape_generic_parse;
2625 dev->devtype.dev_done = dev_user_tape_done;
2629 dev->generic_parse = scst_modisk_generic_parse;
2630 dev->devtype.dev_done = dev_user_disk_done;
2634 dev->generic_parse = scst_cdrom_generic_parse;
2635 dev->devtype.dev_done = dev_user_disk_done;
2638 case TYPE_MEDIUM_CHANGER:
2639 dev->generic_parse = scst_changer_generic_parse;
2642 case TYPE_PROCESSOR:
2643 dev->generic_parse = scst_processor_generic_parse;
2647 dev->generic_parse = scst_raid_generic_parse;
2651 PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2652 "for it", dev->devtype.type);
2653 dev->parse_type = SCST_USER_PARSE_CALL;
2657 dev->generic_parse = NULL;
2658 dev->devtype.dev_done = NULL;
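/*
 * Illustrative user space sketch (not part of the original driver): the
 * switch above wires the in-kernel generic CDB parsers per SCSI type whenever
 * parse_type != SCST_USER_PARSE_CALL. A handler selects the mode through
 * scst_user_opt.parse_type; SCST_USER_PARSE_STANDARD is an assumed constant
 * from scst_user.h and the TYPE_* values come from <scsi/scsi.h> in user space.
 */
#if 0
#include <scsi/scsi.h>
#include "scst_user.h"

static void choose_parse_type(struct scst_user_opt *opt, int scsi_type)
{
	switch (scsi_type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	case TYPE_ROM:
		/* let the kernel-side generic parser handle the CDBs */
		opt->parse_type = SCST_USER_PARSE_STANDARD;
		break;
	default:
		/* everything else: forward every CDB to user space */
		opt->parse_type = SCST_USER_PARSE_CALL;
		break;
	}
}
#endif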
2665 static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
2667 char ver[sizeof(DEV_USER_VERSION)+1];
2670 res = copy_from_user(ver,
2671 (void __user *)(unsigned long)dev_desc->version_str,
2674 PRINT_ERROR("%s", "Unable to get version string");
2677 ver[sizeof(ver)-1] = '\0';
2679 if (strcmp(ver, DEV_USER_VERSION) != 0) {
2680 /* ->name already 0-terminated in dev_user_ioctl() */
2681 PRINT_ERROR("Incorrect version of user device %s (%s)",
2682 dev_desc->name, ver);
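/*
 * Illustrative user space sketch (not part of the original driver): the check
 * above compares the string pointed to by dev_desc->version_str with the
 * module's DEV_USER_VERSION, so a handler simply points that field at its own
 * copy of the macro from scst_user.h. The descriptor layout is assumed.
 */
#if 0
#include <string.h>
#include "scst_user.h"

static void fill_desc_version_and_name(struct scst_user_dev_desc *desc,
	const char *dev_name)
{
	memset(desc, 0, sizeof(*desc));
	/* must match the module's DEV_USER_VERSION byte for byte */
	desc->version_str = (unsigned long)DEV_USER_VERSION;
	strncpy(desc->name, dev_name, sizeof(desc->name) - 1);
}
#endif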
2691 static int dev_user_register_dev(struct file *file,
2692 const struct scst_user_dev_desc *dev_desc)
2695 struct scst_user_dev *dev, *d;
2700 res = dev_user_check_version(dev_desc);
2704 switch (dev_desc->type) {
2708 if (dev_desc->block_size == 0) {
2709 PRINT_ERROR("Wrong block size %d",
2710 dev_desc->block_size);
2714 block = scst_calc_block_shift(dev_desc->block_size);
2721 block = dev_desc->block_size;
2725 if (!try_module_get(THIS_MODULE)) {
2726 PRINT_ERROR("%s", "Failed to get module");
2731 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2737 init_rwsem(&dev->dev_rwsem);
2738 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2739 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2740 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2741 INIT_LIST_HEAD(&dev->ready_cmd_list);
2742 if (file->f_flags & O_NONBLOCK) {
2743 TRACE_DBG("%s", "Non-blocking operations");
2747 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2748 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2750 strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2751 dev->name[sizeof(dev->name)-1] = '\0';
2753 scst_init_mem_lim(&dev->udev_mem_lim);
2755 scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s",
2756 (dev_desc->sgv_name[0] == '\0') ? dev->name :
2757 dev_desc->sgv_name);
2758 dev->pool = sgv_pool_create(dev->devtype.name, sgv_no_clustering,
2759 dev_desc->sgv_single_alloc_pages,
2760 dev_desc->sgv_shared,
2761 dev_desc->sgv_purge_interval);
2762 if (dev->pool == NULL) {
2766 sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2767 dev_user_free_sg_entries);
2769 if (!dev_desc->sgv_disable_clustered_pool) {
2770 scnprintf(dev->devtype.name, sizeof(dev->devtype.name),
2772 (dev_desc->sgv_name[0] == '\0') ? dev->name :
2773 dev_desc->sgv_name);
2774 dev->pool_clust = sgv_pool_create(dev->devtype.name,
2775 sgv_tail_clustering,
2776 dev_desc->sgv_single_alloc_pages,
2777 dev_desc->sgv_shared,
2778 dev_desc->sgv_purge_interval);
2779 if (dev->pool_clust == NULL) {
2783 sgv_pool_set_allocator(dev->pool_clust, dev_user_alloc_pages,
2784 dev_user_free_sg_entries);
2786 dev->pool_clust = dev->pool;
2787 sgv_pool_get(dev->pool_clust);
2790 scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s",
2792 dev->devtype.type = dev_desc->type;
2793 dev->devtype.threads_num = -1;
2794 dev->devtype.parse_atomic = 1;
2795 dev->devtype.exec_atomic = 0; /* no point to make it 1 */
2796 dev->devtype.dev_done_atomic = 1;
2797 #ifdef CONFIG_SCST_PROC
2798 dev->devtype.no_proc = 1;
2800 dev->devtype.dev_attrs = dev_user_dev_attrs;
2802 dev->devtype.attach = dev_user_attach;
2803 dev->devtype.detach = dev_user_detach;
2804 dev->devtype.attach_tgt = dev_user_attach_tgt;
2805 dev->devtype.detach_tgt = dev_user_detach_tgt;
2806 dev->devtype.exec = dev_user_exec;
2807 dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2808 dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2810 dev->devtype.parent = &dev_user_devtype;
2812 init_completion(&dev->cleanup_cmpl);
2814 dev->def_block = block;
2816 res = __dev_user_set_opt(dev, &dev_desc->opt);
2820 TRACE_MEM("dev %p, name %s", dev, dev->name);
2822 spin_lock(&dev_list_lock);
2824 list_for_each_entry(d, &dev_list, dev_list_entry) {
2825 if (strcmp(d->name, dev->name) == 0) {
2826 PRINT_ERROR("Device %s already exists",
2829 spin_unlock(&dev_list_lock);
2834 list_add_tail(&dev->dev_list_entry, &dev_list);
2836 spin_unlock(&dev_list_lock);
2838 res = scst_register_virtual_dev_driver(&dev->devtype);
2842 dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2843 if (dev->virt_id < 0) {
2845 goto out_unreg_handler;
2848 mutex_lock(&dev_priv_mutex);
2849 if (file->private_data != NULL) {
2850 mutex_unlock(&dev_priv_mutex);
2851 PRINT_ERROR("%s", "Device already registered");
2855 file->private_data = dev;
2856 mutex_unlock(&dev_priv_mutex);
2859 TRACE_EXIT_RES(res);
2863 scst_unregister_virtual_device(dev->virt_id);
2866 scst_unregister_virtual_dev_driver(&dev->devtype);
2869 spin_lock(&dev_list_lock);
2870 list_del(&dev->dev_list_entry);
2871 spin_unlock(&dev_list_lock);
2874 sgv_pool_del(dev->pool_clust);
2877 sgv_pool_del(dev->pool);
2883 module_put(THIS_MODULE);
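/*
 * Illustrative user space sketch (not part of the original driver):
 * registration is driven from user space by opening the handler's character
 * device and issuing a register ioctl with a filled struct scst_user_dev_desc.
 * Opening with O_NONBLOCK is what clears dev->blocking in the code above. The
 * device node path and the SCST_USER_REGISTER_DEVICE ioctl name are
 * assumptions taken from scst_user.h and the handler's setup.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/scsi.h>
#include "scst_user.h"

static int register_user_disk(const char *name, int block_size)
{
	struct scst_user_dev_desc desc;
	int fd;

	fd = open("/dev/scst_user", O_RDWR | O_NONBLOCK);
	if (fd < 0)
		return -1;

	memset(&desc, 0, sizeof(desc));
	desc.version_str = (unsigned long)DEV_USER_VERSION;
	desc.type = TYPE_DISK;
	desc.block_size = block_size;	/* must be non-zero for disk-like types */
	strncpy(desc.name, name, sizeof(desc.name) - 1);

	desc.opt.parse_type = SCST_USER_PARSE_CALL;
	desc.opt.tst = SCST_CONTR_MODE_SEP_TASK_SETS;
	desc.opt.queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;

	if (ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc) != 0) {
		perror("SCST_USER_REGISTER_DEVICE");
		close(fd);
		return -1;
	}

	/* keep fd open: releasing it unregisters the device (see below) */
	return fd;
}
#endif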
2887 static int dev_user_unregister_dev(struct file *file)
2890 struct scst_user_dev *dev;
2894 mutex_lock(&dev_priv_mutex);
2895 dev = (struct scst_user_dev *)file->private_data;
2896 res = dev_user_check_reg(dev);
2898 mutex_unlock(&dev_priv_mutex);
2901 down_read(&dev->dev_rwsem);
2902 mutex_unlock(&dev_priv_mutex);
2904 res = scst_suspend_activity(true);
2908 up_read(&dev->dev_rwsem);
2910 mutex_lock(&dev_priv_mutex);
2911 dev = (struct scst_user_dev *)file->private_data;
2913 mutex_unlock(&dev_priv_mutex);
2918 wake_up_all(&dev->cmd_lists.cmd_list_waitQ);
2920 down_write(&dev->dev_rwsem);
2921 file->private_data = NULL;
2922 mutex_unlock(&dev_priv_mutex);
2924 dev_user_exit_dev(dev);
2926 up_write(&dev->dev_rwsem); /* to make lockdep happy */
2931 scst_resume_activity();
2934 TRACE_EXIT_RES(res);
2938 up_read(&dev->dev_rwsem);
2942 static int dev_user_flush_cache(struct file *file)
2945 struct scst_user_dev *dev;
2949 mutex_lock(&dev_priv_mutex);
2950 dev = (struct scst_user_dev *)file->private_data;
2951 res = dev_user_check_reg(dev);
2953 mutex_unlock(&dev_priv_mutex);
2956 down_read(&dev->dev_rwsem);
2957 mutex_unlock(&dev_priv_mutex);
2959 res = scst_suspend_activity(true);
2963 sgv_pool_flush(dev->pool);
2964 sgv_pool_flush(dev->pool_clust);
2966 scst_resume_activity();
2969 up_read(&dev->dev_rwsem);
2972 TRACE_EXIT_RES(res);
2976 static int dev_user_capacity_changed(struct file *file)
2979 struct scst_user_dev *dev;
2983 mutex_lock(&dev_priv_mutex);
2984 dev = (struct scst_user_dev *)file->private_data;
2985 res = dev_user_check_reg(dev);
2987 mutex_unlock(&dev_priv_mutex);
2990 down_read(&dev->dev_rwsem);
2991 mutex_unlock(&dev_priv_mutex);
2993 scst_capacity_data_changed(dev->sdev);
2995 up_read(&dev->dev_rwsem);
2998 TRACE_EXIT_RES(res);
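/*
 * Illustrative user space sketch (not part of the original driver): both
 * dev_user_flush_cache() and dev_user_capacity_changed() above are triggered
 * by argument-less ioctls on the registered device's file descriptor, e.g.
 * after the handler resizes its backing storage. The SCST_USER_FLUSH_CACHE
 * and SCST_USER_DEVICE_CAPACITY_CHANGED ioctl names are assumptions taken
 * from scst_user.h.
 */
#if 0
#include <sys/ioctl.h>
#include "scst_user.h"

static void backing_storage_resized(int scst_user_fd)
{
	/* tell initiators the capacity changed ... */
	ioctl(scst_user_fd, SCST_USER_DEVICE_CAPACITY_CHANGED);
	/* ... and drop the SGV buffers cached on the handler's behalf */
	ioctl(scst_user_fd, SCST_USER_FLUSH_CACHE);
}
#endif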
3002 static int dev_user_prealloc_buffer(struct file *file, void __user *arg)
3005 struct scst_user_dev *dev;
3006 union scst_user_prealloc_buffer pre;
3009 struct scst_user_cmd *ucmd;
3011 struct sgv_pool *pool;
3012 struct scatterlist *sg;
3016 mutex_lock(&dev_priv_mutex);
3017 dev = (struct scst_user_dev *)file->private_data;
3018 res = dev_user_check_reg(dev);
3019 if (unlikely(res != 0)) {
3020 mutex_unlock(&dev_priv_mutex);
3023 down_read(&dev->dev_rwsem);
3024 mutex_unlock(&dev_priv_mutex);
3026 res = copy_from_user(&pre.in, arg, sizeof(pre.in));
3027 if (unlikely(res != 0))
3030 TRACE_MEM("Prealloc buffer with size %dKB for dev %s",
3031 pre.in.bufflen / 1024, dev->name);
3032 TRACE_BUFFER("Input param", &pre.in, sizeof(pre.in));
3035 bufflen = pre.in.bufflen;
3037 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
3043 ucmd->buff_cached = 1;
3045 TRACE_MEM("ucmd %p, pbuf %llx", ucmd, pbuf);
3047 if (unlikely((pbuf & ~PAGE_MASK) != 0)) {
3048 PRINT_ERROR("Supplied pbuf %llx isn't page aligned", pbuf);
3053 pages = calc_num_pg(pbuf, bufflen);
3054 res = dev_user_map_buf(ucmd, pbuf, pages);
3058 if (pre.in.for_clust_pool)
3059 pool = dev->pool_clust;
3063 sg = sgv_pool_alloc(pool, bufflen, GFP_KERNEL, SGV_POOL_ALLOC_GET_NEW,
3064 &sg_cnt, &ucmd->sgv, &dev->udev_mem_lim, ucmd);
3066 struct scst_user_cmd *buf_ucmd =
3067 (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
3069 TRACE_MEM("Buf ucmd %p (sg_cnt %d, last seg len %d, "
3070 "bufflen %d)", buf_ucmd, sg_cnt,
3071 sg[sg_cnt-1].length, bufflen);
3073 EXTRACHECKS_BUG_ON(ucmd != buf_ucmd);
3075 ucmd->buf_ucmd = buf_ucmd;
3081 dev_user_free_sgv(ucmd);
3083 pre.out.cmd_h = ucmd->h;
3084 res = copy_to_user(arg, &pre.out, sizeof(pre.out));
3090 up_read(&dev->dev_rwsem);
3093 TRACE_EXIT_RES(res);
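/*
 * Illustrative user space sketch (not part of the original driver): the code
 * above accepts only a page-aligned user buffer, maps and charges its pages,
 * hands them to the selected SGV pool and returns a handle in pre.out.cmd_h.
 * The SCST_USER_PREALLOC_BUFFER ioctl name and the exact union layout are
 * assumptions taken from scst_user.h.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "scst_user.h"

static long prealloc_buffer(int fd, size_t len, int for_clust_pool)
{
	union scst_user_prealloc_buffer pre;
	void *buf;

	/* the kernel rejects buffers that are not page aligned (see above) */
	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), len) != 0)
		return -1;

	memset(&pre, 0, sizeof(pre));
	pre.in.pbuf = (unsigned long)buf;
	pre.in.bufflen = len;
	pre.in.for_clust_pool = for_clust_pool;

	if (ioctl(fd, SCST_USER_PREALLOC_BUFFER, &pre) != 0) {
		free(buf);
		return -1;
	}

	/* pre.out.cmd_h identifies this preallocated buffer from now on */
	printf("preallocated %zu bytes, cmd_h %u\n", len, (unsigned)pre.out.cmd_h);
	return pre.out.cmd_h;
}
#endif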
3097 static int __dev_user_set_opt(struct scst_user_dev *dev,
3098 const struct scst_user_opt *opt)
3104 TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
3105 "memory_reuse_type %x, partial_transfers_type %x, "
3106 "partial_len %d", dev->name, opt->parse_type,
3107 opt->on_free_cmd_type, opt->memory_reuse_type,
3108 opt->partial_transfers_type, opt->partial_len);
3110 if (opt->parse_type > SCST_USER_MAX_PARSE_OPT ||
3111 opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT ||
3112 opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT ||
3113 opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT) {
3114 PRINT_ERROR("%s", "Invalid option");
3119 if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
3120 (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
3121 ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
3122 (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
3123 (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1) ||
3124 (opt->d_sense > 1)) {
3125 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x,"
3126 " tas %x, d_sense %d, has_own_order_mgmt %x)", opt->tst,
3127 opt->queue_alg, opt->swp, opt->tas, opt->d_sense,
3128 opt->has_own_order_mgmt);
3133 dev->parse_type = opt->parse_type;
3134 dev->on_free_cmd_type = opt->on_free_cmd_type;
3135 dev->memory_reuse_type = opt->memory_reuse_type;
3136 dev->partial_transfers_type = opt->partial_transfers_type;
3137 dev->partial_len = opt->partial_len;
3139 dev->tst = opt->tst;
3140 dev->queue_alg = opt->queue_alg;
3141 dev->swp = opt->swp;
3142 dev->tas = opt->tas;
3144 dev->d_sense = opt->d_sense;
3145 dev->has_own_order_mgmt = opt->has_own_order_mgmt;
3146 if (dev->sdev != NULL) {
3147 dev->sdev->tst = opt->tst;
3148 dev->sdev->queue_alg = opt->queue_alg;
3149 dev->sdev->swp = opt->swp;
3150 dev->sdev->tas = opt->tas;
3151 dev->sdev->d_sense = opt->d_sense;
3152 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
3155 dev_user_setup_functions(dev);
3158 TRACE_EXIT_RES(res);
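/*
 * Illustrative user space sketch (not part of the original driver): the
 * options validated above can also be changed at runtime; dev_user_set_opt()
 * below suspends activity, applies them through __dev_user_set_opt() and
 * pushes the SCSI mode values to the live scst_device. The
 * SCST_USER_SET_OPTIONS ioctl name is an assumption taken from scst_user.h.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include "scst_user.h"

static int switch_to_restricted_reorder(int fd)
{
	struct scst_user_opt opt;

	/* every field must pass the range checks in __dev_user_set_opt() */
	memset(&opt, 0, sizeof(opt));
	opt.parse_type = SCST_USER_PARSE_CALL;
	opt.tst = SCST_CONTR_MODE_SEP_TASK_SETS;
	opt.queue_alg = SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER;

	return ioctl(fd, SCST_USER_SET_OPTIONS, &opt);
}
#endif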
3162 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
3165 struct scst_user_dev *dev;
3169 mutex_lock(&dev_priv_mutex);
3170 dev = (struct scst_user_dev *)file->private_data;
3171 res = dev_user_check_reg(dev);
3173 mutex_unlock(&dev_priv_mutex);
3176 down_read(&dev->dev_rwsem);
3177 mutex_unlock(&dev_priv_mutex);
3179 res = scst_suspend_activity(true);
3183 res = __dev_user_set_opt(dev, opt);
3185 scst_resume_activity();
3188 up_read(&dev->dev_rwsem);
3191 TRACE_EXIT_RES(res);
3195 static int dev_user_get_opt(struct file *file, void __user *arg)
3198 struct scst_user_dev *dev;
3199 struct scst_user_opt opt;
3203 mutex_lock(&dev_priv_mutex);
3204 dev = (struct scst_user_dev *)file->private_data;
3205 res = dev_user_check_reg(dev);
3207 mutex_unlock(&dev_priv_mutex);
3210 down_read(&dev->dev_rwsem);
3211 mutex_unlock(&dev_priv_mutex);
3213 opt.parse_type = dev->parse_type;
3214 opt.on_free_cmd_type = dev->on_free_cmd_type;
3215 opt.memory_reuse_type = dev->memory_reuse_type;
3216 opt.partial_transfers_type = dev->partial_transfers_type;
3217 opt.partial_len = dev->partial_len;
3219 opt.queue_alg = dev->queue_alg;
3222 opt.d_sense = dev->d_sense;
3223 opt.has_own_order_mgmt = dev->has_own_order_mgmt;
3225 TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
3226 "memory_reuse_type %x, partial_transfers_type %x, "
3227 "partial_len %d", dev->name, opt.parse_type,
3228 opt.on_free_cmd_type, opt.memory_reuse_type,
3229 opt.partial_transfers_type, opt.partial_len);
3231 res = copy_to_user(arg, &opt, sizeof(opt));
3233 up_read(&dev->dev_rwsem);
3235 TRACE_EXIT_RES(res);
3239 static int dev_usr_parse(struct scst_cmd *cmd)
3242 return SCST_CMD_STATE_DEFAULT;
3245 static int dev_user_exit_dev(struct scst_user_dev *dev)
3249 TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
3251 spin_lock(&dev_list_lock);
3252 list_del(&dev->dev_list_entry);
3253 spin_unlock(&dev_list_lock);
3256 wake_up_all(&dev->cmd_lists.cmd_list_waitQ);
3258 spin_lock(&cleanup_lock);
3259 list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
3260 spin_unlock(&cleanup_lock);
3262 wake_up(&cleanup_list_waitQ);
3264 scst_unregister_virtual_device(dev->virt_id);
3265 scst_unregister_virtual_dev_driver(&dev->devtype);
3267 sgv_pool_flush(dev->pool_clust);
3268 sgv_pool_flush(dev->pool);
3270 TRACE_MGMT_DBG("Unregistering finished (dev %p)", dev);
3272 dev->cleanup_done = 1;
3274 wake_up(&cleanup_list_waitQ);
3275 wake_up(&dev->cmd_lists.cmd_list_waitQ);
3277 wait_for_completion(&dev->cleanup_cmpl);
3279 sgv_pool_del(dev->pool_clust);
3280 sgv_pool_del(dev->pool);
3282 TRACE_MGMT_DBG("Releasing completed (dev %p)", dev);
3284 module_put(THIS_MODULE);
3290 static int __dev_user_release(void *arg)
3292 struct scst_user_dev *dev = (struct scst_user_dev *)arg;
3293 dev_user_exit_dev(dev);
3298 static int dev_user_release(struct inode *inode, struct file *file)
3300 struct scst_user_dev *dev;
3301 struct task_struct *t;
3305 dev = (struct scst_user_dev *)file->private_data;
3308 file->private_data = NULL;
3310 TRACE_MGMT_DBG("Going to release dev %s", dev->name);
3312 t = kthread_run(__dev_user_release, dev, "scst_usr_released");
3314 PRINT_CRIT_ERROR("kthread_run() failed (%ld), releasing device "
3315 "%p directly. If you have several devices under load "
3316 "it might deadlock!", PTR_ERR(t), dev);
3317 __dev_user_release(dev);
3325 static int dev_user_process_cleanup(struct scst_user_dev *dev)
3327 struct scst_user_cmd *ucmd;
3328 int rc = 0, res = 1;
3332 sBUG_ON(dev->blocking);
3333 wake_up_all(&dev->cmd_lists.cmd_list_waitQ); /* just in case */
3338 TRACE_DBG("Cleaning up dev %p", dev);
3340 rc1 = dev_user_unjam_dev(dev);
3341 if ((rc1 == 0) && (rc == -EAGAIN) && dev->cleanup_done)
3344 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
3346 rc = dev_user_get_next_cmd(dev, &ucmd);
3348 dev_user_unjam_cmd(ucmd, 1, NULL);
3350 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
3352 if (rc == -EAGAIN) {
3353 if (!dev->cleanup_done) {
3354 TRACE_DBG("No more commands (dev %p)", dev);
3360 #ifdef CONFIG_SCST_EXTRACHECKS
3363 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
3364 struct list_head *head = &dev->ucmd_hash[i];
3365 struct scst_user_cmd *ucmd2;
3367 list_for_each_entry(ucmd2, head, hash_list_entry) {
3368 PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd2,
3369 ucmd2->state, atomic_read(&ucmd2->ucmd_ref));
3377 TRACE_DBG("Cleanup done (dev %p)", dev);
3378 complete_all(&dev->cleanup_cmpl);
3382 TRACE_EXIT_RES(res);
3386 #ifndef CONFIG_SCST_PROC
3388 static ssize_t dev_user_sysfs_commands_show(struct kobject *kobj,
3389 struct kobj_attribute *attr, char *buf)
3391 int pos = 0, ppos, i;
3392 struct scst_device *dev;
3393 struct scst_user_dev *udev;
3394 unsigned long flags;
3398 dev = container_of(kobj, struct scst_device, dev_kobj);
3399 udev = (struct scst_user_dev *)dev->dh_priv;
3401 spin_lock_irqsave(&udev->cmd_lists.cmd_list_lock, flags);
3402 for (i = 0; i < (int)ARRAY_SIZE(udev->ucmd_hash); i++) {
3403 struct list_head *head = &udev->ucmd_hash[i];
3404 struct scst_user_cmd *ucmd;
3405 list_for_each_entry(ucmd, head, hash_list_entry) {
3407 pos += scnprintf(&buf[pos],
3408 SCST_SYSFS_BLOCK_SIZE - pos,
3409 "ucmd %p (state %x, ref %d), "
3410 "sent_to_user %d, seen_by_user %d, "
3411 "aborted %d, jammed %d, scst_cmd %p\n",
3413 atomic_read(&ucmd->ucmd_ref),
3414 ucmd->sent_to_user, ucmd->seen_by_user,
3415 ucmd->aborted, ucmd->jammed, ucmd->cmd);
3416 if (pos >= SCST_SYSFS_BLOCK_SIZE-1) {
3417 ppos += scnprintf(&buf[ppos],
3418 SCST_SYSFS_BLOCK_SIZE - ppos, "...\n");
3424 spin_unlock_irqrestore(&udev->cmd_lists.cmd_list_lock, flags);
3426 TRACE_EXIT_RES(pos);
3430 #else /* CONFIG_SCST_PROC */
3433 * Called when a file in the /proc/scsi_tgt/scst_user is read
3435 static int dev_user_read_proc(struct seq_file *seq, struct scst_dev_type *dev_type)
3438 struct scst_user_dev *dev;
3439 unsigned long flags;
3443 spin_lock(&dev_list_lock);
3445 list_for_each_entry(dev, &dev_list, dev_list_entry) {
3447 seq_printf(seq, "Device %s commands:\n", dev->name);
3448 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
3449 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
3450 struct list_head *head = &dev->ucmd_hash[i];
3451 struct scst_user_cmd *ucmd;
3452 list_for_each_entry(ucmd, head, hash_list_entry) {
3453 seq_printf(seq, "ucmd %p (state %x, ref %d), "
3454 "sent_to_user %d, seen_by_user %d, "
3455 "aborted %d, jammed %d, scst_cmd %p\n",
3457 atomic_read(&ucmd->ucmd_ref),
3458 ucmd->sent_to_user, ucmd->seen_by_user,
3459 ucmd->aborted, ucmd->jammed, ucmd->cmd);
3462 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
3464 spin_unlock(&dev_list_lock);
3466 TRACE_EXIT_RES(res);
3469 #endif /* CONFIG_SCST_PROC */
3471 static inline int test_cleanup_list(void)
3473 int res = !list_empty(&cleanup_list) ||
3474 unlikely(kthread_should_stop());
3478 static int dev_user_cleanup_thread(void *arg)
3482 PRINT_INFO("Cleanup thread started, PID %d", current->pid);
3484 current->flags |= PF_NOFREEZE;
3486 spin_lock(&cleanup_lock);
3487 while (!kthread_should_stop()) {
3489 init_waitqueue_entry(&wait, current);
3491 if (!test_cleanup_list()) {
3492 add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
3494 set_current_state(TASK_INTERRUPTIBLE);
3495 if (test_cleanup_list())
3497 spin_unlock(&cleanup_lock);
3499 spin_lock(&cleanup_lock);
3501 set_current_state(TASK_RUNNING);
3502 remove_wait_queue(&cleanup_list_waitQ, &wait);
3506 * We have to poll devices, because commands can go from SCST
3507 * core on cmd_list_waitQ and we have no practical way to
3512 struct scst_user_dev *dev;
3515 while (!list_empty(&cleanup_list)) {
3518 dev = list_entry(cleanup_list.next,
3519 typeof(*dev), cleanup_list_entry);
3520 list_del(&dev->cleanup_list_entry);
3522 spin_unlock(&cleanup_lock);
3523 rc = dev_user_process_cleanup(dev);
3524 spin_lock(&cleanup_lock);
3527 list_add_tail(&dev->cleanup_list_entry,
3531 if (list_empty(&cl_devs))
3534 spin_unlock(&cleanup_lock);
3536 spin_lock(&cleanup_lock);
3538 while (!list_empty(&cl_devs)) {
3539 dev = list_entry(cl_devs.next, typeof(*dev),
3540 cleanup_list_entry);
3541 list_move_tail(&dev->cleanup_list_entry,
3546 spin_unlock(&cleanup_lock);
3549 * If kthread_should_stop() is true, we are guaranteed to be
3550 * on the module unload, so cleanup_list must be empty.
3552 sBUG_ON(!list_empty(&cleanup_list));
3554 PRINT_INFO("Cleanup thread PID %d finished", current->pid);
3560 static int __init init_scst_user(void)
3563 struct max_get_reply {
3565 struct scst_user_get_cmd g;
3566 struct scst_user_reply_cmd r;
3569 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3570 struct class_device *class_member;
3577 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
3578 PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
3579 "Consider changing VMSPLIT option or use a 64-bit "
3580 "configuration instead. See README file for details.");
3585 user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
3586 if (user_cmd_cachep == NULL) {
3591 user_get_cmd_cachep = KMEM_CACHE(max_get_reply, SCST_SLAB_FLAGS);
3592 if (user_get_cmd_cachep == NULL) {
3597 dev_user_devtype.module = THIS_MODULE;
3599 res = scst_register_virtual_dev_driver(&dev_user_devtype);
3603 #ifdef CONFIG_SCST_PROC
3604 res = scst_dev_handler_build_std_proc(&dev_user_devtype);
3609 dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
3610 if (IS_ERR(dev_user_sysfs_class)) {
3611 PRINT_ERROR("%s", "Unable to create sysfs class for SCST user "
3613 res = PTR_ERR(dev_user_sysfs_class);
3614 #ifdef CONFIG_SCST_PROC
3621 dev_user_major = register_chrdev(0, DEV_USER_NAME, &dev_user_fops);
3622 if (dev_user_major < 0) {
3623 PRINT_ERROR("register_chrdev() failed: %d", dev_user_major);
3624 res = dev_user_major;
3628 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)
3629 class_member = class_device_create(dev_user_sysfs_class, NULL,
3630 MKDEV(dev_user_major, 0), NULL, DEV_USER_NAME);
3631 if (IS_ERR(class_member)) {
3632 res = PTR_ERR(class_member);