4 * Copyright (C) 2007 Vladislav Bolkhovitin <vst@vlnb.net>
6 * SCSI virtual user space device handler
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation, version 2
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <linux/kthread.h>
20 #include <linux/delay.h>
21 #include <linux/poll.h>
23 #define LOG_PREFIX DEV_USER_NAME
26 #include "scst_user.h"
27 #include "scst_dev_handler.h"
29 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
30 #warning "HIGHMEM kernel configurations are not supported by this module, \
31 because nowadays it doesn't worth the effort. Consider change \
32 VMSPLIT option or use 64-bit configuration instead. See README file \
36 #define DEV_USER_MAJOR 237
37 #define DEV_USER_CMD_HASH_ORDER 6
38 #define DEV_USER_TM_TIMEOUT (10*HZ)
39 #define DEV_USER_ATTACH_TIMEOUT (5*HZ)
40 #define DEV_USER_DETACH_TIMEOUT (5*HZ)
41 #define DEV_USER_PRE_UNREG_POLL_TIME (HZ/10)
45 struct rw_semaphore dev_rwsem;
47 struct scst_cmd_lists cmd_lists;
48 /* All 3 protected by cmd_lists.cmd_list_lock */
49 struct list_head ready_cmd_list;
50 struct list_head prio_ready_cmd_list;
51 wait_queue_head_t prio_cmd_list_waitQ;
53 /* All, including detach_cmd_count, protected by cmd_lists.cmd_list_lock */
54 unsigned short blocking:1;
55 unsigned short cleaning:1;
56 unsigned short cleanup_done:1;
57 unsigned short attach_cmd_active:1;
58 unsigned short tm_cmd_active:1;
59 unsigned short internal_reset_active:1;
60 unsigned short pre_unreg_sess_active:1; /* just a small optimization */
63 unsigned short queue_alg:4;
66 unsigned short has_own_order_mgmt:1;
68 unsigned short detach_cmd_count;
70 int (*generic_parse)(struct scst_cmd *cmd,
71 int (*get_block)(struct scst_cmd *cmd));
76 struct sgv_pool *pool;
79 uint8_t on_free_cmd_type;
80 uint8_t memory_reuse_type;
81 uint8_t prio_queue_type;
82 uint8_t partial_transfers_type;
85 struct scst_dev_type devtype;
87 /* Both protected by cmd_lists.cmd_list_lock */
88 unsigned int handle_counter;
89 struct list_head ucmd_hash[1<<DEV_USER_CMD_HASH_ORDER];
91 struct scst_device *sdev;
94 struct list_head dev_list_entry;
95 char name[SCST_MAX_NAME];
97 /* Protected by cmd_lists.cmd_list_lock */
98 struct list_head pre_unreg_sess_list;
100 struct list_head cleanup_list_entry;
101 struct completion cleanup_cmpl;
104 struct scst_user_pre_unreg_sess_obj
106 struct scst_tgt_dev *tgt_dev;
107 unsigned int active:1;
109 struct list_head pre_unreg_sess_list_entry;
110 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
111 struct work_struct pre_unreg_sess_work;
113 struct delayed_work pre_unreg_sess_work;
117 /* Most fields are unprotected, since only one thread at a time can access them */
120 struct scst_cmd *cmd;
121 struct scst_user_dev *dev;
125 unsigned int buff_cached:1;
126 unsigned int buf_dirty:1;
127 unsigned int background_exec:1;
128 unsigned int internal_reset_tm:1;
129 unsigned int aborted:1;
131 struct scst_user_cmd *buf_ucmd;
135 int first_page_offset;
137 struct page **data_pages;
138 struct sgv_pool_obj *sgv;
142 struct list_head ready_cmd_list_entry;
145 struct list_head hash_list_entry;
147 struct scst_user_get_cmd user_cmd;
149 struct completion *cmpl;
153 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
155 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);
157 static int dev_user_parse(struct scst_cmd *cmd);
158 static int dev_user_exec(struct scst_cmd *cmd);
159 static void dev_user_on_free_cmd(struct scst_cmd *cmd);
160 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
161 struct scst_tgt_dev *tgt_dev);
163 static int dev_user_disk_done(struct scst_cmd *cmd);
164 static int dev_user_tape_done(struct scst_cmd *cmd);
166 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
167 gfp_t gfp_mask, void *priv);
168 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
171 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
173 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
174 unsigned long *flags);
175 static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
176 struct scst_tgt_dev *tgt_dev);
178 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
180 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
181 static int dev_user_register_dev(struct file *file,
182 const struct scst_user_dev_desc *dev_desc);
183 static int __dev_user_set_opt(struct scst_user_dev *dev,
184 const struct scst_user_opt *opt);
185 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
186 static int dev_user_get_opt(struct file *file, void *arg);
188 static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
189 static long dev_user_ioctl(struct file *file, unsigned int cmd,
191 static int dev_user_release(struct inode *inode, struct file *file);
195 static struct kmem_cache *user_cmd_cachep;
197 static DEFINE_MUTEX(dev_priv_mutex);
199 static struct file_operations dev_user_fops = {
200 .poll = dev_user_poll,
201 .unlocked_ioctl = dev_user_ioctl,
203 .compat_ioctl = dev_user_ioctl,
205 .release = dev_user_release,
208 static struct class *dev_user_sysfs_class;
210 static spinlock_t dev_list_lock = SPIN_LOCK_UNLOCKED;
211 static LIST_HEAD(dev_list);
213 static spinlock_t cleanup_lock = SPIN_LOCK_UNLOCKED;
214 static LIST_HEAD(cleanup_list);
215 static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
216 static struct task_struct *cleanup_thread;
218 static inline void ucmd_get(struct scst_user_cmd *ucmd, int barrier)
220 TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
221 atomic_inc(&ucmd->ucmd_ref);
223 smp_mb__after_atomic_inc();
226 static inline void ucmd_put(struct scst_user_cmd *ucmd)
228 TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
229 if (atomic_dec_and_test(&ucmd->ucmd_ref))
230 dev_user_free_ucmd(ucmd);
233 static inline int calc_num_pg(unsigned long buf, int len)
235 len += buf & ~PAGE_MASK;
236 return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
239 static inline int is_need_offs_page(unsigned long buf, int len)
241 return ((buf & ~PAGE_MASK) != 0) &&
242 ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
245 static void __dev_user_not_reg(void)
247 PRINT_ERROR("%s", "Device not registered");
251 static inline int dev_user_check_reg(struct scst_user_dev *dev)
254 __dev_user_not_reg();
260 static inline int scst_user_cmd_hashfn(int h)
262 return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
265 static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
268 struct list_head *head;
269 struct scst_user_cmd *ucmd;
271 head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
272 list_for_each_entry(ucmd, head, hash_list_entry) {
274 TRACE_DBG("Found ucmd %p", ucmd);
281 static void cmd_insert_hash(struct scst_user_cmd *ucmd)
283 struct list_head *head;
284 struct scst_user_dev *dev = ucmd->dev;
285 struct scst_user_cmd *u;
288 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
290 ucmd->h = dev->handle_counter++;
291 u = __ucmd_find_hash(dev, ucmd->h);
293 head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
294 list_add_tail(&ucmd->hash_list_entry, head);
295 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
297 TRACE_DBG("Inserted ucmd %p, h=%d", ucmd, ucmd->h);
301 static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
304 spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
305 list_del(&ucmd->hash_list_entry);
306 spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
308 TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
312 static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
316 TRACE_MEM("Freeing ucmd %p", ucmd);
318 cmd_remove_hash(ucmd);
319 EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);
321 kmem_cache_free(user_cmd_cachep, ucmd);
327 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
328 gfp_t gfp_mask, void *priv)
330 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
335 /* *sg is supposed to be zeroed */
337 TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
338 ucmd->ubuff, ucmd->cur_data_page);
340 if (ucmd->cur_data_page == 0) {
341 TRACE_MEM("ucmd->first_page_offset %d",
342 ucmd->first_page_offset);
343 offset = ucmd->first_page_offset;
347 if (ucmd->cur_data_page >= ucmd->num_data_pages)
350 sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
351 PAGE_SIZE - offset, offset);
352 ucmd->cur_data_page++;
354 TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
356 TRACE_BUFFER("Page data", sg_virt(sg), sg->length);
363 static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
367 TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
368 ucmd, ucmd->h, ucmd->ubuff);
370 ucmd->user_cmd.cmd_h = ucmd->h;
371 ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
372 ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;
374 ucmd->state = UCMD_STATE_ON_CACHE_FREEING;
376 dev_user_add_to_ready(ucmd);
382 static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
388 TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
389 ucmd->ubuff, ucmd->num_data_pages);
391 for (i = 0; i < ucmd->num_data_pages; i++) {
392 struct page *page = ucmd->data_pages[i];
397 page_cache_release(page);
399 kfree(ucmd->data_pages);
400 ucmd->data_pages = NULL;
406 static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
410 sBUG_ON(ucmd->data_pages == NULL);
412 TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
413 ucmd, ucmd->ubuff, ucmd->buff_cached);
415 dev_user_unmap_buf(ucmd);
417 if (ucmd->buff_cached)
418 dev_user_on_cached_mem_free(ucmd);
426 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
429 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
431 TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
434 __dev_user_free_sg_entries(ucmd);
439 static inline int is_buff_cached(struct scst_user_cmd *ucmd)
441 int mem_reuse_type = ucmd->dev->memory_reuse_type;
443 if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
444 ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
445 (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
446 ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
447 (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE))) {
454 * Returns 0 on success, <0 on fatal failure, >0 if pages are still needed.
455 * Unmaps the buffer, if needed, in case of error.
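 *
 * A minimal caller sketch (for illustration only; the real caller is
 * dev_user_alloc_space() below):
 *
 *	rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
 *	if (rc == 0)
 *		;	/* cmd->sg is ready */
 *	else if (rc < 0)
 *		;	/* fatal, fail the command */
 *	else
 *		;	/* no pages yet, request them from user space
 *			 * via SCST_USER_ALLOC_MEM */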
457 static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
460 struct scst_cmd *cmd = ucmd->cmd;
461 struct scst_user_dev *dev = ucmd->dev;
462 int gfp_mask, flags = 0;
463 int bufflen = cmd->bufflen;
468 gfp_mask = __GFP_NOWARN;
469 gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
472 flags |= SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
473 if (ucmd->ubuff == 0)
474 flags |= SCST_POOL_NO_ALLOC_ON_CACHE_MISS;
476 TRACE_MEM("%s", "Not cached buff");
477 flags |= SCST_POOL_ALLOC_NO_CACHED;
478 if (ucmd->ubuff == 0) {
482 bufflen += ucmd->first_page_offset;
483 if (is_need_offs_page(ucmd->ubuff, cmd->bufflen))
484 last_len = bufflen & ~PAGE_MASK;
486 last_len = cmd->bufflen & ~PAGE_MASK;
488 last_len = PAGE_SIZE;
490 ucmd->buff_cached = cached_buff;
492 cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
493 &cmd->sg_cnt, &ucmd->sgv, ucmd);
494 if (cmd->sg != NULL) {
495 struct scst_user_cmd *buf_ucmd =
496 (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
498 TRACE_MEM("Buf ucmd %p", buf_ucmd);
500 ucmd->ubuff = buf_ucmd->ubuff;
501 ucmd->buf_ucmd = buf_ucmd;
503 EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
507 /* We don't use clustering, so the assignment is safe */
508 cmd->sg[cmd->sg_cnt-1].length = last_len;
511 TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
512 "last_len %d, l %d)", ucmd, cached_buff, ucmd->ubuff,
513 last_len, cmd->sg[cmd->sg_cnt-1].length);
515 if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
518 PRINT_INFO("Unable to complete command due to "
519 "SG IO count limitation (requested %d, "
520 "available %d, tgt lim %d)", cmd->sg_cnt,
521 cmd->tgt_dev->max_sg_cnt,
522 cmd->tgt->sg_tablesize);
526 /* sgv will be freed in dev_user_free_sgv() */
530 TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached, %d, "
531 "sg_cnt %d, ubuff %lx, sgv %p", ucmd, ucmd->h,
532 ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
533 if (unlikely(cmd->sg_cnt == 0)) {
534 TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
535 sBUG_ON(ucmd->sgv != NULL);
538 switch (ucmd->state & ~UCMD_STATE_MASK) {
539 case UCMD_STATE_BUF_ALLOCING:
542 case UCMD_STATE_EXECING:
557 static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
559 int rc, res = SCST_CMD_STATE_DEFAULT;
560 struct scst_cmd *cmd = ucmd->cmd;
564 if (unlikely(ucmd->cmd->data_buf_tgt_alloc)) {
565 PRINT_ERROR("Target driver %s requested own memory "
566 "allocation", ucmd->cmd->tgtt->name);
567 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
568 res = SCST_CMD_STATE_PRE_XMIT_RESP;
572 ucmd->state = UCMD_STATE_BUF_ALLOCING;
573 cmd->data_buf_alloced = 1;
575 rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
580 res = SCST_CMD_STATE_PRE_XMIT_RESP;
584 if ((cmd->data_direction != SCST_DATA_WRITE) &&
585 !scst_is_cmd_local(cmd)) {
586 TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
590 ucmd->user_cmd.cmd_h = ucmd->h;
591 ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
592 ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
593 memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
594 min(sizeof(ucmd->user_cmd.alloc_cmd.cdb), sizeof(cmd->cdb)));
595 ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
596 ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
597 (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
598 ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
599 ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
600 ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;
602 dev_user_add_to_ready(ucmd);
604 res = SCST_CMD_STATE_STOP;
611 static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
614 struct scst_user_cmd *ucmd = NULL;
618 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
619 ucmd = kmem_cache_alloc(user_cmd_cachep, gfp_mask);
621 memset(ucmd, 0, sizeof(*ucmd));
623 ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
625 if (unlikely(ucmd == NULL)) {
626 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
627 "user cmd (gfp_mask %x)", gfp_mask);
631 atomic_set(&ucmd->ucmd_ref, 1);
633 cmd_insert_hash(ucmd);
635 TRACE_MEM("ucmd %p allocated", ucmd);
638 TRACE_EXIT_HRES((unsigned long)ucmd);
642 static int dev_user_get_block(struct scst_cmd *cmd)
644 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
646 * No need for locks here, since *_detach() cannot be
647 * called while there are outstanding commands.
649 TRACE_EXIT_RES(dev->block);
653 static int dev_user_parse(struct scst_cmd *cmd)
655 int rc, res = SCST_CMD_STATE_DEFAULT;
656 struct scst_user_cmd *ucmd;
657 int atomic = scst_cmd_atomic(cmd);
658 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
659 int gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
663 if (cmd->dh_priv == NULL) {
664 ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
665 if (unlikely(ucmd == NULL)) {
667 res = SCST_CMD_STATE_NEED_THREAD_CTX;
677 ucmd = (struct scst_user_cmd *)cmd->dh_priv;
678 TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
681 TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);
683 if (ucmd->state != UCMD_STATE_NEW)
686 switch (dev->parse_type) {
687 case SCST_USER_PARSE_STANDARD:
688 TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
689 rc = dev->generic_parse(cmd, dev_user_get_block);
690 if ((rc != 0) || (cmd->op_flags & SCST_INFO_INVALID))
694 case SCST_USER_PARSE_EXCEPTION:
695 TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
696 rc = dev->generic_parse(cmd, dev_user_get_block);
697 if ((rc == 0) && (!(cmd->op_flags & SCST_INFO_INVALID)))
699 else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
700 TRACE_MEM("Restarting PARSE to thread context "
702 res = SCST_CMD_STATE_NEED_THREAD_CTX;
705 /* else fall through */
707 case SCST_USER_PARSE_CALL:
708 TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
709 "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
710 ucmd->user_cmd.cmd_h = ucmd->h;
711 ucmd->user_cmd.subcode = SCST_USER_PARSE;
712 ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
713 memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
714 min(sizeof(ucmd->user_cmd.parse_cmd.cdb),
716 ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
717 ucmd->user_cmd.parse_cmd.timeout = cmd->timeout;
718 ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
719 ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
720 ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
721 ucmd->user_cmd.parse_cmd.expected_values_set =
722 cmd->expected_values_set;
723 ucmd->user_cmd.parse_cmd.expected_data_direction =
724 cmd->expected_data_direction;
725 ucmd->user_cmd.parse_cmd.expected_transfer_len =
726 cmd->expected_transfer_len;
727 ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
728 ucmd->state = UCMD_STATE_PARSING;
729 dev_user_add_to_ready(ucmd);
730 res = SCST_CMD_STATE_STOP;
739 if (cmd->data_direction != SCST_DATA_NONE)
740 res = dev_user_alloc_space(ucmd);
747 PRINT_ERROR("PARSE failed (ucmd %p, rc %d, invalid %d)", ucmd, rc,
748 cmd->op_flags & SCST_INFO_INVALID);
749 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
752 res = SCST_CMD_STATE_PRE_XMIT_RESP;
756 static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
758 struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
759 unsigned long start = buf_ucmd->ubuff;
767 for (i = 0; i < buf_ucmd->num_data_pages; i++) {
769 page = buf_ucmd->data_pages[i];
770 #ifdef ARCH_HAS_FLUSH_ANON_PAGE
771 struct vm_area_struct *vma = find_vma(current->mm, start);
773 flush_anon_page(vma, page, start);
775 flush_dcache_page(page);
784 static int dev_user_exec(struct scst_cmd *cmd)
786 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
787 int res = SCST_EXEC_COMPLETED;
791 #if 0 /* We set exec_atomic to 0 to let the SCST core know that we need a thread
792 * context to complete the necessary actions, but everything we actually
793 * do in this function is atomic, so let's skip this check.
795 if (scst_cmd_atomic(cmd)) {
796 TRACE_DBG("%s", "User exec() can not be called in atomic "
797 "context, rescheduling to the thread");
798 res = SCST_EXEC_NEED_THREAD;
803 TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
804 "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
805 cmd->bufflen, cmd->data_len, ucmd->ubuff);
807 if (cmd->data_direction == SCST_DATA_WRITE)
808 dev_user_flush_dcache(ucmd);
810 ucmd->user_cmd.cmd_h = ucmd->h;
811 ucmd->user_cmd.subcode = SCST_USER_EXEC;
812 ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
813 memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
814 min(sizeof(ucmd->user_cmd.exec_cmd.cdb),
816 ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
817 ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
818 ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
819 ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
820 if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
821 ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
822 (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
824 ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
825 ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
826 ucmd->user_cmd.exec_cmd.partial = 0;
827 ucmd->user_cmd.exec_cmd.timeout = cmd->timeout;
828 ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;
830 ucmd->state = UCMD_STATE_EXECING;
832 dev_user_add_to_ready(ucmd);
838 static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
840 if (ucmd->sgv != NULL) {
841 sgv_pool_free(ucmd->sgv);
843 } else if (ucmd->data_pages != NULL) {
844 /* We mapped pages, but for some reason didn't allocate them */
846 __dev_user_free_sg_entries(ucmd);
851 static void dev_user_on_free_cmd(struct scst_cmd *cmd)
853 struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
857 if (unlikely(ucmd == NULL))
860 TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
861 ucmd->buff_cached, ucmd->ubuff);
864 if ((cmd->data_direction == SCST_DATA_WRITE) && (ucmd->buf_ucmd != NULL))
865 ucmd->buf_ucmd->buf_dirty = 1;
867 if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
868 ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
869 /* The state assignment must be before freeing sgv! */
870 dev_user_free_sgv(ucmd);
875 ucmd->user_cmd.cmd_h = ucmd->h;
876 ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;
878 ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
879 ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
880 ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
881 ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
882 ucmd->user_cmd.on_free_cmd.status = cmd->status;
883 ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;
885 ucmd->state = UCMD_STATE_ON_FREEING;
887 dev_user_add_to_ready(ucmd);
894 static void dev_user_set_block(struct scst_cmd *cmd, int block)
896 struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
898 * No need for locks here, since *_detach() cannot be
899 * called while there are outstanding commands.
901 TRACE_DBG("dev %p, new block %d", dev, block);
905 dev->block = dev->def_block;
909 static int dev_user_disk_done(struct scst_cmd *cmd)
911 int res = SCST_CMD_STATE_DEFAULT;
915 res = scst_block_generic_dev_done(cmd, dev_user_set_block);
921 static int dev_user_tape_done(struct scst_cmd *cmd)
923 int res = SCST_CMD_STATE_DEFAULT;
927 res = scst_tape_generic_dev_done(cmd, dev_user_set_block);
933 static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
935 struct scst_user_dev *dev = ucmd->dev;
941 do_wake = (in_interrupt() ||
942 (ucmd->state == UCMD_STATE_ON_CACHE_FREEING));
944 do_wake |= ucmd->cmd->preprocessing_only;
946 EXTRACHECKS_BUG_ON(ucmd->state & UCMD_STATE_JAMMED_MASK);
948 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
950 /* Hopefully, the compiler will turn this into a single test/jmp */
951 if (unlikely(dev->attach_cmd_active || dev->tm_cmd_active ||
952 dev->internal_reset_active || dev->pre_unreg_sess_active ||
953 (dev->detach_cmd_count != 0))) {
954 switch (ucmd->state) {
955 case UCMD_STATE_PARSING:
956 case UCMD_STATE_BUF_ALLOCING:
957 case UCMD_STATE_EXECING:
958 if (dev->pre_unreg_sess_active &&
959 !(dev->attach_cmd_active || dev->tm_cmd_active ||
960 dev->internal_reset_active ||
961 (dev->detach_cmd_count != 0))) {
962 struct scst_user_pre_unreg_sess_obj *p, *found = NULL;
963 list_for_each_entry(p, &dev->pre_unreg_sess_list,
964 pre_unreg_sess_list_entry) {
965 if (p->tgt_dev == ucmd->cmd->tgt_dev) {
972 TRACE_MGMT_DBG("No pre unreg sess "
973 "active (ucmd %p)", ucmd);
976 TRACE_MGMT_DBG("Pre unreg sess %p "
977 "active (ucmd %p)", found, ucmd);
980 TRACE(TRACE_MGMT, "Mgmt cmd active, returning BUSY for "
982 dev_user_unjam_cmd(ucmd, 1, &flags);
983 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
988 if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
989 unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
990 unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
991 if (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE) {
992 TRACE_MGMT_DBG("Adding mgmt ucmd %p to prio ready cmd "
994 list_add_tail(&ucmd->ready_cmd_list_entry,
995 &dev->prio_ready_cmd_list);
996 wake_up(&dev->prio_cmd_list_waitQ);
999 TRACE_MGMT_DBG("Adding mgmt ucmd %p to ready cmd "
1001 list_add_tail(&ucmd->ready_cmd_list_entry,
1002 &dev->ready_cmd_list);
1005 } else if ((ucmd->cmd != NULL) &&
1006 unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
1007 TRACE_DBG("Adding ucmd %p to head ready cmd list", ucmd);
1008 list_add(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
1010 TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
1011 list_add_tail(&ucmd->ready_cmd_list_entry, &dev->ready_cmd_list);
1015 TRACE_DBG("Waking up dev %p", dev);
1016 wake_up(&dev->cmd_lists.cmd_list_waitQ);
1019 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
1026 static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
1034 if (unlikely(ubuff == 0))
1037 sBUG_ON(ucmd->data_pages != NULL);
1039 ucmd->num_data_pages = num_pg;
1041 ucmd->data_pages = kzalloc(sizeof(*ucmd->data_pages)*ucmd->num_data_pages,
1043 if (ucmd->data_pages == NULL) {
1044 TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
1045 "(num_data_pages=%d)", ucmd->num_data_pages);
1050 TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d, "
1051 "first_page_offset %d, len %d)", ucmd, ubuff,
1052 ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
1053 ucmd->cmd->bufflen);
1055 down_read(&current->mm->mmap_sem);
1056 rc = get_user_pages(current, current->mm, ubuff, ucmd->num_data_pages,
1057 1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
1058 up_read(&current->mm->mmap_sem);
1060 /* get_user_pages() flushes dcache */
1062 if (rc < ucmd->num_data_pages)
1065 ucmd->ubuff = ubuff;
1066 ucmd->first_page_offset = (ubuff & ~PAGE_MASK);
1069 TRACE_EXIT_RES(res);
1073 scst_set_busy(ucmd->cmd);
1077 ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
1081 PRINT_ERROR("Failed to get %d user pages (rc %d)",
1082 ucmd->num_data_pages, rc);
1084 for (i = 0; i < rc; i++)
1085 page_cache_release(ucmd->data_pages[i]);
1087 kfree(ucmd->data_pages);
1088 ucmd->data_pages = NULL;
1090 scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1094 static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
1095 struct scst_user_reply_cmd *reply)
1098 struct scst_cmd *cmd = ucmd->cmd;
1102 TRACE_DBG("ucmd %p, pbuf %Lx", ucmd, reply->alloc_reply.pbuf);
1104 if (likely(reply->alloc_reply.pbuf != 0)) {
1106 if (ucmd->buff_cached) {
1107 if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
1108 PRINT_ERROR("Supplied pbuf %Lx isn't "
1109 "page aligned", reply->alloc_reply.pbuf);
1112 pages = cmd->sg_cnt;
1114 pages = calc_num_pg(reply->alloc_reply.pbuf, cmd->bufflen);
1115 res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
1117 scst_set_busy(ucmd->cmd);
1118 ucmd->cmd->state = SCST_CMD_STATE_PRE_XMIT_RESP;
1122 scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);
1124 TRACE_EXIT_RES(res);
1128 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1133 static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
1134 struct scst_user_reply_cmd *reply)
1137 struct scst_user_scsi_cmd_reply_parse *preply =
1138 &reply->parse_reply;
1139 struct scst_cmd *cmd = ucmd->cmd;
1143 if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
1146 if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
1147 (preply->data_direction != SCST_DATA_READ) &&
1148 (preply->data_direction != SCST_DATA_NONE)))
1151 if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
1152 (preply->bufflen == 0)))
1155 if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
1158 TRACE_DBG("ucmd %p, queue_type %x, data_direction, %x, bufflen %d, "
1159 "data_len %d, pbuf %Lx", ucmd, preply->queue_type,
1160 preply->data_direction, preply->bufflen, preply->data_len,
1161 reply->alloc_reply.pbuf);
1163 cmd->queue_type = preply->queue_type;
1164 cmd->data_direction = preply->data_direction;
1165 cmd->bufflen = preply->bufflen;
1166 cmd->data_len = preply->data_len;
1169 scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT);
1171 TRACE_EXIT_RES(res);
1175 PRINT_ERROR("%s", "Invalid parse_reply parameter(s)");
1176 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1181 static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
1187 TRACE_DBG("ON FREE ucmd %p", ucmd);
1189 dev_user_free_sgv(ucmd);
1192 TRACE_EXIT_RES(res);
1196 static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
1202 TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);
1206 TRACE_EXIT_RES(res);
1210 static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
1211 struct scst_user_reply_cmd *reply)
1214 struct scst_user_scsi_cmd_reply_exec *ereply =
1216 struct scst_cmd *cmd = ucmd->cmd;
1220 if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
1221 if (ucmd->background_exec) {
1222 TRACE_DBG("Background ucmd %p finished", ucmd);
1226 if (unlikely(ereply->resp_data_len > cmd->bufflen))
1228 if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
1229 (ereply->resp_data_len != 0)))
1231 } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
1232 if (unlikely(ucmd->background_exec))
1234 if (unlikely((cmd->data_direction == SCST_DATA_READ) ||
1235 (cmd->resp_data_len != 0)))
1238 ucmd->background_exec = 1;
1239 TRACE_DBG("Background ucmd %p", ucmd);
1244 TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
1245 ereply->status, ereply->resp_data_len);
1247 if (ereply->resp_data_len != 0) {
1248 if (ucmd->ubuff == 0) {
1250 if (unlikely(ereply->pbuf == 0))
1252 if (ucmd->buff_cached) {
1253 if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
1254 PRINT_ERROR("Supplied pbuf %Lx isn't "
1255 "page aligned", ereply->pbuf);
1258 pages = cmd->sg_cnt;
1260 pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
1261 rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
1262 if ((rc != 0) || (ucmd->ubuff == 0))
1265 rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
1266 if (unlikely(rc != 0))
1269 dev_user_flush_dcache(ucmd);
1270 cmd->may_need_dma_sync = 1;
1271 scst_set_resp_data_len(cmd, ereply->resp_data_len);
1272 } else if (cmd->resp_data_len != ereply->resp_data_len) {
1273 if (ucmd->ubuff == 0)
1274 cmd->resp_data_len = ereply->resp_data_len;
1276 scst_set_resp_data_len(cmd, ereply->resp_data_len);
1279 cmd->status = ereply->status;
1280 if (ereply->sense_len != 0) {
1281 res = scst_alloc_sense(cmd, 0);
1284 res = copy_from_user(cmd->sense,
1285 (void *)(unsigned long)ereply->psense_buffer,
1286 min((unsigned int)SCST_SENSE_BUFFERSIZE,
1287 (unsigned int)ereply->sense_len));
1289 PRINT_ERROR("%s", "Unable to get sense data");
1290 goto out_hwerr_res_set;
1296 cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT);
1297 /* !! At this point cmd may already be freed !! */
1300 TRACE_EXIT_RES(res);
1304 PRINT_ERROR("%s", "Invalid exec_reply parameter(s)");
1310 if (ucmd->background_exec) {
1314 scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
1323 static int dev_user_process_reply(struct scst_user_dev *dev,
1324 struct scst_user_reply_cmd *reply)
1327 struct scst_user_cmd *ucmd;
1332 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1334 ucmd = __ucmd_find_hash(dev, reply->cmd_h);
1336 TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
1341 if (ucmd->background_exec) {
1342 state = UCMD_STATE_EXECING;
1343 goto unlock_process;
1346 if (unlikely(!(ucmd->state & UCMD_STATE_SENT_MASK))) {
1347 if (ucmd->state & UCMD_STATE_JAMMED_MASK) {
1348 TRACE_MGMT_DBG("Reply on jammed ucmd %p, ignoring",
1351 TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
1352 "state %x", ucmd, ucmd->state);
1358 if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
1359 goto out_wrong_state;
1361 if (unlikely(_IOC_NR(reply->subcode) !=
1362 (ucmd->state & ~UCMD_STATE_SENT_MASK)))
1363 goto out_wrong_state;
1365 ucmd->state &= ~UCMD_STATE_SENT_MASK;
1366 state = ucmd->state;
1367 ucmd->state |= UCMD_STATE_RECV_MASK;
1370 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1373 case UCMD_STATE_PARSING:
1374 res = dev_user_process_reply_parse(ucmd, reply);
1377 case UCMD_STATE_BUF_ALLOCING:
1378 res = dev_user_process_reply_alloc(ucmd, reply);
1381 case UCMD_STATE_EXECING:
1382 res = dev_user_process_reply_exec(ucmd, reply);
1385 case UCMD_STATE_ON_FREEING:
1386 res = dev_user_process_reply_on_free(ucmd);
1389 case UCMD_STATE_ON_CACHE_FREEING:
1390 res = dev_user_process_reply_on_cache_free(ucmd);
1393 case UCMD_STATE_TM_EXECING:
1394 res = dev_user_process_reply_tm_exec(ucmd, reply->result);
1397 case UCMD_STATE_ATTACH_SESS:
1398 case UCMD_STATE_DETACH_SESS:
1399 res = dev_user_process_reply_sess(ucmd, reply->result);
1407 TRACE_EXIT_RES(res);
1411 PRINT_ERROR("Command's %p subcode %x doesn't match internal "
1412 "command's state %x or reply->subcode (%x) != ucmd->subcode "
1413 "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
1414 reply->subcode, ucmd->user_cmd.subcode);
1416 dev_user_unjam_cmd(ucmd, 0, NULL);
1419 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1423 static int dev_user_reply_cmd(struct file *file, unsigned long arg)
1426 struct scst_user_dev *dev;
1427 struct scst_user_reply_cmd *reply;
1431 mutex_lock(&dev_priv_mutex);
1432 dev = (struct scst_user_dev *)file->private_data;
1433 res = dev_user_check_reg(dev);
1435 mutex_unlock(&dev_priv_mutex);
1438 down_read(&dev->dev_rwsem);
1439 mutex_unlock(&dev_priv_mutex);
1441 reply = kzalloc(sizeof(*reply), GFP_KERNEL);
1442 if (reply == NULL) {
1447 res = copy_from_user(reply, (void *)arg, sizeof(*reply));
1451 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1453 res = dev_user_process_reply(dev, reply);
1461 up_read(&dev->dev_rwsem);
1464 TRACE_EXIT_RES(res);
1468 static int dev_user_process_scst_commands(struct scst_user_dev *dev)
1474 while (!list_empty(&dev->cmd_lists.active_cmd_list)) {
1475 struct scst_cmd *cmd = list_entry(
1476 dev->cmd_lists.active_cmd_list.next, typeof(*cmd),
1478 TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
1479 list_del(&cmd->cmd_list_entry);
1480 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1481 scst_process_active_cmd(cmd, SCST_CONTEXT_DIRECT |
1482 SCST_CONTEXT_PROCESSABLE);
1483 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1487 TRACE_EXIT_RES(res);
1491 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1492 struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
1494 struct scst_user_cmd *u;
1498 if (!list_empty(cmd_list)) {
1499 u = list_entry(cmd_list->next, typeof(*u), ready_cmd_list_entry);
1501 TRACE_DBG("Found ready ucmd %p", u);
1502 list_del(&u->ready_cmd_list_entry);
1504 EXTRACHECKS_BUG_ON(u->state & UCMD_STATE_JAMMED_MASK);
1506 if (u->cmd != NULL) {
1507 if (u->state == UCMD_STATE_EXECING) {
1508 struct scst_user_dev *dev = u->dev;
1510 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1511 rc = scst_check_local_events(u->cmd);
1512 if (unlikely(rc != 0)) {
1513 u->cmd->scst_cmd_done(u->cmd,
1514 SCST_CMD_STATE_DEFAULT);
1516 * !! At this point cmd and u may !!
1517 * !! already be freed !!
1520 &dev->cmd_lists.cmd_list_lock);
1524 * There is no real need to lock again here, but
1525 * let's do it for simplicity.
1527 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1528 } else if (unlikely(test_bit(SCST_CMD_ABORTED,
1529 &u->cmd->cmd_flags))) {
1531 case UCMD_STATE_PARSING:
1532 case UCMD_STATE_BUF_ALLOCING:
1533 TRACE_MGMT_DBG("Aborting ucmd %p", u);
1534 dev_user_unjam_cmd(u, 0, NULL);
1536 case UCMD_STATE_EXECING:
1537 EXTRACHECKS_BUG_ON(1);
1541 u->state |= UCMD_STATE_SENT_MASK;
1546 static inline int test_cmd_lists(struct scst_user_dev *dev)
1548 int res = !list_empty(&dev->cmd_lists.active_cmd_list) ||
1549 !list_empty(&dev->ready_cmd_list) ||
1550 !dev->blocking || dev->cleanup_done ||
1551 signal_pending(current);
1555 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1556 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
1557 struct scst_user_cmd **ucmd)
1564 init_waitqueue_entry(&wait, current);
1567 if (!test_cmd_lists(dev)) {
1568 add_wait_queue_exclusive(&dev->cmd_lists.cmd_list_waitQ,
1571 set_current_state(TASK_INTERRUPTIBLE);
1572 if (test_cmd_lists(dev))
1574 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1576 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1578 set_current_state(TASK_RUNNING);
1579 remove_wait_queue(&dev->cmd_lists.cmd_list_waitQ,
1583 dev_user_process_scst_commands(dev);
1585 *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
1589 if (!dev->blocking || dev->cleanup_done) {
1591 TRACE_DBG("No ready commands, returning %d", res);
1595 if (signal_pending(current)) {
1597 TRACE_DBG("Signal pending, returning %d", res);
1602 TRACE_EXIT_RES(res);
1606 static inline int test_prio_cmd_list(struct scst_user_dev *dev)
1609 * The prio queue is always blocking, because poll() doesn't seem to
1610 * support different threads waiting with different event masks. Only
1611 * one thread is woken up on each event, and if it isn't interested in
1612 * such events, another (interested) one will not be woken up. It isn't
1613 * clear whether that is a bug or a feature.
1615 int res = !list_empty(&dev->prio_ready_cmd_list) ||
1616 dev->cleaning || dev->cleanup_done ||
1617 signal_pending(current);
1621 /* Called under cmd_lists.cmd_list_lock and IRQ off */
1622 static int dev_user_get_next_prio_cmd(struct scst_user_dev *dev,
1623 struct scst_user_cmd **ucmd)
1630 init_waitqueue_entry(&wait, current);
1633 if (!test_prio_cmd_list(dev)) {
1634 add_wait_queue_exclusive(&dev->prio_cmd_list_waitQ,
1637 set_current_state(TASK_INTERRUPTIBLE);
1638 if (test_prio_cmd_list(dev))
1640 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1642 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1644 set_current_state(TASK_RUNNING);
1645 remove_wait_queue(&dev->prio_cmd_list_waitQ, &wait);
1648 *ucmd = __dev_user_get_next_cmd(&dev->prio_ready_cmd_list);
1652 if (dev->cleaning || dev->cleanup_done) {
1654 TRACE_DBG("No ready commands, returning %d", res);
1658 if (signal_pending(current)) {
1660 TRACE_DBG("Signal pending, returning %d", res);
1665 TRACE_EXIT_RES(res);
1669 static int dev_user_reply_get_cmd(struct file *file, unsigned long arg,
1673 struct scst_user_dev *dev;
1674 struct scst_user_get_cmd *cmd;
1675 struct scst_user_reply_cmd *reply;
1676 struct scst_user_cmd *ucmd;
1681 mutex_lock(&dev_priv_mutex);
1682 dev = (struct scst_user_dev *)file->private_data;
1683 res = dev_user_check_reg(dev);
1685 mutex_unlock(&dev_priv_mutex);
1688 down_read(&dev->dev_rwsem);
1689 mutex_unlock(&dev_priv_mutex);
1691 res = copy_from_user(&ureply, (void *)arg, sizeof(ureply));
1695 TRACE_DBG("ureply %Ld", ureply);
1697 cmd = kzalloc(max(sizeof(*cmd), sizeof(*reply)), GFP_KERNEL);
1704 unsigned long u = (unsigned long)ureply;
1705 reply = (struct scst_user_reply_cmd *)cmd;
1706 res = copy_from_user(reply, (void *)u, sizeof(*reply));
1710 TRACE_BUFFER("Reply", reply, sizeof(*reply));
1712 res = dev_user_process_reply(dev, reply);
1717 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1718 if (prio && (dev->prio_queue_type == SCST_USER_PRIO_QUEUE_SEPARATE))
1719 res = dev_user_get_next_prio_cmd(dev, &ucmd);
1721 res = dev_user_get_next_cmd(dev, &ucmd);
1723 *cmd = ucmd->user_cmd;
1724 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1725 TRACE_BUFFER("UCMD", cmd, sizeof(*cmd));
1726 res = copy_to_user((void *)arg, cmd, sizeof(*cmd));
1728 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1734 up_read(&dev->dev_rwsem);
1737 TRACE_EXIT_RES(res);
1741 static long dev_user_ioctl(struct file *file, unsigned int cmd,
1749 case SCST_USER_REPLY_AND_GET_CMD:
1750 TRACE_DBG("%s", "REPLY_AND_GET_CMD");
1751 res = dev_user_reply_get_cmd(file, arg, 0);
1754 case SCST_USER_REPLY_CMD:
1755 TRACE_DBG("%s", "REPLY_CMD");
1756 res = dev_user_reply_cmd(file, arg);
1759 case SCST_USER_REPLY_AND_GET_PRIO_CMD:
1760 TRACE_DBG("%s", "REPLY_AND_GET_PRIO_CMD");
1761 res = dev_user_reply_get_cmd(file, arg, 1);
1764 case SCST_USER_REGISTER_DEVICE:
1766 struct scst_user_dev_desc *dev_desc;
1767 TRACE_DBG("%s", "REGISTER_DEVICE");
1768 dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
1769 if (dev_desc == NULL) {
1773 res = copy_from_user(dev_desc, (void *)arg, sizeof(*dev_desc));
1778 TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
1779 dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
1780 res = dev_user_register_dev(file, dev_desc);
1785 case SCST_USER_SET_OPTIONS:
1787 struct scst_user_opt opt;
1788 TRACE_DBG("%s", "SET_OPTIONS");
1789 res = copy_from_user(&opt, (void *)arg, sizeof(opt));
1792 TRACE_BUFFER("opt", &opt, sizeof(opt));
1793 res = dev_user_set_opt(file, &opt);
1797 case SCST_USER_GET_OPTIONS:
1798 TRACE_DBG("%s", "GET_OPTIONS");
1799 res = dev_user_get_opt(file, (void *)arg);
1803 PRINT_ERROR("Invalid ioctl cmd %x", cmd);
1809 TRACE_EXIT_RES(res);
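/*
 * A hypothetical user space usage sketch of the ioctl interface dispatched
 * above (illustrative only: the device node name and the names fd, dev_desc,
 * cmd and the reply_*() helpers are assumptions; the subcodes and structures
 * are the ones used in this file). Each SCST_USER_REPLY_AND_GET_CMD call,
 * except the first, also carries the reply for the previously received
 * command:
 *
 *	fd = open("/dev/scst_user", O_RDWR);
 *	ioctl(fd, SCST_USER_REGISTER_DEVICE, &dev_desc);
 *	for (;;) {
 *		ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &cmd);
 *		switch (cmd.subcode) {
 *		case SCST_USER_PARSE:		reply_parse(&cmd); break;
 *		case SCST_USER_ALLOC_MEM:	reply_alloc(&cmd); break;
 *		case SCST_USER_EXEC:		reply_exec(&cmd);  break;
 *		case SCST_USER_ON_FREE_CMD:	reply_free(&cmd);  break;
 *		case SCST_USER_TASK_MGMT:	reply_tm(&cmd);    break;
 *		}
 *	}
 */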
1813 static unsigned int dev_user_poll(struct file *file, poll_table *wait)
1816 struct scst_user_dev *dev;
1820 mutex_lock(&dev_priv_mutex);
1821 dev = (struct scst_user_dev *)file->private_data;
1822 res = dev_user_check_reg(dev);
1824 mutex_unlock(&dev_priv_mutex);
1827 down_read(&dev->dev_rwsem);
1828 mutex_unlock(&dev_priv_mutex);
1830 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1832 if (!list_empty(&dev->ready_cmd_list) ||
1833 !list_empty(&dev->cmd_lists.active_cmd_list)) {
1834 res |= POLLIN | POLLRDNORM;
1838 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1840 TRACE_DBG("Before poll_wait() (dev %p)", dev);
1841 poll_wait(file, &dev->cmd_lists.cmd_list_waitQ, wait);
1842 TRACE_DBG("After poll_wait() (dev %p)", dev);
1844 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1846 if (!list_empty(&dev->ready_cmd_list) ||
1847 !list_empty(&dev->cmd_lists.active_cmd_list)) {
1848 res |= POLLIN | POLLRDNORM;
1853 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1855 up_read(&dev->dev_rwsem);
1858 TRACE_EXIT_HRES(res);
1863 * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reacquire it.
1865 static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
1866 unsigned long *flags)
1868 int state = ucmd->state & ~UCMD_STATE_MASK;
1869 struct scst_user_dev *dev = ucmd->dev;
1873 if (ucmd->state & UCMD_STATE_JAMMED_MASK)
1876 TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
1879 ucmd->state = state | UCMD_STATE_JAMMED_MASK;
1882 case UCMD_STATE_PARSING:
1883 case UCMD_STATE_BUF_ALLOCING:
1884 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1888 scst_set_busy(ucmd->cmd);
1890 scst_set_cmd_error(ucmd->cmd,
1891 SCST_LOAD_SENSE(scst_sense_hardw_error));
1893 TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
1894 list_add(&ucmd->cmd->cmd_list_entry,
1895 &ucmd->cmd->cmd_lists->active_cmd_list);
1896 wake_up(&ucmd->dev->cmd_lists.cmd_list_waitQ);
1899 case UCMD_STATE_EXECING:
1901 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1903 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1905 TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
1907 if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
1911 scst_set_busy(ucmd->cmd);
1913 scst_set_cmd_error(ucmd->cmd,
1914 SCST_LOAD_SENSE(scst_sense_hardw_error));
1917 ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT);
1918 /* !! At this point cmd and ucmd may already be freed !! */
1921 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1923 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1926 case UCMD_STATE_ON_FREEING:
1927 case UCMD_STATE_ON_CACHE_FREEING:
1928 case UCMD_STATE_TM_EXECING:
1929 case UCMD_STATE_ATTACH_SESS:
1930 case UCMD_STATE_DETACH_SESS:
1933 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, *flags);
1935 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
1938 case UCMD_STATE_ON_FREEING:
1939 dev_user_process_reply_on_free(ucmd);
1942 case UCMD_STATE_ON_CACHE_FREEING:
1943 dev_user_process_reply_on_cache_free(ucmd);
1946 case UCMD_STATE_TM_EXECING:
1947 dev_user_process_reply_tm_exec(ucmd, SCST_MGMT_STATUS_FAILED);
1950 case UCMD_STATE_ATTACH_SESS:
1951 case UCMD_STATE_DETACH_SESS:
1952 dev_user_process_reply_sess(ucmd, -EFAULT);
1957 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, *flags);
1959 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
1964 PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
1974 static int __unjam_check_tgt_dev(struct scst_user_cmd *ucmd, int state,
1975 struct scst_tgt_dev *tgt_dev)
1979 if (ucmd->cmd == NULL)
1982 if (ucmd->cmd->tgt_dev != tgt_dev)
1985 switch (state & ~UCMD_STATE_MASK) {
1986 case UCMD_STATE_PARSING:
1987 case UCMD_STATE_BUF_ALLOCING:
1988 case UCMD_STATE_EXECING:
1999 static int __unjam_check_tm(struct scst_user_cmd *ucmd, int state)
2003 switch (state & ~UCMD_STATE_MASK) {
2004 case UCMD_STATE_PARSING:
2005 case UCMD_STATE_BUF_ALLOCING:
2006 case UCMD_STATE_EXECING:
2007 if ((ucmd->cmd != NULL) &&
2008 (!test_bit(SCST_CMD_ABORTED,
2009 &ucmd->cmd->cmd_flags)))
2021 static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
2022 struct scst_tgt_dev *tgt_dev)
2025 unsigned long flags;
2026 struct scst_user_cmd *ucmd;
2030 TRACE_MGMT_DBG("Unjamming dev %p", dev);
2032 spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
2035 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
2036 struct list_head *head = &dev->ucmd_hash[i];
2037 list_for_each_entry(ucmd, head, hash_list_entry) {
2038 TRACE_DBG("ALL: ucmd %p, state %x, scst_cmd %p",
2039 ucmd, ucmd->state, ucmd->cmd);
2040 if (ucmd->state & UCMD_STATE_SENT_MASK) {
2041 int st = ucmd->state & ~UCMD_STATE_SENT_MASK;
2042 if (tgt_dev != NULL) {
2043 if (__unjam_check_tgt_dev(ucmd, st,
2047 if (__unjam_check_tm(ucmd, st) == 0)
2050 dev_user_unjam_cmd(ucmd, 0, &flags);
2056 if ((tgt_dev != NULL) || tm) {
2057 list_for_each_entry(ucmd, &dev->ready_cmd_list,
2058 ready_cmd_list_entry) {
2059 TRACE_DBG("READY: ucmd %p, state %x, scst_cmd %p",
2060 ucmd, ucmd->state, ucmd->cmd);
2061 if (tgt_dev != NULL) {
2062 if (__unjam_check_tgt_dev(ucmd, ucmd->state,
2066 if (__unjam_check_tm(ucmd, ucmd->state) == 0)
2069 list_del(&ucmd->ready_cmd_list_entry);
2070 dev_user_unjam_cmd(ucmd, 0, &flags);
2075 if (dev_user_process_scst_commands(dev) != 0)
2078 spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
2085 ** In order to deal with user space handler hangups we rely on the remote
2086 ** initiators, which are supposed to issue a task management command when a
2087 ** command doesn't complete for too long; on that event we can "unjam" the
2088 ** command. In order to prevent the TM command itself from stalling, we
2089 ** use a timer. In order to prevent too many queued TM commands, we
2090 ** enqueue only 2 of them: the first one with the requested TM function,
2091 ** the second with TARGET_RESET as the most comprehensive function.
2093 ** The only exception here is the DETACH_SESS subcode, where no TM
2094 ** commands can be expected, so after a timeout we need to manually "unjam"
2095 ** all the commands on the device.
2097 ** We also don't queue more than 1 ATTACH_SESS command and fail it after a timeout.
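**
** A minimal, purely illustrative sketch of the user space side (hypothetical
** handler code; fd and the surrounding loop are assumptions, the structures
** scst_user_get_cmd/scst_user_reply_cmd and the subcode are the ones used in
** this file). The handler must not wait for the affected commands and should
** reply well within DEV_USER_TM_TIMEOUT:
**
**	case SCST_USER_TASK_MGMT:
**		reply.cmd_h = cmd.cmd_h;
**		reply.subcode = SCST_USER_TASK_MGMT;
**		reply.result = 0;	/* i.e. success */
**		ioctl(fd, SCST_USER_REPLY_CMD, &reply);
**		break;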
2100 static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
2104 unsigned long flags;
2108 TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
2109 ucmd->user_cmd.tm_cmd.fn, status);
2111 ucmd->result = status;
2113 spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2115 if (ucmd->internal_reset_tm) {
2116 TRACE_MGMT_DBG("Internal TM ucmd %p finished", ucmd);
2117 ucmd->dev->internal_reset_active = 0;
2119 TRACE_MGMT_DBG("TM ucmd %p finished", ucmd);
2120 ucmd->dev->tm_cmd_active = 0;
2123 if (ucmd->cmpl != NULL)
2124 complete_all(ucmd->cmpl);
2126 spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2130 TRACE_EXIT_RES(res);
2134 static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
2135 struct scst_tgt_dev *tgt_dev)
2138 struct scst_user_cmd *ucmd;
2139 struct scst_user_dev *dev = (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2140 struct scst_user_cmd *ucmd_to_abort = NULL;
2144 /* We can't afford to miss a TM command due to memory shortage */
2145 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
2146 ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL|__GFP_NOFAIL);
2148 init_completion(ucmd->cmpl);
2150 ucmd->user_cmd.cmd_h = ucmd->h;
2151 ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
2152 ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
2153 ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
2154 ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
2155 ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
2157 if (mcmd->cmd_to_abort != NULL) {
2158 ucmd_to_abort = (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
2159 if (ucmd_to_abort != NULL)
2160 ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
2163 TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
2164 "ucmd_to_abort %p, cmd_h_to_abort %d)", ucmd, ucmd->h,
2165 mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
2166 ucmd->user_cmd.tm_cmd.cmd_h_to_abort);
2168 ucmd->state = UCMD_STATE_TM_EXECING;
2170 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2171 if (dev->internal_reset_active) {
2172 PRINT_ERROR("Loosing TM cmd %d, because there are other "
2173 "unprocessed TM commands", mcmd->fn);
2174 res = SCST_MGMT_STATUS_FAILED;
2175 goto out_locked_free;
2176 } else if (dev->tm_cmd_active) {
2178 * We are going to miss some TM commands, so replace this one
2179 * with the most comprehensive one.
2181 PRINT_ERROR("Replacing TM cmd %d by TARGET_RESET, because "
2182 "there is another unprocessed TM command", mcmd->fn);
2183 ucmd->user_cmd.tm_cmd.fn = SCST_TARGET_RESET;
2184 ucmd->internal_reset_tm = 1;
2185 dev->internal_reset_active = 1;
2187 dev->tm_cmd_active = 1;
2188 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2191 dev_user_add_to_ready(ucmd);
2194 * Since the user space handler should not wait for the affected tasks to
2195 * complete, it shall complete the TM request ASAP; otherwise the device
2196 * will be considered stalled.
2198 rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_TM_TIMEOUT);
2202 PRINT_ERROR("Task management command %p timeout", ucmd);
2203 res = SCST_MGMT_STATUS_FAILED;
2206 sBUG_ON(irqs_disabled());
2208 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2213 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2215 dev_user_unjam_dev(ucmd->dev, 1, NULL);
2223 static int dev_user_attach(struct scst_device *sdev)
2226 struct scst_user_dev *dev = NULL, *d;
2230 spin_lock(&dev_list_lock);
2231 list_for_each_entry(d, &dev_list, dev_list_entry) {
2232 if (strcmp(d->name, sdev->virt_name) == 0) {
2237 spin_unlock(&dev_list_lock);
2239 PRINT_ERROR("Device %s not found", sdev->virt_name);
2244 sdev->p_cmd_lists = &dev->cmd_lists;
2245 sdev->dh_priv = dev;
2246 sdev->tst = dev->tst;
2247 sdev->queue_alg = dev->queue_alg;
2248 sdev->swp = dev->swp;
2249 sdev->tas = dev->tas;
2250 sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
2254 PRINT_INFO("Attached user space SCSI target virtual device \"%s\"",
2262 static void dev_user_detach(struct scst_device *sdev)
2264 struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
2268 TRACE_DBG("virt_id %d", sdev->virt_id);
2270 PRINT_INFO("Detached user space SCSI target virtual device \"%s\"",
2273 /* dev will be freed by the caller */
2274 sdev->dh_priv = NULL;
2281 static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
2284 unsigned long flags;
2288 TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
2290 spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2292 if ((ucmd->state & ~UCMD_STATE_MASK) ==
2293 UCMD_STATE_ATTACH_SESS) {
2294 TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
2295 ucmd->result = status;
2296 ucmd->dev->attach_cmd_active = 0;
2297 } else if ((ucmd->state & ~UCMD_STATE_MASK) ==
2298 UCMD_STATE_DETACH_SESS) {
2299 TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
2300 ucmd->dev->detach_cmd_count--;
2304 if (ucmd->cmpl != NULL)
2305 complete_all(ucmd->cmpl);
2307 spin_unlock_irqrestore(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
2311 TRACE_EXIT_RES(res);
2315 static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
2317 struct scst_user_dev *dev =
2318 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2320 struct scst_user_cmd *ucmd;
2324 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2328 ucmd->cmpl = kmalloc(sizeof(*ucmd->cmpl), GFP_KERNEL);
2329 if (ucmd->cmpl == NULL)
2332 init_completion(ucmd->cmpl);
2334 ucmd->user_cmd.cmd_h = ucmd->h;
2335 ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
2336 ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2337 ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
2338 ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
2339 ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only_flag;
2340 strncpy(ucmd->user_cmd.sess.initiator_name,
2341 tgt_dev->sess->initiator_name,
2342 sizeof(ucmd->user_cmd.sess.initiator_name)-1);
2343 ucmd->user_cmd.sess.initiator_name[
2344 sizeof(ucmd->user_cmd.sess.initiator_name)-1] = '\0';
2346 TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %Lx, LUN %Lx, "
2347 "threads_num %d, rd_only_flag %d, initiator %s)", ucmd, ucmd->h,
2348 ucmd->user_cmd.sess.sess_h, ucmd->user_cmd.sess.lun,
2349 ucmd->user_cmd.sess.threads_num, ucmd->user_cmd.sess.rd_only,
2350 ucmd->user_cmd.sess.initiator_name);
2352 ucmd->state = UCMD_STATE_ATTACH_SESS;
2354 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2355 if (dev->attach_cmd_active) {
2356 PRINT_ERROR("%s", "ATTACH_SESS command failed, because "
2357 "there is another unprocessed ATTACH_SESS command");
2359 goto out_locked_free;
2361 dev->attach_cmd_active = 1;
2362 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2365 dev_user_add_to_ready(ucmd);
2367 rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
2371 PRINT_ERROR("%s", "ATTACH_SESS command timeout");
2375 sBUG_ON(irqs_disabled());
2377 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2381 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2386 TRACE_EXIT_RES(res);
2397 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
2398 static void dev_user_pre_unreg_sess_work_fn(void *p)
2400 static void dev_user_pre_unreg_sess_work_fn(struct work_struct *work)
2403 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
2404 struct scst_user_pre_unreg_sess_obj *pd = (struct scst_user_pre_unreg_sess_obj *)p;
2406 struct scst_user_pre_unreg_sess_obj *pd = container_of(
2407 (struct delayed_work *)work, struct scst_user_pre_unreg_sess_obj,
2408 pre_unreg_sess_work);
2410 struct scst_user_dev *dev =
2411 (struct scst_user_dev *)pd->tgt_dev->dev->dh_priv;
2415 TRACE_MGMT_DBG("Unreg sess: unjaming dev %p (tgt_dev %p)", dev,
2420 dev_user_unjam_dev(dev, 0, pd->tgt_dev);
2423 TRACE_MGMT_DBG("Rescheduling pre_unreg_sess work %p (dev %p, "
2424 "tgt_dev %p)", pd, dev, pd->tgt_dev);
2425 schedule_delayed_work(&pd->pre_unreg_sess_work,
2426 DEV_USER_PRE_UNREG_POLL_TIME);
2433 static void dev_user_pre_unreg_sess(struct scst_tgt_dev *tgt_dev)
2435 struct scst_user_dev *dev =
2436 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2437 struct scst_user_pre_unreg_sess_obj *pd;
2441 /* We can't afford to miss a DETACH command due to memory shortage */
2442 pd = kzalloc(sizeof(*pd), GFP_KERNEL|__GFP_NOFAIL);
2444 pd->tgt_dev = tgt_dev;
2445 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
2446 INIT_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn, pd);
2448 INIT_DELAYED_WORK(&pd->pre_unreg_sess_work, dev_user_pre_unreg_sess_work_fn);
2451 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2452 dev->pre_unreg_sess_active = 1;
2453 list_add_tail(&pd->pre_unreg_sess_list_entry, &dev->pre_unreg_sess_list);
2454 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2456 TRACE_MGMT_DBG("Scheduling pre_unreg_sess work %p (dev %p, tgt_dev %p)",
2457 pd, dev, pd->tgt_dev);
2459 schedule_delayed_work(&pd->pre_unreg_sess_work, DEV_USER_DETACH_TIMEOUT);
2465 static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
2467 struct scst_user_dev *dev =
2468 (struct scst_user_dev *)tgt_dev->dev->dh_priv;
2469 struct scst_user_cmd *ucmd;
2470 struct scst_user_pre_unreg_sess_obj *pd = NULL, *p;
2474 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2475 list_for_each_entry(p, &dev->pre_unreg_sess_list,
2476 pre_unreg_sess_list_entry) {
2477 if (p->tgt_dev == tgt_dev) {
2478 list_del(&p->pre_unreg_sess_list_entry);
2479 if (list_empty(&dev->pre_unreg_sess_list))
2480 dev->pre_unreg_sess_active = 0;
2485 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2489 TRACE_MGMT_DBG("Canceling pre unreg work %p", pd);
2490 cancel_delayed_work(&pd->pre_unreg_sess_work);
2491 flush_scheduled_work();
2495 ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
2499 TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %Lx)", ucmd,
2500 ucmd->h, ucmd->user_cmd.sess.sess_h);
2502 ucmd->user_cmd.cmd_h = ucmd->h;
2503 ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
2504 ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
2506 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2507 dev->detach_cmd_count++;
2508 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
2510 ucmd->state = UCMD_STATE_DETACH_SESS;
2512 dev_user_add_to_ready(ucmd);
2519 /* No locks are needed, but the activity must be suspended */
2520 static void dev_user_setup_functions(struct scst_user_dev *dev)
2524 dev->devtype.parse = dev_user_parse;
2525 dev->devtype.dev_done = NULL;
2527 if (dev->parse_type != SCST_USER_PARSE_CALL) {
2528 switch (dev->devtype.type) {
2530 dev->generic_parse = scst_sbc_generic_parse;
2531 dev->devtype.dev_done = dev_user_disk_done;
2535 dev->generic_parse = scst_tape_generic_parse;
2536 dev->devtype.dev_done = dev_user_tape_done;
2540 dev->generic_parse = scst_modisk_generic_parse;
2541 dev->devtype.dev_done = dev_user_disk_done;
2545 dev->generic_parse = scst_cdrom_generic_parse;
2546 dev->devtype.dev_done = dev_user_disk_done;
2549 case TYPE_MEDIUM_CHANGER:
2550 dev->generic_parse = scst_changer_generic_parse;
2553 case TYPE_PROCESSOR:
2554 dev->generic_parse = scst_processor_generic_parse;
2558 dev->generic_parse = scst_raid_generic_parse;
2562 PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
2563 "for it", dev->devtype.type);
2564 dev->parse_type = SCST_USER_PARSE_CALL;
2568 dev->generic_parse = NULL;
2569 dev->devtype.dev_done = NULL;
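/*
 * Compare the version string supplied by the user space handler with this
 * module's DEV_USER_VERSION, so an incompatible handler is rejected at
 * registration time.
 */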
2576 static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
2578 char ver[sizeof(DEV_USER_VERSION)+1];
2581 res = copy_from_user(ver, (void *)(unsigned long)dev_desc->version_str, sizeof(ver));
2584 PRINT_ERROR("%s", "Unable to get version string");
2587 ver[sizeof(ver)-1] = '\0';
2589 if (strcmp(ver, DEV_USER_VERSION) != 0) {
2590 /* ->name already 0-terminated in dev_user_ioctl() */
2591 PRINT_ERROR("Incorrect version of user device %s (%s)",
2592 dev_desc->name, ver);
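/*
 * Register a new virtual user space device: check the descriptor, allocate
 * and initialize struct scst_user_dev together with its SGV pool, register
 * the per-device handler and the virtual device with the SCST core, and
 * attach the result to the file handle.
 *
 * A minimal user space sketch of this registration step (illustrative
 * only; error handling and option setup are omitted, and the ioctl request
 * name and device node path are assumptions - the authoritative
 * definitions live in scst_user.h and the user space documentation):
 *
 *	struct scst_user_dev_desc desc;
 *	int fd = open("/dev/scst_user", O_RDWR);
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.version_str = (unsigned long)DEV_USER_VERSION;
 *	desc.type = TYPE_DISK;
 *	desc.block_size = 512;
 *	strncpy(desc.name, "disk1", sizeof(desc.name) - 1);
 *	if (ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc) != 0)
 *		perror("SCST_USER_REGISTER_DEVICE");
 */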
2601 static int dev_user_register_dev(struct file *file,
2602 const struct scst_user_dev_desc *dev_desc)
2604 int res = -ENOMEM, i;
2605 struct scst_user_dev *dev, *d;
2610 res = dev_user_check_version(dev_desc);
2614 switch (dev_desc->type) {
2618 if (dev_desc->block_size == 0) {
2619 PRINT_ERROR("Wrong block size %d", dev_desc->block_size);
2623 block = scst_calc_block_shift(dev_desc->block_size);
2630 block = dev_desc->block_size;
2634 if (!try_module_get(THIS_MODULE)) {
2635 PRINT_ERROR("%s", "Failed to get module");
2639 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2643 init_rwsem(&dev->dev_rwsem);
2644 spin_lock_init(&dev->cmd_lists.cmd_list_lock);
2645 INIT_LIST_HEAD(&dev->cmd_lists.active_cmd_list);
2646 init_waitqueue_head(&dev->cmd_lists.cmd_list_waitQ);
2647 INIT_LIST_HEAD(&dev->ready_cmd_list);
2648 INIT_LIST_HEAD(&dev->prio_ready_cmd_list);
2649 init_waitqueue_head(&dev->prio_cmd_list_waitQ);
2650 if (file->f_flags & O_NONBLOCK) {
2651 TRACE_DBG("%s", "Non-blocking operations");
2655 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
2656 INIT_LIST_HEAD(&dev->ucmd_hash[i]);
2657 INIT_LIST_HEAD(&dev->pre_unreg_sess_list);
2659 strncpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
2660 dev->name[sizeof(dev->name)-1] = '\0';
2663 * We don't use a clustered pool, since clustering implies page reordering,
2664 * which isn't possible with user space supplied buffers. Although it
2665 * would still be possible to cluster pages that follow each other
2666 * tail-to-head, that doesn't seem worth the effort.
2668 dev->pool = sgv_pool_create(dev->name, 0);
2669 if (dev->pool == NULL)
2671 sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
2672 dev_user_free_sg_entries);
2674 scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "dh-%s",
2676 dev->devtype.type = dev_desc->type;
2677 dev->devtype.threads_num = -1;
2678 dev->devtype.parse_atomic = 1;
2679 dev->devtype.exec_atomic = 0; /* no point in making it 1 */
2680 dev->devtype.dev_done_atomic = 1;
2681 dev->devtype.no_proc = 1;
2682 dev->devtype.attach = dev_user_attach;
2683 dev->devtype.detach = dev_user_detach;
2684 dev->devtype.attach_tgt = dev_user_attach_tgt;
2685 dev->devtype.pre_unreg_sess = dev_user_pre_unreg_sess;
2686 dev->devtype.detach_tgt = dev_user_detach_tgt;
2687 dev->devtype.exec = dev_user_exec;
2688 dev->devtype.on_free_cmd = dev_user_on_free_cmd;
2689 dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
2691 init_completion(&dev->cleanup_cmpl);
2693 dev->def_block = dev->block;
2695 res = __dev_user_set_opt(dev, &dev_desc->opt);
2697 TRACE_MEM("dev %p, name %s", dev, dev->name);
2699 spin_lock(&dev_list_lock);
2701 list_for_each_entry(d, &dev_list, dev_list_entry) {
2702 if (strcmp(d->name, dev->name) == 0) {
2703 PRINT_ERROR("Device %s already exist",
2706 spin_unlock(&dev_list_lock);
2711 list_add_tail(&dev->dev_list_entry, &dev_list);
2713 spin_unlock(&dev_list_lock);
2718 res = scst_register_virtual_dev_driver(&dev->devtype);
2722 dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
2723 if (dev->virt_id < 0) {
2725 goto out_unreg_handler;
2728 mutex_lock(&dev_priv_mutex);
2729 if (file->private_data != NULL) {
2730 mutex_unlock(&dev_priv_mutex);
2731 PRINT_ERROR("%s", "Device already registered");
2735 file->private_data = dev;
2736 mutex_unlock(&dev_priv_mutex);
2739 TRACE_EXIT_RES(res);
2743 scst_unregister_virtual_device(dev->virt_id);
2746 scst_unregister_virtual_dev_driver(&dev->devtype);
2749 spin_lock(&dev_list_lock);
2750 list_del(&dev->dev_list_entry);
2751 spin_unlock(&dev_list_lock);
2754 sgv_pool_destroy(dev->pool);
2759 module_put(THIS_MODULE);
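/*
 * Validate and apply a new set of options. Must be called with SCST
 * activity suspended (dev_user_set_opt() below takes care of that), since
 * it may move commands from the priority ready list to the regular one and
 * rewires the parse/dev_done callbacks via dev_user_setup_functions().
 */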
2763 static int __dev_user_set_opt(struct scst_user_dev *dev,
2764 const struct scst_user_opt *opt)
2770 TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2771 "partial_transfers_type %x, partial_len %d", opt->parse_type,
2772 opt->on_free_cmd_type, opt->memory_reuse_type,
2773 opt->partial_transfers_type, opt->partial_len);
2775 if ((opt->parse_type > SCST_USER_MAX_PARSE_OPT) ||
2776 (opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT) ||
2777 (opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT) ||
2778 (opt->prio_queue_type > SCST_USER_MAX_PRIO_QUEUE_OPT) ||
2779 (opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT)) {
2780 PRINT_ERROR("%s", "Invalid option");
2785 if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
2786 (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
2787 ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
2788 (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
2789 (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1)) {
2790 PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x, "
2791 "tas %x, has_own_order_mgmt %x)", opt->tst,
2792 opt->queue_alg, opt->swp, opt->tas, opt->has_own_order_mgmt);
2797 if ((dev->prio_queue_type != opt->prio_queue_type) &&
2798 (opt->prio_queue_type == SCST_USER_PRIO_QUEUE_SINGLE)) {
2799 struct scst_user_cmd *u, *t;
2800 /* No need for lock, the activity is suspended */
2801 list_for_each_entry_safe(u, t, &dev->prio_ready_cmd_list,
2802 ready_cmd_list_entry) {
2803 list_move_tail(&u->ready_cmd_list_entry,
2804 &dev->ready_cmd_list);
2808 dev->prio_queue_type = opt->prio_queue_type;
2809 dev->parse_type = opt->parse_type;
2810 dev->on_free_cmd_type = opt->on_free_cmd_type;
2811 dev->memory_reuse_type = opt->memory_reuse_type;
2812 dev->partial_transfers_type = opt->partial_transfers_type;
2813 dev->partial_len = opt->partial_len;
2815 dev->tst = opt->tst;
2816 dev->queue_alg = opt->queue_alg;
2817 dev->swp = opt->swp;
2818 dev->tas = opt->tas;
2819 dev->has_own_order_mgmt = opt->has_own_order_mgmt;
2820 if (dev->sdev != NULL) {
2821 dev->sdev->tst = opt->tst;
2822 dev->sdev->queue_alg = opt->queue_alg;
2823 dev->sdev->swp = opt->swp;
2824 dev->sdev->tas = opt->tas;
2825 dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
2828 dev_user_setup_functions(dev);
2831 TRACE_EXIT_RES(res);
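/*
 * ioctl path for setting options: look up the device registered on this
 * file handle, then apply the options with SCST activity suspended.
 */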
2835 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
2838 struct scst_user_dev *dev;
2842 mutex_lock(&dev_priv_mutex);
2843 dev = (struct scst_user_dev *)file->private_data;
2844 res = dev_user_check_reg(dev);
2846 mutex_unlock(&dev_priv_mutex);
2849 down_read(&dev->dev_rwsem);
2850 mutex_unlock(&dev_priv_mutex);
2852 scst_suspend_activity();
2853 res = __dev_user_set_opt(dev, opt);
2854 scst_resume_activity();
2856 up_read(&dev->dev_rwsem);
2859 TRACE_EXIT_RES(res);
2863 static int dev_user_get_opt(struct file *file, void *arg)
2866 struct scst_user_dev *dev;
2867 struct scst_user_opt opt;
2871 mutex_lock(&dev_priv_mutex);
2872 dev = (struct scst_user_dev *)file->private_data;
2873 res = dev_user_check_reg(dev);
2875 mutex_unlock(&dev_priv_mutex);
2878 down_read(&dev->dev_rwsem);
2879 mutex_unlock(&dev_priv_mutex);
2881 opt.parse_type = dev->parse_type;
2882 opt.on_free_cmd_type = dev->on_free_cmd_type;
2883 opt.memory_reuse_type = dev->memory_reuse_type;
2884 opt.prio_queue_type = dev->prio_queue_type;
2885 opt.partial_transfers_type = dev->partial_transfers_type;
2886 opt.partial_len = dev->partial_len;
2888 opt.queue_alg = dev->queue_alg;
2891 opt.has_own_order_mgmt = dev->has_own_order_mgmt;
2893 TRACE_DBG("parse_type %x, on_free_cmd_type %x, memory_reuse_type %x, "
2894 "partial_transfers_type %x, partial_len %d", opt.parse_type,
2895 opt.on_free_cmd_type, opt.memory_reuse_type,
2896 opt.partial_transfers_type, opt.partial_len);
2898 res = copy_to_user(arg, &opt, sizeof(opt));
2900 up_read(&dev->dev_rwsem);
2902 TRACE_EXIT_RES(res);
2906 static int dev_usr_parse(struct scst_cmd *cmd)
2909 return SCST_CMD_STATE_DEFAULT;
2912 /* Needed only for /proc support */
2913 #define USR_TYPE { \
2914 .name = DEV_USER_NAME, \
2916 .parse = dev_usr_parse, \
2919 static struct scst_dev_type dev_user_devtype = USR_TYPE;
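/*
 * ->release() of the character device: unhook the device from the file
 * handle, hand it over to the cleanup thread, unregister it from the SCST
 * core, destroy its SGV pool and wait for dev_user_process_cleanup() to
 * signal cleanup_cmpl.
 */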
2921 static int dev_user_release(struct inode *inode, struct file *file)
2924 struct scst_user_dev *dev;
2928 mutex_lock(&dev_priv_mutex);
2929 dev = (struct scst_user_dev *)file->private_data;
2931 mutex_unlock(&dev_priv_mutex);
2934 file->private_data = NULL;
2936 TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
2938 spin_lock(&dev_list_lock);
2939 list_del(&dev->dev_list_entry);
2940 spin_unlock(&dev_list_lock);
2942 mutex_unlock(&dev_priv_mutex);
2944 down_write(&dev->dev_rwsem);
2946 spin_lock(&cleanup_lock);
2947 list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
2948 spin_unlock(&cleanup_lock);
2950 wake_up(&cleanup_list_waitQ);
2951 wake_up(&dev->prio_cmd_list_waitQ);
2952 wake_up(&dev->cmd_lists.cmd_list_waitQ);
2954 scst_unregister_virtual_device(dev->virt_id);
2955 scst_unregister_virtual_dev_driver(&dev->devtype);
2957 sgv_pool_destroy(dev->pool);
2959 TRACE_DBG("Unregistering finished (dev %p)", dev);
2961 dev->cleanup_done = 1;
2962 wake_up(&cleanup_list_waitQ);
2963 wake_up(&dev->prio_cmd_list_waitQ);
2964 wake_up(&dev->cmd_lists.cmd_list_waitQ);
2965 wait_for_completion(&dev->cleanup_cmpl);
2967 up_write(&dev->dev_rwsem); /* to make the debug check happy */
2969 TRACE_DBG("Releasing completed (dev %p)", dev);
2973 module_put(THIS_MODULE);
2976 TRACE_EXIT_RES(res);
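/*
 * Executed by the cleanup thread: repeatedly unjam the device and drain
 * both ready lists until cleanup_done is set and nothing is left, report
 * any ucmds still sitting in the hash as lost, then complete cleanup_cmpl
 * so dev_user_release() can return.
 */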
2980 static void dev_user_process_cleanup(struct scst_user_dev *dev)
2982 struct scst_user_cmd *ucmd;
2987 dev->prio_queue_type = SCST_USER_PRIO_QUEUE_SINGLE;
2992 TRACE_DBG("Cleaning up dev %p", dev);
2994 dev_user_unjam_dev(dev, 0, NULL);
2996 spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
2997 rc = dev_user_get_next_prio_cmd(dev, &ucmd);
2999 rc = dev_user_get_next_cmd(dev, &ucmd);
3001 dev_user_unjam_cmd(ucmd, 1, NULL);
3002 spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
3003 if ((rc == -EAGAIN) && dev->cleanup_done)
3010 for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
3011 struct list_head *head = &dev->ucmd_hash[i];
3012 struct scst_user_cmd *ucmd, *t;
3013 list_for_each_entry_safe(ucmd, t, head, hash_list_entry) {
3014 PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd,
3015 ucmd->state, atomic_read(&ucmd->ucmd_ref));
3022 TRACE_DBG("Cleanup done (dev %p)", dev);
3023 complete_all(&dev->cleanup_cmpl);
3029 static inline int test_cleanup_list(void)
3031 int res = !list_empty(&cleanup_list) ||
3032 unlikely(kthread_should_stop());
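/*
 * Dedicated kernel thread ("scst_usr_cleanupd"): sleeps on
 * cleanup_list_waitQ and processes devices queued for cleanup by
 * dev_user_release() until the module is unloaded.
 */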
3036 static int dev_user_cleanup_thread(void *arg)
3038 struct scst_user_dev *dev;
3042 PRINT_INFO("Cleanup thread started, PID %d", current->pid);
3044 current->flags |= PF_NOFREEZE;
3046 spin_lock(&cleanup_lock);
3047 while (!kthread_should_stop()) {
3049 init_waitqueue_entry(&wait, current);
3051 if (!test_cleanup_list()) {
3052 add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
3054 set_current_state(TASK_INTERRUPTIBLE);
3055 if (test_cleanup_list())
3057 spin_unlock(&cleanup_lock);
3059 spin_lock(&cleanup_lock);
3061 set_current_state(TASK_RUNNING);
3062 remove_wait_queue(&cleanup_list_waitQ, &wait);
3065 list_for_each_entry(dev, &cleanup_list, cleanup_list_entry) {
3066 list_del(&dev->cleanup_list_entry);
3067 spin_unlock(&cleanup_lock);
3068 dev_user_process_cleanup(dev);
3069 spin_lock(&cleanup_lock);
3073 spin_unlock(&cleanup_lock);
3076 * If kthread_should_stop() is true, we are guaranteed to be in the
3077 * middle of module unload, so cleanup_list must be empty.
3079 sBUG_ON(!list_empty(&cleanup_list));
3081 PRINT_INFO("Cleanup thread PID %d finished", current->pid);
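/*
 * Module init: create the ucmd slab cache, register the /proc-only
 * dev_user_devtype handler, create the sysfs class and the character
 * device with major DEV_USER_MAJOR, then start the cleanup thread. Each
 * failure path unwinds the steps already done in reverse order.
 */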
3087 static int __init init_scst_user(void)
3090 struct class_device *class_member;
3094 #if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
3095 PRINT_ERROR("%s", "HIGHMEM kernel configurations are not supported. "
3096 "Consider changing the VMSPLIT option or using a 64-bit "
3097 "configuration instead. See the README file for details.");
3102 user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
3103 if (user_cmd_cachep == NULL) {
3108 dev_user_devtype.module = THIS_MODULE;
3110 res = scst_register_virtual_dev_driver(&dev_user_devtype);
3114 res = scst_dev_handler_build_std_proc(&dev_user_devtype);
3118 dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
3119 if (IS_ERR(dev_user_sysfs_class)) {
3120 PRINT_ERROR("%s", "Unable to create sysfs class for SCST user "
3122 res = PTR_ERR(dev_user_sysfs_class);
3126 res = register_chrdev(DEV_USER_MAJOR, DEV_USER_NAME, &dev_user_fops);
3128 PRINT_ERROR("Unable to get major %d for SCSI tapes", DEV_USER_MAJOR);
3132 class_member = class_device_create(dev_user_sysfs_class, NULL,
3133 MKDEV(DEV_USER_MAJOR, 0), NULL, DEV_USER_NAME);
3134 if (IS_ERR(class_member)) {
3135 res = PTR_ERR(class_member);
3139 cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
3140 "scst_usr_cleanupd");
3141 if (IS_ERR(cleanup_thread)) {
3142 res = PTR_ERR(cleanup_thread);
3143 PRINT_ERROR("kthread_create() failed: %d", res);
3148 TRACE_EXIT_RES(res);
3152 class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3155 unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3158 class_destroy(dev_user_sysfs_class);
3161 scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3164 scst_unregister_virtual_dev_driver(&dev_user_devtype);
3167 kmem_cache_destroy(user_cmd_cachep);
3171 static void __exit exit_scst_user(void)
3177 rc = kthread_stop(cleanup_thread);
3179 TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
3182 unregister_chrdev(DEV_USER_MAJOR, DEV_USER_NAME);
3183 class_device_destroy(dev_user_sysfs_class, MKDEV(DEV_USER_MAJOR, 0));
3184 class_destroy(dev_user_sysfs_class);
3186 scst_dev_handler_destroy_std_proc(&dev_user_devtype);
3187 scst_unregister_virtual_dev_driver(&dev_user_devtype);
3189 kmem_cache_destroy(user_cmd_cachep);
3195 module_init(init_scst_user);
3196 module_exit(exit_scst_user);
3198 MODULE_AUTHOR("Vladislav Bolkhovitin");
3199 MODULE_LICENSE("GPL");
3200 MODULE_DESCRIPTION("Virtual user space device handler for SCST");
3201 MODULE_VERSION(SCST_VERSION_STRING);
3202 MODULE_ALIAS_CHARDEV_MAJOR(DEV_USER_MAJOR);