- Fixed race in scst_user, which can lead to usage of already freed command.
  A misbe... immediately after the lock is released and we should catch it.
- Text in README about barriers usage updated
- Minor cleanup with aim to have more robust code

author    vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
          Mon, 26 Jan 2009 18:08:07 +0000 (18:08 +0000)
committer vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
          Mon, 26 Jan 2009 18:08:07 +0000 (18:08 +0000)

git-svn-id: https://scst.svn.sourceforge.net/svnroot/scst/trunk@642 d57e44dd-8a1f-0410-8b47-8ef2f437770f

scst/README
scst/README_in-tree
scst/src/dev_handlers/scst_user.c

diff --git a/scst/README b/scst/README
index 909fcdc..fe203f5 100644
--- a/scst/README
+++ b/scst/README
@@ -607,12 +607,11 @@ IMPORTANT: By default for performance reasons VDISK FILEIO devices use write
           On Linux initiators for EXT3 and ReiserFS file systems the
           barrier protection could be turned on using "barrier=1" and
           "barrier=flush" mount options correspondingly. Note, that
-          usually it turned off by default and the status of barriers
-          usage isn't reported anywhere in the system logs as well as
-          there is no way to know it on the mounted file system (at
-          least no known one). Windows and, AFAIK, other UNIX'es don't
-          need any special explicit options and do necessary barrier
-          actions on write-back caching devices by default. Also note
+          usually it's turned off by default (see http://lwn.net/Articles/283161).
+          You can check if it's turned on or off by looking in /proc/mounts.
+          Windows and, AFAIK, other UNIX'es don't need any special
+          explicit options and do necessary barrier actions on
+          write-back caching devices by default. Also note
           that on some real-life workloads write through caching might
           perform better, than write back one with the barrier
           protection turned on.
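
The updated README text above points readers at /proc/mounts to see whether a
"barrier" mount option is active for a mounted file system. As a minimal
illustration (not part of this commit or of SCST), the following user-space C
sketch walks /proc/mounts with the standard glibc <mntent.h> helpers and
reports, for each mount, whether a "barrier" option is present; the same check
can of course be done by simply reading /proc/mounts by hand.

/* Minimal sketch: report the "barrier" mount option per file system,
 * as suggested by the README's /proc/mounts hint. */
#include <stdio.h>
#include <mntent.h>

int main(void)
{
	struct mntent *e;
	FILE *m = setmntent("/proc/mounts", "r");

	if (m == NULL) {
		perror("/proc/mounts");
		return 1;
	}

	while ((e = getmntent(m)) != NULL) {
		/* hasmntopt() returns NULL if the option is absent. */
		printf("%-24s %-10s barrier: %s\n", e->mnt_dir, e->mnt_type,
		       hasmntopt(e, "barrier") ? "set" : "not set");
	}

	endmntent(m);
	return 0;
}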
diff --git a/scst/README_in-tree b/scst/README_in-tree
index 52ade29..7e1566d 100644
--- a/scst/README_in-tree
+++ b/scst/README_in-tree
@@ -553,12 +553,11 @@ IMPORTANT: By default for performance reasons VDISK FILEIO devices use write
           On Linux initiators for EXT3 and ReiserFS file systems the
           barrier protection could be turned on using "barrier=1" and
           "barrier=flush" mount options correspondingly. Note, that
-          usually it turned off by default and the status of barriers
-          usage isn't reported anywhere in the system logs as well as
-          there is no way to know it on the mounted file system (at
-          least no known one). Windows and, AFAIK, other UNIX'es don't
-          need any special explicit options and do necessary barrier
-          actions on write-back caching devices by default. Also note
+          usually it's turned off by default (see http://lwn.net/Articles/283161).
+          You can check if it's turned on or off by looking in /proc/mounts.
+          Windows and, AFAIK, other UNIX'es don't need any special
+          explicit options and do necessary barrier actions on
+          write-back caching devices by default. Also note
           that on some real-life workloads write through caching might
           perform better, than write back one with the barrier
           protection turned on.
@@ -794,6 +793,7 @@ IMPORTANT: If you use on initiator some versions of Windows (at least W2K)
           See also important notes about setting block sizes >512 bytes
           for VDISK FILEIO devices above.
 
+
 Work if target's backstorage or link is too slow
 ------------------------------------------------
 
diff --git a/scst/src/dev_handlers/scst_user.c b/scst/src/dev_handlers/scst_user.c
index bab28d2..b9e0c41 100644
--- a/scst/src/dev_handlers/scst_user.c
+++ b/scst/src/dev_handlers/scst_user.c
@@ -57,8 +57,6 @@
 #define DEV_USER_CMD_HASH_ORDER                6
 #define DEV_USER_ATTACH_TIMEOUT                (5*HZ)
 
-#define DEV_USER_HEADER_LEN    offsetof(struct scst_user_get_cmd, preply)
-
 struct scst_user_dev {
        struct rw_semaphore dev_rwsem;
 
@@ -412,7 +410,9 @@ static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
        TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
                ucmd, ucmd->h, ucmd->ubuff);
 
-       ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.on_cached_mem_free);
+       ucmd->user_cmd_payload_len =
+               offsetof(struct scst_user_get_cmd, on_cached_mem_free) +
+               sizeof(ucmd->user_cmd.on_cached_mem_free);
        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
        ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;
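
The repeated changes in this file all follow the same pattern: previously
user_cmd_payload_len held only the size of the per-subcode payload, and the
fixed DEV_USER_HEADER_LEN was added later when the reply was copied to user
space; now it holds the complete length from the start of the structure, so
the separate define removed above is no longer needed. Below is a simplified,
stand-alone sketch of the offsetof() + sizeof() arithmetic; the structure is a
stand-in for illustration only, not the real struct scst_user_get_cmd.

/* Simplified stand-in (not the real struct scst_user_get_cmd): a common
 * command header is followed by a union of per-subcode payloads, and
 * only the header plus the payload actually in use has to be copied to
 * user space. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_get_cmd {
	uint32_t cmd_h;				/* common header */
	uint32_t subcode;
	union {					/* per-subcode payloads */
		struct { uint64_t pbuf; } on_cached_mem_free;
		struct { uint64_t sess_h; uint32_t bufflen; } alloc_cmd;
	};
};

int main(void)
{
	/* Header length up to the payload, plus the payload itself --
	 * the same arithmetic the driver now stores in
	 * user_cmd_payload_len. */
	size_t len = offsetof(struct demo_get_cmd, on_cached_mem_free) +
		     sizeof(((struct demo_get_cmd *)0)->on_cached_mem_free);

	printf("copy %zu bytes instead of sizeof(struct) = %zu\n",
	       len, sizeof(struct demo_get_cmd));
	return 0;
}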
@@ -630,7 +630,9 @@ static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
                goto out;
        }
 
-       ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.alloc_cmd);
+       ucmd->user_cmd_payload_len =
+               offsetof(struct scst_user_get_cmd, alloc_cmd) +
+               sizeof(ucmd->user_cmd.alloc_cmd);
        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
        ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
@@ -751,7 +753,9 @@ static int dev_user_parse(struct scst_cmd *cmd)
        case SCST_USER_PARSE_CALL:
                TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
                        "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
-               ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.parse_cmd);
+               ucmd->user_cmd_payload_len =
+                       offsetof(struct scst_user_get_cmd, parse_cmd) +
+                       sizeof(ucmd->user_cmd.parse_cmd);
                ucmd->user_cmd.cmd_h = ucmd->h;
                ucmd->user_cmd.subcode = SCST_USER_PARSE;
                ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
@@ -863,7 +867,9 @@ static int dev_user_exec(struct scst_cmd *cmd)
        if (cmd->data_direction == SCST_DATA_WRITE)
                dev_user_flush_dcache(ucmd);
 
-       ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.exec_cmd);
+       ucmd->user_cmd_payload_len =
+               offsetof(struct scst_user_get_cmd, exec_cmd) +
+               sizeof(ucmd->user_cmd.exec_cmd);
        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_EXEC;
        ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
@@ -932,7 +938,9 @@ static void dev_user_on_free_cmd(struct scst_cmd *cmd)
                goto out_reply;
        }
 
-       ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.on_free_cmd);
+       ucmd->user_cmd_payload_len =
+               offsetof(struct scst_user_get_cmd, on_free_cmd) +
+               sizeof(ucmd->user_cmd.on_free_cmd);
        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;
        ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
@@ -1721,6 +1729,7 @@ static int dev_user_reply_get_cmd(struct file *file, void __user *arg)
        kmem_cache_free(user_get_cmd_cachep, cmd);
 
        spin_lock_irq(&dev->cmd_lists.cmd_list_lock);
+again:
        res = dev_user_get_next_cmd(dev, &ucmd);
        if (res == 0) {
                int len;
@@ -1730,12 +1739,15 @@ static int dev_user_reply_get_cmd(struct file *file, void __user *arg)
                 * copy of dead data to the user space, which can lead to a
                 * leak of sensitive information.
                 */
-               ucmd_get(ucmd);
+               if (unlikely(ucmd_get_check(ucmd))) {
+                       /* Oops, this ucmd is already being destroyed. Retry. */
+                       goto again;
+               }
                spin_unlock_irq(&dev->cmd_lists.cmd_list_lock);
 
                EXTRACHECKS_BUG_ON(ucmd->user_cmd_payload_len == 0);
 
-               len = DEV_USER_HEADER_LEN + ucmd->user_cmd_payload_len;
+               len = ucmd->user_cmd_payload_len;
                TRACE_DBG("ucmd %p (user_cmd %p), payload_len %d (len %d)",
                        ucmd, &ucmd->user_cmd, ucmd->user_cmd_payload_len, len);
                TRACE_BUFFER("UCMD", &ucmd->user_cmd, len);
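
The hunk above is the actual race fix: the reference on the command must be
taken while cmd_list_lock is still held, and only if the command has not
already begun dying; otherwise a handler that drops the last reference right
after the lock is released could leave us reading freed memory. The sketch
below is a rough user-space stand-in for the "take a reference only while the
refcount is still non-zero" check that ucmd_get_check() presumably performs
(names and types are simplified, C11 atomics replace the kernel primitives;
this is not the SCST implementation). In the driver itself a failed check
simply retries via the new again: label, still under cmd_list_lock, and picks
up the next command instead.

/* Rough stand-in for the ucmd_get_check() idea: succeed only while the
 * reference count is still non-zero, i.e. the command is not already
 * being destroyed. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_ucmd {
	atomic_int ref_cnt;	/* 0 => command is being destroyed */
};

/* Returns true if the command is already being destroyed (mirrors the
 * "goto again" retry in dev_user_reply_get_cmd() above). */
static bool demo_ucmd_get_check(struct demo_ucmd *ucmd)
{
	int old = atomic_load(&ucmd->ref_cnt);

	while (old != 0) {
		/* Try to bump the count from a known non-zero value;
		 * on failure "old" is reloaded and the check repeats. */
		if (atomic_compare_exchange_weak(&ucmd->ref_cnt, &old, old + 1))
			return false;	/* reference taken, safe to use */
	}
	return true;			/* too late, pick another command */
}

int main(void)
{
	struct demo_ucmd live, dying;

	atomic_init(&live.ref_cnt, 1);
	atomic_init(&dying.ref_cnt, 0);

	printf("live : being destroyed? %d\n", demo_ucmd_get_check(&live));
	printf("dying: being destroyed? %d\n", demo_ucmd_get_check(&dying));
	return 0;
}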
@@ -2168,7 +2180,9 @@ static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        /* We can't afford missing TM command due to memory shortage */
        ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
 
-       ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.tm_cmd);
+       ucmd->user_cmd_payload_len =
+               offsetof(struct scst_user_get_cmd, tm_cmd) +
+               sizeof(ucmd->user_cmd.tm_cmd);
        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
        ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
@@ -2303,7 +2317,8 @@ static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
 
        ucmd->cmpl = &cmpl;
 
-       ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.sess);
+       ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
+               sizeof(ucmd->user_cmd.sess);
        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
        ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
@@ -2380,7 +2395,8 @@ static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
        TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %llx)", ucmd,
                ucmd->h, ucmd->user_cmd.sess.sess_h);
 
-       ucmd->user_cmd_payload_len = sizeof(ucmd->user_cmd.sess);
+       ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
+               sizeof(ucmd->user_cmd.sess);
        ucmd->user_cmd.cmd_h = ucmd->h;
        ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
        ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;