- New scst_user IOCTL SCST_USER_PREALLOC_BUFFER added
author    vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>  Wed, 30 Sep 2009 18:35:24 +0000 (18:35 +0000)
committer vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>  Wed, 30 Sep 2009 18:35:24 +0000 (18:35 +0000)
 - New scst_user option sgv_disable_clustered_pool added
 - fileio_tgt updated to use those new facilities (a brief registration sketch follows this list)
 - Scst_user docs updated
 - sgv_pool_get() and sgv_pool_put() exported
 - AENs sense buffer overflow fixed
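
A rough sketch (illustrative only, not part of the change itself) of how a user
space handler requests a single SGV pool at registration time; scst_usr_fd
stands for the opened scst_user device, and the remaining mandatory
scst_user_dev_desc fields as well as error handling are elided:

    struct scst_user_dev_desc desc;
    int res;

    memset(&desc, 0, sizeof(desc));
    /* ... version_str, name, type, block_size, etc. filled in as usual ... */
    desc.sgv_disable_clustered_pool = 1;  /* one SGV pool instead of normal + clustered */

    res = ioctl(scst_usr_fd, SCST_USER_REGISTER_DEVICE, &desc);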

git-svn-id: https://scst.svn.sourceforge.net/svnroot/scst/trunk@1159 d57e44dd-8a1f-0410-8b47-8ef2f437770f

doc/scst_user_spec.txt
scst/include/scst_sgv.h
scst/include/scst_user.h
scst/src/dev_handlers/scst_user.c
scst/src/scst_lib.c
scst/src/scst_mem.c
usr/fileio/common.c
usr/fileio/fileio.c

index 610731c..3c38ab7 100644 (file)
@@ -63,8 +63,9 @@ struct scst_user_dev_desc
        uint8_t version;
        uint8_t type;
        uint8_t sgv_shared;
+       uint8_t sgv_disable_clustered_pool;
        int32_t sgv_single_alloc_pages;
-       int32_t sgv_purge_interval
+       int32_t sgv_purge_interval;
        uint8_t has_own_order_mgmt;
        struct scst_user_opt opt;
        uint32_t block_size;
@@ -81,6 +82,14 @@ where:
  - sgv_shared - true, if the SGV cache for this device should be shared with
    other devices. False, if the SGV cache should be dedicated.
 
+ - sgv_disable_clustered_pool - disable usage of the clustered pool for this
+   device. Normally, two independent SGV pools - a normal one and a clustered
+   one - are created and used for each device. The clustered pool creates and
+   contains SG vectors in which contiguous pages are coalesced (clustered)
+   into single SG entries, which is good for performance. However, not all
+   target drivers can use such SG vectors, and in some cases a single memory
+   pool is simply more convenient, so this option provides that possibility.
+
  - sgv_single_alloc_pages - if 0, then the SGV cache for this device will
    work in the set of power 2 size buffers mode. If >0, then the SGV
    cache will work in the fixed size buffers mode. In this case it sets
@@ -921,6 +930,56 @@ SCST_USER_GET_EXTENDED_CDB returns 0 on success or -1 in case of error,
 and errno is set appropriately.
 
 
+               9. SCST_USER_PREALLOC_BUFFER
+
+SCST_USER_PREALLOC_BUFFER - asks SCST to preallocate the supplied user space
+buffer, i.e. to map it and keep it in one of the device's SGV caches.
+
+It has the following arguments:
+
+union scst_user_prealloc_buffer {
+       struct scst_user_prealloc_buffer_in in;
+       struct scst_user_prealloc_buffer_out out;
+},
+
+where:
+
+ - in - provides data about the buffer to preallocate
+
+ - out - returns information about the preallocated buffer
+
+Structure scst_user_prealloc_buffer_in has the following definition:
+
+  struct scst_user_prealloc_buffer_in {
+       aligned_u64 pbuf;
+       uint32_t bufflen;
+       uint8_t for_clust_pool;
+},
+
+where:
+
+ - pbuf - pointer to the buffer
+ - bufflen - size of the buffer
+ - for_clust_pool - if 1, then the buffer will be preallocated in the
+   clustered pool. If 0, then the buffer will be preallocated in the
+   normal pool.
+
+Structure scst_user_prealloc_buffer_out has the following definition:
+
+struct scst_user_prealloc_buffer_out {
+       uint32_t cmd_h;
+}
+
+where:
+
+ - cmd_h - handle used to identify the buffer in the
+   SCST_USER_ON_CACHED_MEM_FREE subcommand.
+
+SCST_USER_PREALLOC_BUFFER returns 0 on success or -1 in case of error,
+and errno is set appropriately.
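+
+For illustration, here is a rough calling sketch (not taken from any existing
+handler): it assumes the usual <sys/ioctl.h>, <malloc.h>, <unistd.h>,
+<string.h> and <stdio.h> includes, and that scst_usr_fd is the descriptor of
+the opened scst_user device; the 512 KB size is an arbitrary example value.
+
+  union scst_user_prealloc_buffer pre;
+  int res;
+
+  memset(&pre, 0, sizeof(pre));
+  /* pbuf must be page aligned, hence the aligned allocation */
+  pre.in.pbuf = (unsigned long)memalign(getpagesize(), 512 * 1024);
+  pre.in.bufflen = 512 * 1024;
+  pre.in.for_clust_pool = 0;    /* preallocate in the normal, not clustered, pool */
+
+  res = ioctl(scst_usr_fd, SCST_USER_PREALLOC_BUFFER, &pre);
+  if (res == 0)
+       printf("Preallocated buffer, cmd_h %x\n", pre.out.cmd_h);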
+
+
                IV. Commands processing flow example.
 
 As the example consider a simple synchronous VTL, which serves one
index be04bbe..348010b 100644 (file)
 /* Set an object should be returned even if it doesn't have SG vector built */
 #define SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL      4
 
+/*
+ * Set if the allocated object must be a new one, i.e. still allocated via
+ * the cache, but not a previously cached (recycled) one
+ */
+#define SGV_POOL_ALLOC_GET_NEW                 8
+
 struct sgv_pool_obj;
 struct sgv_pool;
 
@@ -100,6 +106,23 @@ struct sgv_pool *sgv_pool_create(const char *name,
  */
 void sgv_pool_del(struct sgv_pool *pool);
 
+/**
+ * sgv_pool_get - increases reference counter for the corresponding SGV cache
+ * @:pool      the cache to get.
+ *
+ *
+ * Description:
+ *    Must be balanced by a corresponding sgv_pool_put() call.
+void sgv_pool_get(struct sgv_pool *pool);
+
+/**
+ * sgv_pool_put - decreases reference counter for the corresponding SGV cache
+ * @:pool      the cache to put.
+ *
+ * Description:
+ *    If the reference counter reaches 0, the cache will be destroyed.
+ */
+void sgv_pool_put(struct sgv_pool *pool);
+
 /**
  * sgv_pool_flush - flushes the SGV cache
  * @:pool      the cache to flush
index 135d178..ca83ce1 100644 (file)
@@ -91,6 +91,7 @@ struct scst_user_dev_desc {
        aligned_u64 version_str;
        uint8_t type;
        uint8_t sgv_shared;
+       uint8_t sgv_disable_clustered_pool;
        int32_t sgv_single_alloc_pages;
        int32_t sgv_purge_interval;
        struct scst_user_opt opt;
@@ -246,11 +247,30 @@ struct scst_user_reply_cmd {
        };
 };
 
+/* Be careful adding new members here, this structure is allocated on stack! */
 struct scst_user_get_ext_cdb {
        uint32_t cmd_h;
        aligned_u64 ext_cdb_buffer;
 };
 
+/* Be careful adding new members here, this structure is allocated on stack! */
+struct scst_user_prealloc_buffer_in {
+       aligned_u64 pbuf;
+       uint32_t bufflen;
+       uint8_t for_clust_pool;
+};
+
+/* Be careful adding new members here, this structure is allocated on stack! */
+struct scst_user_prealloc_buffer_out {
+       uint32_t cmd_h;
+};
+
+/* Be careful adding new members here, this structure is allocated on stack! */
+union scst_user_prealloc_buffer {
+       struct scst_user_prealloc_buffer_in in;
+       struct scst_user_prealloc_buffer_out out;
+};
+
 #define SCST_USER_REGISTER_DEVICE      _IOW('u', 1, struct scst_user_dev_desc)
 #define SCST_USER_UNREGISTER_DEVICE    _IO('u', 2)
 #define SCST_USER_SET_OPTIONS          _IOW('u', 3, struct scst_user_opt)
@@ -260,6 +280,7 @@ struct scst_user_get_ext_cdb {
 #define SCST_USER_FLUSH_CACHE          _IO('u', 7)
 #define SCST_USER_DEVICE_CAPACITY_CHANGED _IO('u', 8)
 #define SCST_USER_GET_EXTENDED_CDB     _IOWR('u', 9, struct scst_user_get_ext_cdb)
+#define SCST_USER_PREALLOC_BUFFER      _IOWR('u', 10, union scst_user_prealloc_buffer)
 
 /* Values for scst_user_get_cmd.subcode */
 #define SCST_USER_ATTACH_SESS          \
index b60760d..260bf1b 100644 (file)
@@ -169,6 +169,7 @@ static int dev_user_register_dev(struct file *file,
 static int dev_user_unregister_dev(struct file *file);
 static int dev_user_flush_cache(struct file *file);
 static int dev_user_capacity_changed(struct file *file);
+static int dev_user_prealloc_buffer(struct file *file, void __user *arg);
 static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt);
 static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
@@ -1119,7 +1120,7 @@ static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
        TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d,"
                " first_page_offset %d, len %d)", ucmd, ubuff,
                ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
-               ucmd->cmd->bufflen);
+               (ucmd->cmd != NULL) ? ucmd->cmd->bufflen : -1);
 
        down_read(&tsk->mm->mmap_sem);
        rc = get_user_pages(tsk, tsk->mm, ubuff, ucmd->num_data_pages,
@@ -1139,11 +1140,13 @@ out:
        return res;
 
 out_nomem:
-       scst_set_busy(ucmd->cmd);
+       if (ucmd->cmd != NULL)
+               scst_set_busy(ucmd->cmd);
        /* go through */
 
 out_err:
-       scst_set_cmd_abnormal_done_state(ucmd->cmd);
+       if (ucmd->cmd != NULL)
+               scst_set_cmd_abnormal_done_state(ucmd->cmd);
        goto out;
 
 out_unmap:
@@ -1156,7 +1159,8 @@ out_unmap:
        kfree(ucmd->data_pages);
        ucmd->data_pages = NULL;
        res = -EFAULT;
-       scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
+       if (ucmd->cmd != NULL)
+               scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
        goto out_err;
 }
 
@@ -1977,6 +1981,11 @@ static long dev_user_ioctl(struct file *file, unsigned int cmd,
                res = dev_user_capacity_changed(file);
                break;
 
+       case SCST_USER_PREALLOC_BUFFER:
+               TRACE_DBG("%s", "PREALLOC_BUFFER");
+               res = dev_user_prealloc_buffer(file, (void __user *)arg);
+               break;
+
        default:
                PRINT_ERROR("Invalid ioctl cmd %x", cmd);
                res = -EINVAL;
@@ -2721,20 +2730,26 @@ static int dev_user_register_dev(struct file *file,
        sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
                dev_user_free_sg_entries);
 
-       scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s-clust",
-               (dev_desc->sgv_name[0] == '\0') ? dev->name :
-                                                 dev_desc->sgv_name);
-       dev->pool_clust = sgv_pool_create(dev->devtype.name,
-                               sgv_tail_clustering,
-                               dev_desc->sgv_single_alloc_pages,
-                               dev_desc->sgv_shared,
-                               dev_desc->sgv_purge_interval);
-       if (dev->pool_clust == NULL) {
-               res = -ENOMEM;
-               goto out_free0;
+       if (!dev_desc->sgv_disable_clustered_pool) {
+               scnprintf(dev->devtype.name, sizeof(dev->devtype.name),
+                       "%s-clust",
+                       (dev_desc->sgv_name[0] == '\0') ? dev->name :
+                                                         dev_desc->sgv_name);
+               dev->pool_clust = sgv_pool_create(dev->devtype.name,
+                                       sgv_tail_clustering,
+                                       dev_desc->sgv_single_alloc_pages,
+                                       dev_desc->sgv_shared,
+                                       dev_desc->sgv_purge_interval);
+               if (dev->pool_clust == NULL) {
+                       res = -ENOMEM;
+                       goto out_free0;
+               }
+               sgv_pool_set_allocator(dev->pool_clust, dev_user_alloc_pages,
+                       dev_user_free_sg_entries);
+       } else {
+               dev->pool_clust = dev->pool;
+               sgv_pool_get(dev->pool_clust);
        }
-       sgv_pool_set_allocator(dev->pool_clust, dev_user_alloc_pages,
-               dev_user_free_sg_entries);
 
        scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s",
                dev->name);
@@ -2942,6 +2957,100 @@ out:
        return res;
 }
 
+static int dev_user_prealloc_buffer(struct file *file, void __user *arg)
+{
+       int res = 0;
+       struct scst_user_dev *dev;
+       union scst_user_prealloc_buffer pre;
+       aligned_u64 pbuf;
+       uint32_t bufflen;
+       struct scst_user_cmd *ucmd;
+       int pages, sg_cnt;
+       struct sgv_pool *pool;
+       struct scatterlist *sg;
+
+       TRACE_ENTRY();
+
+       mutex_lock(&dev_priv_mutex);
+       dev = (struct scst_user_dev *)file->private_data;
+       res = dev_user_check_reg(dev);
+       if (unlikely(res != 0)) {
+               mutex_unlock(&dev_priv_mutex);
+               goto out;
+       }
+       down_read(&dev->dev_rwsem);
+       mutex_unlock(&dev_priv_mutex);
+
+       res = copy_from_user(&pre.in, arg, sizeof(pre.in));
+       if (unlikely(res != 0)) {
+               res = -EFAULT;
+               goto out_up;
+       }
+
+       TRACE_MEM("Prealloc buffer with size %dKB for dev %s",
+               pre.in.bufflen / 1024, dev->name);
+       TRACE_BUFFER("Input param", &pre.in, sizeof(pre.in));
+
+       pbuf = pre.in.pbuf;
+       bufflen = pre.in.bufflen;
+
+       ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
+       if (ucmd == NULL) {
+               res = -ENOMEM;
+               goto out_up;
+       }
+
+       ucmd->buff_cached = 1;
+
+       TRACE_MEM("ucmd %p, pbuf %llx", ucmd, pbuf);
+
+       if (unlikely((pbuf & ~PAGE_MASK) != 0)) {
+               PRINT_ERROR("Supplied pbuf %llx isn't page aligned", pbuf);
+               res = -EINVAL;
+               goto out_put;
+       }
+
+       pages = calc_num_pg(pbuf, bufflen);
+       res = dev_user_map_buf(ucmd, pbuf, pages);
+       if (res != 0)
+               goto out_put;
+
+       if (pre.in.for_clust_pool)
+               pool = dev->pool_clust;
+       else
+               pool = dev->pool;
+
+       sg = sgv_pool_alloc(pool, bufflen, GFP_KERNEL, SGV_POOL_ALLOC_GET_NEW,
+                        &sg_cnt, &ucmd->sgv, &dev->udev_mem_lim, ucmd);
+       if (sg != NULL) {
+               struct scst_user_cmd *buf_ucmd =
+                       (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
+
+               TRACE_MEM("Buf ucmd %p (sg_cnt %d, last seg len %d, "
+                       "bufflen %d)", buf_ucmd, sg_cnt,
+                       sg[sg_cnt-1].length, bufflen);
+
+               EXTRACHECKS_BUG_ON(ucmd != buf_ucmd);
+
+               ucmd->buf_ucmd = buf_ucmd;
+       } else {
+               res = -ENOMEM;
+               goto out_put;
+       }
+
+       dev_user_free_sgv(ucmd);
+
+       pre.out.cmd_h = ucmd->h;
+       if (copy_to_user(arg, &pre.out, sizeof(pre.out)) != 0)
+               res = -EFAULT;
+
+out_put:
+       ucmd_put(ucmd);
+
+out_up:
+       up_read(&dev->dev_rwsem);
+
+out:
+       TRACE_EXIT_RES(res);
+       return res;
+}
 
 static int __dev_user_set_opt(struct scst_user_dev *dev,
        const struct scst_user_opt *opt)
index 646e046..4c191d7 100644 (file)
@@ -506,7 +506,7 @@ void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
 
                aen->event_fn = SCST_AEN_SCSI;
                aen->aen_sense_len = scst_set_sense(aen->aen_sense,
-                       SCST_SENSE_BUFFERSIZE, tgt_dev->dev->d_sense,
+                       sizeof(aen->aen_sense), tgt_dev->dev->d_sense,
                        key, asc, ascq);
 
                TRACE_DBG("Calling target's %s report_aen(%p)",
@@ -684,7 +684,7 @@ found:
 
                aen->event_fn = SCST_AEN_SCSI;
                aen->aen_sense_len = scst_set_sense(aen->aen_sense,
-                       SCST_SENSE_BUFFERSIZE, d_sense,
+                       sizeof(aen->aen_sense), d_sense,
                        SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
 
                TRACE_DBG("Calling target's %s report_aen(%p)",
index dcbeaa9..99364d9 100644 (file)
@@ -70,9 +70,6 @@ static struct shrinker sgv_shrinker;
  */
 static LIST_HEAD(sgv_pools_list);
 
-static void sgv_pool_get(struct sgv_pool *pool);
-static void sgv_pool_put(struct sgv_pool *pool);
-
 static inline bool sgv_pool_clustered(const struct sgv_pool *pool)
 {
        return pool->clustering_type != sgv_no_clustering;
@@ -700,11 +697,17 @@ out_free:
 }
 
 static struct sgv_pool_obj *sgv_get_obj(struct sgv_pool *pool, int cache_num,
-       int pages, gfp_t gfp_mask)
+       int pages, gfp_t gfp_mask, bool get_new)
 {
        struct sgv_pool_obj *obj;
 
        spin_lock_bh(&pool->sgv_pool_lock);
+
+       if (unlikely(get_new)) {
+               /* Used only for buffer preallocation */
+               goto get_new;
+       }
+
        if (likely(!list_empty(&pool->recycling_lists[cache_num]))) {
                obj = list_entry(pool->recycling_lists[cache_num].next,
                         struct sgv_pool_obj, recycling_list_entry);
@@ -718,6 +721,7 @@ static struct sgv_pool_obj *sgv_get_obj(struct sgv_pool *pool, int cache_num,
                goto out;
        }
 
+get_new:
        if (pool->cached_entries == 0) {
                TRACE_MEM("Adding pool %p to the active list", pool);
                spin_lock_bh(&sgv_pools_lock);
@@ -895,7 +899,7 @@ struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
        if (unlikely(size == 0))
                goto out;
 
-       sBUG_ON((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
+       EXTRACHECKS_BUG_ON((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
 
        pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        if (pool->single_alloc_pages == 0) {
@@ -930,7 +934,8 @@ struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
                        goto out_fail;
                allowed_mem_checked = true;
 
-               obj = sgv_get_obj(pool, cache_num, pages_to_alloc, gfp_mask);
+               obj = sgv_get_obj(pool, cache_num, pages_to_alloc, gfp_mask,
+                       flags & SGV_POOL_ALLOC_GET_NEW);
                if (unlikely(obj == NULL)) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                "sgv_pool_obj failed (size %d)", size);
@@ -1532,15 +1537,16 @@ static void sgv_pool_destroy(struct sgv_pool *pool)
        return;
 }
 
-static void sgv_pool_get(struct sgv_pool *pool)
+void sgv_pool_get(struct sgv_pool *pool)
 {
        atomic_inc(&pool->sgv_pool_ref);
        TRACE_MEM("Incrementing sgv pool %p ref (new value %d)",
                pool, atomic_read(&pool->sgv_pool_ref));
        return;
 }
+EXPORT_SYMBOL(sgv_pool_get);
 
-static void sgv_pool_put(struct sgv_pool *pool)
+void sgv_pool_put(struct sgv_pool *pool)
 {
        TRACE_MEM("Decrementing sgv pool %p ref (new value %d)",
                pool, atomic_read(&pool->sgv_pool_ref)-1);
@@ -1548,6 +1554,7 @@ static void sgv_pool_put(struct sgv_pool *pool)
                sgv_pool_destroy(pool);
        return;
 }
+EXPORT_SYMBOL(sgv_pool_put);
 
 void sgv_pool_del(struct sgv_pool *pool)
 {
index 6ef62dc..47a748b 100644 (file)
@@ -594,7 +594,7 @@ static int do_cached_mem_free(struct vdisk_cmd *vcmd)
 
        TRACE_ENTRY();
 
-       TRACE_MEM("Cached mem free (cmd %d, buf %"PRIx64")", cmd->cmd_h,
+       TRACE_MEM("Cached mem free (cmd %x, buf %"PRIx64")", cmd->cmd_h,
                cmd->on_cached_mem_free.pbuf);
 
        free((void *)(unsigned long)cmd->on_cached_mem_free.pbuf);
index 13f5a82..e5c1082 100644 (file)
@@ -92,6 +92,8 @@ static int wt_flag, rd_only_flag, o_direct_flag, nullio, nv_cache;
 static int debug_tm_ignore;
 #endif
 static int non_blocking, sgv_shared, sgv_single_alloc_pages, sgv_purge_interval;
+static int sgv_disable_clustered_pool, prealloc_buffers_num, prealloc_buffer_size;
+
 static void *(*alloc_fn)(size_t size) = align_alloc;
 
 static struct option const long_options[] =
@@ -113,6 +115,9 @@ static struct option const long_options[] =
        {"sgv_shared", no_argument, 0, 's'},
        {"sgv_single_cache", required_argument, 0, 'S'},
        {"sgv_purge_interval", required_argument, 0, 'P'},
+       {"sgv_disable_clustered_pool", no_argument, 0, 'D'},
+       {"prealloc_buffers", required_argument, 0, 'R'},
+       {"prealloc_buffer_size", required_argument, 0, 'Z'},
 #if defined(DEBUG) || defined(TRACING)
        {"debug", required_argument, 0, 'd'},
 #endif
@@ -148,6 +153,9 @@ static void usage(void)
        printf("  -S, --sgv_single_cache=n Use single entry SGV cache with n pages/entry\n");
        printf("  -P, --sgv_purge_interval=n Use SGV cache purge interval n seconds\n");
        printf("  -u, --unreg_before_close Unregister before close\n");
+       printf("  -D, --sgv_disable_clustered_pool Disable clustered SGV pool\n");
+       printf("  -R, --prealloc_buffers=n Prealloc n buffers\n");
+       printf("  -Z, --prealloc_buffer_size=n Sets the size in KB of each prealloced buffer\n");
 #if defined(DEBUG) || defined(TRACING)
        printf("  -d, --debug=level     Debug tracing level\n");
 #endif
@@ -184,6 +192,7 @@ out:
 
 static void *align_alloc(size_t size)
 {
+       TRACE_MEM("Request to alloc %zuKB", size / 1024);
        return memalign(PAGE_SIZE, size);
 }
 
@@ -242,6 +251,48 @@ out:
        return;
 }
 
+int prealloc_buffers(struct vdisk_dev *dev)
+{
+       int i, c, res = 0;
+
+       if (sgv_disable_clustered_pool)
+               c = 0;
+       else
+               c = 1;
+
+       do {
+               for (i = 0; i < prealloc_buffers_num; i++) {
+                       union scst_user_prealloc_buffer pre;
+
+                       memset(&pre, 0, sizeof(pre));
+                       pre.in.pbuf = (unsigned long)alloc_fn(prealloc_buffer_size);
+                       pre.in.bufflen = prealloc_buffer_size;
+                       pre.in.for_clust_pool = c;
+
+                       if (pre.in.pbuf == 0) {
+                               res = errno;
+                               PRINT_ERROR("Unable to prealloc buffer: %s",
+                                       strerror(res));
+                               goto out;
+                       }
+
+                       res = ioctl(dev->scst_usr_fd, SCST_USER_PREALLOC_BUFFER, &pre);
+                       if (res != 0) {
+                               res = errno;
+                               PRINT_ERROR("Unable to send prealloced buffer: %s",
+                                       strerror(res));
+                               free((void *)(unsigned long)pre.in.pbuf);
+                               goto out;
+                       }
+                       TRACE_MEM("Prealloced buffer cmd_h %x", pre.out.cmd_h);
+               }
+               c--;
+       } while (c >= 0);
+
+out:
+       return res;
+}
+
 int start(int argc, char **argv)
 {
        int res = 0;
@@ -323,6 +374,7 @@ int start(int argc, char **argv)
                }
                desc.sgv_single_alloc_pages = sgv_single_alloc_pages;
                desc.sgv_purge_interval = sgv_purge_interval;
+               desc.sgv_disable_clustered_pool = sgv_disable_clustered_pool;
                desc.type = devs[i].type;
                desc.block_size = devs[i].block_size;
 
@@ -341,6 +393,12 @@ int start(int argc, char **argv)
                        goto out_unreg;
                }
 
+               if ((prealloc_buffers_num > 0) && (prealloc_buffer_size > 0)) {
+                       res = prealloc_buffers(&devs[i]);
+                       if (res != 0)
+                               goto out_unreg;
+               }
+
 #if 1
                {
                        /* Not needed, added here only as a test */
@@ -445,8 +503,8 @@ int main(int argc, char **argv)
 
        memset(devs, 0, sizeof(devs));
 
-       while ((ch = getopt_long(argc, argv, "+b:e:trongluF:I:cp:f:m:d:vsS:P:h", long_options,
-                               &longindex)) >= 0) {
+       while ((ch = getopt_long(argc, argv, "+b:e:trongluF:I:cp:f:m:d:vsS:P:hDR:Z:",
+                       long_options, &longindex)) >= 0) {
                switch (ch) {
                case 'b':
                        block_size = atoi(optarg);
@@ -508,6 +566,15 @@ int main(int argc, char **argv)
                case 'P':
                        sgv_purge_interval = atoi(optarg);
                        break;
+               case 'D':
+                       sgv_disable_clustered_pool = 1;
+                       break;
+               case 'R':
+                       prealloc_buffers_num = atoi(optarg);
+                       break;
+               case 'Z':
+                       prealloc_buffer_size = atoi(optarg) * 1024;
+                       break;
                case 'm':
                        if (strncmp(optarg, "all", 3) == 0)
                                memory_reuse_type = SCST_USER_MEM_REUSE_ALL;
@@ -629,6 +696,13 @@ int main(int argc, char **argv)
                        PRINT_INFO("    %s", "SGV cache purging disabled");
        }
 
+       if (sgv_disable_clustered_pool)
+               PRINT_INFO("    %s", "Disable clustered SGV pool");
+
+       if ((prealloc_buffers_num > 0) && (prealloc_buffer_size > 0))
+               PRINT_INFO("    Prealloc %d buffers of %dKB",
+                       prealloc_buffers_num, prealloc_buffer_size / 1024);
+
        if (!o_direct_flag && (memory_reuse_type == SCST_USER_MEM_NO_REUSE)) {
                PRINT_INFO("    %s", "Using unaligned buffers");
                alloc_fn = malloc;