Update to work on 2.6.23+ kernels, plus some other related fixes and updates
author vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
Tue, 23 Oct 2007 15:35:20 +0000 (15:35 +0000)
committer vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
Tue, 23 Oct 2007 15:35:20 +0000 (15:35 +0000)
git-svn-id: https://scst.svn.sourceforge.net/svnroot/scst/trunk@209 d57e44dd-8a1f-0410-8b47-8ef2f437770f

15 files changed:
iscsi-scst/kernel/iscsi.c
iscsi-scst/kernel/patches/put_page_callback-2.6.23.patch [new file with mode: 0644]
qla2x00t/qla2x00-target/qla2x00t.c
qla2x00t/qla_attr.c
qla2x00t/qla_os.c
scst/include/scsi_tgt.h
scst/include/scst_debug.h
scst/kernel/scst_exec_req_fifo-2.6.23.patch [new file with mode: 0644]
scst/src/dev_handlers/scst_user.c
scst/src/dev_handlers/scst_vdisk.c
scst/src/scst_lib.c
scst/src/scst_main.c
scst/src/scst_mem.c
scst/src/scst_mem.h
scst/src/scst_proc.c

diff --git a/iscsi-scst/kernel/iscsi.c b/iscsi-scst/kernel/iscsi.c
index 5908d9d..68ff0bc 100644
@@ -2471,8 +2471,7 @@ static int __init iscsi_init(void)
        if ((err = event_init()) < 0)
                goto out_reg;
 
-       iscsi_cmnd_cache = kmem_cache_create("scst_iscsi_cmnd",
-               sizeof(struct iscsi_cmnd), 0, 0, NULL, NULL);
+       iscsi_cmnd_cache = KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS);
        if (!iscsi_cmnd_cache) {
                err = -ENOMEM;
                goto out_event;
diff --git a/iscsi-scst/kernel/patches/put_page_callback-2.6.23.patch b/iscsi-scst/kernel/patches/put_page_callback-2.6.23.patch
new file mode 100644
index 0000000..ceeaf24
--- /dev/null
@@ -0,0 +1,259 @@
+diff -upkr linux-2.6.23/include/linux/mm_types.h linux-2.6.23/include/linux/mm_types.h
+--- linux-2.6.23/include/linux/mm_types.h      2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/include/linux/mm_types.h      2007-10-10 13:42:46.000000000 +0400
+@@ -78,6 +78,15 @@ struct page {
+       void *virtual;                  /* Kernel virtual address (NULL if
+                                          not kmapped, ie. highmem) */
+ #endif /* WANT_PAGE_VIRTUAL */
++      /*
++       * Used to implement support for notification on zero-copy TCP transfer
++       * completion. It is not ideal to have this field here; it would be
++       * better placed in struct sk_buff, but that would make the code much
++       * more complicated and fragile to maintain as a separate patch, since
++       * every skb would then have to contain only pages with the same value
++       * in this field.
++       */
++       void *net_priv;
+ };
+ #endif /* _LINUX_MM_TYPES_H */
+diff -upkr linux-2.6.23/include/linux/net.h linux-2.6.23/include/linux/net.h
+--- linux-2.6.23/include/linux/net.h   2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/include/linux/net.h   2007-10-10 13:42:46.000000000 +0400
+@@ -57,6 +57,7 @@ typedef enum {
+ #ifdef __KERNEL__
+ #include <linux/stringify.h>
+ #include <linux/random.h>
++#include <linux/mm.h>
+ #define SOCK_ASYNC_NOSPACE    0
+ #define SOCK_ASYNC_WAITDATA   1
+@@ -319,5 +320,30 @@ extern int net_msg_cost;
+ extern int net_msg_burst;
+ #endif
++/* Support for notification on zero-copy TCP transfer completion */
++#define NET_PAGE_CALLBACKS_DEFINED
++typedef void (*net_get_page_callback_t)(struct page *page);
++typedef void (*net_put_page_callback_t)(struct page *page);
++
++extern net_get_page_callback_t net_get_page_callback;
++extern net_put_page_callback_t net_put_page_callback;
++
++extern int net_set_get_put_page_callbacks(
++      net_get_page_callback_t get_callback,
++      net_put_page_callback_t put_callback);
++
++static inline void net_get_page(struct page *page)
++{
++      if (page->net_priv != 0)
++              net_get_page_callback(page);
++      get_page(page);
++}
++static inline void net_put_page(struct page *page)
++{
++      if (page->net_priv != 0)
++              net_put_page_callback(page);
++      put_page(page);
++}
++
+ #endif /* __KERNEL__ */
+ #endif        /* _LINUX_NET_H */
+diff -upkr linux-2.6.23/net/core/skbuff.c linux-2.6.23/net/core/skbuff.c
+--- linux-2.6.23/net/core/skbuff.c     2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/net/core/skbuff.c     2007-10-10 13:42:46.000000000 +0400
+@@ -262,7 +262,7 @@ static void skb_release_data(struct sk_b
+               if (skb_shinfo(skb)->nr_frags) {
+                       int i;
+                       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+-                              put_page(skb_shinfo(skb)->frags[i].page);
++                              net_put_page(skb_shinfo(skb)->frags[i].page);
+               }
+               if (skb_shinfo(skb)->frag_list)
+@@ -601,7 +601,7 @@ struct sk_buff *pskb_copy(struct sk_buff
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+-                      get_page(skb_shinfo(n)->frags[i].page);
++                      net_get_page(skb_shinfo(n)->frags[i].page);
+               }
+               skb_shinfo(n)->nr_frags = i;
+       }
+@@ -664,7 +664,7 @@ int pskb_expand_head(struct sk_buff *skb
+              sizeof(struct skb_shared_info));
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+-              get_page(skb_shinfo(skb)->frags[i].page);
++              net_get_page(skb_shinfo(skb)->frags[i].page);
+       if (skb_shinfo(skb)->frag_list)
+               skb_clone_fraglist(skb);
+@@ -862,7 +862,7 @@ drop_pages:
+               skb_shinfo(skb)->nr_frags = i;
+               for (; i < nfrags; i++)
+-                      put_page(skb_shinfo(skb)->frags[i].page);
++                      net_put_page(skb_shinfo(skb)->frags[i].page);
+               if (skb_shinfo(skb)->frag_list)
+                       skb_drop_fraglist(skb);
+@@ -1031,7 +1031,7 @@ pull_pages:
+       k = 0;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size <= eat) {
+-                      put_page(skb_shinfo(skb)->frags[i].page);
++                      net_put_page(skb_shinfo(skb)->frags[i].page);
+                       eat -= skb_shinfo(skb)->frags[i].size;
+               } else {
+                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+@@ -1600,7 +1600,7 @@ static inline void skb_split_no_header(s
+                                *    where splitting is expensive.
+                                * 2. Split is accurately. We make this.
+                                */
+-                              get_page(skb_shinfo(skb)->frags[i].page);
++                              net_get_page(skb_shinfo(skb)->frags[i].page);
+                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+                               skb_shinfo(skb1)->frags[0].size -= len - pos;
+                               skb_shinfo(skb)->frags[i].size  = len - pos;
+@@ -1976,7 +1976,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                       BUG_ON(i >= nfrags);
+                       *frag = skb_shinfo(skb)->frags[i];
+-                      get_page(frag->page);
++                      net_get_page(frag->page);
+                       size = frag->size;
+                       if (pos < offset) {
+diff -upkr linux-2.6.23/net/core/utils.c linux-2.6.23/net/core/utils.c
+--- linux-2.6.23/net/core/utils.c      2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/net/core/utils.c      2007-10-10 13:43:13.000000000 +0400
+@@ -25,6 +25,7 @@
+ #include <linux/random.h>
+ #include <linux/percpu.h>
+ #include <linux/init.h>
++#include <linux/skbuff.h>
+ #include <net/sock.h>
+ #include <asm/byteorder.h>
+@@ -36,6 +37,9 @@ int net_msg_burst __read_mostly = 10;
+ int net_msg_warn __read_mostly = 1;
+ EXPORT_SYMBOL(net_msg_warn);
++net_get_page_callback_t net_get_page_callback __read_mostly;
++net_put_page_callback_t net_put_page_callback __read_mostly;
++
+ /*
+  * All net warning printk()s should be guarded by this function.
+  */
+@@ -293,3 +297,32 @@ out:
+ }
+ EXPORT_SYMBOL(in6_pton);
++
++int net_set_get_put_page_callbacks(
++      net_get_page_callback_t get_callback,
++      net_put_page_callback_t put_callback)
++{
++      int res = 0;
++
++      if ((net_get_page_callback != NULL) && (get_callback != NULL) &&
++          (net_get_page_callback != get_callback)) {
++              res = -EBUSY;
++              goto out;
++      }
++
++      if ((net_put_page_callback != NULL) && (put_callback != NULL) &&
++          (net_put_page_callback != put_callback)) {
++              res = -EBUSY;
++              goto out;
++      }
++
++      net_get_page_callback = get_callback;
++      net_put_page_callback = put_callback;
++
++out:
++      return res;
++}
++EXPORT_SYMBOL(net_set_get_put_page_callbacks);
++
++EXPORT_SYMBOL(net_get_page_callback);
++EXPORT_SYMBOL(net_put_page_callback);
+diff -upkr linux-2.6.23/net/ipv4/ip_output.c linux-2.6.23/net/ipv4/ip_output.c
+--- linux-2.6.23/net/ipv4/ip_output.c  2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/net/ipv4/ip_output.c  2007-10-10 13:42:46.000000000 +0400
+@@ -999,7 +999,7 @@ alloc_new_skb:
+                                               err = -EMSGSIZE;
+                                               goto error;
+                                       }
+-                                      get_page(page);
++                                      net_get_page(page);
+                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+                                       frag = &skb_shinfo(skb)->frags[i];
+                               }
+@@ -1157,7 +1157,7 @@ ssize_t  ip_append_page(struct sock *sk, 
+               if (skb_can_coalesce(skb, i, page, offset)) {
+                       skb_shinfo(skb)->frags[i-1].size += len;
+               } else if (i < MAX_SKB_FRAGS) {
+-                      get_page(page);
++                      net_get_page(page);
+                       skb_fill_page_desc(skb, i, page, offset, len);
+               } else {
+                       err = -EMSGSIZE;
+diff -upkr linux-2.6.23/net/ipv4/tcp.c linux-2.6.23/net/ipv4/tcp.c
+--- linux-2.6.23/net/ipv4/tcp.c        2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/net/ipv4/tcp.c        2007-10-10 13:42:46.000000000 +0400
+@@ -560,7 +560,7 @@ new_segment:
+               if (can_coalesce) {
+                       skb_shinfo(skb)->frags[i - 1].size += copy;
+               } else {
+-                      get_page(page);
++                      net_get_page(page);
+                       skb_fill_page_desc(skb, i, page, offset, copy);
+               }
+@@ -765,7 +765,7 @@ new_segment:
+                                       goto new_segment;
+                               } else if (page) {
+                                       if (off == PAGE_SIZE) {
+-                                              put_page(page);
++                                              net_put_page(page);
+                                               TCP_PAGE(sk) = page = NULL;
+                                               off = 0;
+                                       }
+@@ -806,9 +806,9 @@ new_segment:
+                               } else {
+                                       skb_fill_page_desc(skb, i, page, off, copy);
+                                       if (TCP_PAGE(sk)) {
+-                                              get_page(page);
++                                              net_get_page(page);
+                                       } else if (off + copy < PAGE_SIZE) {
+-                                              get_page(page);
++                                              net_get_page(page);
+                                               TCP_PAGE(sk) = page;
+                                       }
+                               }
+diff -upkr linux-2.6.23/net/ipv4/tcp_output.c linux-2.6.23/net/ipv4/tcp_output.c
+--- linux-2.6.23/net/ipv4/tcp_output.c 2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/net/ipv4/tcp_output.c 2007-10-10 13:42:46.000000000 +0400
+@@ -729,7 +729,7 @@ static void __pskb_trim_head(struct sk_b
+       k = 0;
+       for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size <= eat) {
+-                      put_page(skb_shinfo(skb)->frags[i].page);
++                      net_put_page(skb_shinfo(skb)->frags[i].page);
+                       eat -= skb_shinfo(skb)->frags[i].size;
+               } else {
+                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+diff -upkr linux-2.6.23/net/ipv6/ip6_output.c linux-2.6.23/net/ipv6/ip6_output.c
+--- linux-2.6.23/net/ipv6/ip6_output.c 2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/net/ipv6/ip6_output.c 2007-10-10 13:42:46.000000000 +0400
+@@ -1295,7 +1295,7 @@ alloc_new_skb:
+                                               err = -EMSGSIZE;
+                                               goto error;
+                                       }
+-                                      get_page(page);
++                                      net_get_page(page);
+                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+                                       frag = &skb_shinfo(skb)->frags[i];
+                               }
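
The patch above only exports the hook points: a single kernel module is expected to install the callbacks and to set page->net_priv on the pages it sends, so that net_get_page()/net_put_page() can tell it when the network stack takes and finally drops its references, i.e. when a zero-copy send has really completed. A minimal sketch of such a consumer, using hypothetical names (struct my_xfer, my_get_page(), my_put_page()); in this tree the real consumer is iscsi-scst:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/net.h>		/* net_set_get_put_page_callbacks(), NET_PAGE_CALLBACKS_DEFINED */
#include <linux/mm_types.h>	/* struct page, including the new net_priv field */
#include <asm/atomic.h>

/* Hypothetical per-transfer state, reached through page->net_priv */
struct my_xfer {
	atomic_t net_ref;
	struct completion done;
};

/* Called by net_get_page(), and only for pages whose net_priv is set */
static void my_get_page(struct page *page)
{
	struct my_xfer *xfer = page->net_priv;

	atomic_inc(&xfer->net_ref);
}

/* Called by net_put_page(); the last drop means the stack no longer uses the page */
static void my_put_page(struct page *page)
{
	struct my_xfer *xfer = page->net_priv;

	if (atomic_dec_and_test(&xfer->net_ref))
		complete(&xfer->done);
}

static int __init my_init(void)
{
	/* Fails with -EBUSY if another module already installed different callbacks */
	return net_set_get_put_page_callbacks(my_get_page, my_put_page);
}

static void __exit my_exit(void)
{
	net_set_get_put_page_callbacks(NULL, NULL);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
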
diff --git a/qla2x00t/qla2x00-target/qla2x00t.c b/qla2x00t/qla2x00-target/qla2x00t.c
index 4c8d6b3..d04f803 100644
 #error "FC_TARGET_SUPPORT is NOT DEFINED"
 #endif
 
-/*
- * Whether to use slab cach instead of kmalloc/kfree
- */
-#if defined(DEBUG) && defined(CONFIG_DEBUG_SLAB)
-#define Q2T_CACHE_FLAGS ( SLAB_RED_ZONE | SLAB_POISON )
-#else
-#define Q2T_CACHE_FLAGS 0L
-#endif
-
 #ifdef DEBUG
 #define Q2T_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_PID | \
        TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
@@ -2206,9 +2197,7 @@ static int __init q2t_init(void)
 
        TRACE_ENTRY();
 
-       q2t_cmd_cachep = kmem_cache_create("q2t_cmd_struct",
-                                          sizeof(struct q2t_cmd),
-                                          0, Q2T_CACHE_FLAGS, NULL, NULL);
+       q2t_cmd_cachep = KMEM_CACHE(q2t_cmd, SCST_SLAB_FLAGS);
        if (q2t_cmd_cachep == NULL) {
                res = -ENOMEM;
                goto out;
diff --git a/qla2x00t/qla_attr.c b/qla2x00t/qla_attr.c
index 5077276..f22a958 100644
@@ -321,9 +321,15 @@ static CLASS_DEVICE_ATTR(port_database,
                         qla2x00_update_portdb);
 #endif
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_read_fw_dump(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -340,9 +346,15 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
        return (count);
 }
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_write_fw_dump(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -419,9 +431,15 @@ static struct bin_attribute sysfs_fw_dump_attr = {
        .write = qla2x00_sysfs_write_fw_dump,
 };
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_read_nvram(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -439,9 +457,15 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
        return ha->nvram_size;
 }
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_write_nvram(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -493,9 +517,15 @@ static struct bin_attribute sysfs_nvram_attr = {
        .write = qla2x00_sysfs_write_nvram,
 };
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_read_optrom(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -512,9 +542,15 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off,
        return count;
 }
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_write_optrom(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -542,9 +578,15 @@ static struct bin_attribute sysfs_optrom_attr = {
        .write = qla2x00_sysfs_write_optrom,
 };
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -622,9 +664,15 @@ static struct bin_attribute sysfs_optrom_ctl_attr = {
        .write = qla2x00_sysfs_write_optrom_ctl,
 };
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_read_vpd(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_read_vpd(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
@@ -644,9 +692,15 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj, char *buf, loff_t off,
        return ha->vpd_size;
 }
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 static ssize_t
 qla2x00_sysfs_write_vpd(struct kobject *kobj, char *buf, loff_t off,
     size_t count)
+#else
+static ssize_t
+qla2x00_sysfs_write_vpd(struct kobject *kobj, struct bin_attribute *attr,
+    char *buf, loff_t off, size_t count)
+#endif
 {
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
            struct device, kobj)));
diff --git a/qla2x00t/qla_os.c b/qla2x00t/qla_os.c
index ca9263b..b6b1c73 100644
@@ -2775,7 +2775,12 @@ qla2x00_module_init(void)
 
        /* Allocate cache for SRBs. */
        srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
-           SLAB_HWCACHE_ALIGN, NULL, NULL);
+           SLAB_HWCACHE_ALIGN, NULL
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
+           , NULL);
+#else
+           );
+#endif
        if (srb_cachep == NULL) {
                printk(KERN_ERR
                    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
diff --git a/scst/include/scsi_tgt.h b/scst/include/scsi_tgt.h
index 3c38fe2..f801a7a 100644
  *************************************************************/
 #define SCST_PROC_ENTRY_NAME         "scsi_tgt"
 
+/*************************************************************
+ ** Kernel cache creation helper
+ *************************************************************/
+#ifndef KMEM_CACHE
+#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
+       sizeof(struct __struct), __alignof__(struct __struct),\
+       (__flags), NULL, NULL)
+#endif
+
 /*************************************************************
  *                     TYPES
  *************************************************************/
diff --git a/scst/include/scst_debug.h b/scst/include/scst_debug.h
index afd24ef..10527ae 100644
@@ -399,4 +399,10 @@ do {                                            \
 
 #endif /* DEBUG */
 
+#if defined(DEBUG) && defined(CONFIG_DEBUG_SLAB)
+#define SCST_SLAB_FLAGS ( SLAB_RED_ZONE | SLAB_POISON )
+#else
+#define SCST_SLAB_FLAGS 0L
+#endif
+
 #endif /* __SCST_DEBUG_H */
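
Together with the KMEM_CACHE() compatibility macro added to scsi_tgt.h above, this lets every cache-creation site be written the same way on old and new kernels, which is what the converted call sites in this commit rely on. Purely for illustration (not part of the diff), on a pre-2.6.23 kernel the new iscsi-scst call expands roughly to:

/* KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS) via the fallback macro */
iscsi_cmnd_cache = kmem_cache_create("iscsi_cmnd",
	sizeof(struct iscsi_cmnd), __alignof__(struct iscsi_cmnd),
	SCST_SLAB_FLAGS, NULL, NULL);

Note that the /proc/slabinfo name changes from "scst_iscsi_cmnd" to "iscsi_cmnd", since KMEM_CACHE() stringifies the structure name.
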
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.23.patch b/scst/kernel/scst_exec_req_fifo-2.6.23.patch
new file mode 100644
index 0000000..52add2c
--- /dev/null
@@ -0,0 +1,109 @@
+diff -upr linux-2.6.23/drivers/scsi/scsi_lib.c linux-2.6.23/drivers/scsi/scsi_lib.c
+--- linux-2.6.23/drivers/scsi/scsi_lib.c       2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/drivers/scsi/scsi_lib.c       2007-10-10 12:37:27.000000000 +0400
+@@ -366,7 +366,7 @@ free_bios:
+ }
+ /**
+- * scsi_execute_async - insert request
++ * __scsi_execute_async - insert request
+  * @sdev:     scsi device
+  * @cmd:      scsi command
+  * @cmd_len:  length of scsi cdb
+@@ -377,11 +377,14 @@ free_bios:
+  * @timeout:  request timeout in seconds
+  * @retries:  number of times to retry request
+  * @flags:    or into request flags
++ * @at_head:  insert request at head or tail of queue
+  **/
+-int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
++static inline int __scsi_execute_async(struct scsi_device *sdev,
++                     const unsigned char *cmd,
+                      int cmd_len, int data_direction, void *buffer, unsigned bufflen,
+                      int use_sg, int timeout, int retries, void *privdata,
+-                     void (*done)(void *, char *, int, int), gfp_t gfp)
++                     void (*done)(void *, char *, int, int), gfp_t gfp,
++                     int at_head)
+ {
+       struct request *req;
+       struct scsi_io_context *sioc;
+@@ -418,7 +421,7 @@ int scsi_execute_async(struct scsi_devic
+       sioc->data = privdata;
+       sioc->done = done;
+-      blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
++      blk_execute_rq_nowait(req->q, NULL, req, at_head, scsi_end_async);
+       return 0;
+ free_req:
+@@ -427,8 +430,53 @@ free_sense:
+       kmem_cache_free(scsi_io_context_cache, sioc);
+       return DRIVER_ERROR << 24;
+ }
++
++/**
++ * scsi_execute_async - insert request
++ * @sdev:     scsi device
++ * @cmd:      scsi command
++ * @cmd_len:  length of scsi cdb
++ * @data_direction: data direction
++ * @buffer:   data buffer (this can be a kernel buffer or scatterlist)
++ * @bufflen:  len of buffer
++ * @use_sg:   if buffer is a scatterlist this is the number of elements
++ * @timeout:  request timeout in seconds
++ * @retries:  number of times to retry request
++ * @flags:    or into request flags
++ **/
++int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
++                     int cmd_len, int data_direction, void *buffer, unsigned bufflen,
++                     int use_sg, int timeout, int retries, void *privdata,
++                     void (*done)(void *, char *, int, int), gfp_t gfp)
++{
++      return __scsi_execute_async(sdev, cmd, cmd_len, data_direction, buffer,
++              bufflen, use_sg, timeout, retries, privdata, done, gfp, 1);
++}
+ EXPORT_SYMBOL_GPL(scsi_execute_async);
++/**
++ * scsi_execute_async_fifo - insert request at tail, in FIFO order
++ * @sdev:     scsi device
++ * @cmd:      scsi command
++ * @cmd_len:  length of scsi cdb
++ * @data_direction: data direction
++ * @buffer:   data buffer (this can be a kernel buffer or scatterlist)
++ * @bufflen:  len of buffer
++ * @use_sg:   if buffer is a scatterlist this is the number of elements
++ * @timeout:  request timeout in seconds
++ * @retries:  number of times to retry request
++ * @flags:    or into request flags
++ **/
++int scsi_execute_async_fifo(struct scsi_device *sdev, const unsigned char *cmd,
++                     int cmd_len, int data_direction, void *buffer, unsigned bufflen,
++                     int use_sg, int timeout, int retries, void *privdata,
++                     void (*done)(void *, char *, int, int), gfp_t gfp)
++{
++      return __scsi_execute_async(sdev, cmd, cmd_len, data_direction, buffer,
++              bufflen, use_sg, timeout, retries, privdata, done, gfp, 0);
++}
++EXPORT_SYMBOL_GPL(scsi_execute_async_fifo);
++
+ /*
+  * Function:    scsi_init_cmd_errh()
+  *
+diff -upr linux-2.6.23/include/scsi/scsi_device.h linux-2.6.23/include/scsi/scsi_device.h
+--- linux-2.6.23/include/scsi/scsi_device.h    2007-10-10 00:31:38.000000000 +0400
++++ linux-2.6.23/include/scsi/scsi_device.h    2007-10-10 12:37:27.000000000 +0400
+@@ -303,6 +303,13 @@ extern int scsi_execute_async(struct scs
+                             int timeout, int retries, void *privdata,
+                             void (*done)(void *, char *, int, int),
+                             gfp_t gfp);
++#define SCSI_EXEC_REQ_FIFO_DEFINED
++extern int scsi_execute_async_fifo(struct scsi_device *sdev,
++                            const unsigned char *cmd, int cmd_len, int data_direction,
++                            void *buffer, unsigned bufflen, int use_sg,
++                            int timeout, int retries, void *privdata,
++                            void (*done)(void *, char *, int, int),
++                            gfp_t gfp);
+ static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
+ {
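
Both exported helpers go through __scsi_execute_async(); the only difference is the at_head value handed to blk_execute_rq_nowait(), so the _fifo variant inserts requests at the tail of the queue (FIFO order) instead of at the head. A hedged sketch of a caller, with hypothetical my_done()/my_read_lba0() and a READ(10) of one block chosen purely for illustration, falling back to the stock API when the patch is absent:

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Completion callback: (private data, sense buffer, result, residual) */
static void my_done(void *data, char *sense, int result, int resid)
{
	struct completion *io_done = data;

	if (result != 0)
		printk(KERN_ERR "READ(10) failed, result %x\n", result);
	complete(io_done);
}

static int my_read_lba0(struct scsi_device *sdev, void *buf, unsigned int len,
			struct completion *io_done)
{
	unsigned char cdb[10] = { READ_10, 0, 0, 0, 0, 0, 0, 0, 1, 0 };

#ifdef SCSI_EXEC_REQ_FIFO_DEFINED
	/* Queue at the tail (FIFO insertion), unlike scsi_execute_async() */
	return scsi_execute_async_fifo(sdev, cdb, sizeof(cdb), DMA_FROM_DEVICE,
				       buf, len, 0 /* plain buffer, not sg */,
				       30 * HZ, 3, io_done, my_done, GFP_KERNEL);
#else
	/* Unpatched kernels only offer the head-insertion variant */
	return scsi_execute_async(sdev, cdb, sizeof(cdb), DMA_FROM_DEVICE,
				  buf, len, 0, 30 * HZ, 3, io_done, my_done,
				  GFP_KERNEL);
#endif
}
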
diff --git a/scst/src/dev_handlers/scst_user.c b/scst/src/dev_handlers/scst_user.c
index b30883b..63ac04a 100644
        for details.
 #endif
 
-#if defined(DEBUG) && defined(CONFIG_DEBUG_SLAB)
-#define DEV_USER_SLAB_FLAGS ( SLAB_RED_ZONE | SLAB_POISON )
-#else
-#define DEV_USER_SLAB_FLAGS 0L
-#endif
-
 #define DEV_USER_MAJOR                 237
 #define DEV_USER_CMD_HASH_ORDER                6
 #define DEV_USER_TM_TIMEOUT            (10*HZ)
@@ -99,7 +93,7 @@ struct scst_user_dev
        struct completion cleanup_cmpl;
 };
 
-struct dev_user_pre_unreg_sess_obj
+struct scst_user_pre_unreg_sess_obj
 {
        struct scst_tgt_dev *tgt_dev;
        unsigned int active:1;
@@ -113,7 +107,7 @@ struct dev_user_pre_unreg_sess_obj
 };
 
 /* Most fields are unprotected, since only one thread at time can access them */
-struct dev_user_cmd
+struct scst_user_cmd
 {
        struct scst_cmd *cmd;
        struct scst_user_dev *dev;
@@ -126,7 +120,7 @@ struct dev_user_cmd
        unsigned int internal_reset_tm:1;
        unsigned int aborted:1;
 
-       struct dev_user_cmd *buf_ucmd;
+       struct scst_user_cmd *buf_ucmd;
 
        int cur_data_page;
        int num_data_pages;
@@ -148,9 +142,9 @@ struct dev_user_cmd
        int result;
 };
 
-static struct dev_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
+static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask);
-static void dev_user_free_ucmd(struct dev_user_cmd *ucmd);
+static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);
 
 static int dev_user_parse(struct scst_cmd *cmd, struct scst_info_cdb *info_cdb);
 static int dev_user_exec(struct scst_cmd *cmd);
@@ -166,16 +160,16 @@ static struct page *dev_user_alloc_pages(struct scatterlist *sg,
 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
         void *priv);
 
-static void dev_user_add_to_ready(struct dev_user_cmd *ucmd);
+static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
 
-static void dev_user_unjam_cmd(struct dev_user_cmd *ucmd, int busy,
+static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags);
 static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
        struct scst_tgt_dev *tgt_dev);
 
-static int dev_user_process_reply_tm_exec(struct dev_user_cmd *ucmd,
+static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status);
-static int dev_user_process_reply_sess(struct dev_user_cmd *ucmd, int status);
+static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
 static int dev_user_register_dev(struct file *file,
        const struct scst_user_dev_desc *dev_desc);
 static int __dev_user_set_opt(struct scst_user_dev *dev,
@@ -213,7 +207,7 @@ static LIST_HEAD(cleanup_list);
 static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
 static struct task_struct *cleanup_thread;
 
-static inline void ucmd_get(struct dev_user_cmd *ucmd, int barrier)
+static inline void ucmd_get(struct scst_user_cmd *ucmd, int barrier)
 {
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        atomic_inc(&ucmd->ucmd_ref);
@@ -221,7 +215,7 @@ static inline void ucmd_get(struct dev_user_cmd *ucmd, int barrier)
                smp_mb__after_atomic_inc();
 }
 
-static inline void ucmd_put(struct dev_user_cmd *ucmd)
+static inline void ucmd_put(struct scst_user_cmd *ucmd)
 {
        TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
        if (atomic_dec_and_test(&ucmd->ucmd_ref))
@@ -255,18 +249,18 @@ static inline int dev_user_check_reg(struct scst_user_dev *dev)
        return 0;
 }
 
-static inline int dev_user_cmd_hashfn(int h)
+static inline int scst_user_cmd_hashfn(int h)
 {
        return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
 }
 
-static inline struct dev_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
+static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
        unsigned int h)
 {
        struct list_head *head;
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
 
-       head = &dev->ucmd_hash[dev_user_cmd_hashfn(h)];
+       head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
        list_for_each_entry(ucmd, head, hash_list_entry) {
                if (ucmd->h == h) {
                        TRACE_DBG("Found ucmd %p", ucmd);
@@ -276,11 +270,11 @@ static inline struct dev_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
        return NULL;
 }
 
-static void cmnd_insert_hash(struct dev_user_cmd *ucmd)
+static void cmnd_insert_hash(struct scst_user_cmd *ucmd)
 {
        struct list_head *head;
        struct scst_user_dev *dev = ucmd->dev;
-       struct dev_user_cmd *u;
+       struct scst_user_cmd *u;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->cmd_lists.cmd_list_lock, flags);
@@ -288,7 +282,7 @@ static void cmnd_insert_hash(struct dev_user_cmd *ucmd)
                ucmd->h = dev->handle_counter++;
                u = __ucmd_find_hash(dev, ucmd->h);
        } while(u != NULL);
-       head = &dev->ucmd_hash[dev_user_cmd_hashfn(ucmd->h)];
+       head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
        list_add_tail(&ucmd->hash_list_entry, head);
        spin_unlock_irqrestore(&dev->cmd_lists.cmd_list_lock, flags);
 
@@ -296,7 +290,7 @@ static void cmnd_insert_hash(struct dev_user_cmd *ucmd)
        return;
 }
 
-static inline void cmnd_remove_hash(struct dev_user_cmd *ucmd)
+static inline void cmnd_remove_hash(struct scst_user_cmd *ucmd)
 {
        unsigned long flags;
        spin_lock_irqsave(&ucmd->dev->cmd_lists.cmd_list_lock, flags);
@@ -307,7 +301,7 @@ static inline void cmnd_remove_hash(struct dev_user_cmd *ucmd)
        return;
 }
 
-static void dev_user_free_ucmd(struct dev_user_cmd *ucmd)
+static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
 {
        TRACE_ENTRY();
 
@@ -325,7 +319,7 @@ static void dev_user_free_ucmd(struct dev_user_cmd *ucmd)
 static struct page *dev_user_alloc_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
 {
-       struct dev_user_cmd *ucmd = (struct dev_user_cmd*)priv;
+       struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;
 
        TRACE_ENTRY();
 
@@ -357,7 +351,7 @@ out:
        return sg->page;
 }
 
-static void dev_user_on_cached_mem_free(struct dev_user_cmd *ucmd)
+static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
 {
        TRACE_ENTRY();
 
@@ -376,7 +370,7 @@ static void dev_user_on_cached_mem_free(struct dev_user_cmd *ucmd)
        return;
 }
 
-static void dev_user_unmap_buf(struct dev_user_cmd *ucmd)
+static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
 {
        int i;
 
@@ -400,7 +394,7 @@ static void dev_user_unmap_buf(struct dev_user_cmd *ucmd)
        return;
 }
 
-static void __dev_user_free_sg_entries(struct dev_user_cmd *ucmd)
+static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
 {
        TRACE_ENTRY();
 
@@ -423,7 +417,7 @@ static void __dev_user_free_sg_entries(struct dev_user_cmd *ucmd)
 static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
 {
-       struct dev_user_cmd *ucmd = (struct dev_user_cmd*)priv;
+       struct scst_user_cmd *ucmd = (struct scst_user_cmd*)priv;
 
        TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
                sg_count, ucmd);
@@ -433,7 +427,7 @@ static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
        return;
 }
 
-static inline int is_buff_cached(struct dev_user_cmd *ucmd)
+static inline int is_buff_cached(struct scst_user_cmd *ucmd)
 {
        int mem_reuse_type = ucmd->dev->memory_reuse_type;
 
@@ -451,7 +445,7 @@ static inline int is_buff_cached(struct dev_user_cmd *ucmd)
  * Returns 0 for success, <0 for fatal failure, >0 - need pages.
  * Unmaps the buffer, if needed in case of error
  */
-static int dev_user_alloc_sg(struct dev_user_cmd *ucmd, int cached_buff)
+static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
 {
        int res = 0;
        struct scst_cmd *cmd = ucmd->cmd;
@@ -489,8 +483,8 @@ static int dev_user_alloc_sg(struct dev_user_cmd *ucmd, int cached_buff)
        cmd->sg = sgv_pool_alloc(dev->pool, bufflen, gfp_mask, flags,
                        &cmd->sg_cnt, &ucmd->sgv, ucmd);
        if (cmd->sg != NULL) {
-               struct dev_user_cmd *buf_ucmd =
-                       (struct dev_user_cmd*)sgv_get_priv(ucmd->sgv);
+               struct scst_user_cmd *buf_ucmd =
+                       (struct scst_user_cmd*)sgv_get_priv(ucmd->sgv);
 
                TRACE_MEM("Buf ucmd %p", buf_ucmd);
 
@@ -551,7 +545,7 @@ out:
        return res;
 }
 
-static int dev_user_alloc_space(struct dev_user_cmd *ucmd)
+static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
 {
        int rc, res = SCST_CMD_STATE_DEFAULT;
        struct scst_cmd *cmd = ucmd->cmd;
@@ -596,10 +590,10 @@ out:
        return res;
 }
 
-static struct dev_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
+static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
        int gfp_mask)
 {
-       struct dev_user_cmd *ucmd = NULL;
+       struct scst_user_cmd *ucmd = NULL;
 
        TRACE_ENTRY();
 
@@ -641,7 +635,7 @@ static int dev_user_get_block(struct scst_cmd *cmd)
 static int dev_user_parse(struct scst_cmd *cmd, struct scst_info_cdb *info_cdb)
 {
        int rc, res = SCST_CMD_STATE_DEFAULT;
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
        int atomic = scst_cmd_atomic(cmd);
        struct scst_user_dev *dev = (struct scst_user_dev*)cmd->dev->dh_priv;
        int gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
@@ -662,7 +656,7 @@ static int dev_user_parse(struct scst_cmd *cmd, struct scst_info_cdb *info_cdb)
                ucmd->cmd = cmd;
                cmd->dh_priv = ucmd;
        } else {
-               ucmd = (struct dev_user_cmd*)cmd->dh_priv;
+               ucmd = (struct scst_user_cmd*)cmd->dh_priv;
                TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
        }
 
@@ -743,9 +737,9 @@ out_error:
        goto out;
 }
 
-static void dev_user_flush_dcache(struct dev_user_cmd *ucmd)
+static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
 {
-       struct dev_user_cmd *buf_ucmd = ucmd->buf_ucmd;
+       struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
        unsigned long start = buf_ucmd->ubuff;
        int i;
 
@@ -773,7 +767,7 @@ out:
 
 static int dev_user_exec(struct scst_cmd *cmd)
 {
-       struct dev_user_cmd *ucmd = (struct dev_user_cmd*)cmd->dh_priv;
+       struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;
 
        TRACE_ENTRY();
 
@@ -811,7 +805,7 @@ static int dev_user_exec(struct scst_cmd *cmd)
        return SCST_EXEC_COMPLETED;
 }
 
-static void dev_user_free_sgv(struct dev_user_cmd *ucmd)
+static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
 {
        if (ucmd->sgv != NULL) {
                sgv_pool_free(ucmd->sgv);
@@ -826,7 +820,7 @@ static void dev_user_free_sgv(struct dev_user_cmd *ucmd)
 
 static void dev_user_on_free_cmd(struct scst_cmd *cmd)
 {
-       struct dev_user_cmd *ucmd = (struct dev_user_cmd*)cmd->dh_priv;
+       struct scst_user_cmd *ucmd = (struct scst_user_cmd*)cmd->dh_priv;
 
        TRACE_ENTRY();
 
@@ -905,7 +899,7 @@ static int dev_user_tape_done(struct scst_cmd *cmd)
        return res;
 }
 
-static void dev_user_add_to_ready(struct dev_user_cmd *ucmd)
+static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
 {
        struct scst_user_dev *dev = ucmd->dev;
        unsigned long flags;
@@ -934,7 +928,7 @@ static void dev_user_add_to_ready(struct dev_user_cmd *ucmd)
                            !(dev->attach_cmd_active || dev->tm_cmd_active ||
                              dev->internal_reset_active ||
                              (dev->detach_cmd_count != 0))) {
-                               struct dev_user_pre_unreg_sess_obj *p, *found = NULL;
+                               struct scst_user_pre_unreg_sess_obj *p, *found = NULL;
                                list_for_each_entry(p, &dev->pre_unreg_sess_list,
                                        pre_unreg_sess_list_entry) {
                                        if (p->tgt_dev == ucmd->cmd->tgt_dev) {
@@ -998,7 +992,7 @@ out:
        return;
 }
 
-static int dev_user_map_buf(struct dev_user_cmd *ucmd, unsigned long ubuff,
+static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
        int num_pg)
 {
        int res = 0, rc;
@@ -1066,7 +1060,7 @@ out_unmap:
        goto out_err;
 }
 
-static int dev_user_process_reply_alloc(struct dev_user_cmd *ucmd,
+static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
 {
        int res = 0;
@@ -1105,7 +1099,7 @@ out_hwerr:
        goto out_process;
 }
 
-static int dev_user_process_reply_parse(struct dev_user_cmd *ucmd,
+static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
 {
        int res = 0;
@@ -1153,7 +1147,7 @@ out_inval:
        goto out_process;
 }
 
-static int dev_user_process_reply_on_free(struct dev_user_cmd *ucmd)
+static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
 {
        int res = 0;
 
@@ -1168,7 +1162,7 @@ static int dev_user_process_reply_on_free(struct dev_user_cmd *ucmd)
        return res;
 }
 
-static int dev_user_process_reply_on_cache_free(struct dev_user_cmd *ucmd)
+static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
 {
        int res = 0;
 
@@ -1182,7 +1176,7 @@ static int dev_user_process_reply_on_cache_free(struct dev_user_cmd *ucmd)
        return res;
 }
 
-static int dev_user_process_reply_exec(struct dev_user_cmd *ucmd,
+static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
        struct scst_user_reply_cmd *reply)
 {
        int res = 0;
@@ -1296,7 +1290,7 @@ static int dev_user_process_reply(struct scst_user_dev *dev,
        struct scst_user_reply_cmd *reply)
 {
        int res = 0;
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
        int state;
 
        TRACE_ENTRY();
@@ -1460,9 +1454,9 @@ static int dev_user_process_scst_commands(struct scst_user_dev *dev)
 }
 
 /* Called under cmd_lists.cmd_list_lock and IRQ off */
-struct dev_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
+struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
 {
-       struct dev_user_cmd *u;
+       struct scst_user_cmd *u;
 
 again:
        u = NULL;
@@ -1522,7 +1516,7 @@ static inline int test_cmd_lists(struct scst_user_dev *dev)
 
 /* Called under cmd_lists.cmd_list_lock and IRQ off */
 static int dev_user_get_next_cmd(struct scst_user_dev *dev,
-       struct dev_user_cmd **ucmd)
+       struct scst_user_cmd **ucmd)
 {
        int res = 0;
        wait_queue_t wait;
@@ -1588,7 +1582,7 @@ static inline int test_prio_cmd_list(struct scst_user_dev *dev)
 
 /* Called under cmd_lists.cmd_list_lock and IRQ off */
 static int dev_user_get_next_prio_cmd(struct scst_user_dev *dev,
-       struct dev_user_cmd **ucmd)
+       struct scst_user_cmd **ucmd)
 {
        int res = 0;
        wait_queue_t wait;
@@ -1641,7 +1635,7 @@ static int dev_user_reply_get_cmd(struct file *file, unsigned long arg,
        struct scst_user_dev *dev;
        struct scst_user_get_cmd *cmd;
        struct scst_user_reply_cmd *reply;
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
        uint64_t ureply;
 
        TRACE_ENTRY();
@@ -1829,7 +1823,7 @@ out:
 /*
  * Called under cmd_lists.cmd_list_lock, but can drop it inside, then reaquire.
  */
-static void dev_user_unjam_cmd(struct dev_user_cmd *ucmd, int busy,
+static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
        unsigned long *flags)
 {
        int state = ucmd->state & ~UCMD_STATE_MASK;
@@ -1939,7 +1933,7 @@ out:
        return;
 }
 
-static int __unjam_check_tgt_dev(struct dev_user_cmd *ucmd, int state,
+static int __unjam_check_tgt_dev(struct scst_user_cmd *ucmd, int state,
        struct scst_tgt_dev *tgt_dev)
 {
        int res = 0;
@@ -1964,7 +1958,7 @@ out:
        return res;
 }
 
-static int __unjam_check_tm(struct dev_user_cmd *ucmd, int state)
+static int __unjam_check_tm(struct scst_user_cmd *ucmd, int state)
 {
        int res = 0;
 
@@ -1991,7 +1985,7 @@ static void dev_user_unjam_dev(struct scst_user_dev *dev, int tm,
 {
        int i;
        unsigned long flags;
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
 
        TRACE_ENTRY();
 
@@ -2065,7 +2059,7 @@ repeat:
  ** We also don't queue >1 ATTACH_SESS commands and after timeout fail it.
  **/
 
-static int dev_user_process_reply_tm_exec(struct dev_user_cmd *ucmd,
+static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
        int status)
 {
        int res = 0;
@@ -2103,9 +2097,9 @@ static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        struct scst_tgt_dev *tgt_dev)
 {
        int res, rc;
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
        struct scst_user_dev *dev = (struct scst_user_dev*)tgt_dev->dev->dh_priv;
-       struct dev_user_cmd *ucmd_to_abort = NULL;
+       struct scst_user_cmd *ucmd_to_abort = NULL;
 
        TRACE_ENTRY();
 
@@ -2121,7 +2115,7 @@ static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
        ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
 
        if (mcmd->cmd_to_abort != NULL) {
-               ucmd_to_abort = (struct dev_user_cmd*)mcmd->cmd_to_abort->dh_priv;
+               ucmd_to_abort = (struct scst_user_cmd*)mcmd->cmd_to_abort->dh_priv;
                if (ucmd_to_abort != NULL)
                        ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
        }
@@ -2237,7 +2231,7 @@ static void dev_user_detach(struct scst_device *sdev)
        return;
 }
 
-static int dev_user_process_reply_sess(struct dev_user_cmd *ucmd, int status)
+static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
 {
        int res = 0;
        unsigned long flags;
@@ -2276,7 +2270,7 @@ static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
        struct scst_user_dev *dev =
                (struct scst_user_dev*)tgt_dev->dev->dh_priv;
        int res = 0, rc;
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
 
        TRACE_ENTRY();
 
@@ -2360,10 +2354,10 @@ static void dev_user_pre_unreg_sess_work_fn(struct work_struct *work)
 #endif
 {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-       struct dev_user_pre_unreg_sess_obj *pd = (struct dev_user_pre_unreg_sess_obj*)p;
+       struct scst_user_pre_unreg_sess_obj *pd = (struct scst_user_pre_unreg_sess_obj*)p;
 #else
-       struct dev_user_pre_unreg_sess_obj *pd = container_of(
-               (struct delayed_work*)work, struct dev_user_pre_unreg_sess_obj,
+       struct scst_user_pre_unreg_sess_obj *pd = container_of(
+               (struct delayed_work*)work, struct scst_user_pre_unreg_sess_obj,
                pre_unreg_sess_work);
 #endif
        struct scst_user_dev *dev =
@@ -2393,7 +2387,7 @@ static void dev_user_pre_unreg_sess(struct scst_tgt_dev *tgt_dev)
 {
        struct scst_user_dev *dev =
                (struct scst_user_dev*)tgt_dev->dev->dh_priv;
-       struct dev_user_pre_unreg_sess_obj *pd;
+       struct scst_user_pre_unreg_sess_obj *pd;
 
        TRACE_ENTRY();
 
@@ -2425,8 +2419,8 @@ static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
 {
        struct scst_user_dev *dev =
                (struct scst_user_dev*)tgt_dev->dev->dh_priv;
-       struct dev_user_cmd *ucmd;
-       struct dev_user_pre_unreg_sess_obj *pd = NULL, *p;
+       struct scst_user_cmd *ucmd;
+       struct scst_user_pre_unreg_sess_obj *pd = NULL, *p;
 
        TRACE_ENTRY();
 
@@ -2722,7 +2716,7 @@ static int __dev_user_set_opt(struct scst_user_dev *dev,
 
        if ((dev->prio_queue_type != opt->prio_queue_type) &&
            (opt->prio_queue_type == SCST_USER_PRIO_QUEUE_SINGLE)) {
-               struct dev_user_cmd *u, *t;
+               struct scst_user_cmd *u, *t;
                /* No need for lock, the activity is suspended */
                list_for_each_entry_safe(u, t, &dev->prio_ready_cmd_list,
                                ready_cmd_list_entry) {
@@ -2887,7 +2881,7 @@ out:
 
 static void dev_user_process_cleanup(struct scst_user_dev *dev)
 {
-       struct dev_user_cmd *ucmd;
+       struct scst_user_cmd *ucmd;
        int rc;
 
        TRACE_ENTRY();
@@ -2917,7 +2911,7 @@ static void dev_user_process_cleanup(struct scst_user_dev *dev)
        int i;
        for(i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
                struct list_head *head = &dev->ucmd_hash[i];
-               struct dev_user_cmd *ucmd, *t;
+               struct scst_user_cmd *ucmd, *t;
                list_for_each_entry_safe(ucmd, t, head, hash_list_entry) {
                        PRINT_ERROR_PR("Lost ucmd %p (state %x, ref %d)", ucmd,
                                ucmd->state, atomic_read(&ucmd->ucmd_ref));
@@ -3003,9 +2997,7 @@ static int __init init_scst_user(void)
        goto out;
 #endif
 
-       user_cmd_cachep = kmem_cache_create("scst_user_cmd",
-               sizeof(struct dev_user_cmd), 0, DEV_USER_SLAB_FLAGS, NULL,
-               NULL);
+       user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
        if (user_cmd_cachep == NULL) {
                res = -ENOMEM;
                goto out;
diff --git a/scst/src/dev_handlers/scst_vdisk.c b/scst/src/dev_handlers/scst_vdisk.c
index 07042d1..ed30141 100644
@@ -55,12 +55,6 @@ static struct scst_proc_log vdisk_proc_local_trace_tbl[] =
 
 #include "scst_dev_handler.h"
 
-#if defined(DEBUG) && defined(CONFIG_DEBUG_SLAB)
-#define VDISK_SLAB_FLAGS ( SLAB_RED_ZONE | SLAB_POISON )
-#else
-#define VDISK_SLAB_FLAGS 0L
-#endif
-
 /* 8 byte ASCII Vendor */
 #define SCST_FIO_VENDOR                        "SCST_FIO"
 #define SCST_BIO_VENDOR                        "SCST_BIO"
@@ -3170,9 +3164,7 @@ static int __init init_scst_vdisk_driver(void)
 {
        int res, num_threads;
 
-       vdisk_thr_cachep = kmem_cache_create("vdisk_thr_data",
-               sizeof(struct scst_vdisk_thr), 0, VDISK_SLAB_FLAGS, NULL,
-               NULL);
+       vdisk_thr_cachep = KMEM_CACHE(scst_vdisk_thr, SCST_SLAB_FLAGS);
        if (vdisk_thr_cachep == NULL) {
                res = -ENOMEM;
                goto out;
diff --git a/scst/src/scst_lib.c b/scst/src/scst_lib.c
index 15cce60..45e60e3 100644
@@ -667,13 +667,24 @@ int scst_acg_add_dev(struct scst_acg *acg, struct scst_device *dev, lun_t lun,
        }
 
 out:
+       if (res == 0) {
+               if (dev->virt_name != NULL) {
+                       PRINT_INFO_PR("Added device %s to group %s",
+                               dev->virt_name, acg->acg_name);
+               } else {
+                       PRINT_INFO_PR("Added device %d:%d:%d:%d to group %s",
+                               dev->scsi_dev->host->host_no,
+                               dev->scsi_dev->channel, dev->scsi_dev->id,
+                               dev->scsi_dev->lun, acg->acg_name);
+               }
+       }
+
        TRACE_EXIT_RES(res);
        return res;
 
 out_free:
        list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
-                        extra_tgt_dev_list_entry) 
-       {
+                        extra_tgt_dev_list_entry) {
                scst_free_tgt_dev(tgt_dev);
        }
        scst_free_acg_dev(acg_dev);
@@ -710,7 +721,19 @@ int scst_acg_remove_dev(struct scst_acg *acg, struct scst_device *dev)
        }
        scst_free_acg_dev(acg_dev);
 
-out:   
+out:
+       if (res == 0) {
+               if (dev->virt_name != NULL) {
+                       PRINT_INFO_PR("Removed device %s from group %s",
+                               dev->virt_name, acg->acg_name);
+               } else {
+                       PRINT_INFO_PR("Removed device %d:%d:%d:%d from group %s",
+                               dev->scsi_dev->host->host_no,
+                               dev->scsi_dev->channel, dev->scsi_dev->id,
+                               dev->scsi_dev->lun, acg->acg_name);
+               }
+       }
+
        TRACE_EXIT_RES(res);
        return res;
 }
@@ -728,8 +751,8 @@ int scst_acg_add_name(struct scst_acg *acg, const char *name)
        list_for_each_entry(n, &acg->acn_list, acn_list_entry) 
        {
                if (strcmp(n->name, name) == 0) {
-                       PRINT_ERROR_PR("Name %s already exists in access "
-                               "control group %s", name, acg->acg_name);
+                       PRINT_ERROR_PR("Name %s already exists in group %s",
+                               name, acg->acg_name);
                        res = -EINVAL;
                        goto out;
                }
@@ -756,6 +779,10 @@ int scst_acg_add_name(struct scst_acg *acg, const char *name)
        list_add_tail(&n->acn_list_entry, &acg->acn_list);
 
 out:
+       if (res == 0) {
+               PRINT_INFO_PR("Added name %s to group %s", name, acg->acg_name);
+       }
+
        TRACE_EXIT_RES(res);
        return res;
 
@@ -783,9 +810,12 @@ int scst_acg_remove_name(struct scst_acg *acg, const char *name)
                }
        }
        
-       if (res != 0) {
-               PRINT_ERROR_PR("Unable to find name %s in access control "
-                       "group %s", name, acg->acg_name);
+       if (res == 0) {
+               PRINT_INFO_PR("Removed name %s from group %s", name,
+                       acg->acg_name);
+       } else {
+               PRINT_ERROR_PR("Unable to find name %s in group %s", name,
+                       acg->acg_name);
        }
 
        TRACE_EXIT_RES(res);
diff --git a/scst/src/scst_main.c b/scst/src/scst_main.c
index d329505..ac7ce2f 100644
@@ -1418,26 +1418,19 @@ static int __init init_scst(void)
 
        scst_threads_info_init();
 
-#define INIT_CACHEP(p, s, t, o) do {                                   \
-               p = kmem_cache_create(s, sizeof(struct t), 0,           \
-                                     SCST_SLAB_FLAGS, NULL, NULL);     \
-               TRACE_MEM("Slab create: %s at %p size %zd", s, p,       \
-                         sizeof(struct t));                            \
+#define INIT_CACHEP(p, s, o) do {                                      \
+               p = KMEM_CACHE(s, SCST_SLAB_FLAGS);                     \
+               TRACE_MEM("Slab create: %s at %p size %zd", #s, p,      \
+                         sizeof(struct s));                            \
                if (p == NULL) { res = -ENOMEM; goto o; }               \
        } while (0)
          
-       INIT_CACHEP(scst_mgmt_cachep, SCST_MGMT_CMD_CACHE_STRING, 
-                   scst_mgmt_cmd, out);
-       INIT_CACHEP(scst_ua_cachep, SCST_UA_CACHE_STRING, 
-                   scst_tgt_dev_UA, out_destroy_mgmt_cache);
-       INIT_CACHEP(scst_cmd_cachep,  SCST_CMD_CACHE_STRING, 
-                   scst_cmd, out_destroy_ua_cache);
-       INIT_CACHEP(scst_sess_cachep, SCST_SESSION_CACHE_STRING,
-                   scst_session, out_destroy_cmd_cache);
-       INIT_CACHEP(scst_tgtd_cachep, SCST_TGT_DEV_CACHE_STRING,
-                   scst_tgt_dev, out_destroy_sess_cache);
-       INIT_CACHEP(scst_acgd_cachep, SCST_ACG_DEV_CACHE_STRING,
-                   scst_acg_dev, out_destroy_tgt_cache);
+       INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out);
+       INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA, out_destroy_mgmt_cache);
+       INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_ua_cache);
+       INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
+       INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
+       INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
 
        scst_mgmt_mempool = mempool_create(10, mempool_alloc_slab,
                mempool_free_slab, scst_mgmt_cachep);
@@ -1564,30 +1557,20 @@ static void __exit exit_scst(void)
 
        scst_sgv_pools_deinit();
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-#define DEINIT_CACHEP(p, s) do {                       \
-               if (kmem_cache_destroy(p)) {            \
-                       PRINT_INFO_PR("kmem_cache_destroy of %s returned an "\
-                               "error", s);            \
-               }                                       \
-               p = NULL;                               \
+#define DEINIT_CACHEP(p) do {          \
+               kmem_cache_destroy(p);  \
+               p = NULL;               \
        } while (0)
-#else
-#define DEINIT_CACHEP(p, s) do {                       \
-               kmem_cache_destroy(p);                  \
-                p = NULL;                              \
-       } while (0)
-#endif
 
        mempool_destroy(scst_mgmt_mempool);
        mempool_destroy(scst_ua_mempool);
 
-       DEINIT_CACHEP(scst_mgmt_cachep, SCST_MGMT_CMD_CACHE_STRING);
-       DEINIT_CACHEP(scst_ua_cachep, SCST_UA_CACHE_STRING);
-       DEINIT_CACHEP(scst_cmd_cachep, SCST_CMD_CACHE_STRING);
-       DEINIT_CACHEP(scst_sess_cachep, SCST_SESSION_CACHE_STRING);
-       DEINIT_CACHEP(scst_tgtd_cachep, SCST_TGT_DEV_CACHE_STRING);
-       DEINIT_CACHEP(scst_acgd_cachep, SCST_ACG_DEV_CACHE_STRING);
+       DEINIT_CACHEP(scst_mgmt_cachep);
+       DEINIT_CACHEP(scst_ua_cachep);
+       DEINIT_CACHEP(scst_cmd_cachep);
+       DEINIT_CACHEP(scst_sess_cachep);
+       DEINIT_CACHEP(scst_tgtd_cachep);
+       DEINIT_CACHEP(scst_acgd_cachep);
 
        PRINT_INFO_PR("%s", "SCST unloaded");
 
diff --git a/scst/src/scst_mem.c b/scst/src/scst_mem.c
index 4dc7311..3256ae6 100644
@@ -840,7 +840,12 @@ int sgv_pool_init(struct sgv_pool *pool, const char *name, int clustered)
                scnprintf(pool->cache_names[i], sizeof(pool->cache_names[i]),
                        "%s-%luK", name, (PAGE_SIZE >> 10) << i);
                pool->caches[i] = kmem_cache_create(pool->cache_names[i], 
-                       size, 0, SCST_SLAB_FLAGS, NULL, NULL);
+                       size, 0, SCST_SLAB_FLAGS, NULL
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
+                       , NULL);
+#else
+                       );
+#endif
                if (pool->caches[i] == NULL) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool cache "
                                "%s(%d) failed", name, i);
@@ -1086,8 +1091,14 @@ int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
        INIT_WORK(&pools->mgr.apit_pool, sgv_pool_cached_pitbool, NULL);
 #endif
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
        pools->mgr.sgv_shrinker = set_shrinker(DEFAULT_SEEKS,
                sgv_pool_cached_shrinker);
+#else
+       pools->mgr.sgv_shrinker.shrink = sgv_pool_cached_shrinker;
+       pools->mgr.sgv_shrinker.seeks = DEFAULT_SEEKS;
+       register_shrinker(&pools->mgr.sgv_shrinker);
+#endif
 
 out:
        TRACE_EXIT_RES(res);
@@ -1112,7 +1123,12 @@ void scst_sgv_pools_deinit(void)
 
        TRACE_ENTRY();
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
        remove_shrinker(pools->mgr.sgv_shrinker);
+#else
+       unregister_shrinker(&pools->mgr.sgv_shrinker);
+#endif
+
        cancel_delayed_work(&pools->mgr.apit_pool);
 
 #ifdef SCST_HIGHMEM
diff --git a/scst/src/scst_mem.h b/scst/src/scst_mem.h
index e50cb50..e1da646 100644
 
 #define SGV_POOL_ELEMENTS      11
 
-#if defined(DEBUG) && defined(CONFIG_DEBUG_SLAB)
-#define SCST_SLAB_FLAGS ( SLAB_RED_ZONE | SLAB_POISON )
-#else
-#define SCST_SLAB_FLAGS 0L
-#endif
-
 /* 
  * sg_num is indexed by the page number, pg_count is indexed by the sg number.
  * Made in one entry to simplify the code (eg all sizeof(*) parts) and save
@@ -118,8 +112,12 @@ struct scst_sgv_pools_manager
                        u32 releases_on_hiwmk;
                        u32 releases_failed;
                } thr; /* protected by pool_mgr_lock */
-               
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))              
                struct shrinker *sgv_shrinker;
+#else
+               struct shrinker sgv_shrinker;
+#endif
                
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
                struct delayed_work apit_pool;
diff --git a/scst/src/scst_proc.c b/scst/src/scst_proc.c
index 6aa66f9..fe90806 100644
@@ -2004,8 +2004,12 @@ struct proc_dir_entry *scst_create_proc_entry(struct proc_dir_entry * root,
 
 int scst_single_seq_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, 
-               container_of(inode->i_fop, struct scst_proc_data, seq_op)->show, 
-               PDE(inode)->data);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+       struct scst_proc_data *pdata = container_of(PDE(inode)->proc_fops,
+               struct scst_proc_data, seq_op);
+#else
+       struct scst_proc_data *pdata = container_of(inode->i_fop,
+               struct scst_proc_data, seq_op);
+#endif
+       return single_open(file, pdata->show, pdata->data);
 }
-