Update for 2.6.31
author vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
Fri, 25 Sep 2009 18:31:33 +0000 (18:31 +0000)
committer vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
Fri, 25 Sep 2009 18:31:33 +0000 (18:31 +0000)
git-svn-id: https://scst.svn.sourceforge.net/svnroot/scst/trunk@1145 d57e44dd-8a1f-0410-8b47-8ef2f437770f

iscsi-scst/kernel/patches/put_page_callback-2.6.31.patch [new file with mode: 0644]
scst/include/scst_debug.h
scst/kernel/io_context-2.6.31.patch [new file with mode: 0644]
scst/kernel/readahead-2.6.31.patch [new file with mode: 0644]
scst/kernel/scst_exec_req_fifo-2.6.31.patch [new file with mode: 0644]
scst/src/scst_debug.c
scst/src/scst_lib.c

diff --git a/iscsi-scst/kernel/patches/put_page_callback-2.6.31.patch b/iscsi-scst/kernel/patches/put_page_callback-2.6.31.patch
new file mode 100644
index 0000000..f20dbd4
--- /dev/null
@@ -0,0 +1,390 @@
+diff -upkr linux-2.6.31/include/linux/mm_types.h linux-2.6.31/include/linux/mm_types.h
+--- linux-2.6.31/include/linux/mm_types.h      2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/include/linux/mm_types.h      2009-09-23 14:17:05.000000000 +0400
+@@ -106,6 +106,18 @@ struct page {
+        */
+       void *shadow;
+ #endif
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++      /*
++       * Used to implement support for notification on zero-copy TCP transfer
++       * completion. It might seem better to put this field in struct
++       * sk_buff instead, but that would make the code much more complicated
++       * and fragile, since every skb would then have to contain only pages
++       * with the same value in this field.
++       */
++       void *net_priv;
++#endif
++
+ };
+ /*
+diff -upkr linux-2.6.31/include/linux/net.h linux-2.6.31/include/linux/net.h
+--- linux-2.6.31/include/linux/net.h   2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/include/linux/net.h   2009-09-23 14:17:05.000000000 +0400
+@@ -57,6 +57,7 @@ typedef enum {
+ #include <linux/random.h>
+ #include <linux/wait.h>
+ #include <linux/fcntl.h>      /* For O_CLOEXEC and O_NONBLOCK */
++#include <linux/mm.h>
+ struct poll_table_struct;
+ struct pipe_inode_info;
+@@ -356,5 +357,44 @@ static const struct proto_ops name##_ops
+ extern struct ratelimit_state net_ratelimit_state;
+ #endif
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++/* Support for notification on zero-copy TCP transfer completion */
++typedef void (*net_get_page_callback_t)(struct page *page);
++typedef void (*net_put_page_callback_t)(struct page *page);
++
++extern net_get_page_callback_t net_get_page_callback;
++extern net_put_page_callback_t net_put_page_callback;
++
++extern int net_set_get_put_page_callbacks(
++      net_get_page_callback_t get_callback,
++      net_put_page_callback_t put_callback);
++
++/*
++ * See comment for net_set_get_put_page_callbacks() why those functions
++ * don't need any protection.
++ */
++static inline void net_get_page(struct page *page)
++{
++      if (page->net_priv != 0)
++              net_get_page_callback(page);
++      get_page(page);
++}
++static inline void net_put_page(struct page *page)
++{
++      if (page->net_priv != 0)
++              net_put_page_callback(page);
++      put_page(page);
++}
++#else
++static inline void net_get_page(struct page *page)
++{
++      get_page(page);
++}
++static inline void net_put_page(struct page *page)
++{
++      put_page(page);
++}
++#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */
++
+ #endif /* __KERNEL__ */
+ #endif        /* _LINUX_NET_H */
+diff -upkr linux-2.6.31/net/core/dev.c linux-2.6.31/net/core/dev.c
+--- linux-2.6.31/net/core/dev.c        2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/core/dev.c        2009-09-23 14:18:41.000000000 +0400
+@@ -2474,7 +2474,7 @@ pull:
+               skb_shinfo(skb)->frags[0].size -= grow;
+               if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+-                      put_page(skb_shinfo(skb)->frags[0].page);
++                      net_put_page(skb_shinfo(skb)->frags[0].page);
+                       memmove(skb_shinfo(skb)->frags,
+                               skb_shinfo(skb)->frags + 1,
+                               --skb_shinfo(skb)->nr_frags);
+diff -upkr linux-2.6.31/net/core/skbuff.c linux-2.6.31/net/core/skbuff.c
+--- linux-2.6.31/net/core/skbuff.c     2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/core/skbuff.c     2009-09-23 14:17:05.000000000 +0400
+@@ -76,13 +76,13 @@ static struct kmem_cache *skbuff_fclone_
+ static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+                                 struct pipe_buffer *buf)
+ {
+-      put_page(buf->page);
++      net_put_page(buf->page);
+ }
+ static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+                               struct pipe_buffer *buf)
+ {
+-      get_page(buf->page);
++      net_get_page(buf->page);
+ }
+ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+@@ -344,7 +344,7 @@ static void skb_release_data(struct sk_b
+               if (skb_shinfo(skb)->nr_frags) {
+                       int i;
+                       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+-                              put_page(skb_shinfo(skb)->frags[i].page);
++                              net_put_page(skb_shinfo(skb)->frags[i].page);
+               }
+               if (skb_has_frags(skb))
+@@ -765,7 +765,7 @@ struct sk_buff *pskb_copy(struct sk_buff
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+-                      get_page(skb_shinfo(n)->frags[i].page);
++                      net_get_page(skb_shinfo(n)->frags[i].page);
+               }
+               skb_shinfo(n)->nr_frags = i;
+       }
+@@ -831,7 +831,7 @@ int pskb_expand_head(struct sk_buff *skb
+              sizeof(struct skb_shared_info));
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+-              get_page(skb_shinfo(skb)->frags[i].page);
++              net_get_page(skb_shinfo(skb)->frags[i].page);
+       if (skb_has_frags(skb))
+               skb_clone_fraglist(skb);
+@@ -1105,7 +1105,7 @@ drop_pages:
+               skb_shinfo(skb)->nr_frags = i;
+               for (; i < nfrags; i++)
+-                      put_page(skb_shinfo(skb)->frags[i].page);
++                      net_put_page(skb_shinfo(skb)->frags[i].page);
+               if (skb_has_frags(skb))
+                       skb_drop_fraglist(skb);
+@@ -1274,7 +1274,7 @@ pull_pages:
+       k = 0;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size <= eat) {
+-                      put_page(skb_shinfo(skb)->frags[i].page);
++                      net_put_page(skb_shinfo(skb)->frags[i].page);
+                       eat -= skb_shinfo(skb)->frags[i].size;
+               } else {
+                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+@@ -1375,7 +1375,7 @@ EXPORT_SYMBOL(skb_copy_bits);
+  */
+ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+ {
+-      put_page(spd->pages[i]);
++      net_put_page(spd->pages[i]);
+ }
+ static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+@@ -1399,7 +1399,7 @@ new_page:
+               off = sk->sk_sndmsg_off;
+               mlen = PAGE_SIZE - off;
+               if (mlen < 64 && mlen < *len) {
+-                      put_page(p);
++                      net_put_page(p);
+                       goto new_page;
+               }
+@@ -1409,7 +1409,7 @@ new_page:
+       memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+       sk->sk_sndmsg_off += *len;
+       *offset = off;
+-      get_page(p);
++      net_get_page(p);
+       return p;
+ }
+@@ -1430,7 +1430,7 @@ static inline int spd_fill_page(struct s
+               if (!page)
+                       return 1;
+       } else
+-              get_page(page);
++              net_get_page(page);
+       spd->pages[spd->nr_pages] = page;
+       spd->partial[spd->nr_pages].len = *len;
+@@ -2060,7 +2060,7 @@ static inline void skb_split_no_header(s
+                                *    where splitting is expensive.
+                                * 2. Split is accurately. We make this.
+                                */
+-                              get_page(skb_shinfo(skb)->frags[i].page);
++                              net_get_page(skb_shinfo(skb)->frags[i].page);
+                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+                               skb_shinfo(skb1)->frags[0].size -= len - pos;
+                               skb_shinfo(skb)->frags[i].size  = len - pos;
+@@ -2182,7 +2182,7 @@ int skb_shift(struct sk_buff *tgt, struc
+                       to++;
+               } else {
+-                      get_page(fragfrom->page);
++                      net_get_page(fragfrom->page);
+                       fragto->page = fragfrom->page;
+                       fragto->page_offset = fragfrom->page_offset;
+                       fragto->size = todo;
+@@ -2204,7 +2204,7 @@ int skb_shift(struct sk_buff *tgt, struc
+               fragto = &skb_shinfo(tgt)->frags[merge];
+               fragto->size += fragfrom->size;
+-              put_page(fragfrom->page);
++              net_put_page(fragfrom->page);
+       }
+       /* Reposition in the original skb */
+@@ -2602,7 +2602,7 @@ struct sk_buff *skb_segment(struct sk_bu
+               while (pos < offset + len && i < nfrags) {
+                       *frag = skb_shinfo(skb)->frags[i];
+-                      get_page(frag->page);
++                      net_get_page(frag->page);
+                       size = frag->size;
+                       if (pos < offset) {
+diff -upkr linux-2.6.31/net/ipv4/ip_output.c linux-2.6.31/net/ipv4/ip_output.c
+--- linux-2.6.31/net/ipv4/ip_output.c  2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/ipv4/ip_output.c  2009-09-23 14:17:05.000000000 +0400
+@@ -1019,7 +1019,7 @@ alloc_new_skb:
+                                               err = -EMSGSIZE;
+                                               goto error;
+                                       }
+-                                      get_page(page);
++                                      net_get_page(page);
+                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+                                       frag = &skb_shinfo(skb)->frags[i];
+                               }
+@@ -1177,7 +1177,7 @@ ssize_t  ip_append_page(struct sock *sk, 
+               if (skb_can_coalesce(skb, i, page, offset)) {
+                       skb_shinfo(skb)->frags[i-1].size += len;
+               } else if (i < MAX_SKB_FRAGS) {
+-                      get_page(page);
++                      net_get_page(page);
+                       skb_fill_page_desc(skb, i, page, offset, len);
+               } else {
+                       err = -EMSGSIZE;
+diff -upkr linux-2.6.31/net/ipv4/Makefile linux-2.6.31/net/ipv4/Makefile
+--- linux-2.6.31/net/ipv4/Makefile     2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/ipv4/Makefile     2009-09-23 14:17:05.000000000 +0400
+@@ -49,6 +49,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
+ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
+ obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+ obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
++obj-$(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) += tcp_zero_copy.o
+ obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
+                     xfrm4_output.o
+diff -upkr linux-2.6.31/net/ipv4/tcp.c linux-2.6.31/net/ipv4/tcp.c
+--- linux-2.6.31/net/ipv4/tcp.c        2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/ipv4/tcp.c        2009-09-23 14:17:05.000000000 +0400
+@@ -762,7 +762,7 @@ new_segment:
+               if (can_coalesce) {
+                       skb_shinfo(skb)->frags[i - 1].size += copy;
+               } else {
+-                      get_page(page);
++                      net_get_page(page);
+                       skb_fill_page_desc(skb, i, page, offset, copy);
+               }
+@@ -970,7 +970,7 @@ new_segment:
+                                       goto new_segment;
+                               } else if (page) {
+                                       if (off == PAGE_SIZE) {
+-                                              put_page(page);
++                                              net_put_page(page);
+                                               TCP_PAGE(sk) = page = NULL;
+                                               off = 0;
+                                       }
+@@ -1011,9 +1011,9 @@ new_segment:
+                               } else {
+                                       skb_fill_page_desc(skb, i, page, off, copy);
+                                       if (TCP_PAGE(sk)) {
+-                                              get_page(page);
++                                              net_get_page(page);
+                                       } else if (off + copy < PAGE_SIZE) {
+-                                              get_page(page);
++                                              net_get_page(page);
+                                               TCP_PAGE(sk) = page;
+                                       }
+                               }
+diff -upkr linux-2.6.31/net/ipv4/tcp_output.c linux-2.6.31/net/ipv4/tcp_output.c
+--- linux-2.6.31/net/ipv4/tcp_output.c 2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/ipv4/tcp_output.c 2009-09-23 14:17:05.000000000 +0400
+@@ -890,7 +890,7 @@ static void __pskb_trim_head(struct sk_b
+       k = 0;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size <= eat) {
+-                      put_page(skb_shinfo(skb)->frags[i].page);
++                      net_put_page(skb_shinfo(skb)->frags[i].page);
+                       eat -= skb_shinfo(skb)->frags[i].size;
+               } else {
+                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+diff -upkr linux-2.6.31/net/ipv4/tcp_zero_copy.c linux-2.6.31/net/ipv4/tcp_zero_copy.c
+--- linux-2.6.31/net/ipv4/tcp_zero_copy.c      2009-09-25 21:51:49.000000000 +0400
++++ linux-2.6.31/net/ipv4/tcp_zero_copy.c      2009-09-23 14:17:05.000000000 +0400
+@@ -0,0 +1,49 @@
++/*
++ *    Support routines for TCP zero copy transmit
++ *
++ *    Created by Vladislav Bolkhovitin
++ *
++ *    This program is free software; you can redistribute it and/or
++ *      modify it under the terms of the GNU General Public License
++ *      version 2 as published by the Free Software Foundation.
++ */
++
++#include <linux/skbuff.h>
++
++net_get_page_callback_t net_get_page_callback __read_mostly;
++EXPORT_SYMBOL(net_get_page_callback);
++
++net_put_page_callback_t net_put_page_callback __read_mostly;
++EXPORT_SYMBOL(net_put_page_callback);
++
++/*
++ * Caller of this function must ensure that at the moment when it's called
++ * there are no pages in the system with net_priv field set to non-zero
++ * value. Hence, this function, as well as net_get_page() and net_put_page(),
++ * don't need any protection.
++ */
++int net_set_get_put_page_callbacks(
++      net_get_page_callback_t get_callback,
++      net_put_page_callback_t put_callback)
++{
++      int res = 0;
++
++      if ((net_get_page_callback != NULL) && (get_callback != NULL) &&
++          (net_get_page_callback != get_callback)) {
++              res = -EBUSY;
++              goto out;
++      }
++
++      if ((net_put_page_callback != NULL) && (put_callback != NULL) &&
++          (net_put_page_callback != put_callback)) {
++              res = -EBUSY;
++              goto out;
++      }
++
++      net_get_page_callback = get_callback;
++      net_put_page_callback = put_callback;
++
++out:
++      return res;
++}
++EXPORT_SYMBOL(net_set_get_put_page_callbacks);
+diff -upkr linux-2.6.31/net/ipv6/ip6_output.c linux-2.6.31/net/ipv6/ip6_output.c
+--- linux-2.6.31/net/ipv6/ip6_output.c 2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/ipv6/ip6_output.c 2009-09-23 14:17:05.000000000 +0400
+@@ -1394,7 +1394,7 @@ alloc_new_skb:
+                                               err = -EMSGSIZE;
+                                               goto error;
+                                       }
+-                                      get_page(page);
++                                      net_get_page(page);
+                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+                                       frag = &skb_shinfo(skb)->frags[i];
+                               }
+diff -upkr linux-2.6.31/net/Kconfig linux-2.6.31/net/Kconfig
+--- linux-2.6.31/net/Kconfig   2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/net/Kconfig   2009-09-23 14:17:05.000000000 +0400
+@@ -52,6 +52,18 @@ config INET
+         Short answer: say Y.
++config TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION
++      bool "TCP/IP zero-copy transfer completion notification"
++        depends on INET
++        default SCST_ISCSI
++      ---help---
++        Adds support for sending a notification upon completion of a
++          zero-copy TCP/IP transfer. This can speed up certain TCP/IP
++          software. Currently this is only used by the iSCSI target driver
++          iSCSI-SCST.
++
++          If unsure, say N.
++
+ if INET
+ source "net/ipv4/Kconfig"
+ source "net/ipv6/Kconfig"
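
The patch above only provides the hooks; registration and the life cycle of page->net_priv are up to the target driver. Below is a minimal sketch of how a module such as iSCSI-SCST might use the interface, assuming a hypothetical per-command structure my_cmd with a net_ref_cnt counter (none of the my_* names are part of the patch):

#include <linux/module.h>
#include <linux/net.h>
#include <linux/mm_types.h>

/* Hypothetical per-command state; only the net_priv hooks themselves
 * come from the patch above. */
struct my_cmd {
	atomic_t net_ref_cnt;
};

static void my_cmd_tx_done(struct my_cmd *cmd)
{
	/* complete the command here */
}

static void my_net_get_page(struct page *page)
{
	/* Called from net_get_page() only when page->net_priv != 0, i.e.
	 * the driver set it before handing the page to sendpage(). */
	atomic_inc(&((struct my_cmd *)page->net_priv)->net_ref_cnt);
}

static void my_net_put_page(struct page *page)
{
	struct my_cmd *cmd = page->net_priv;

	/* Last network reference gone => the zero-copy transfer finished. */
	if (atomic_dec_and_test(&cmd->net_ref_cnt))
		my_cmd_tx_done(cmd);
}

static int __init my_init(void)
{
	/* Fails with -EBUSY if another module already installed different
	 * callbacks; see net_set_get_put_page_callbacks() above. */
	return net_set_get_put_page_callbacks(my_net_get_page,
					      my_net_put_page);
}

static void __exit my_exit(void)
{
	/* Per the comment in tcp_zero_copy.c, no page may still have a
	 * non-zero net_priv at this point. */
	net_set_get_put_page_callbacks(NULL, NULL);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
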
diff --git a/scst/include/scst_debug.h b/scst/include/scst_debug.h
index c20c926..cc6824c 100644
 #define EXTRACHECKS_WARN_ON_ONCE(a)
 #endif
 
-#ifdef CONFIG_SCST_DEBUG
-/*#  define LOG_FLAG KERN_DEBUG*/
-#  define LOG_FLAG KERN_INFO
-#  define INFO_FLAG KERN_INFO
-#  define ERROR_FLAG KERN_INFO
-#else
-# define LOG_FLAG KERN_INFO
-# define INFO_FLAG KERN_INFO
-# define ERROR_FLAG KERN_ERR
-#endif
-
-#define CRIT_FLAG KERN_CRIT
-
-#define NO_FLAG ""
-
 #define TRACE_NULL           0x00000000
 #define TRACE_DEBUG          0x00000001
 #define TRACE_FUNCTION       0x00000002
  * present to suppress a checkpatch warning (KERN_CONT is defined as "").
  */
 #define PRINT(log_flag, format, args...)  \
-               printk(KERN_CONT "%s" format "\n", log_flag, ## args)
+               printk(log_flag format "\n", ## args)
 #define PRINTN(log_flag, format, args...) \
-               printk(KERN_CONT "%s" format, log_flag, ## args)
+               printk(log_flag format, ## args)
 
 #ifdef LOG_PREFIX
 #define __LOG_PREFIX   LOG_PREFIX
 #define ___unlikely(a)         unlikely(a)
 #endif
 
-extern int debug_print_prefix(unsigned long trace_flag, const char *log_level,
+extern int debug_print_prefix(unsigned long trace_flag,
        const char *prefix, const char *func, int line);
-extern void debug_print_buffer(const char *log_level, const void *data,
-       int len);
-
-#define TRACE(trace, format, args...)                                    \
-do {                                                                     \
-       if (___unlikely(trace_flag & (trace))) {                          \
-               char *__tflag = LOG_FLAG;                                 \
-               if (debug_print_prefix(trace_flag, __tflag, __LOG_PREFIX, \
-                                      __func__, __LINE__) > 0) {         \
-                       __tflag = NO_FLAG;                                \
-               }                                                         \
-               PRINT(NO_FLAG, "%s" format, __tflag, args);               \
-       }                                                                 \
+extern void debug_print_buffer(const void *data, int len);
+
+#define TRACE(trace, format, args...)                                  \
+do {                                                                   \
+       if (___unlikely(trace_flag & (trace))) {                        \
+               debug_print_prefix(trace_flag, __LOG_PREFIX,            \
+                                      __func__, __LINE__);             \
+               PRINT(KERN_CONT, format, args);                         \
+       }                                                               \
 } while (0)
 
 #ifdef CONFIG_SCST_DEBUG
 
 #define PRINT_BUFFER(message, buff, len)                            \
 do {                                                                \
-       PRINT(NO_FLAG, "%s:%s:", __func__, message);                \
-       debug_print_buffer(INFO_FLAG, buff, len);                   \
+       PRINT(KERN_INFO, "%s:%s:", __func__, message);              \
+       debug_print_buffer(buff, len);                              \
 } while (0)
 
 #else
 
 #define PRINT_BUFFER(message, buff, len)                            \
 do {                                                                \
-       PRINT(NO_FLAG, "%s:", message);                             \
-       debug_print_buffer(INFO_FLAG, buff, len);                   \
+       PRINT(KERN_INFO, "%s:", message);                           \
+       debug_print_buffer(buff, len);                              \
 } while (0)
 
 #endif
@@ -179,13 +160,10 @@ do {                                                                \
 #define PRINT_BUFF_FLAG(flag, message, buff, len)                      \
 do {                                                                   \
        if (___unlikely(trace_flag & (flag))) {                         \
-               char *__tflag = INFO_FLAG;                              \
-               if (debug_print_prefix(trace_flag, __tflag, NULL, __func__,\
-                                      __LINE__) > 0) {                 \
-                       __tflag = NO_FLAG;                              \
-               }                                                       \
-               PRINT(NO_FLAG, "%s%s:", __tflag, message);              \
-               debug_print_buffer(INFO_FLAG, buff, len);               \
+               PRINTN(KERN_INFO, "%s", "");                            \
+               debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
+               PRINT(KERN_CONT, "%s:", message);                       \
+               debug_print_buffer(buff, len);                          \
        }                                                               \
 } while (0)
 
@@ -202,12 +180,9 @@ do {                                                                       \
 #define TRACE_DBG_FLAG(trace, format, args...)                         \
 do {                                                                   \
        if (trace_flag & (trace)) {                                     \
-               char *__tflag = LOG_FLAG;                               \
-               if (debug_print_prefix(trace_flag, __tflag, NULL, __func__,\
-                                      __LINE__) > 0) {                 \
-                       __tflag = NO_FLAG;                              \
-               }                                                       \
-               PRINT(NO_FLAG, "%s" format, __tflag, args);             \
+               PRINTN(KERN_INFO, "%s", "");                            \
+               debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
+               PRINT(KERN_CONT, format, args);                         \
        }                                                               \
 } while (0)
 
@@ -222,81 +197,67 @@ do {                                                                      \
 #define TRACE_BUFFER(message, buff, len)                               \
 do {                                                                   \
        if (trace_flag & TRACE_BUFF) {                                  \
-               char *__tflag = LOG_FLAG;                               \
-               if (debug_print_prefix(trace_flag, __tflag, NULL, __func__, \
-                                      __LINE__) > 0) {                 \
-                       __tflag = NO_FLAG;                              \
-               }                                                       \
-               PRINT(NO_FLAG, "%s%s:", __tflag, message);              \
-               debug_print_buffer(LOG_FLAG, buff, len);                \
+               PRINTN(KERN_INFO, "%s", "");                            \
+               debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
+               PRINT(KERN_CONT, "%s:", message);                       \
+               debug_print_buffer(buff, len);                          \
        }                                                               \
 } while (0)
 
 #define TRACE_BUFF_FLAG(flag, message, buff, len)                      \
 do {                                                                   \
        if (trace_flag & (flag)) {                                      \
-               char *__tflag = LOG_FLAG;                               \
-               if (debug_print_prefix(trace_flag, __tflag, NULL, __func__, \
-                                      __LINE__) > 0) {                 \
-                       __tflag = NO_FLAG;                              \
-               }                                                       \
-               PRINT(NO_FLAG, "%s%s:", __tflag, message);              \
-               debug_print_buffer(LOG_FLAG, buff, len);                \
+               PRINTN(KERN_INFO, "%s", "");                            \
+               debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
+               PRINT(KERN_CONT, "%s:", message);                       \
+               debug_print_buffer(buff, len);                          \
        }                                                               \
 } while (0)
 
 #define PRINT_LOG_FLAG(log_flag, format, args...)                      \
 do {                                                                   \
-       char *__tflag = log_flag;                                       \
-       if (debug_print_prefix(trace_flag, __tflag, __LOG_PREFIX,       \
-                              __func__, __LINE__) > 0) {               \
-               __tflag = NO_FLAG;                                      \
-       }                                                               \
-       PRINT(NO_FLAG, "%s" format, __tflag, args);                     \
+       PRINTN(KERN_INFO, "%s", "");                                    \
+       debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
+       PRINT(KERN_CONT, format, args);                                 \
 } while (0)
 
 #define PRINT_WARNING(format, args...)                                 \
 do {                                                                   \
-       if (strcmp(INFO_FLAG, LOG_FLAG)) {                              \
-               PRINT_LOG_FLAG(LOG_FLAG, "***WARNING***: " format, args); \
-       }                                                               \
-       PRINT_LOG_FLAG(INFO_FLAG, "***WARNING***: " format, args);      \
+       PRINTN(KERN_INFO, "%s", "");                                    \
+       debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
+       PRINT(KERN_CONT, "***WARNING***: " format, args);               \
 } while (0)
 
 #define PRINT_ERROR(format, args...)                                   \
 do {                                                                   \
-       if (strcmp(ERROR_FLAG, LOG_FLAG)) {                             \
-               PRINT_LOG_FLAG(LOG_FLAG, "***ERROR***: " format, args); \
-       }                                                               \
-       PRINT_LOG_FLAG(ERROR_FLAG, "***ERROR***: " format, args);       \
+       PRINTN(KERN_INFO, "%s", "");                                    \
+       debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
+       PRINT(KERN_CONT, "***ERROR***: " format, args);                 \
 } while (0)
 
 #define PRINT_CRIT_ERROR(format, args...)                              \
 do {                                                                   \
-       /*  if (strcmp(CRIT_FLAG, LOG_FLAG))                            \
-           {                                                           \
-           PRINT_LOG_FLAG(LOG_FLAG, "***CRITICAL ERROR***: " format, args); \
-           }*/                                                         \
-       PRINT_LOG_FLAG(CRIT_FLAG, "***CRITICAL ERROR***: " format, args); \
+       PRINTN(KERN_INFO, "%s", "");                                    \
+       debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
+       PRINT(KERN_CONT, "***CRITICAL ERROR***: " format, args);        \
 } while (0)
 
-#define PRINT_INFO(format, args...)                    \
-do {                                                   \
-       if (strcmp(INFO_FLAG, LOG_FLAG)) {              \
-               PRINT_LOG_FLAG(LOG_FLAG, format, args); \
-       }                                               \
-       PRINT_LOG_FLAG(INFO_FLAG, format, args);        \
+#define PRINT_INFO(format, args...)                                    \
+do {                                                                   \
+       PRINTN(KERN_INFO, "%s", "");                                    \
+       debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
+       PRINT(KERN_CONT, format, args);                                 \
 } while (0)
 
 #define TRACE_ENTRY()                                                  \
 do {                                                                   \
        if (trace_flag & TRACE_ENTRYEXIT) {                             \
                if (trace_flag & TRACE_PID) {                           \
-                       PRINT(LOG_FLAG, "[%d]: ENTRY %s", current->pid, \
+                       PRINT(KERN_INFO, "[%d]: ENTRY %s", current->pid, \
                                __func__);                              \
                }                                                       \
                else {                                                  \
-                       PRINT(LOG_FLAG, "ENTRY %s", __func__);          \
+                       PRINT(KERN_INFO, "ENTRY %s", __func__); \
                }                                                       \
        }                                                               \
 } while (0)
@@ -305,11 +266,11 @@ do {                                                                      \
 do {                                                                   \
        if (trace_flag & TRACE_ENTRYEXIT) {                             \
                if (trace_flag & TRACE_PID) {                           \
-                       PRINT(LOG_FLAG, "[%d]: EXIT %s", current->pid,  \
+                       PRINT(KERN_INFO, "[%d]: EXIT %s", current->pid, \
                                __func__);                              \
                }                                                       \
                else {                                                  \
-                       PRINT(LOG_FLAG, "EXIT %s", __func__);           \
+                       PRINT(KERN_INFO, "EXIT %s", __func__);          \
                }                                                       \
        }                                                               \
 } while (0)
@@ -318,11 +279,11 @@ do {                                                                      \
 do {                                                                   \
        if (trace_flag & TRACE_ENTRYEXIT) {                             \
                if (trace_flag & TRACE_PID) {                           \
-                       PRINT(LOG_FLAG, "[%d]: EXIT %s: %ld", current->pid, \
+                       PRINT(KERN_INFO, "[%d]: EXIT %s: %ld", current->pid, \
                              __func__, (long)(res));                   \
                }                                                       \
                else {                                                  \
-                       PRINT(LOG_FLAG, "EXIT %s: %ld",                 \
+                       PRINT(KERN_INFO, "EXIT %s: %ld",                \
                                __func__, (long)(res));                 \
                }                                                       \
        }                                                               \
@@ -332,11 +293,11 @@ do {                                                                      \
 do {                                                                   \
        if (trace_flag & TRACE_ENTRYEXIT) {                             \
                if (trace_flag & TRACE_PID) {                           \
-                       PRINT(LOG_FLAG, "[%d]: EXIT %s: 0x%lx", current->pid, \
+                       PRINT(KERN_INFO, "[%d]: EXIT %s: 0x%lx", current->pid, \
                              __func__, (long)(res));                   \
                }                                                       \
                else {                                                  \
-                       PRINT(LOG_FLAG, "EXIT %s: %lx",                 \
+                       PRINT(KERN_INFO, "EXIT %s: %lx",                \
                                        __func__, (long)(res));         \
                }                                                       \
        }                                                               \
@@ -361,24 +322,24 @@ do {                                                                      \
 
 #define PRINT_INFO(format, args...)                            \
 do {                                                           \
-       PRINT(INFO_FLAG, "%s: " format, LOG_PREFIX, args);      \
+       PRINT(KERN_INFO, "%s: " format, LOG_PREFIX, args);      \
 } while (0)
 
 #define PRINT_WARNING(format, args...)          \
 do {                                            \
-       PRINT(INFO_FLAG, "%s: ***WARNING***: "  \
+       PRINT(KERN_INFO, "%s: ***WARNING***: "  \
              format, LOG_PREFIX, args);        \
 } while (0)
 
 #define PRINT_ERROR(format, args...)            \
 do {                                            \
-       PRINT(ERROR_FLAG, "%s: ***ERROR***: "   \
+       PRINT(KERN_INFO, "%s: ***ERROR***: "    \
              format, LOG_PREFIX, args);        \
 } while (0)
 
 #define PRINT_CRIT_ERROR(format, args...)       \
 do {                                            \
-       PRINT(CRIT_FLAG, "%s: ***CRITICAL ERROR***: "   \
+       PRINT(KERN_INFO, "%s: ***CRITICAL ERROR***: "   \
                format, LOG_PREFIX, args);              \
 } while (0)
 
@@ -386,24 +347,24 @@ do {                                            \
 
 #define PRINT_INFO(format, args...)            \
 do {                                            \
-       PRINT(INFO_FLAG, format, args);         \
+       PRINT(KERN_INFO, format, args);         \
 } while (0)
 
 #define PRINT_WARNING(format, args...)          \
 do {                                            \
-       PRINT(INFO_FLAG, "***WARNING***: "      \
+       PRINT(KERN_INFO, "***WARNING***: "      \
                format, args);                  \
 } while (0)
 
 #define PRINT_ERROR(format, args...)           \
 do {                                            \
-       PRINT(ERROR_FLAG, "***ERROR***: "       \
+       PRINT(KERN_ERR, "***ERROR***: " \
                format, args);                  \
 } while (0)
 
 #define PRINT_CRIT_ERROR(format, args...)              \
 do {                                                   \
-       PRINT(CRIT_FLAG, "***CRITICAL ERROR***: "       \
+       PRINT(KERN_CRIT, "***CRITICAL ERROR***: "       \
                format, args);                          \
 } while (0)
 
diff --git a/scst/kernel/io_context-2.6.31.patch b/scst/kernel/io_context-2.6.31.patch
new file mode 100644
index 0000000..dc703c3
--- /dev/null
@@ -0,0 +1,61 @@
+diff -upkr linux-2.6.31/block/blk-ioc.c linux-2.6.31/block/blk-ioc.c
+--- linux-2.6.31/block/blk-ioc.c       2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/block/blk-ioc.c       2009-09-23 14:17:17.000000000 +0400
+@@ -65,6 +65,21 @@ static void cfq_exit(struct io_context *
+       rcu_read_unlock();
+ }
++void __exit_io_context(struct io_context *ioc)
++{
++      if (ioc == NULL)
++              return;
++
++      if (atomic_dec_and_test(&ioc->nr_tasks)) {
++              if (ioc->aic && ioc->aic->exit)
++                      ioc->aic->exit(ioc->aic);
++              cfq_exit(ioc);
++
++              put_io_context(ioc);
++      }
++}
++EXPORT_SYMBOL(__exit_io_context);
++
+ /* Called by the exitting task */
+ void exit_io_context(void)
+ {
+@@ -75,13 +90,7 @@ void exit_io_context(void)
+       current->io_context = NULL;
+       task_unlock(current);
+-      if (atomic_dec_and_test(&ioc->nr_tasks)) {
+-              if (ioc->aic && ioc->aic->exit)
+-                      ioc->aic->exit(ioc->aic);
+-              cfq_exit(ioc);
+-
+-              put_io_context(ioc);
+-      }
++      __exit_io_context(ioc);
+ }
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+@@ -105,6 +114,7 @@ struct io_context *alloc_io_context(gfp_
+       return ret;
+ }
++EXPORT_SYMBOL(alloc_io_context);
+ /*
+  * If the current task has no IO context then create one and initialise it.
+diff -upkr linux-2.6.31/include/linux/iocontext.h linux-2.6.31/include/linux/iocontext.h
+--- linux-2.6.31/include/linux/iocontext.h     2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/include/linux/iocontext.h     2009-09-23 14:17:17.000000000 +0400
+@@ -103,7 +103,9 @@ static inline struct io_context *ioc_tas
+ int put_io_context(struct io_context *ioc);
+ void exit_io_context(void);
+ struct io_context *get_io_context(gfp_t gfp_flags, int node);
++#define SCST_IO_CONTEXT
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
++void __exit_io_context(struct io_context *ioc);
+ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
+ #else
+ static inline void exit_io_context(void)
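
The point of exporting alloc_io_context() and the new __exit_io_context() (plus the SCST_IO_CONTEXT marker) is to let SCST create an I/O context that several of its threads share and release it when the thread pool goes away. A minimal sketch under that assumption; the shared_ioc variable and my_* functions are illustrative only:

#include <linux/iocontext.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct io_context *shared_ioc;

static int my_create_shared_ioc(void)
{
	/* alloc_io_context() is exported by the patch above; -1 means
	 * "any NUMA node".  The context starts with nr_tasks == 1. */
	shared_ioc = alloc_io_context(GFP_KERNEL, -1);
	return shared_ioc ? 0 : -ENOMEM;
}

static void my_destroy_shared_ioc(void)
{
	/* __exit_io_context() drops one task reference and, on the last
	 * one, runs the AS/CFQ exit hooks and puts the context, i.e. the
	 * same steps exit_io_context() performs for an exiting task. */
	__exit_io_context(shared_ioc);
	shared_ioc = NULL;
}
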
diff --git a/scst/kernel/readahead-2.6.31.patch b/scst/kernel/readahead-2.6.31.patch
new file mode 100644
index 0000000..4e083db
--- /dev/null
@@ -0,0 +1,12 @@
+diff -upkr linux-2.6.31/mm/readahead.c linux-2.6.31/mm/readahead.c
+--- linux-2.6.31/mm/readahead.c        2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/mm/readahead.c        2009-09-23 14:17:26.000000000 +0400
+@@ -547,5 +547,8 @@ page_cache_async_readahead(struct addres
+       /* do read-ahead */
+       ondemand_readahead(mapping, ra, filp, true, offset, req_size);
++
++      if (PageUptodate(page))
++              blk_run_backing_dev(mapping->backing_dev_info, NULL);
+ }
+ EXPORT_SYMBOL_GPL(page_cache_async_readahead);
diff --git a/scst/kernel/scst_exec_req_fifo-2.6.31.patch b/scst/kernel/scst_exec_req_fifo-2.6.31.patch
new file mode 100644
index 0000000..8628e9f
--- /dev/null
@@ -0,0 +1,528 @@
+diff -upkr linux-2.6.31/block/blk-map.c linux-2.6.31/block/blk-map.c
+--- linux-2.6.31/block/blk-map.c       2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/block/blk-map.c       2009-09-23 14:48:36.000000000 +0400
+@@ -5,6 +5,7 @@
+ #include <linux/module.h>
+ #include <linux/bio.h>
+ #include <linux/blkdev.h>
++#include <linux/scatterlist.h>
+ #include <scsi/sg.h>          /* for struct sg_iovec */
+ #include "blk.h"
+@@ -271,6 +272,336 @@ int blk_rq_unmap_user(struct bio *bio)
+ }
+ EXPORT_SYMBOL(blk_rq_unmap_user);
++struct blk_kern_sg_work {
++      atomic_t bios_inflight;
++      struct sg_table sg_table;
++      struct scatterlist *src_sgl;
++};
++
++static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
++{
++      sg_free_table(&bw->sg_table);
++      kfree(bw);
++      return;
++}
++
++static void blk_bio_map_kern_endio(struct bio *bio, int err)
++{
++      struct blk_kern_sg_work *bw = bio->bi_private;
++
++      if (bw != NULL) {
++              /* Decrement the bios in processing and, if zero, free */
++              BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
++              if (atomic_dec_and_test(&bw->bios_inflight)) {
++                      if ((bio_data_dir(bio) == READ) && (err == 0)) {
++                              unsigned long flags;
++
++                              local_irq_save(flags);  /* to protect KMs */
++                              sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
++                                      KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
++                              local_irq_restore(flags);
++                      }
++                      blk_free_kern_sg_work(bw);
++              }
++      }
++
++      bio_put(bio);
++      return;
++}
++
++static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
++                             int nents, struct blk_kern_sg_work **pbw,
++                             gfp_t gfp, gfp_t page_gfp)
++{
++      int res = 0, i;
++      struct scatterlist *sg;
++      struct scatterlist *new_sgl;
++      int new_sgl_nents;
++      size_t len = 0, to_copy;
++      struct blk_kern_sg_work *bw;
++
++      bw = kzalloc(sizeof(*bw), gfp);
++      if (bw == NULL)
++              goto out;
++
++      bw->src_sgl = sgl;
++
++      for_each_sg(sgl, sg, nents, i)
++              len += sg->length;
++      to_copy = len;
++
++      new_sgl_nents = PFN_UP(len);
++
++      res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
++      if (res != 0)
++              goto out_free_bw;
++
++      new_sgl = bw->sg_table.sgl;
++
++      for_each_sg(new_sgl, sg, new_sgl_nents, i) {
++              struct page *pg;
++
++              pg = alloc_page(page_gfp);
++              if (pg == NULL)
++                      goto err_free_new_sgl;
++
++              sg_assign_page(sg, pg);
++              sg->length = min_t(size_t, PAGE_SIZE, len);
++
++              len -= PAGE_SIZE;
++      }
++
++      if (rq_data_dir(rq) == WRITE) {
++              /*
++               * We need to limit the amount of copied data to to_copy,
++               * because the last element of sgl might not be marked as the
++               * last one in the SG chain.
++               */
++              sg_copy(new_sgl, sgl, 0, to_copy,
++                      KM_USER0, KM_USER1);
++      }
++
++      *pbw = bw;
++      /*
++       * REQ_COPY_USER name is misleading. It should be something like
++       * REQ_HAS_TAIL_SPACE_FOR_PADDING.
++       */
++      rq->cmd_flags |= REQ_COPY_USER;
++
++out:
++      return res;
++
++err_free_new_sgl:
++      for_each_sg(new_sgl, sg, new_sgl_nents, i) {
++              struct page *pg = sg_page(sg);
++              if (pg == NULL)
++                      break;
++              __free_page(pg);
++      }
++      sg_free_table(&bw->sg_table);
++
++out_free_bw:
++      kfree(bw);
++      res = -ENOMEM;
++      goto out;
++}
++
++static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++      int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
++{
++      int res;
++      struct request_queue *q = rq->q;
++      int rw = rq_data_dir(rq);
++      int max_nr_vecs, i;
++      size_t tot_len;
++      bool need_new_bio;
++      struct scatterlist *sg, *prev_sg = NULL;
++      struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
++      int bios;
++
++      if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
++              WARN_ON(1);
++              res = -EINVAL;
++              goto out;
++      }
++
++      /*
++       * Let's keep each bio allocation inside a single page to decrease
++       * probability of failure.
++       */
++      max_nr_vecs =  min_t(size_t,
++              ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
++              BIO_MAX_PAGES);
++
++      need_new_bio = true;
++      tot_len = 0;
++      bios = 0;
++      for_each_sg(sgl, sg, nents, i) {
++              struct page *page = sg_page(sg);
++              void *page_addr = page_address(page);
++              size_t len = sg->length, l;
++              size_t offset = sg->offset;
++
++              tot_len += len;
++              prev_sg = sg;
++
++              /*
++               * Each segment must be aligned on DMA boundary and
++               * not on stack. The last one may have unaligned
++               * length as long as the total length is aligned to
++               * DMA padding alignment.
++               */
++              if (i == nents - 1)
++                      l = 0;
++              else
++                      l = len;
++              if (((sg->offset | l) & queue_dma_alignment(q)) ||
++                  (page_addr && object_is_on_stack(page_addr + sg->offset))) {
++                      res = -EINVAL;
++                      goto out_free_bios;
++              }
++
++              while (len > 0) {
++                      size_t bytes;
++                      int rc;
++
++                      if (need_new_bio) {
++                              bio = bio_kmalloc(gfp, max_nr_vecs);
++                              if (bio == NULL) {
++                                      res = -ENOMEM;
++                                      goto out_free_bios;
++                              }
++
++                              if (rw == WRITE)
++                                      bio->bi_rw |= 1 << BIO_RW;
++
++                              bios++;
++                              bio->bi_private = bw;
++                              bio->bi_end_io = blk_bio_map_kern_endio;
++
++                              if (hbio == NULL)
++                                      hbio = tbio = bio;
++                              else
++                                      tbio = tbio->bi_next = bio;
++                      }
++
++                      bytes = min_t(size_t, len, PAGE_SIZE - offset);
++
++                      rc = bio_add_pc_page(q, bio, page, bytes, offset);
++                      if (rc < bytes) {
++                              if (unlikely(need_new_bio || (rc < 0))) {
++                                      if (rc < 0)
++                                              res = rc;
++                                      else
++                                              res = -EIO;
++                                      goto out_free_bios;
++                              } else {
++                                      need_new_bio = true;
++                                      len -= rc;
++                                      offset += rc;
++                                      continue;
++                              }
++                      }
++
++                      need_new_bio = false;
++                      offset = 0;
++                      len -= bytes;
++                      page = nth_page(page, 1);
++              }
++      }
++
++      if (hbio == NULL) {
++              res = -EINVAL;
++              goto out_free_bios;
++      }
++
++      /* Total length must be aligned on DMA padding alignment */
++      if ((tot_len & q->dma_pad_mask) &&
++          !(rq->cmd_flags & REQ_COPY_USER)) {
++              res = -EINVAL;
++              goto out_free_bios;
++      }
++
++      if (bw != NULL)
++              atomic_set(&bw->bios_inflight, bios);
++
++      while (hbio != NULL) {
++              bio = hbio;
++              hbio = hbio->bi_next;
++              bio->bi_next = NULL;
++
++              blk_queue_bounce(q, &bio);
++
++              res = blk_rq_append_bio(q, rq, bio);
++              if (unlikely(res != 0)) {
++                      bio->bi_next = hbio;
++                      hbio = bio;
++                      /* We can have one or more bios bounced */
++                      goto out_unmap_bios;
++              }
++      }
++
++      rq->buffer = NULL;
++out:
++      return res;
++
++out_free_bios:
++      while (hbio != NULL) {
++              bio = hbio;
++              hbio = hbio->bi_next;
++              bio_put(bio);
++      }
++      goto out;
++
++out_unmap_bios:
++      blk_rq_unmap_kern_sg(rq, res);
++      goto out;
++}
++
++/**
++ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
++ * @rq:               request to fill
++ * @sgl:      area to map
++ * @nents:    number of elements in @sgl
++ * @gfp:      memory allocation flags
++ *
++ * Description:
++ *    Data will be mapped directly if possible. Otherwise a bounce
++ *    buffer will be used.
++ */
++int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++                     int nents, gfp_t gfp)
++{
++      int res;
++
++      res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
++      if (unlikely(res != 0)) {
++              struct blk_kern_sg_work *bw = NULL;
++
++              res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
++                              gfp, rq->q->bounce_gfp | gfp);
++              if (unlikely(res != 0))
++                      goto out;
++
++              res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
++                              bw->sg_table.nents, bw, gfp);
++              if (res != 0) {
++                      blk_free_kern_sg_work(bw);
++                      goto out;
++              }
++      }
++
++      rq->buffer = NULL;
++
++out:
++      return res;
++}
++EXPORT_SYMBOL(blk_rq_map_kern_sg);
++
++/**
++ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
++ * @rq:               request to unmap
++ * @err:      non-zero error code
++ *
++ * Description:
++ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
++ *    only in case of an error!
++ */
++void blk_rq_unmap_kern_sg(struct request *rq, int err)
++{
++      struct bio *bio = rq->bio;
++
++      while (bio) {
++              struct bio *b = bio;
++              bio = bio->bi_next;
++              b->bi_end_io(b, err);
++      }
++      rq->bio = NULL;
++
++      return;
++}
++EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
++
+ /**
+  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+  * @q:                request queue where request should be inserted
+diff -upkr linux-2.6.31/include/linux/blkdev.h linux-2.6.31/include/linux/blkdev.h
+--- linux-2.6.31/include/linux/blkdev.h        2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/include/linux/blkdev.h        2009-09-23 14:17:33.000000000 +0400
+@@ -699,6 +699,8 @@ extern unsigned long blk_max_low_pfn, bl
+ #define BLK_DEFAULT_SG_TIMEOUT        (60 * HZ)
+ #define BLK_MIN_SG_TIMEOUT    (7 * HZ)
++#define SCSI_EXEC_REQ_FIFO_DEFINED
++
+ #ifdef CONFIG_BOUNCE
+ extern int init_emergency_isa_pool(void);
+ extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
+@@ -803,6 +805,9 @@ extern int blk_rq_map_kern(struct reques
+ extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
+                              struct rq_map_data *, struct sg_iovec *, int,
+                              unsigned int, gfp_t);
++extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++                            int nents, gfp_t gfp);
++extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
+ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+                         struct request *, int);
+ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
+diff -upkr linux-2.6.31/include/linux/scatterlist.h linux-2.6.31/include/linux/scatterlist.h
+--- linux-2.6.31/include/linux/scatterlist.h   2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/include/linux/scatterlist.h   2009-09-23 14:17:33.000000000 +0400
+@@ -3,6 +3,7 @@
+ #include <asm/types.h>
+ #include <asm/scatterlist.h>
++#include <asm/kmap_types.h>
+ #include <linux/mm.h>
+ #include <linux/string.h>
+ #include <asm/io.h>
+@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
+ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+                        void *buf, size_t buflen);
++int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
++          int nents_to_copy, size_t copy_len,
++          enum km_type d_km_type, enum km_type s_km_type);
++
+ /*
+  * Maximum number of entries that will be allocated in one piece, if
+  * a list larger than this is required then chaining will be utilized.
+diff -upkr linux-2.6.31/lib/scatterlist.c linux-2.6.31/lib/scatterlist.c
+--- linux-2.6.31/lib/scatterlist.c     2009-09-10 02:13:59.000000000 +0400
++++ linux-2.6.31/lib/scatterlist.c     2009-09-23 14:17:33.000000000 +0400
+@@ -493,3 +493,132 @@ size_t sg_copy_to_buffer(struct scatterl
+       return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+ }
+ EXPORT_SYMBOL(sg_copy_to_buffer);
++
++/*
++ * Can switch to the next dst_sg element, so, to copy to strictly only
++ * one dst_sg element, it must be either last in the chain, or
++ * copy_len == dst_sg->length.
++ */
++static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
++                      size_t *pdst_offs, struct scatterlist *src_sg,
++                      size_t copy_len,
++                      enum km_type d_km_type, enum km_type s_km_type)
++{
++      int res = 0;
++      struct scatterlist *dst_sg;
++      size_t src_len, dst_len, src_offs, dst_offs;
++      struct page *src_page, *dst_page;
++
++      dst_sg = *pdst_sg;
++      dst_len = *pdst_len;
++      dst_offs = *pdst_offs;
++      dst_page = sg_page(dst_sg);
++
++      src_page = sg_page(src_sg);
++      src_len = src_sg->length;
++      src_offs = src_sg->offset;
++
++      do {
++              void *saddr, *daddr;
++              size_t n;
++
++              saddr = kmap_atomic(src_page +
++                                       (src_offs >> PAGE_SHIFT), s_km_type) +
++                                  (src_offs & ~PAGE_MASK);
++              daddr = kmap_atomic(dst_page +
++                                      (dst_offs >> PAGE_SHIFT), d_km_type) +
++                                  (dst_offs & ~PAGE_MASK);
++
++              if (((src_offs & ~PAGE_MASK) == 0) &&
++                  ((dst_offs & ~PAGE_MASK) == 0) &&
++                  (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
++                  (copy_len >= PAGE_SIZE)) {
++                      copy_page(daddr, saddr);
++                      n = PAGE_SIZE;
++              } else {
++                      n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
++                                        PAGE_SIZE - (src_offs & ~PAGE_MASK));
++                      n = min(n, src_len);
++                      n = min(n, dst_len);
++                      n = min_t(size_t, n, copy_len);
++                      memcpy(daddr, saddr, n);
++              }
++              dst_offs += n;
++              src_offs += n;
++
++              kunmap_atomic(saddr, s_km_type);
++              kunmap_atomic(daddr, d_km_type);
++
++              res += n;
++              copy_len -= n;
++              if (copy_len == 0)
++                      goto out;
++
++              src_len -= n;
++              dst_len -= n;
++              if (dst_len == 0) {
++                      dst_sg = sg_next(dst_sg);
++                      if (dst_sg == NULL)
++                              goto out;
++                      dst_page = sg_page(dst_sg);
++                      dst_len = dst_sg->length;
++                      dst_offs = dst_sg->offset;
++              }
++      } while (src_len > 0);
++
++out:
++      *pdst_sg = dst_sg;
++      *pdst_len = dst_len;
++      *pdst_offs = dst_offs;
++      return res;
++}
++
++/**
++ * sg_copy - copy one SG vector to another
++ * @dst_sg:   destination SG
++ * @src_sg:   source SG
++ * @nents_to_copy: maximum number of entries to copy
++ * @copy_len: maximum amount of data to copy. If 0, then copy all.
++ * @d_km_type:        kmap_atomic type for the destination SG
++ * @s_km_type:        kmap_atomic type for the source SG
++ *
++ * Description:
++ *    Data from the source SG vector will be copied to the destination SG
++ *    vector. End of the vectors will be determined by sg_next() returning
++ *    NULL. Returns number of bytes copied.
++ */
++int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
++          int nents_to_copy, size_t copy_len,
++          enum km_type d_km_type, enum km_type s_km_type)
++{
++      int res = 0;
++      size_t dst_len, dst_offs;
++
++      if (copy_len == 0)
++              copy_len = 0x7FFFFFFF; /* copy all */
++
++      if (nents_to_copy == 0)
++              nents_to_copy = 0x7FFFFFFF; /* copy all */
++
++      dst_len = dst_sg->length;
++      dst_offs = dst_sg->offset;
++
++      do {
++              int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
++                              src_sg, copy_len, d_km_type, s_km_type);
++              copy_len -= copied;
++              res += copied;
++              if ((copy_len == 0) || (dst_sg == NULL))
++                      goto out;
++
++              nents_to_copy--;
++              if (nents_to_copy == 0)
++                      goto out;
++
++              src_sg = sg_next(src_sg);
++      } while (src_sg != NULL);
++
++out:
++      return res;
++}
++EXPORT_SYMBOL(sg_copy);
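
Taken together, blk_rq_map_kern_sg(), blk_rq_unmap_kern_sg() and sg_copy() let a caller attach an already-built scatterlist to a BLOCK_PC request, with an automatic bounce copy when the SG vector violates the queue's alignment rules. A rough sketch of a caller, assuming a hypothetical my_submit_sg() helper and omitting CDB/sense setup:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Illustrative only: submit a kernel scatterlist as a SCSI pass-through
 * request using the interface added by the patch above. */
static int my_submit_sg(struct request_queue *q, struct scatterlist *sgl,
			int nents, int write)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* Direct mapping is tried first; on failure blk_rq_map_kern_sg()
	 * falls back to a bounce SG vector filled via sg_copy(). */
	res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (res != 0)
		goto out_put;

	/* ... fill in rq->cmd, rq->cmd_len, rq->sense here ... */

	/* If setup failed after mapping but before the request was started,
	 * the mapping would have to be undone with
	 * blk_rq_unmap_kern_sg(rq, res) (error path only, per its doc). */

	res = blk_execute_rq(q, NULL, rq, 0);

out_put:
	blk_put_request(rq);
	return res;
}
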
diff --git a/scst/src/scst_debug.c b/scst/src/scst_debug.c
index 1cd65ff..17670f0 100644
@@ -46,7 +46,7 @@ static inline int get_current_tid(void)
 #endif
 }
 
-int debug_print_prefix(unsigned long trace_flag, const char *log_level,
+int debug_print_prefix(unsigned long trace_flag,
        const char *prefix, const char *func, int line)
 {
        int i = 0;
@@ -55,6 +55,8 @@ int debug_print_prefix(unsigned long trace_flag, const char *log_level,
 
        spin_lock_irqsave(&trace_buf_lock, flags);
 
+       trace_buf[0] = '\0';
+
        if (trace_flag & TRACE_PID)
                i += snprintf(&trace_buf[i], TRACE_BUF_SIZE, "[%d]: ", pid);
        if (prefix != NULL)
@@ -65,8 +67,7 @@ int debug_print_prefix(unsigned long trace_flag, const char *log_level,
        if (trace_flag & TRACE_LINE)
                i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%i:", line);
 
-       if (i > 0)
-               PRINTN(log_level, "%s", trace_buf);
+       PRINTN(KERN_INFO, "%s", trace_buf);
 
        spin_unlock_irqrestore(&trace_buf_lock, flags);
 
@@ -74,11 +75,10 @@ int debug_print_prefix(unsigned long trace_flag, const char *log_level,
 }
 EXPORT_SYMBOL(debug_print_prefix);
 
-void debug_print_buffer(const char *log_level, const void *data, int len)
+void debug_print_buffer(const void *data, int len)
 {
        int z, z1, i;
        const unsigned char *buf = (const unsigned char *) data;
-       int f = 0;
        unsigned long flags;
 
        if (buf == NULL)
@@ -86,7 +86,7 @@ void debug_print_buffer(const char *log_level, const void *data, int len)
 
        spin_lock_irqsave(&trace_buf_lock, flags);
 
-       PRINT(NO_FLAG, " (h)___0__1__2__3__4__5__6__7__8__9__A__B__C__D__E__F");
+       PRINT(KERN_INFO, " (h)___0__1__2__3__4__5__6__7__8__9__A__B__C__D__E__F");
        for (z = 0, z1 = 0, i = 0; z < len; z++) {
                if (z % 16 == 0) {
                        if (z != 0) {
@@ -101,9 +101,8 @@ void debug_print_buffer(const char *log_level, const void *data, int len)
                                                trace_buf[i++] = '.';
                                }
                                trace_buf[i] = '\0';
-                               PRINT(NO_FLAG, "%s", trace_buf);
+                               PRINT(KERN_INFO, "%s", trace_buf);
                                i = 0;
-                               f = 1;
                        }
                        i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i,
                                      "%4x: ", z);
@@ -111,6 +110,7 @@ void debug_print_buffer(const char *log_level, const void *data, int len)
                i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%02x ",
                              buf[z]);
        }
+
        i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "  ");
        for (; (z1 < z) && (i < TRACE_BUF_SIZE - 1); z1++) {
                if ((buf[z1] > 0x20) && (buf[z1] < 0x80))
@@ -119,10 +119,8 @@ void debug_print_buffer(const char *log_level, const void *data, int len)
                        trace_buf[i++] = '.';
        }
        trace_buf[i] = '\0';
-       if (f)
-               PRINT(log_level, "%s", trace_buf);
-       else
-               PRINT(NO_FLAG, "%s", trace_buf);
+
+       PRINT(KERN_INFO, "%s", trace_buf);
 
        spin_unlock_irqrestore(&trace_buf_lock, flags);
        return;
diff --git a/scst/src/scst_lib.c b/scst/src/scst_lib.c
index cbb5c77..1b7b02d 100644
@@ -3229,7 +3229,10 @@ static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
                }
        }
 
-       rq->buffer = rq->data = NULL;
+       rq->buffer = NULL;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
+       rq->data = NULL;
+#endif
 out:
        return res;
 
@@ -3283,8 +3286,10 @@ static int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
                }
        }
 
-       rq->buffer = rq->data = NULL;
-
+       rq->buffer = NULL;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
+       rq->data = NULL;
+#endif
 out:
        return res;
 }
@@ -3322,7 +3327,11 @@ static void scsi_end_async(struct request *req, int error)
        TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
 
        if (sioc->done)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
                sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
+#else
+               sioc->done(sioc->data, sioc->sense, req->errors, req->resid_len);
+#endif
 
        if (!sioc->full_cdb_used)
                kmem_cache_free(scsi_io_context_cache, sioc);