* GNU General Public License for more details.
*/
-#define ISCSI_VERSION_STRING "1.0.2/0.4.17r213"
+#define ISCSI_VERSION_STRING "1.0.2/0.4.17r214"
goto out;
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
proc_iscsi_dir->owner = THIS_MODULE;
+#endif
err = iscsi_proc_log_entry_build(&iscsi_template);
if (err < 0)
--- /dev/null
+diff -upkr linux-2.6.30.1/include/linux/mm_types.h linux-2.6.30.1/include/linux/mm_types.h
+--- linux-2.6.30.1/include/linux/mm_types.h 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/include/linux/mm_types.h 2009-07-01 15:20:24.000000000 +0400
+@@ -98,6 +98,18 @@ struct page {
+ #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
+ unsigned long debug_flags; /* Use atomic bitops on this */
+ #endif
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ /*
++ * Used to implement support for notification on zero-copy TCP transfer
++	 * completion. It might look better to put this field in struct sk_buff
++	 * instead, but that would make the code much more complicated and
++	 * fragile, since all skbs would then have to contain only pages with
++	 * the same value in this field.
++ */
++ void *net_priv;
++#endif
++
+ };
+
+ /*
+diff -upkr linux-2.6.30.1/include/linux/net.h linux-2.6.30.1/include/linux/net.h
+--- linux-2.6.30.1/include/linux/net.h 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/include/linux/net.h 2009-07-01 15:20:24.000000000 +0400
+@@ -57,6 +57,7 @@ typedef enum {
+ #include <linux/random.h>
+ #include <linux/wait.h>
+ #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
++#include <linux/mm.h>
+
+ struct poll_table_struct;
+ struct pipe_inode_info;
+@@ -356,5 +357,44 @@ static const struct proto_ops name##_ops
+ extern struct ratelimit_state net_ratelimit_state;
+ #endif
+
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++/* Support for notification on zero-copy TCP transfer completion */
++typedef void (*net_get_page_callback_t)(struct page *page);
++typedef void (*net_put_page_callback_t)(struct page *page);
++
++extern net_get_page_callback_t net_get_page_callback;
++extern net_put_page_callback_t net_put_page_callback;
++
++extern int net_set_get_put_page_callbacks(
++ net_get_page_callback_t get_callback,
++ net_put_page_callback_t put_callback);
++
++/*
++ * See the comment for net_set_get_put_page_callbacks() for why these
++ * functions don't need any protection.
++ */
++static inline void net_get_page(struct page *page)
++{
++ if (page->net_priv != 0)
++ net_get_page_callback(page);
++ get_page(page);
++}
++static inline void net_put_page(struct page *page)
++{
++ if (page->net_priv != 0)
++ net_put_page_callback(page);
++ put_page(page);
++}
++#else
++static inline void net_get_page(struct page *page)
++{
++ get_page(page);
++}
++static inline void net_put_page(struct page *page)
++{
++ put_page(page);
++}
++#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_NET_H */
+diff -upkr linux-2.6.30.1/net/core/skbuff.c linux-2.6.30.1/net/core/skbuff.c
+--- linux-2.6.30.1/net/core/skbuff.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/net/core/skbuff.c 2009-07-01 15:55:08.000000000 +0400
+@@ -75,13 +75,13 @@ static struct kmem_cache *skbuff_fclone_
+ static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+- put_page(buf->page);
++ net_put_page(buf->page);
+ }
+
+ static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+- get_page(buf->page);
++ net_get_page(buf->page);
+ }
+
+ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+@@ -335,7 +335,7 @@ static void skb_release_data(struct sk_b
+ if (skb_shinfo(skb)->nr_frags) {
+ int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+ }
+
+ if (skb_shinfo(skb)->frag_list)
+@@ -750,7 +750,7 @@ struct sk_buff *pskb_copy(struct sk_buff
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+- get_page(skb_shinfo(n)->frags[i].page);
++ net_get_page(skb_shinfo(n)->frags[i].page);
+ }
+ skb_shinfo(n)->nr_frags = i;
+ }
+@@ -816,7 +816,7 @@ int pskb_expand_head(struct sk_buff *skb
+ sizeof(struct skb_shared_info));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+- get_page(skb_shinfo(skb)->frags[i].page);
++ net_get_page(skb_shinfo(skb)->frags[i].page);
+
+ if (skb_shinfo(skb)->frag_list)
+ skb_clone_fraglist(skb);
+@@ -1088,7 +1088,7 @@ drop_pages:
+ skb_shinfo(skb)->nr_frags = i;
+
+ for (; i < nfrags; i++)
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+
+ if (skb_shinfo(skb)->frag_list)
+ skb_drop_fraglist(skb);
+@@ -1257,7 +1257,7 @@ pull_pages:
+ k = 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ if (skb_shinfo(skb)->frags[i].size <= eat) {
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+ eat -= skb_shinfo(skb)->frags[i].size;
+ } else {
+ skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+@@ -1362,7 +1362,7 @@ EXPORT_SYMBOL(skb_copy_bits);
+ */
+ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+ {
+- put_page(spd->pages[i]);
++ net_put_page(spd->pages[i]);
+ }
+
+ static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+@@ -1386,7 +1386,7 @@ new_page:
+ off = sk->sk_sndmsg_off;
+ mlen = PAGE_SIZE - off;
+ if (mlen < 64 && mlen < *len) {
+- put_page(p);
++ net_put_page(p);
+ goto new_page;
+ }
+
+@@ -1396,7 +1396,7 @@ new_page:
+ memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+ sk->sk_sndmsg_off += *len;
+ *offset = off;
+- get_page(p);
++ net_get_page(p);
+
+ return p;
+ }
+@@ -1417,7 +1417,7 @@ static inline int spd_fill_page(struct s
+ if (!page)
+ return 1;
+ } else
+- get_page(page);
++ net_get_page(page);
+
+ spd->pages[spd->nr_pages] = page;
+ spd->partial[spd->nr_pages].len = *len;
+@@ -2057,7 +2057,7 @@ static inline void skb_split_no_header(s
+ * where splitting is expensive.
+ * 2. Split is accurately. We make this.
+ */
+- get_page(skb_shinfo(skb)->frags[i].page);
++ net_get_page(skb_shinfo(skb)->frags[i].page);
+ skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+ skb_shinfo(skb1)->frags[0].size -= len - pos;
+ skb_shinfo(skb)->frags[i].size = len - pos;
+@@ -2179,7 +2179,7 @@ int skb_shift(struct sk_buff *tgt, struc
+ to++;
+
+ } else {
+- get_page(fragfrom->page);
++ net_get_page(fragfrom->page);
+ fragto->page = fragfrom->page;
+ fragto->page_offset = fragfrom->page_offset;
+ fragto->size = todo;
+@@ -2201,7 +2201,7 @@ int skb_shift(struct sk_buff *tgt, struc
+ fragto = &skb_shinfo(tgt)->frags[merge];
+
+ fragto->size += fragfrom->size;
+- put_page(fragfrom->page);
++ net_put_page(fragfrom->page);
+ }
+
+ /* Reposition in the original skb */
+@@ -2600,7 +2600,7 @@ struct sk_buff *skb_segment(struct sk_bu
+
+ while (pos < offset + len && i < nfrags) {
+ *frag = skb_shinfo(skb)->frags[i];
+- get_page(frag->page);
++ net_get_page(frag->page);
+ size = frag->size;
+
+ if (pos < offset) {
+diff -upkr linux-2.6.30.1/net/ipv4/ip_output.c linux-2.6.30.1/net/ipv4/ip_output.c
+--- linux-2.6.30.1/net/ipv4/ip_output.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/net/ipv4/ip_output.c 2009-07-01 15:55:08.000000000 +0400
+@@ -1018,7 +1018,7 @@ alloc_new_skb:
+ err = -EMSGSIZE;
+ goto error;
+ }
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ frag = &skb_shinfo(skb)->frags[i];
+ }
+@@ -1176,7 +1176,7 @@ ssize_t ip_append_page(struct sock *sk,
+ if (skb_can_coalesce(skb, i, page, offset)) {
+ skb_shinfo(skb)->frags[i-1].size += len;
+ } else if (i < MAX_SKB_FRAGS) {
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, len);
+ } else {
+ err = -EMSGSIZE;
+diff -upkr linux-2.6.30.1/net/ipv4/Makefile linux-2.6.30.1/net/ipv4/Makefile
+--- linux-2.6.30.1/net/ipv4/Makefile 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/net/ipv4/Makefile 2009-07-01 15:55:08.000000000 +0400
+@@ -49,6 +49,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
+ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
+ obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+ obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
++obj-$(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) += tcp_zero_copy.o
+
+ obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
+ xfrm4_output.o
+diff -upkr linux-2.6.30.1/net/ipv4/tcp.c linux-2.6.30.1/net/ipv4/tcp.c
+--- linux-2.6.30.1/net/ipv4/tcp.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/net/ipv4/tcp.c 2009-07-01 15:55:08.000000000 +0400
+@@ -760,7 +760,7 @@ new_segment:
+ if (can_coalesce) {
+ skb_shinfo(skb)->frags[i - 1].size += copy;
+ } else {
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, copy);
+ }
+
+@@ -963,7 +963,7 @@ new_segment:
+ goto new_segment;
+ } else if (page) {
+ if (off == PAGE_SIZE) {
+- put_page(page);
++ net_put_page(page);
+ TCP_PAGE(sk) = page = NULL;
+ off = 0;
+ }
+@@ -1004,9 +1004,9 @@ new_segment:
+ } else {
+ skb_fill_page_desc(skb, i, page, off, copy);
+ if (TCP_PAGE(sk)) {
+- get_page(page);
++ net_get_page(page);
+ } else if (off + copy < PAGE_SIZE) {
+- get_page(page);
++ net_get_page(page);
+ TCP_PAGE(sk) = page;
+ }
+ }
+diff -upkr linux-2.6.30.1/net/ipv4/tcp_output.c linux-2.6.30.1/net/ipv4/tcp_output.c
+--- linux-2.6.30.1/net/ipv4/tcp_output.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/net/ipv4/tcp_output.c 2009-07-01 15:55:08.000000000 +0400
+@@ -889,7 +889,7 @@ static void __pskb_trim_head(struct sk_b
+ k = 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ if (skb_shinfo(skb)->frags[i].size <= eat) {
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+ eat -= skb_shinfo(skb)->frags[i].size;
+ } else {
+ skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+diff -upkr linux-2.6.30.1/net/ipv4/tcp_zero_copy.c linux-2.6.30.1/net/ipv4/tcp_zero_copy.c
+--- linux-2.6.30.1/net/ipv4/tcp_zero_copy.c 2009-06-16 21:19:51.000000000 +0400
++++ linux-2.6.30.1/net/ipv4/tcp_zero_copy.c 2009-07-01 15:55:08.000000000 +0400
+@@ -0,0 +1,49 @@
++/*
++ * Support routines for TCP zero copy transmit
++ *
++ * Created by Vladislav Bolkhovitin
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ */
++
++#include <linux/skbuff.h>
++
++net_get_page_callback_t net_get_page_callback __read_mostly;
++EXPORT_SYMBOL(net_get_page_callback);
++
++net_put_page_callback_t net_put_page_callback __read_mostly;
++EXPORT_SYMBOL(net_put_page_callback);
++
++/*
++ * Caller of this function must ensure that at the moment when it's called
++ * there are no pages in the system with net_priv field set to non-zero
++ * value. Hence, this function, as well as net_get_page() and net_put_page(),
++ * don't need any protection.
++ */
++int net_set_get_put_page_callbacks(
++ net_get_page_callback_t get_callback,
++ net_put_page_callback_t put_callback)
++{
++ int res = 0;
++
++ if ((net_get_page_callback != NULL) && (get_callback != NULL) &&
++ (net_get_page_callback != get_callback)) {
++ res = -EBUSY;
++ goto out;
++ }
++
++ if ((net_put_page_callback != NULL) && (put_callback != NULL) &&
++ (net_put_page_callback != put_callback)) {
++ res = -EBUSY;
++ goto out;
++ }
++
++ net_get_page_callback = get_callback;
++ net_put_page_callback = put_callback;
++
++out:
++ return res;
++}
++EXPORT_SYMBOL(net_set_get_put_page_callbacks);
+diff -upkr linux-2.6.30.1/net/ipv6/ip6_output.c linux-2.6.30.1/net/ipv6/ip6_output.c
+--- linux-2.6.30.1/net/ipv6/ip6_output.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/net/ipv6/ip6_output.c 2009-07-01 15:55:08.000000000 +0400
+@@ -1394,7 +1394,7 @@ alloc_new_skb:
+ err = -EMSGSIZE;
+ goto error;
+ }
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ frag = &skb_shinfo(skb)->frags[i];
+ }
+diff -upkr linux-2.6.30.1/net/Kconfig linux-2.6.30.1/net/Kconfig
+--- linux-2.6.30.1/net/Kconfig 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/net/Kconfig 2009-07-01 15:55:08.000000000 +0400
+@@ -52,6 +52,18 @@ config INET
+
+ Short answer: say Y.
+
++config TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION
++ bool "TCP/IP zero-copy transfer completion notification"
++ depends on INET
++ default SCST_ISCSI
++ ---help---
++ Adds support for sending a notification upon completion of a
++ zero-copy TCP/IP transfer. This can speed up certain TCP/IP
++ software. Currently this is only used by the iSCSI target driver
++ iSCSI-SCST.
++
++ If unsure, say N.
++
+ if INET
+ source "net/ipv4/Kconfig"
+ source "net/ipv6/Kconfig"
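For illustration only, here is a minimal sketch (not part of the patch) of how a target driver such as iSCSI-SCST might use the hooks added above. The driver registers its callbacks with net_set_get_put_page_callbacks(), tags each page it hands to the TCP stack by setting page->net_priv, and is then called back every time the stack takes or drops a reference to such a page, so it can detect when a zero-copy transfer has really completed. The names my_xmit_ctx, my_get_page(), my_put_page(), my_prepare_page() and the reference counting scheme are assumptions made for this sketch, not the actual iSCSI-SCST implementation:

#include <linux/module.h>
#include <linux/net.h>
#include <linux/mm.h>

struct my_xmit_ctx {
	atomic_t net_refs;	/* references the network stack still holds */
	void (*on_done)(struct my_xmit_ctx *ctx); /* transfer-done handler */
};

/* called via net_get_page() for every page with page->net_priv set */
static void my_get_page(struct page *page)
{
	struct my_xmit_ctx *ctx = page->net_priv;

	atomic_inc(&ctx->net_refs);
}

/* called via net_put_page(); the last put means the data left the stack */
static void my_put_page(struct page *page)
{
	struct my_xmit_ctx *ctx = page->net_priv;

	if (atomic_dec_and_test(&ctx->net_refs))
		ctx->on_done(ctx);	/* expected to clear page->net_priv */
}

/* before sendpage(): tag the data page and take the initial reference */
static void my_prepare_page(struct page *page, struct my_xmit_ctx *ctx)
{
	page->net_priv = ctx;
	atomic_inc(&ctx->net_refs);
}

static int __init my_init(void)
{
	return net_set_get_put_page_callbacks(my_get_page, my_put_page);
}

static void __exit my_exit(void)
{
	net_set_get_put_page_callbacks(NULL, NULL);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");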
kernel. This patch does that. You may not patch the kernel if you don't
need pass-through support. Alternatively, you can define
CONFIG_SCST_STRICT_SERIALIZING compile option during the compilation
-(see description below).
+(see description below). Unfortunately, the CONFIG_SCST_STRICT_SERIALIZING
+trick doesn't work on kernels 2.6.30 and later, because those kernels no
+longer have the required functionality (scsi_execute_async()). So, on those
+kernels you have to apply scst_exec_req_fifo-2.6.X.patch to get pass-through
+support working.
2. io_context-2.6.X.patch. This patch exports some IO context management
functions from the kernel. For performance reasons SCST queues commands
http://sourceforge.net/mailarchive/forum.php?thread_name=a0272b440906030714g67eabc5k8f847fb1e538cc62%40mail.gmail.com&forum_name=scst-devel
thread for more details.
-4. readahead-context-2.6.X.patch. This is backported version of the
-context readahead patch http://lkml.org/lkml/2009/4/12/9, big thanks to
-Wu Fengguang! This is a performance improvement patch.
+4. readahead-context-2.6.X.patch. This is a backport of the context
+readahead patch http://lkml.org/lkml/2009/4/12/9 (big thanks to Wu
+Fengguang), which is included in the mainline kernel since 2.6.31. This is
+a performance improvement patch.
Then, to compile SCST type 'make scst'. It will build SCST itself and its
device handlers. To install them type 'make scst_install'. The driver
IMPORTANT: By default for performance reasons VDISK FILEIO devices use write
========= back caching policy. This is generally safe from the consistence of
journaled file systems, laying over them, point of view, but
your unsaved cached data will be lost in case of
power/hardware/software failure, so you must supply your
target server with some kind of UPS or disable write back
- caching using WRITE_THROUGH flag. You also should note, that
- the file systems journaling over write back caching enabled
- devices works reliably *ONLY* if the order of journal writes
- is guaranteed or it uses some kind of data protection
- barriers (i.e. after writing journal data some kind of
- synchronization with media operations is used), otherwise,
- because of possible reordering in the cache, even after
- successful journal rollback, you very much risk to loose your
- data on the FS. Currently, Linux IO subsystem guarantees
- order of write operations only using data protection
- barriers. Some info about it from the XFS point of view could
- be found at http://oss.sgi.com/projects/xfs/faq.html#wcache.
- On Linux initiators for EXT3 and ReiserFS file systems the
- barrier protection could be turned on using "barrier=1" and
+ caching using WRITE_THROUGH flag.
+            Note that file systems journaled over devices with write
+            back caching enabled work reliably *ONLY* if the order of
+            journal writes is guaranteed, or if they use some kind of
+            data protection barriers (i.e. after writing the journal
+            data some kind of synchronization with the media is
+            performed); otherwise, because of possible reordering in
+            the cache, you very much risk losing your data on the FS
+            even after a successful journal rollback. Currently, the
+            Linux IO subsystem guarantees the order of write operations
+            only by using data protection barriers. Some information
+            about this from the XFS point of view can be found at
+            http://oss.sgi.com/projects/xfs/faq.html#wcache. On Linux
+            initiators the barrier protection for EXT3 and ReiserFS
+            file systems can be turned on using the "barrier=1" and
"barrier=flush" mount options correspondingly. Note, that
- usually it's turned off by default (see http://lwn.net/Articles/283161).
- You can check if it's turn on or off by looking in /proc/mounts.
- Windows and, AFAIK, other UNIX'es don't need any special
- explicit options and do necessary barrier actions on
- write-back caching devices by default. Also note
- that on some real-life workloads write through caching might
- perform better, than write back one with the barrier
- protection turned on.
- Also you should realize that Linux doesn't provide a
- guarantee that after sync()/fsync() all written data really
- hit permanent storage, they can be then in the cache of your
- backstorage device and lost on power failure event. Thus,
- ever with write-through cache mode, you still need a good UPS
- to protect yourself from your data loss (note, data loss, not
- the file system integrity corruption).
+            it is usually turned off by default (see
+            http://lwn.net/Articles/283161). You can check whether it is
+            turned on or off by looking in /proc/mounts. Windows and,
+            AFAIK, other UNIXes don't need any special explicit options
+            and perform the necessary barrier actions on write-back
+            caching devices by default. Also note that on some real-life
+            workloads write through caching might perform better than
+            write back caching with the barrier protection turned on.
+            You should also understand that without barriers enabled
+            (i.e. by default) Linux doesn't guarantee that after
+            sync()/fsync() all written data have really hit permanent
+            storage. They can still sit only in the cache of your
+            backstorage device and be lost on a power failure. Thus, even
+            with write-through cache mode, you still either need to
+            enable barriers on the backend file system on the target
+            (which is, indeed, impossible for device backstorage), or
+            need a good UPS to protect yourself from data loss (note,
+            data loss, not file system corruption).
IMPORTANT: Some disk and partition table management utilities don't support
========= block sizes >512 bytes, therefore make sure that your favorite one
sg->length = len;
}
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+ sg++;
+ return sg;
+}
+
#endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
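A usage note, for illustration only: this fallback sg_next() just steps to the next array entry and never returns NULL, so, unlike the real sg_next() on 2.6.24+ kernels, iteration over such a table has to be bounded by the entry count. A hypothetical sketch (my_sg_total_len() is not a real SCST helper and assumes the compatibility definitions above are in scope):

#include <linux/scatterlist.h>

/* hypothetical helper: sum the byte length of an nents-entry SG table */
static inline size_t my_sg_total_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg = sgl;
	size_t total = 0;
	int i;

	/* no chaining support here, so rely on the caller's entry count */
	for (i = 0; i < nents; i++, sg = sg_next(sg))
		total += sg->length;

	return total;
}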
--- /dev/null
+diff -upkr -X linux-2.6.30/Documentation/dontdiff linux-2.6.30/drivers/Kconfig linux-2.6.30/drivers/Kconfig
+--- linux-2.6.30/drivers/Kconfig 2008-07-14 01:51:29.000000000 +0400
++++ linux-2.6.30/drivers/Kconfig 2008-07-24 14:14:46.000000000 +0400
+@@ -24,6 +24,8 @@ source "drivers/ide/Kconfig"
+
+ source "drivers/scsi/Kconfig"
+
++source "drivers/scst/Kconfig"
++
+ source "drivers/ata/Kconfig"
+
+ source "drivers/md/Kconfig"
--- /dev/null
+diff -upkr -X linux-2.6.30/Documentation/dontdiff linux-2.6.30/drivers/Makefile linux-2.6.30/drivers/Makefile
+--- linux-2.6.30/drivers/Makefile 2008-07-14 01:51:29.000000000 +0400
++++ linux-2.6.30/drivers/Makefile 2008-07-24 14:15:29.000000000 +0400
+@@ -41,6 +41,7 @@ obj-$(CONFIG_ATM) += atm/
+ obj-y += macintosh/
+ obj-$(CONFIG_IDE) += ide/
+ obj-$(CONFIG_SCSI) += scsi/
++obj-$(CONFIG_SCST) += scst/
+ obj-$(CONFIG_ATA) += ata/
+ obj-y += net/
+ obj-$(CONFIG_ATM) += atm/
--- /dev/null
+diff -upkr linux-2.6.30.1/block/blk-ioc.c linux-2.6.30.1/block/blk-ioc.c
+--- linux-2.6.30.1/block/blk-ioc.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/block/blk-ioc.c 2009-07-01 15:55:08.000000000 +0400
+@@ -65,6 +65,21 @@ static void cfq_exit(struct io_context *
+ rcu_read_unlock();
+ }
+
++void __exit_io_context(struct io_context *ioc)
++{
++ if (ioc == NULL)
++ return;
++
++ if (atomic_dec_and_test(&ioc->nr_tasks)) {
++ if (ioc->aic && ioc->aic->exit)
++ ioc->aic->exit(ioc->aic);
++ cfq_exit(ioc);
++
++ put_io_context(ioc);
++ }
++}
++EXPORT_SYMBOL(__exit_io_context);
++
+ /* Called by the exitting task */
+ void exit_io_context(void)
+ {
+@@ -75,13 +90,7 @@ void exit_io_context(void)
+ current->io_context = NULL;
+ task_unlock(current);
+
+- if (atomic_dec_and_test(&ioc->nr_tasks)) {
+- if (ioc->aic && ioc->aic->exit)
+- ioc->aic->exit(ioc->aic);
+- cfq_exit(ioc);
+-
+- put_io_context(ioc);
+- }
++ __exit_io_context(ioc);
+ }
+
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+@@ -105,6 +114,7 @@ struct io_context *alloc_io_context(gfp_
+
+ return ret;
+ }
++EXPORT_SYMBOL(alloc_io_context);
+
+ /*
+ * If the current task has no IO context then create one and initialise it.
+diff -upkr linux-2.6.30.1/include/linux/iocontext.h linux-2.6.30.1/include/linux/iocontext.h
+--- linux-2.6.30.1/include/linux/iocontext.h 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/include/linux/iocontext.h 2009-07-01 15:20:24.000000000 +0400
+@@ -103,7 +103,9 @@ static inline struct io_context *ioc_tas
+ int put_io_context(struct io_context *ioc);
+ void exit_io_context(void);
+ struct io_context *get_io_context(gfp_t gfp_flags, int node);
++#define SCST_IO_CONTEXT
+ struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
++void __exit_io_context(struct io_context *ioc);
+ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
+ #else
+ static inline void exit_io_context(void)
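As a rough illustration (not part of the patch) of why these exports matter: SCST wants a group of its I/O threads to share one IO context so that CFQ treats their I/O as a single stream. A heavily simplified sketch of that pattern, using the exported alloc_io_context() and __exit_io_context() together with the existing ioc_task_link() helper from linux/iocontext.h, follows. The names scst_shared_ioc, scst_ioc_attach() and scst_ioc_detach() are assumptions for the sketch, and real code needs proper locking and error handling around the shared pointer:

#include <linux/iocontext.h>
#include <linux/sched.h>
#include <linux/gfp.h>

static struct io_context *scst_shared_ioc;	/* assumed group-wide context */

/* called by each worker thread (assumed to have no io_context yet) */
static void scst_ioc_attach(void)
{
	if (scst_shared_ioc == NULL) {
		/* first thread creates the context and becomes a user of it */
		scst_shared_ioc = alloc_io_context(GFP_KERNEL, -1);
		current->io_context = scst_shared_ioc;
	} else {
		/* later threads link to the same context */
		current->io_context = ioc_task_link(scst_shared_ioc);
	}
}

/* called by each worker thread just before it exits */
static void scst_ioc_detach(void)
{
	struct io_context *ioc;

	task_lock(current);
	ioc = current->io_context;
	current->io_context = NULL;
	task_unlock(current);

	__exit_io_context(ioc);		/* mirrors what exit_io_context() does */
}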
--- /dev/null
+diff -upkr linux-2.6.30.1/mm/readahead.c linux-2.6.30.1/mm/readahead.c
+--- linux-2.6.30.1/mm/readahead.c 2009-07-01 13:52:31.000000000 +0400
++++ linux-2.6.30.1/mm/readahead.c 2009-07-01 15:55:08.000000000 +0400
+@@ -565,5 +565,8 @@ page_cache_async_readahead(struct addres
+
+ /* do read-ahead */
+ ondemand_readahead(mapping, ra, filp, true, offset, req_size);
++
++ if (PageUptodate(page))
++ blk_run_backing_dev(mapping->backing_dev_info, NULL);
+ }
+ EXPORT_SYMBOL_GPL(page_cache_async_readahead);
--- /dev/null
+diff -upkr linux-2.6.30.1/block/blk-map.c linux-2.6.30.1/block/blk-map.c
+--- linux-2.6.30.1/block/blk-map.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/block/blk-map.c 2009-07-08 21:18:53.000000000 +0400
+@@ -5,6 +5,7 @@
+ #include <linux/module.h>
+ #include <linux/bio.h>
+ #include <linux/blkdev.h>
++#include <linux/scatterlist.h>
+ #include <scsi/sg.h> /* for struct sg_iovec */
+
+ #include "blk.h"
+@@ -154,7 +155,7 @@ int blk_rq_map_user(struct request_queue
+ }
+
+ if (!bio_flagged(bio, BIO_USER_MAPPED))
+- rq->cmd_flags |= REQ_COPY_USER;
++ rq->cmd_flags |= REQ_HAS_TAIL_SPACE_FOR_PADDING;
+
+ rq->buffer = rq->data = NULL;
+ return 0;
+@@ -230,7 +231,7 @@ int blk_rq_map_user_iov(struct request_q
+ }
+
+ if (!bio_flagged(bio, BIO_USER_MAPPED))
+- rq->cmd_flags |= REQ_COPY_USER;
++ rq->cmd_flags |= REQ_HAS_TAIL_SPACE_FOR_PADDING;
+
+ blk_queue_bounce(q, &bio);
+ bio_get(bio);
+@@ -273,6 +274,532 @@ int blk_rq_unmap_user(struct bio *bio)
+ EXPORT_SYMBOL(blk_rq_unmap_user);
+
+ /**
++ * blk_copy_sg - copy one SG vector to another
++ * @dst_sg: destination SG
++ * @src_sg: source SG
++ * @copy_len: maximum amount of data to copy. If 0, then copy all.
++ * @d_km_type: kmap_atomic type for the destination SG
++ * @s_km_type: kmap_atomic type for the source SG
++ *
++ * Description:
++ * Data from the source SG vector will be copied to the destination SG
++ * vector. End of the vectors will be determined by sg_next() returning
++ * NULL. Returns number of bytes copied.
++ */
++int blk_copy_sg(struct scatterlist *dst_sg,
++ struct scatterlist *src_sg, size_t copy_len,
++ enum km_type d_km_type, enum km_type s_km_type)
++{
++ int res = 0;
++ size_t src_len, dst_len, src_offs, dst_offs;
++ struct page *src_page, *dst_page;
++
++ if (copy_len == 0)
++ copy_len = 0x7FFFFFFF; /* copy all */
++
++ dst_page = sg_page(dst_sg);
++ dst_len = dst_sg->length;
++ dst_offs = dst_sg->offset;
++
++ src_offs = 0;
++ do {
++ src_page = sg_page(src_sg);
++ src_len = src_sg->length;
++ src_offs = src_sg->offset;
++
++ do {
++ void *saddr, *daddr;
++ size_t n;
++
++ saddr = kmap_atomic(src_page, s_km_type) + src_offs;
++ daddr = kmap_atomic(dst_page, d_km_type) + dst_offs;
++
++ if ((src_offs == 0) && (dst_offs == 0) &&
++ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
++ (copy_len >= PAGE_SIZE)) {
++ copy_page(daddr, saddr);
++ n = PAGE_SIZE;
++ } else {
++ n = min_t(size_t, PAGE_SIZE - dst_offs,
++ PAGE_SIZE - src_offs);
++ n = min(n, src_len);
++ n = min(n, dst_len);
++ n = min_t(size_t, n, copy_len);
++ memcpy(daddr, saddr, n);
++ dst_offs += n;
++ src_offs += n;
++ }
++
++ kunmap_atomic(saddr, s_km_type);
++ kunmap_atomic(daddr, d_km_type);
++
++ res += n;
++ copy_len -= n;
++ if (copy_len == 0)
++ goto out;
++
++ if ((src_offs & ~PAGE_MASK) == 0) {
++ src_page = nth_page(src_page, 1);
++ src_offs = 0;
++ }
++ if ((dst_offs & ~PAGE_MASK) == 0) {
++ dst_page = nth_page(dst_page, 1);
++ dst_offs = 0;
++ }
++
++ src_len -= n;
++ dst_len -= n;
++ if (dst_len == 0) {
++ dst_sg = sg_next(dst_sg);
++ if (dst_sg == NULL)
++ goto out;
++ dst_page = sg_page(dst_sg);
++ dst_len = dst_sg->length;
++ dst_offs = dst_sg->offset;
++ }
++ } while (src_len > 0);
++
++ src_sg = sg_next(src_sg);
++ } while (src_sg != NULL);
++
++out:
++ return res;
++}
++EXPORT_SYMBOL(blk_copy_sg);
++
++void blk_rq_unmap_kern_sg(struct request *req, int do_copy)
++{
++ struct scatterlist *hdr = (struct scatterlist *)req->end_io_data;
++
++ if (hdr == NULL)
++ goto out;
++
++ if (hdr->length == 0) {
++ /* Tail element only was copied */
++ struct scatterlist *new_sg = &hdr[1];
++ struct scatterlist *orig_sg = (struct scatterlist *)hdr->page_link;
++
++ if ((rq_data_dir(req) == READ) && do_copy) {
++ void *saddr, *daddr;
++
++ saddr = kmap_atomic(sg_page(orig_sg), KM_BIO_SRC_IRQ);
++ daddr = kmap_atomic(sg_page(new_sg), KM_BIO_DST_IRQ) +
++ new_sg->offset;
++ memcpy(daddr, saddr, orig_sg->length);
++ kunmap_atomic(saddr, KM_BIO_SRC_IRQ);
++ kunmap_atomic(daddr, KM_BIO_DST_IRQ);
++ }
++
++ __free_pages(sg_page(orig_sg), get_order(orig_sg->length));
++ *orig_sg = *new_sg;
++ kfree(hdr);
++ } else {
++ /* The whole SG was copied */
++ struct scatterlist *new_sgl = &hdr[1];
++ struct scatterlist *orig_sgl = (struct scatterlist *)hdr->page_link;
++ struct scatterlist *sg, *start_sg;
++ int n;
++
++ if ((rq_data_dir(req) == READ) && do_copy) {
++ blk_copy_sg(orig_sgl, new_sgl, 0, KM_BIO_DST_IRQ,
++ KM_BIO_SRC_IRQ);
++ }
++
++ start_sg = hdr;
++ sg = new_sgl;
++ n = 1;
++ while (sg != NULL) {
++ __free_page(sg_page(sg));
++ sg = sg_next(sg);
++ n++;
++ /* One entry for chaining */
++ if ((sg == NULL) || (n == (SG_MAX_SINGLE_ALLOC - 1))) {
++ kfree(start_sg);
++ start_sg = sg;
++ n = 0;
++ }
++ }
++ }
++
++out:
++ return;
++}
++
++static int blk_rq_handle_align_tail_only(struct request *rq,
++ struct scatterlist *sg_to_copy,
++ gfp_t gfp, gfp_t page_gfp)
++{
++ int res = 0;
++ struct scatterlist *tail_sg = sg_to_copy;
++ struct scatterlist *new_sg;
++ struct scatterlist *hdr;
++ int new_sg_nents;
++ struct page *pg;
++
++ new_sg_nents = 2;
++
++ new_sg = kmalloc(sizeof(*new_sg) * new_sg_nents, gfp);
++ if (new_sg == NULL)
++ goto out_nomem;
++
++ sg_init_table(new_sg, new_sg_nents);
++
++ hdr = new_sg;
++ new_sg++;
++ new_sg_nents--;
++
++ hdr->page_link = (unsigned long)tail_sg;
++ *new_sg = *tail_sg;
++
++ pg = alloc_pages(page_gfp, get_order(tail_sg->length));
++ if (pg == NULL)
++ goto err_free_new_sg;
++
++ if (rq_data_dir(rq) == WRITE) {
++ void *saddr, *daddr;
++ saddr = kmap_atomic(sg_page(tail_sg), KM_USER0) +
++ tail_sg->offset;
++ daddr = kmap_atomic(pg, KM_USER1);
++ memcpy(daddr, saddr, tail_sg->length);
++ kunmap_atomic(saddr, KM_USER0);
++ kunmap_atomic(daddr, KM_USER1);
++ }
++
++ sg_assign_page(tail_sg, pg);
++ tail_sg->offset = 0;
++
++ rq->end_io_data = hdr;
++ rq->cmd_flags |= REQ_HAS_TAIL_SPACE_FOR_PADDING;
++
++out:
++ return res;
++
++err_free_new_sg:
++ kfree(new_sg);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++static int blk_rq_handle_align(struct request *rq, struct scatterlist **psgl,
++ int *pnents, struct scatterlist *sgl_to_copy,
++ int nents_to_copy, gfp_t gfp, gfp_t page_gfp)
++{
++ int res = 0, i;
++ struct scatterlist *sgl = *psgl;
++ int nents = *pnents;
++ struct scatterlist *sg, *prev_sg;
++ struct scatterlist *new_sgl;
++ struct scatterlist *hdr;
++ size_t len = 0, to_copy;
++ int new_sgl_nents, new_sgl_nents_to_alloc, n;
++
++ if (sgl != sgl_to_copy) {
++ /* Copy only the last element */
++ res = blk_rq_handle_align_tail_only(rq, sgl_to_copy,
++ gfp, page_gfp);
++ if (res == 0)
++ goto out;
++ }
++
++ for_each_sg(sgl, sg, nents, i)
++ len += sg->length;
++ to_copy = len;
++
++ /*
++ * Let's keep each SG allocation inside a single page to decrease
++ * probability of failure.
++ */
++
++ new_sgl_nents = PFN_UP(len) + 1;
++ new_sgl_nents_to_alloc = new_sgl_nents +
++ ((new_sgl_nents - 1) / SG_MAX_SINGLE_ALLOC);
++ n = min_t(size_t, SG_MAX_SINGLE_ALLOC, new_sgl_nents_to_alloc);
++
++ new_sgl = kmalloc(sizeof(*new_sgl) * n, gfp);
++ if (new_sgl == NULL)
++ goto out_nomem;
++
++ sg_init_table(new_sgl, n);
++
++ new_sgl_nents_to_alloc -= n;
++ sg = new_sgl;
++ while (new_sgl_nents_to_alloc > 0) {
++ prev_sg = sg;
++ n = min_t(size_t, SG_MAX_SINGLE_ALLOC, new_sgl_nents_to_alloc);
++
++ sg = kmalloc(sizeof(*sg) * n, gfp);
++ if (sg == NULL)
++ goto out_nomem;
++
++ sg_init_table(sg, n);
++ sg_chain(prev_sg, SG_MAX_SINGLE_ALLOC, sg);
++
++ new_sgl_nents_to_alloc -= n;
++ };
++
++ hdr = new_sgl;
++ new_sgl++;
++ new_sgl_nents--;
++
++ hdr->page_link = (unsigned long)sgl;
++ hdr->length = nents;
++
++ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
++ struct page *pg;
++
++ pg = alloc_page(page_gfp);
++ if (pg == NULL)
++ goto err_free_new_sgl;
++
++ sg_assign_page(sg, pg);
++ sg->length = min_t(size_t, PAGE_SIZE, len);
++
++ len -= PAGE_SIZE;
++ }
++
++ if (rq_data_dir(rq) == WRITE) {
++ /*
++ * We need to limit amount of copied data to to_copy, because
++ * sgl might have the last element not marked as last in
++ * SG chaining.
++ */
++ blk_copy_sg(new_sgl, sgl, to_copy, KM_USER0, KM_USER1);
++ }
++
++ rq->end_io_data = hdr;
++ rq->cmd_flags |= REQ_HAS_TAIL_SPACE_FOR_PADDING;
++
++ *psgl = new_sgl;
++ *pnents = new_sgl_nents;
++
++out:
++ return res;
++
++err_free_new_sgl:
++ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
++ struct page *pg = sg_page(sg);
++ if (pg == NULL)
++ break;
++ __free_page(pg);
++ }
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++static void bio_map_kern_endio(struct bio *bio, int err)
++{
++ bio_put(bio);
++}
++
++static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++ int nents, gfp_t gfp, struct scatterlist **sgl_to_copy,
++ int *nents_to_copy)
++{
++ int res;
++ struct request_queue *q = rq->q;
++ int rw = rq_data_dir(rq);
++ int max_nr_vecs, i;
++ size_t tot_len;
++ bool need_new_bio;
++ struct scatterlist *sg, *prev_sg = NULL;
++ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
++
++ *sgl_to_copy = NULL;
++
++ if (unlikely((sgl == 0) || (nents <= 0))) {
++ WARN_ON(1);
++ res = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * Let's keep each bio allocation inside a single page to decrease
++ * probability of failure.
++ */
++ max_nr_vecs = min_t(size_t,
++ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
++ BIO_MAX_PAGES);
++
++ need_new_bio = true;
++ tot_len = 0;
++ for_each_sg(sgl, sg, nents, i) {
++ struct page *page = sg_page(sg);
++ void *page_addr = page_address(page);
++ size_t len = sg->length, l;
++ size_t offset = sg->offset;
++
++ tot_len += len;
++ prev_sg = sg;
++
++ /*
++ * Each segment must be aligned on DMA boundary and
++ * not on stack. The last one may have unaligned
++ * length as long as the total length is aligned to
++ * DMA padding alignment.
++ */
++ if (i == nents - 1)
++ l = 0;
++ else
++ l = len;
++ if (((sg->offset | l) & queue_dma_alignment(q)) ||
++ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
++ res = -EINVAL;
++ goto out_need_copy;
++ }
++
++ while (len > 0) {
++ size_t bytes;
++ int rc;
++
++ if (need_new_bio) {
++ bio = bio_kmalloc(gfp, max_nr_vecs);
++ if (bio == NULL) {
++ res = -ENOMEM;
++ goto out_free_bios;
++ }
++
++ if (rw == WRITE)
++ bio->bi_rw |= 1 << BIO_RW;
++
++ bio->bi_end_io = bio_map_kern_endio;
++
++ if (hbio == NULL)
++ hbio = tbio = bio;
++ else
++ tbio = tbio->bi_next = bio;
++ }
++
++ bytes = min_t(size_t, len, PAGE_SIZE - offset);
++
++ rc = bio_add_pc_page(q, bio, page, bytes, offset);
++ if (rc < bytes) {
++ if (unlikely(need_new_bio || (rc < 0))) {
++ if (rc < 0)
++ res = rc;
++ else
++ res = -EIO;
++ goto out_need_copy;
++ } else {
++ need_new_bio = true;
++ len -= rc;
++ offset += rc;
++ continue;
++ }
++ }
++
++ need_new_bio = false;
++ offset = 0;
++ len -= bytes;
++ page = nth_page(page, 1);
++ }
++ }
++
++ if (hbio == NULL) {
++ res = -EINVAL;
++ goto out_free_bios;
++ }
++
++ /* Total length must be aligned on DMA padding alignment */
++ if ((tot_len & q->dma_pad_mask) &&
++ !(rq->cmd_flags & REQ_HAS_TAIL_SPACE_FOR_PADDING)) {
++ res = -EINVAL;
++ if (sgl->offset == 0) {
++ *sgl_to_copy = prev_sg;
++ *nents_to_copy = 1;
++ goto out_free_bios;
++ } else
++ goto out_need_copy;
++ }
++
++ while (hbio != NULL) {
++ bio = hbio;
++ hbio = hbio->bi_next;
++ bio->bi_next = NULL;
++
++ blk_queue_bounce(q, &bio);
++
++ res = blk_rq_append_bio(q, rq, bio);
++ if (unlikely(res != 0)) {
++ bio->bi_next = hbio;
++ hbio = bio;
++ goto out_free_bios;
++ }
++ }
++
++ rq->buffer = rq->data = NULL;
++
++out:
++ return res;
++
++out_need_copy:
++ *sgl_to_copy = sgl;
++ *nents_to_copy = nents;
++
++out_free_bios:
++ while (hbio != NULL) {
++ bio = hbio;
++ hbio = hbio->bi_next;
++ bio_put(bio);
++ }
++ goto out;
++}
++
++/**
++ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
++ * @rq: request to fill
++ * @sgl: area to map
++ * @nents: number of elements in @sgl
++ * @gfp: memory allocation flags
++ *
++ * Description:
++ * Data will be mapped directly if possible. Otherwise a bounce
++ * buffer will be used.
++ */
++int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++ int nents, gfp_t gfp)
++{
++ int res;
++ struct scatterlist *sg_to_copy = NULL;
++ int nents_to_copy = 0;
++
++ if (unlikely((sgl == 0) || (sgl->length == 0) ||
++ (nents <= 0) || (rq->end_io_data != NULL))) {
++ WARN_ON(1);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
++ &nents_to_copy);
++ if (unlikely(res != 0)) {
++ if (sg_to_copy == NULL)
++ goto out;
++
++ res = blk_rq_handle_align(rq, &sgl, &nents, sg_to_copy,
++ nents_to_copy, gfp, rq->q->bounce_gfp | gfp);
++ if (unlikely(res != 0))
++ goto out;
++
++ res = __blk_rq_map_kern_sg(rq, sgl, nents, gfp, &sg_to_copy,
++ &nents_to_copy);
++ if (res != 0) {
++ blk_rq_unmap_kern_sg(rq, 0);
++ goto out;
++ }
++ }
++
++ rq->buffer = rq->data = NULL;
++
++out:
++ return res;
++}
++EXPORT_SYMBOL(blk_rq_map_kern_sg);
++
++/**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+@@ -309,7 +836,7 @@ int blk_rq_map_kern(struct request_queue
+ bio->bi_rw |= (1 << BIO_RW);
+
+ if (do_copy)
+- rq->cmd_flags |= REQ_COPY_USER;
++ rq->cmd_flags |= REQ_HAS_TAIL_SPACE_FOR_PADDING;
+
+ blk_rq_bio_prep(q, rq, bio);
+ blk_queue_bounce(q, &rq->bio);
+diff -upkr linux-2.6.30.1/block/blk-merge.c linux-2.6.30.1/block/blk-merge.c
+--- linux-2.6.30.1/block/blk-merge.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/block/blk-merge.c 2009-07-08 21:18:53.000000000 +0400
+@@ -198,7 +198,7 @@ new_segment:
+ } /* segments in rq */
+
+
+- if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
++ if ((rq->cmd_flags & REQ_HAS_TAIL_SPACE_FOR_PADDING) &&
+ (rq->data_len & q->dma_pad_mask)) {
+ unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+diff -upkr linux-2.6.30.1/drivers/scsi/scsi_lib.c linux-2.6.30.1/drivers/scsi/scsi_lib.c
+--- linux-2.6.30.1/drivers/scsi/scsi_lib.c 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/drivers/scsi/scsi_lib.c 2009-07-08 21:18:53.000000000 +0400
+@@ -277,6 +277,100 @@ int scsi_execute_req(struct scsi_device
+ }
+ EXPORT_SYMBOL(scsi_execute_req);
+
++struct scsi_io_context {
++ void *blk_data;
++ void *data;
++ void (*done)(void *data, char *sense, int result, int resid);
++ char sense[SCSI_SENSE_BUFFERSIZE];
++};
++
++static struct kmem_cache *scsi_io_context_cache;
++
++static void scsi_end_async(struct request *req, int error)
++{
++ struct scsi_io_context *sioc = req->end_io_data;
++
++ req->end_io_data = sioc->blk_data;
++ blk_rq_unmap_kern_sg(req, (error == 0));
++
++ if (sioc->done)
++ sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
++
++ kmem_cache_free(scsi_io_context_cache, sioc);
++ __blk_put_request(req->q, req);
++}
++
++/**
++ * scsi_execute_async - insert request
++ * @sdev: scsi device
++ * @cmd: scsi command
++ * @cmd_len: length of scsi cdb
++ * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
++ * @sgl: data buffer scatterlist
++ * @nents: number of elements in the sgl
++ * @timeout: request timeout in seconds
++ * @retries: number of times to retry request
++ * @privdata: data passed to done()
++ * @done: callback function when done
++ * @gfp: memory allocation flags
++ * @flags: one or more SCSI_ASYNC_EXEC_FLAG_* flags
++ */
++int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
++ int cmd_len, int data_direction, struct scatterlist *sgl,
++ int nents, int timeout, int retries, void *privdata,
++ void (*done)(void *, char *, int, int), gfp_t gfp,
++ int flags)
++{
++ struct request *req;
++ struct scsi_io_context *sioc;
++ int err = 0;
++ int write = (data_direction == DMA_TO_DEVICE);
++
++ sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
++ if (sioc == NULL)
++ return DRIVER_ERROR << 24;
++
++ req = blk_get_request(sdev->request_queue, write, gfp);
++ if (req == NULL)
++ goto free_sense;
++ req->cmd_type = REQ_TYPE_BLOCK_PC;
++ req->cmd_flags |= REQ_QUIET;
++
++ if (flags & SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING)
++ req->cmd_flags |= REQ_HAS_TAIL_SPACE_FOR_PADDING;
++
++ if (sgl != NULL) {
++ err = blk_rq_map_kern_sg(req, sgl, nents, gfp);
++ if (err)
++ goto free_req;
++ }
++
++ sioc->blk_data = req->end_io_data;
++ sioc->data = privdata;
++ sioc->done = done;
++
++ req->cmd_len = cmd_len;
++ memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
++ memcpy(req->cmd, cmd, req->cmd_len);
++ req->sense = sioc->sense;
++ req->sense_len = 0;
++ req->timeout = timeout;
++ req->retries = retries;
++ req->end_io_data = sioc;
++
++ blk_execute_rq_nowait(req->q, NULL, req,
++ flags & SCSI_ASYNC_EXEC_FLAG_AT_HEAD, scsi_end_async);
++ return 0;
++
++free_req:
++ blk_put_request(req);
++
++free_sense:
++ kmem_cache_free(scsi_io_context_cache, sioc);
++ return DRIVER_ERROR << 24;
++}
++EXPORT_SYMBOL_GPL(scsi_execute_async);
++
+ /*
+ * Function: scsi_init_cmd_errh()
+ *
+@@ -1743,12 +1837,20 @@ int __init scsi_init_queue(void)
+ {
+ int i;
+
++ scsi_io_context_cache = kmem_cache_create("scsi_io_context",
++ sizeof(struct scsi_io_context),
++ 0, 0, NULL);
++ if (!scsi_io_context_cache) {
++ printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
++ return -ENOMEM;
++ }
++
+ scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
+ sizeof(struct scsi_data_buffer),
+ 0, 0, NULL);
+ if (!scsi_sdb_cache) {
+ printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
+- return -ENOMEM;
++ goto cleanup_io_context;
+ }
+
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+@@ -1784,6 +1886,9 @@ cleanup_sdb:
+ }
+ kmem_cache_destroy(scsi_sdb_cache);
+
++cleanup_io_context:
++ kmem_cache_destroy(scsi_io_context_cache);
++
+ return -ENOMEM;
+ }
+
+@@ -1791,6 +1896,7 @@ void scsi_exit_queue(void)
+ {
+ int i;
+
++ kmem_cache_destroy(scsi_io_context_cache);
+ kmem_cache_destroy(scsi_sdb_cache);
+
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+diff -upkr linux-2.6.30.1/include/linux/blkdev.h linux-2.6.30.1/include/linux/blkdev.h
+--- linux-2.6.30.1/include/linux/blkdev.h 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/include/linux/blkdev.h 2009-07-08 21:18:53.000000000 +0400
+@@ -115,7 +115,7 @@ enum rq_flag_bits {
+ __REQ_RW_SYNC, /* request is sync (sync write or read) */
+ __REQ_ALLOCED, /* request came from our alloc pool */
+ __REQ_RW_META, /* metadata io request */
+- __REQ_COPY_USER, /* contains copies of user pages */
++ __REQ_HAS_TAIL_SPACE_FOR_PADDING, /* has space for padding in the tail */
+ __REQ_INTEGRITY, /* integrity metadata has been remapped */
+ __REQ_NOIDLE, /* Don't anticipate more IO after this one */
+ __REQ_IO_STAT, /* account I/O stat */
+@@ -143,7 +143,7 @@ enum rq_flag_bits {
+ #define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
+ #define REQ_ALLOCED (1 << __REQ_ALLOCED)
+ #define REQ_RW_META (1 << __REQ_RW_META)
+-#define REQ_COPY_USER (1 << __REQ_COPY_USER)
++#define REQ_HAS_TAIL_SPACE_FOR_PADDING (1 << __REQ_HAS_TAIL_SPACE_FOR_PADDING)
+ #define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
+ #define REQ_NOIDLE (1 << __REQ_NOIDLE)
+ #define REQ_IO_STAT (1 << __REQ_IO_STAT)
+@@ -807,6 +807,9 @@ extern int blk_rq_map_kern(struct reques
+ extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, struct sg_iovec *, int,
+ unsigned int, gfp_t);
++extern int blk_rq_map_kern_sg(struct request *rq,
++ struct scatterlist *sgl, int nents, gfp_t gfp);
++extern void blk_rq_unmap_kern_sg(struct request *req, int do_copy);
+ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+ struct request *, int);
+ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
+@@ -909,6 +912,9 @@ extern void blk_dump_rq_flags(struct req
+ extern void generic_unplug_device(struct request_queue *);
+ extern long nr_blockdev_pages(void);
+
++extern int blk_copy_sg(struct scatterlist *, struct scatterlist *, size_t,
++ enum km_type, enum km_type);
++
+ int blk_get_queue(struct request_queue *);
+ struct request_queue *blk_alloc_queue(gfp_t);
+ struct request_queue *blk_alloc_queue_node(gfp_t, int);
+diff -upkr linux-2.6.30.1/include/scsi/scsi_device.h linux-2.6.30.1/include/scsi/scsi_device.h
+--- linux-2.6.30.1/include/scsi/scsi_device.h 2009-06-10 07:05:27.000000000 +0400
++++ linux-2.6.30.1/include/scsi/scsi_device.h 2009-07-06 20:24:54.000000000 +0400
+@@ -372,6 +372,17 @@ extern int scsi_execute_req(struct scsi_
+ struct scsi_sense_hdr *, int timeout, int retries,
+ int *resid);
+
++#define SCSI_ASYNC_EXEC_FLAG_AT_HEAD 1
++#define SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING 2
++
++#define SCSI_EXEC_REQ_FIFO_DEFINED
++extern int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
++ int cmd_len, int data_direction,
++ struct scatterlist *sgl, int nents, int timeout,
++ int retries, void *privdata,
++ void (*done)(void *, char *, int, int),
++ gfp_t gfp, int flags);
++
+ static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
+ {
+ return device_reprobe(&sdev->sdev_gendev);
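To make the restored interface concrete, here is a hypothetical caller sketch (not part of the patch) that issues a READ CAPACITY(10) through the scsi_execute_async() added above, with completion reported asynchronously through the done() callback. The my_io structure, my_done(), my_read_capacity() and the timeout/retry values are illustrative assumptions only:

#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

struct my_io {
	struct scatterlist sg;
	unsigned char buf[8];		/* READ CAPACITY(10) returns 8 bytes */
};

static void my_done(void *data, char *sense, int result, int resid)
{
	struct my_io *io = data;

	/* a real caller would check result/sense and parse io->buf here */
	kfree(io);
}

static int my_read_capacity(struct scsi_device *sdev)
{
	unsigned char cdb[10] = { READ_CAPACITY, };
	struct my_io *io;

	io = kzalloc(sizeof(*io), GFP_KERNEL);
	if (io == NULL)
		return -ENOMEM;

	sg_init_one(&io->sg, io->buf, sizeof(io->buf));

	/* single-entry SG list, data flows from the device to io->buf */
	return scsi_execute_async(sdev, cdb, sizeof(cdb), DMA_FROM_DEVICE,
				  &io->sg, 1, 30 * HZ, 3, io, my_done,
				  GFP_KERNEL, 0);
}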
struct page *page = virt_to_page(addr);
if (need_new_bio) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
+ bio = bio_kmalloc(GFP_KERNEL, max_nr_vecs);
+#else
bio = bio_alloc(GFP_KERNEL, max_nr_vecs);
+#endif
if (!bio) {
PRINT_ERROR("Failed to create bio "
"for data segment= %d cmd %p",
if (!strncmp("close ", p, 6)) {
p += 6;
action = 0;
- } else if (!strncmp("change ", p, 5)) {
+ } else if (!strncmp("change ", p, 7)) {
p += 7;
action = 1;
} else if (!strncmp("open ", p, 5)) {
#include <linux/cdrom.h>
#include <linux/unistd.h>
#include <linux/string.h>
+#include <asm/kmap_types.h>
#include "scst.h"
#include "scst_priv.h"
}
#endif
+static bool is_report_sg_limitation(void)
+{
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ return (trace_flag & TRACE_OUT_OF_MEM);
+#else
+ return false;
+#endif
+}
+
int scst_alloc_space(struct scst_cmd *cmd)
{
gfp_t gfp_mask;
goto out;
if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
- if (ll < 10) {
+ if ((ll < 10) || is_report_sg_limitation()) {
PRINT_INFO("Unable to complete command due to "
"SG IO count limitation (requested %d, "
"available %d, tgt lim %d)", cmd->sg_cnt,
goto out_sg_free;
if (unlikely(cmd->in_sg_cnt > tgt_dev->max_sg_cnt)) {
- if (ll < 10) {
+ if ((ll < 10) || is_report_sg_limitation()) {
PRINT_INFO("Unable to complete command due to "
"SG IO count limitation (IN buffer, requested "
"%d, available %d, tgt lim %d)", cmd->in_sg_cnt,
return;
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+/**
+ * blk_copy_sg - copy one SG vector to another
+ * @dst_sg: destination SG
+ * @src_sg: source SG
+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ *	Data from the source SG vector will be copied to the destination SG
+ * vector. End of the vectors will be determined by sg_next() returning
+ * NULL. Returns number of bytes copied.
+ */
+int blk_copy_sg(struct scatterlist *dst_sg,
+ struct scatterlist *src_sg, size_t copy_len,
+ enum km_type d_km_type, enum km_type s_km_type)
+{
+ int res = 0;
+ size_t src_len, dst_len, src_offs, dst_offs;
+ struct page *src_page, *dst_page;
+
+ if (copy_len == 0)
+ copy_len = 0x7FFFFFFF; /* copy all */
+
+ dst_page = sg_page(dst_sg);
+ dst_len = dst_sg->length;
+ dst_offs = dst_sg->offset;
+
+ src_offs = 0;
+ do {
+ src_page = sg_page(src_sg);
+ src_len = src_sg->length;
+ src_offs = src_sg->offset;
+
+ do {
+ void *saddr, *daddr;
+ size_t n;
+
+ saddr = kmap_atomic(src_page, s_km_type) + src_offs;
+ daddr = kmap_atomic(dst_page, d_km_type) + dst_offs;
+
+ if ((src_offs == 0) && (dst_offs == 0) &&
+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+ (copy_len >= PAGE_SIZE)) {
+ copy_page(daddr, saddr);
+ n = PAGE_SIZE;
+ } else {
+ n = min_t(size_t, PAGE_SIZE - dst_offs,
+ PAGE_SIZE - src_offs);
+ n = min(n, src_len);
+ n = min(n, dst_len);
+ n = min_t(size_t, n, copy_len);
+ memcpy(daddr, saddr, n);
+ dst_offs += n;
+ src_offs += n;
+ }
+
+ kunmap_atomic(saddr, s_km_type);
+ kunmap_atomic(daddr, d_km_type);
+
+ res += n;
+ copy_len -= n;
+ if (copy_len == 0)
+ goto out;
+
+ if ((src_offs & ~PAGE_MASK) == 0) {
+ src_page = nth_page(src_page, 1);
+ src_offs = 0;
+ }
+ if ((dst_offs & ~PAGE_MASK) == 0) {
+ dst_page = nth_page(dst_page, 1);
+ dst_offs = 0;
+ }
+
+ src_len -= n;
+ dst_len -= n;
+ if (dst_len == 0) {
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ goto out;
+ dst_page = sg_page(dst_sg);
+ dst_len = dst_sg->length;
+ dst_offs = dst_sg->offset;
+ }
+ } while (src_len > 0);
+
+ src_sg = sg_next(src_sg);
+ } while (src_sg != NULL);
+
+out:
+ return res;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) || !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
+
void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
{
struct scatterlist *src_sg, *dst_sg;
- unsigned int src_sg_cnt, src_len, dst_len, src_offs, dst_offs;
- struct page *src, *dst;
- unsigned int s, d, to_copy;
+ unsigned int to_copy;
+ int atomic = scst_cmd_atomic(cmd);
TRACE_ENTRY();
if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
if (cmd->data_direction != SCST_DATA_BIDI) {
src_sg = cmd->tgt_sg;
- src_sg_cnt = cmd->tgt_sg_cnt;
dst_sg = cmd->sg;
to_copy = cmd->bufflen;
} else {
TRACE_MEM("BIDI cmd %p", cmd);
src_sg = cmd->tgt_in_sg;
- src_sg_cnt = cmd->tgt_in_sg_cnt;
dst_sg = cmd->in_sg;
to_copy = cmd->in_bufflen;
}
} else {
src_sg = cmd->sg;
- src_sg_cnt = cmd->sg_cnt;
dst_sg = cmd->tgt_sg;
to_copy = cmd->resp_data_len;
}
- TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, src_sg_cnt %d, dst_sg %p, "
- "to_copy %d", cmd, copy_dir, src_sg, src_sg_cnt, dst_sg,
- to_copy);
+ TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, "
+ "to_copy %d", cmd, copy_dir, src_sg, dst_sg, to_copy);
if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
/*
goto out;
}
- dst = sg_page(dst_sg);
- dst_len = dst_sg->length;
- dst_offs = dst_sg->offset;
-
- s = 0;
- d = 0;
- src_offs = 0;
- while (s < src_sg_cnt) {
- src = sg_page(&src_sg[s]);
- src_len = src_sg[s].length;
- src_offs += src_sg[s].offset;
-
- do {
- unsigned int n;
-
- /*
- * Himem pages are not allowed here, see the
- * corresponding #warning in scst_main.c. Correct
- * your target driver or dev handler to not alloc
- * such pages!
- */
- EXTRACHECKS_BUG_ON(PageHighMem(dst) ||
- PageHighMem(src));
-
- TRACE_MEM("cmd %p, to_copy %d, src %p, src_len %d, "
- "src_offs %d, dst %p, dst_len %d, dst_offs %d",
- cmd, to_copy, src, src_len, src_offs, dst,
- dst_len, dst_offs);
-
- if ((src_offs == 0) && (dst_offs == 0) &&
- (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE)) {
- copy_page(page_address(dst), page_address(src));
- n = PAGE_SIZE;
- } else {
- n = min(PAGE_SIZE - dst_offs,
- PAGE_SIZE - src_offs);
- n = min(n, src_len);
- n = min(n, dst_len);
- memcpy(page_address(dst) + dst_offs,
- page_address(src) + src_offs, n);
- dst_offs -= min(n, dst_offs);
- src_offs -= min(n, src_offs);
- }
-
- TRACE_MEM("cmd %p, n %d, s %d", cmd, n, s);
-
- to_copy -= n;
- if (to_copy <= 0)
- goto out;
-
- src_len -= n;
- dst_len -= n;
- if (dst_len == 0) {
- d++;
- dst = sg_page(&dst_sg[d]);
- dst_len = dst_sg[d].length;
- dst_offs += dst_sg[d].offset;
- }
- } while (src_len > 0);
-
- s++;
- }
+ blk_copy_sg(dst_sg, src_sg, to_copy, atomic ? KM_SOFTIRQ0 : KM_USER0,
+ atomic ? KM_SOFTIRQ1 : KM_USER1);
out:
TRACE_EXIT();
details."
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
!defined(CONFIG_SCST_STRICT_SERIALIZING)
#warning "Patch scst_exec_req_fifo-<kernel-version> was not applied on\
your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined.\
Pass-through dev handlers will not work."
#endif
+#else
+#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#warning "Patch scst_exec_req_fifo-<kernel-version> was not applied on\
+ your kernel. Pass-through dev handlers will not work."
+#endif
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
#if !defined(SCST_IO_CONTEXT)
if (res != 0)
goto out_error;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED) && \
!defined(CONFIG_SCST_STRICT_SERIALIZING)
if (dev_type->exec == NULL) {
"scst_exec_req_fifo-<kernel-version> or define "
"CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
res = -EINVAL;
- goto out;
+ goto out_error;
}
+#endif
+#else
+#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+ if (dev_type->exec == NULL) {
+ PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
+ "supported. Consider applying on your kernel patch "
+ "scst_exec_req_fifo-<kernel-version>", dev_type->name);
+ res = -EINVAL;
+ goto out_error;
+ }
+#endif
#endif
res = scst_suspend_activity(true);
scsi_do_req_fifo(sreq, cmnd, buffer, bufflen, done, timeout, retries);
#endif
}
-#else
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
static inline int scst_exec_req(struct scsi_device *sdev,
const unsigned char *cmd, int cmd_len, int data_direction,
- void *buffer, unsigned bufflen, int use_sg, int timeout, int retries,
- void *privdata, void (*done)(void *, char *, int, int), gfp_t gfp)
+ struct scatterlist *sgl, unsigned bufflen, unsigned nents,
+ int timeout, int retries, void *privdata,
+ void (*done)(void *, char *, int, int), gfp_t gfp)
{
-#ifdef CONFIG_SCST_STRICT_SERIALIZING
- return scsi_execute_async(sdev, cmd, cmd_len, data_direction, buffer,
- bufflen, use_sg, timeout, retries, privdata, done, gfp);
+#if defined(CONFIG_SCST_STRICT_SERIALIZING)
+ return scsi_execute_async(sdev, cmd, cmd_len, data_direction, (void*)sgl,
+ bufflen, nents, timeout, retries, privdata, done, gfp);
#elif !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
- sBUG();
+ WARN_ON(1);
return -1;
#else
return scsi_execute_async_fifo(sdev, cmd, cmd_len, data_direction,
- buffer, bufflen, use_sg, timeout, retries, privdata, done, gfp);
+ (void*)sgl, bufflen, nents, timeout, retries, privdata, done, gfp);
#endif
}
+#else
+#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#define SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING 2
+static inline int scsi_execute_async(struct scsi_device *sdev,
+ const unsigned char *cmd, int cmd_len, int data_direction,
+ struct scatterlist *sgl, int nents, int timeout, int retries,
+ void *privdata, void (*done)(void *, char *, int, int),
+ gfp_t gfp, int flags)
+{
+ WARN_ON(1);
+ return -1;
+}
+#endif
#endif
int scst_alloc_space(struct scst_cmd *cmd);
cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
cmd->retries);
#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
cmd->timeout, cmd->retries, cmd, scst_cmd_done,
atomic ? GFP_ATOMIC : GFP_KERNEL);
+#else
+ rc = scsi_execute_async(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
+ cmd->data_direction, cmd->sg, cmd->sg_cnt,
+ cmd->timeout, cmd->retries, cmd, scst_cmd_done,
+ atomic ? GFP_ATOMIC : GFP_KERNEL,
+ cmd->tgt_data_buf_alloced ? 0 :
+ SCSI_ASYNC_EXEC_FLAG_HAS_TAIL_SPACE_FOR_PADDING);
+#endif
if (unlikely(rc != 0)) {
if (atomic) {
res = SCST_EXEC_NEED_THREAD;
goto out_restore;
} else {
- PRINT_ERROR("scst_exec_req() failed: %d", res);
+ PRINT_ERROR("scst_exec_req() failed: %x", rc);
goto out_error;
}
}
struct scst_tgt *target;
struct scst_session *session[SCST_LOCAL_MAX_TARGETS];
struct device dev;
+ char init_name[20];
};
#define to_scst_lcl_host(d) \
scst_lcl_host->dev.bus = &scst_fake_lld_bus;
scst_lcl_host->dev.parent = &scst_fake_primary;
scst_lcl_host->dev.release = &scst_local_release_adapter;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
sprintf(scst_lcl_host->dev.bus_id, "scst_adp_%d", scst_local_add_host);
+#else
+ snprintf(scst_lcl_host->init_name, sizeof(scst_lcl_host->init_name),
+ "scst_adp_%d", scst_local_add_host);
+ scst_lcl_host->dev.init_name = scst_lcl_host->init_name;
+#endif
error = device_register(&scst_lcl_host->dev);
if (error)
}
static struct device scst_fake_primary = {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
.bus_id = "scst_fake_0",
+#else
+ .init_name = "scst_fake_0",
+#endif
.release = scst_fake_0_release,
};