Fix for a problem with the 2.6.31 kernel, where blk_rq_append_bio() was un-exported. The local blk_rq_map_kern_sg()/sg-mapping implementation is removed from scst_lib.c; on kernels >= 2.6.30 pass-through dev handlers now require the scst_exec_req_fifo kernel patch (SCSI_EXEC_REQ_FIFO_DEFINED).
author    vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
          Tue, 13 Oct 2009 16:01:33 +0000 (16:01 +0000)
committer vlnb <vlnb@d57e44dd-8a1f-0410-8b47-8ef2f437770f>
          Tue, 13 Oct 2009 16:01:33 +0000 (16:01 +0000)
git-svn-id: https://scst.svn.sourceforge.net/svnroot/scst/trunk@1207 d57e44dd-8a1f-0410-8b47-8ef2f437770f

scst/README
scst/src/scst_lib.c
scst/src/scst_main.c
scst/src/scst_priv.h
scst/src/scst_targ.c

index be3bd5c..8af0ae0 100644 (file)
@@ -63,19 +63,16 @@ them are optional, so, if you don't need the corresponding
 functionality, you may not apply them.
 
 1. scst_exec_req_fifo-2.6.X.patch. This patch is necessary for
-pass-through dev handlers with kernels <2.6.26, because in the them
-scsi_do_req()/scsi_execute_async() work in LIFO order, instead of
-expected and required FIFO. So SCST needs new functions
-scsi_do_req_fifo() or scsi_execute_async_fifo() to be added in the
-kernel. This patch does that. You may not patch the kernel if you don't
-need pass-through support. Alternatively, you can define
-CONFIG_SCST_STRICT_SERIALIZING compile option during the compilation
-(see description below). This patch is optional for kernels starting
-from 2.6.26. On those kernels pass-through will well work without it.
-The patch for them created only for the mainline kernel inclusion.
-(Actually, implementation on scst_scsi_exec_async(), which you can find
-in scst_lib.c for kernels >=2.6.26, can work on the earlier kernels as
-well, so you're welcome to backport it.)
+pass-through dev handlers (scst_disk, scst_tape, etc.). Kernels <2.6.30
+need it, because in them scsi_do_req()/scsi_execute_async() work in LIFO
+order, instead of expected and required FIFO. So, SCST needs new
+functions scsi_do_req_fifo() or scsi_execute_async_fifo() to be added in
+the kernel. Kernels >=2.6.30 need it, because in them there is no
+functionality to execute commands with data in scatter-gather buffers.
+This patch adds the necessary functionality to the kernel. You do not
+need to patch your kernel unless you need pass-through support. Alternatively,
+on kernels <2.6.30 you can define the CONFIG_SCST_STRICT_SERIALIZING compile
+option during the compilation (see description below).
 
 2. io_context-2.6.X.patch. This patch exports some IO context management
 functions from the kernel. For performance reasons SCST queues commands
index 547f0ff..305eda6 100644 (file)
@@ -35,7 +35,7 @@
 
 #include "scst_cdbprobe.h"
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
 struct scsi_io_context {
        unsigned int full_cdb_used:1;
        void *data;
@@ -2958,401 +2958,7 @@ out:
 
 #endif /* !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) && !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED))
-
-#include <linux/pfn.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
-static inline int object_is_on_stack(void *obj)
-{
-       void *stack = task_stack_page(current);
-
-       return (obj >= stack) && (obj < (stack + THREAD_SIZE));
-}
-#endif
-
-struct blk_kern_sg_work {
-       atomic_t bios_inflight;
-       struct sg_table sg_table;
-       struct scatterlist *src_sgl;
-};
-
-static void blk_rq_unmap_kern_sg(struct request *rq, int err);
-
-static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-{
-       TRACE_DBG("Freeing bw %p", bw);
-       sg_free_table(&bw->sg_table);
-       kfree(bw);
-       return;
-}
-
-static void blk_bio_map_kern_endio(struct bio *bio, int err)
-{
-       struct blk_kern_sg_work *bw = bio->bi_private;
-
-       TRACE_DBG("bio %p finished", bio);
-
-       if (bw != NULL) {
-               /* Decrement the bios in processing and, if zero, free */
-               BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-               if (atomic_dec_and_test(&bw->bios_inflight)) {
-                       TRACE_DBG("sgl %p, new_sgl %p, new_sgl_nents %d",
-                               bw->src_sgl, bw->sg_table.sgl,
-                               bw->sg_table.nents);
-                       if ((bio_data_dir(bio) == READ) && (err == 0)) {
-                               unsigned long flags;
-
-                               TRACE_DBG("Copying sgl %p (nents %d) to "
-                                       "orig_sgl %p", bw->sg_table.sgl,
-                                       bw->sg_table.nents, bw->src_sgl);
-
-                               local_irq_save(flags);  /* to protect KMs */
-                               sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-                                       KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-                               local_irq_restore(flags);
-                       }
-                       blk_free_kern_sg_work(bw);
-               }
-       }
-
-       bio_put(bio);
-       return;
-}
-
-static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-                              int nents, struct blk_kern_sg_work **pbw,
-                              gfp_t gfp, gfp_t page_gfp)
-{
-       int res = 0, i;
-       struct scatterlist *sg;
-       struct scatterlist *new_sgl;
-       int new_sgl_nents;
-       size_t len = 0, to_copy;
-       struct blk_kern_sg_work *bw;
-
-       bw = kzalloc(sizeof(*bw), gfp);
-       if (bw == NULL) {
-               PRINT_ERROR("%s", "Unable to alloc blk_kern_sg_work");
-               goto out;
-       }
-
-       bw->src_sgl = sgl;
-
-       for_each_sg(sgl, sg, nents, i)
-               len += sg->length;
-       to_copy = len;
-
-       new_sgl_nents = PFN_UP(len);
-
-       res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-       if (res != 0) {
-               PRINT_ERROR("Unable to alloc copy sg table (nents %d)",
-                       new_sgl_nents);
-               goto out_free_bw;
-       }
-
-       new_sgl = bw->sg_table.sgl;
-
-       TRACE_DBG("sgl %p, nents %d, to_copy %lld, new_sgl %p, new_sgl_nents %d",
-               sgl, nents, (long long)to_copy, new_sgl, new_sgl_nents);
-
-       for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-               struct page *pg;
-
-               pg = alloc_page(page_gfp);
-               if (pg == NULL) {
-                       PRINT_ERROR("Unable to alloc copy page (left %lld)",
-                               (long long)len);
-                       goto err_free_new_sgl;
-               }
-
-               sg_assign_page(sg, pg);
-               sg->length = min_t(size_t, PAGE_SIZE, len);
-
-               len -= PAGE_SIZE;
-       }
-
-       if (rq_data_dir(rq) == WRITE) {
-               /*
-                * We need to limit amount of copied data to to_copy, because
-                * sgl might have the last element in sgl not marked as last in
-                * SG chaining.
-                */
-               TRACE_DBG("Copying sgl %p (nents %d) to new_sgl %p "
-                       "(new_sgl_nents %d), to_copy %lld", sgl, nents,
-                       new_sgl, new_sgl_nents, (long long)to_copy);
-               sg_copy(new_sgl, sgl, 0, to_copy,
-                       KM_USER0, KM_USER1);
-       }
-
-       *pbw = bw;
-       /*
-        * REQ_COPY_USER name is misleading. It should be something like
-        * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-        */
-       rq->cmd_flags |= REQ_COPY_USER;
-
-out:
-       return res;
-
-err_free_new_sgl:
-       for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-               struct page *pg = sg_page(sg);
-               if (pg == NULL)
-                       break;
-               __free_page(pg);
-       }
-       sg_free_table(&bw->sg_table);
-
-out_free_bw:
-       kfree(bw);
-       res = -ENOMEM;
-       goto out;
-}
-
-static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-       int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-{
-       int res = 0;
-       struct request_queue *q = rq->q;
-       int rw = rq_data_dir(rq);
-       int max_nr_vecs, i;
-       size_t tot_len;
-       bool need_new_bio;
-       struct scatterlist *sg, *prev_sg = NULL;
-       struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-       int bios;
-
-       if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-               WARN_ON(1);
-               res = -EINVAL;
-               goto out;
-       }
-
-       /*
-        * Let's keep each bio allocation inside a single page to decrease
-        * probability of failure.
-        */
-       max_nr_vecs =  min_t(size_t,
-               ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-               BIO_MAX_PAGES);
-
-       TRACE_DBG("sgl %p, nents %d, bw %p, max_nr_vecs %d", sgl, nents, bw,
-               max_nr_vecs);
-
-       need_new_bio = true;
-       tot_len = 0;
-       bios = 0;
-       for_each_sg(sgl, sg, nents, i) {
-               struct page *page = sg_page(sg);
-               void *page_addr = page_address(page);
-               size_t len = sg->length, l;
-               size_t offset = sg->offset;
-
-               tot_len += len;
-               prev_sg = sg;
-
-               /*
-                * Each segment must be aligned on DMA boundary and
-                * not on stack. The last one may have unaligned
-                * length as long as the total length is aligned to
-                * DMA padding alignment.
-                */
-               if (i == nents - 1)
-                       l = 0;
-               else
-                       l = len;
-               if (((sg->offset | l) & queue_dma_alignment(q)) ||
-                   (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-                       TRACE_DBG("%s", "DMA alignment or offset don't match");
-                       res = -EINVAL;
-                       goto out_free_bios;
-               }
-
-               while (len > 0) {
-                       size_t bytes;
-                       int rc;
-
-                       if (need_new_bio) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
-                               bio = bio_kmalloc(gfp, max_nr_vecs);
-#else
-                               bio = bio_alloc(gfp, max_nr_vecs);
-#endif
-                               if (bio == NULL) {
-                                       PRINT_ERROR("%s", "Can't to alloc bio");
-                                       res = -ENOMEM;
-                                       goto out_free_bios;
-                               }
-
-                               TRACE_DBG("bio %p alloced", bio);
-
-                               if (rw == WRITE)
-                                       bio->bi_rw |= 1 << BIO_RW;
-
-                               bios++;
-                               bio->bi_private = bw;
-                               bio->bi_end_io = blk_bio_map_kern_endio;
-
-                               if (hbio == NULL)
-                                       hbio = tbio = bio;
-                               else
-                                       tbio = tbio->bi_next = bio;
-                       }
-
-                       bytes = min_t(size_t, len, PAGE_SIZE - offset);
-
-                       rc = bio_add_pc_page(q, bio, page, bytes, offset);
-                       if (rc < bytes) {
-                               if (unlikely(need_new_bio || (rc < 0))) {
-                                       if (rc < 0)
-                                               res = rc;
-                                       else
-                                               res = -EIO;
-                                       PRINT_ERROR("bio_add_pc_page() failed: "
-                                               "%d", rc);
-                                       goto out_free_bios;
-                               } else {
-                                       need_new_bio = true;
-                                       len -= rc;
-                                       offset += rc;
-                                       continue;
-                               }
-                       }
-
-                       need_new_bio = false;
-                       offset = 0;
-                       len -= bytes;
-                       page = nth_page(page, 1);
-               }
-       }
-
-       if (hbio == NULL) {
-               res = -EINVAL;
-               goto out_free_bios;
-       }
-
-       /* Total length must be aligned on DMA padding alignment */
-       if ((tot_len & q->dma_pad_mask) &&
-           !(rq->cmd_flags & REQ_COPY_USER)) {
-               TRACE_DBG("Total len %lld doesn't match DMA pad mask %x",
-                       (long long)tot_len, q->dma_pad_mask);
-               res = -EINVAL;
-               goto out_free_bios;
-       }
-
-       if (bw != NULL)
-               atomic_set(&bw->bios_inflight, bios);
-
-       while (hbio != NULL) {
-               bio = hbio;
-               hbio = hbio->bi_next;
-               bio->bi_next = NULL;
-
-               blk_queue_bounce(q, &bio);
-
-               res = blk_rq_append_bio(q, rq, bio);
-               if (unlikely(res != 0)) {
-                       PRINT_ERROR("blk_rq_append_bio() failed: %d", res);
-                       bio->bi_next = hbio;
-                       hbio = bio;
-                       /* We can have one or more bios bounced */
-                       goto out_unmap_bios;
-               }
-       }
-
-       rq->buffer = NULL;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
-       rq->data = NULL;
-#endif
-out:
-       return res;
-
-out_free_bios:
-       while (hbio != NULL) {
-               bio = hbio;
-               hbio = hbio->bi_next;
-               bio_put(bio);
-       }
-       goto out;
-
-out_unmap_bios:
-       blk_rq_unmap_kern_sg(rq, res);
-       goto out;
-}
-
-/**
- * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
- * @rq:                request to fill
- * @sgl:       area to map
- * @nents:     number of elements in @sgl
- * @gfp:       memory allocation flags
- *
- * Description:
- *    Data will be mapped directly if possible. Otherwise a bounce
- *    buffer will be used.
- */
-static int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-                      int nents, gfp_t gfp)
-{
-       int res;
-
-       res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-       if (unlikely(res != 0)) {
-               struct blk_kern_sg_work *bw = NULL;
-
-               TRACE_DBG("__blk_rq_map_kern_sg() failed: %d", res);
-
-               res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-                               gfp, rq->q->bounce_gfp | gfp);
-               if (unlikely(res != 0))
-                       goto out;
-
-               res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-                               bw->sg_table.nents, bw, gfp);
-               if (res != 0) {
-                       TRACE_DBG("Copied __blk_rq_map_kern_sg() failed: %d",
-                               res);
-                       blk_free_kern_sg_work(bw);
-                       goto out;
-               }
-       }
-
-       rq->buffer = NULL;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
-       rq->data = NULL;
-#endif
-out:
-       return res;
-}
-
-/**
- * blk_rq_unmap_kern_sg - unmap a request with kernel sg
- * @rq:                request to unmap
- * @err:       non-zero error code
- *
- * Description:
- *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
- *    only in case of an error!
- */
-static void blk_rq_unmap_kern_sg(struct request *rq, int err)
-{
-       struct bio *bio = rq->bio;
-
-       while (bio) {
-               struct bio *b = bio;
-               bio = bio->bi_next;
-               b->bi_end_io(b, err);
-       }
-       rq->bio = NULL;
-
-       return;
-}
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) && !(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)) */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)) && defined(SCSI_EXEC_REQ_FIFO_DEFINED)
 static void scsi_end_async(struct request *req, int error)
 {
        struct scsi_io_context *sioc = req->end_io_data;
@@ -3496,7 +3102,7 @@ out_free_sioc:
        goto out;
 }
 
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) && defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
 
 void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
 {
@@ -5361,7 +4967,7 @@ int __init scst_lib_init(void)
 
        scst_scsi_op_list_init();
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
        scsi_io_context_cache = kmem_cache_create("scst_scsi_io_context",
                                        sizeof(struct scsi_io_context),
                                        0, 0, NULL);
@@ -5379,7 +4985,7 @@ out:
 
 void scst_lib_exit(void)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
        BUILD_BUG_ON(SCST_MAX_CDB_SIZE != BLK_MAX_CDB);
        BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < SCSI_SENSE_BUFFERSIZE);
 
index af1272a..56aff4f 100644 (file)
  details."
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
 #if !defined(CONFIG_SCST_STRICT_SERIALIZING)
 #warning "Patch scst_exec_req_fifo-<kernel-version> was not applied on\
  your kernel and CONFIG_SCST_STRICT_SERIALIZING isn't defined.\
  Pass-through dev handlers will not work."
-#endif
-#endif
+#endif /* !defined(CONFIG_SCST_STRICT_SERIALIZING) */
+#else  /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
+#warning "Patch scst_exec_req_fifo-<kernel-version> was not applied on\
+ your kernel. Pass-through dev handlers will not work."
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
+#endif /* !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
 #if !defined(SCST_IO_CONTEXT)
@@ -914,18 +919,24 @@ int __scst_register_dev_driver(struct scst_dev_type *dev_type,
        if (res != 0)
                goto out_error;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
-#if !defined(CONFIG_SCST_STRICT_SERIALIZING)
+#if !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
        if (dev_type->exec == NULL) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
+#if !defined(CONFIG_SCST_STRICT_SERIALIZING)
                PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
                        "supported. Consider applying on your kernel patch "
                        "scst_exec_req_fifo-<kernel-version> or define "
                        "CONFIG_SCST_STRICT_SERIALIZING", dev_type->name);
+#endif /* !defined(CONFIG_SCST_STRICT_SERIALIZING) */
+#else  /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
+               PRINT_ERROR("Pass-through dev handlers (handler \"%s\") not "
+                       "supported. Consider applying on your kernel patch "
+                       "scst_exec_req_fifo-<kernel-version>", dev_type->name);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
                res = -EINVAL;
                goto out_error;
        }
-#endif
-#endif
+#endif /* !defined(SCSI_EXEC_REQ_FIFO_DEFINED) */
 
        res = scst_suspend_activity(true);
        if (res != 0)
index 305a59e..4026412 100644 (file)
@@ -349,7 +349,7 @@ static inline void scst_do_req(struct scsi_request *sreq,
        scsi_do_req_fifo(sreq, cmnd, buffer, bufflen, done, timeout, retries);
 #endif
 }
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
 static inline int scst_exec_req(struct scsi_device *sdev,
        const unsigned char *cmd, int cmd_len, int data_direction,
        struct scatterlist *sgl, unsigned bufflen, unsigned nents,
@@ -360,16 +360,25 @@ static inline int scst_exec_req(struct scsi_device *sdev,
        return scsi_execute_async(sdev, cmd, cmd_len, data_direction, (void *)sgl,
                    bufflen, nents, timeout, retries, privdata, done, gfp);
 #elif !defined(SCSI_EXEC_REQ_FIFO_DEFINED)
-       WARN_ON(1);
+       WARN_ON_ONCE(1);
        return -1;
 #else
        return scsi_execute_async_fifo(sdev, cmd, cmd_len, data_direction,
            (void *)sgl, bufflen, nents, timeout, retries, privdata, done, gfp);
 #endif
 }
-#else /* i.e. LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) */
+#else /* i.e. LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
+#if defined(SCSI_EXEC_REQ_FIFO_DEFINED)
 int scst_scsi_exec_async(struct scst_cmd *cmd,
        void (*done)(void *, char *, int, int));
+#else
+static inline int scst_scsi_exec_async(struct scst_cmd *cmd,
+       void (*done)(void *, char *, int, int))
+{
+       WARN_ON_ONCE(1);
+       return -1;
+}
+#endif
 #endif
 
 int scst_alloc_space(struct scst_cmd *cmd);
index 4d2ef85..7c2f592 100644 (file)
@@ -2032,7 +2032,7 @@ static int scst_do_real_exec(struct scst_cmd *cmd)
                    cmd->scsi_req->sr_bufflen, scst_cmd_done, cmd->timeout,
                    cmd->retries);
 #else
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
        rc = scst_exec_req(dev->scsi_dev, cmd->cdb, cmd->cdb_len,
                        cmd->data_direction, cmd->sg, cmd->bufflen, cmd->sg_cnt,
                        cmd->timeout, cmd->retries, cmd, scst_cmd_done,