Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
author    Linus Torvalds <torvalds@woody.linux-foundation.org>
          Tue, 16 Oct 2007 17:09:16 +0000 (10:09 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Tue, 16 Oct 2007 17:09:16 +0000 (10:09 -0700)
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
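
Most of the series is a mechanical conversion: open-coded scatterlist
walks ("sg++", "sg[i]") assume one flat array and break once lists may
be chained, so they become the chain-aware for_each_sg()/sg_next()
helpers. A minimal sketch of the pattern, using the field names that
appear in the hunks below (the mapping helper is hypothetical):

    #include <linux/scatterlist.h>

    /* Hypothetical per-entry mapping hook, for illustration only. */
    extern dma_addr_t my_map_one(struct page *page, unsigned int offset,
                                 unsigned int len);

    static void my_map_entries(struct scatterlist *sglist, int nents)
    {
            struct scatterlist *sg;
            int i;

            /*
             * for_each_sg() advances with sg_next(), which follows
             * chain links transparently; a bare "sg++" walk is only
             * correct while the list is a single flat array.
             */
            for_each_sg(sglist, sg, nents, i) {
                    sg->dma_address = my_map_one(sg->page, sg->offset,
                                                 sg->length);
                    sg->dma_length = sg->length;
            }
    }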

142 files changed:
Documentation/DMA-mapping.txt
Documentation/HOWTO
Documentation/block/00-INDEX [new file with mode: 0644]
Documentation/block/as-iosched.txt
Documentation/block/biodoc.txt
Documentation/block/deadline-iosched.txt
Documentation/block/ioprio.txt
Documentation/block/request.txt
Documentation/block/switching-sched.txt
arch/ia64/hp/common/sba_iommu.c
arch/ia64/hp/sim/simscsi.c
arch/ia64/sn/pci/pci_dma.c
arch/powerpc/kernel/dma_64.c
arch/powerpc/kernel/ibmebus.c
arch/powerpc/kernel/iommu.c
arch/powerpc/platforms/ps3/system-bus.c
arch/sparc/kernel/ioport.c
arch/sparc/mm/io-unit.c
arch/sparc/mm/iommu.c
arch/sparc/mm/sun4c.c
arch/sparc64/kernel/iommu.c
arch/sparc64/kernel/pci_sun4v.c
arch/x86/kernel/pci-calgary_64.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/pci-nommu_64.c
block/bsg.c
block/elevator.c
block/ll_rw_blk.c
crypto/digest.c
crypto/scatterwalk.c
crypto/scatterwalk.h
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/ide/cris/ide-cris.c
drivers/ide/ide-disk.c
drivers/ide/ide-dma.c
drivers/ide/ide-io.c
drivers/ide/ide-probe.c
drivers/ide/ide-taskfile.c
drivers/ide/mips/au1xxx-ide.c
drivers/ide/pci/sgiioc4.c
drivers/ide/ppc/pmac.c
drivers/infiniband/hw/ipath/ipath_dma.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/md/dm-crypt.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/dm.h
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/message/fusion/mptscsih.c
drivers/message/i2o/i2o_block.c
drivers/mmc/card/queue.c
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_qdio.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-xxxx.c
drivers/scsi/BusLogic.c
drivers/scsi/NCR53c406a.c
drivers/scsi/a100u2w.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aha1542.c
drivers/scsi/aha1740.c
drivers/scsi/aic7xxx/aic79xx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm.c
drivers/scsi/aic7xxx_old.c
drivers/scsi/aic94xx/aic94xx_task.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/dc395x.c
drivers/scsi/dpt_i2o.c
drivers/scsi/eata.c
drivers/scsi/hosts.c
drivers/scsi/hptiop.c
drivers/scsi/ibmmca.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ide-scsi.c
drivers/scsi/initio.c
drivers/scsi/ips.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mac53c94.c
drivers/scsi/megaraid.c
drivers/scsi/megaraid/megaraid_mbox.c
drivers/scsi/megaraid/megaraid_sas.c
drivers/scsi/mesh.c
drivers/scsi/nsp32.c
drivers/scsi/pcmcia/sym53c500_cs.c
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qlogicfas.c
drivers/scsi/qlogicpti.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_tgt_lib.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/stex.c
drivers/scsi/sym53c416.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/scsi/u14-34f.c
drivers/scsi/ultrastor.c
drivers/scsi/wd7000.c
drivers/usb/storage/alauda.c
drivers/usb/storage/datafab.c
drivers/usb/storage/jumpshot.c
drivers/usb/storage/protocol.c
drivers/usb/storage/protocol.h
drivers/usb/storage/sddr09.c
drivers/usb/storage/sddr55.c
drivers/usb/storage/shuttle_usbat.c
fs/bio.c
fs/splice.c
include/asm-ia64/dma-mapping.h
include/asm-ia64/scatterlist.h
include/asm-powerpc/dma-mapping.h
include/asm-powerpc/scatterlist.h
include/asm-sparc/scatterlist.h
include/asm-sparc64/scatterlist.h
include/asm-x86/dma-mapping_32.h
include/asm-x86/dma-mapping_64.h
include/asm-x86/scatterlist_32.h
include/asm-x86/scatterlist_64.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/i2o.h
include/linux/ide.h
include/linux/libata.h
include/linux/scatterlist.h
include/scsi/scsi.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_host.h
lib/swiotlb.c
mm/bounce.c

diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index e07f2530326b3afcbe69e77720833435e75e57d7..3c8ae020b6a7e3aeb183a5690ea3f0938c66c4f0 100644
@@ -514,7 +514,7 @@ With scatterlists, you map a region gathered from several regions by:
        int i, count = pci_map_sg(dev, sglist, nents, direction);
        struct scatterlist *sg;
 
-       for (i = 0, sg = sglist; i < count; i++, sg++) {
+       for_each_sg(sglist, sg, count, i) {
                hw_address[i] = sg_dma_address(sg);
                hw_len[i] = sg_dma_len(sg);
        }
@@ -782,5 +782,5 @@ following people:
        Jay Estabrook <Jay.Estabrook@compaq.com>
        Thomas Sailer <sailer@ife.ee.ethz.ch>
        Andrea Arcangeli <andrea@suse.de>
-       Jens Axboe <axboe@suse.de>
+       Jens Axboe <jens.axboe@oracle.com>
        David Mosberger-Tang <davidm@hpl.hp.com>
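
The DMA-mapping.txt hunk above states the driver-visible rule: entries
may no longer be advanced with pointer arithmetic. An illustrative
sketch of why, assuming the chaining scheme this series introduces (a
conceptual model, not the exact code in include/linux/scatterlist.h):

    #include <linux/scatterlist.h>

    /*
     * Conceptual model: an array segment may end in a "link" entry
     * whose page pointer, low bit set, points at the next segment
     * instead of at data.
     */
    static struct scatterlist *sketch_sg_next(struct scatterlist *sg)
    {
            sg++;                               /* next slot in this segment */
            if ((unsigned long)sg->page & 0x01) /* segment ends in a link */
                    sg = (struct scatterlist *)
                            ((unsigned long)sg->page & ~0x01UL);
            return sg;
    }

A bare sg++ that lands on a link entry would treat the chain pointer
as data; for_each_sg() and sg_next() exist to hide exactly that.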
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index c64e969dc33bb6e511f78984c007052e9b6cbc54..dceb3092149821029161477756d0871a72b8e4dc 100644
@@ -330,7 +330,7 @@ Here is a list of some of the different kernel trees available:
     - ACPI development tree, Len Brown <len.brown@intel.com>
        git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
 
-    - Block development tree, Jens Axboe <axboe@suse.de>
+    - Block development tree, Jens Axboe <jens.axboe@oracle.com>
        git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
 
     - DRM development tree, Dave Airlie <airlied@linux.ie>
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
new file mode 100644
index 0000000..961a051
--- /dev/null
+++ b/Documentation/block/00-INDEX
@@ -0,0 +1,20 @@
+00-INDEX
+       - This file
+as-iosched.txt
+       - Anticipatory IO scheduler
+barrier.txt
+       - I/O Barriers
+biodoc.txt
+       - Notes on the Generic Block Layer Rewrite in Linux 2.5
+capability.txt
+       - Generic Block Device Capability (/sys/block/<disk>/capability)
+deadline-iosched.txt
+       - Deadline IO scheduler tunables
+ioprio.txt
+       - Block io priorities (in CFQ scheduler)
+request.txt
+       - The members of struct request (in include/linux/blkdev.h)
+stat.txt
+       - Block layer statistics in /sys/block/<dev>/stat
+switching-sched.txt
+       - Switching I/O schedulers at runtime
diff --git a/Documentation/block/as-iosched.txt b/Documentation/block/as-iosched.txt
index a598fe10a2974f5757761df5ab8f7f98c5c5f84a..738b72be128e3015c4ee80d9923a283cac20877e 100644
@@ -20,15 +20,10 @@ actually has a head for each physical device in the logical RAID device.
 However, setting the antic_expire (see tunable parameters below) produces
 very similar behavior to the deadline IO scheduler.
 
-
 Selecting IO schedulers
 -----------------------
-To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
-'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
-assigned globally at boot time only presently. It's also possible to change
-the IO scheduler for a determined device on the fly, as described in
-Documentation/block/switching-sched.txt.
-
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
 
 Anticipatory IO scheduler Policies
 ----------------------------------
@@ -115,7 +110,7 @@ statistics (average think time, average seek distance) on the process
 that submitted the just completed request are examined.  If it seems
 likely that that process will submit another request soon, and that
 request is likely to be near the just completed request, then the IO
-scheduler will stop dispatching more read requests for up time (antic_expire)
+scheduler will stop dispatching more read requests for up to (antic_expire)
 milliseconds, hoping that process will submit a new request near the one
 that just completed.  If such a request is made, then it is dispatched
 immediately.  If the antic_expire wait time expires, then the IO scheduler
@@ -165,3 +160,13 @@ The parameters are:
     for big seek time devices though not a linear correspondence - most
     processes have only a few ms thinktime.
 
+In addition to the tunables above there is a read-only file named est_time
+which, when read, will show:
+
+    - The probability of a task exiting without a cooperating task
+      submitting an anticipated IO.
+
+    - The current mean think time.
+
+    - The seek distance used to determine if an incoming IO is better.
+
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index dc3f49e3e5392891f10d700567363b3446358b67..93f223b9723f8c66d81e92edd290f607d5469c34 100644
@@ -2,7 +2,7 @@
        =====================================================
 
 Notes Written on Jan 15, 2002:
-       Jens Axboe <axboe@suse.de>
+       Jens Axboe <jens.axboe@oracle.com>
        Suparna Bhattacharya <suparna@in.ibm.com>
 
 Last Updated May 2, 2002
@@ -21,7 +21,7 @@ Credits:
 ---------
 
 2.5 bio rewrite:
-       Jens Axboe <axboe@suse.de>
+       Jens Axboe <jens.axboe@oracle.com>
 
 Many aspects of the generic block layer redesign were driven by and evolved
 over discussions, prior patches and the collective experience of several
diff --git a/Documentation/block/deadline-iosched.txt b/Documentation/block/deadline-iosched.txt
index be08ffd1e9b82a52504c05ad657ebb4bd895d80b..c23cab13c3d1403a1a2f599a6b26b461296d2234 100644
@@ -5,16 +5,10 @@ This little file attempts to document how the deadline io scheduler works.
 In particular, it will clarify the meaning of the exposed tunables that may be
 of interest to power users.
 
-Each io queue has a set of io scheduler tunables associated with it. These
-tunables control how the io scheduler works. You can find these entries
-in:
-
-/sys/block/<device>/queue/iosched
-
-assuming that you have sysfs mounted on /sys. If you don't have sysfs mounted,
-you can do so by typing:
-
-# mount none /sys -t sysfs
+Selecting IO schedulers
+-----------------------
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
 
 
 ********************************************************************************
@@ -41,14 +35,11 @@ fifo_batch
 
 When a read request expires its deadline, we must move some requests from
 the sorted io scheduler list to the block device dispatch queue. fifo_batch
-controls how many requests we move, based on the cost of each request. A
-request is either qualified as a seek or a stream. The io scheduler knows
-the last request that was serviced by the drive (or will be serviced right
-before this one). See seek_cost and stream_unit.
+controls how many requests we move.
 
 
-write_starved  (number of dispatches)
--------------
+writes_starved (number of dispatches)
+--------------
 
 When we have to move requests from the io scheduler queue to the block
 device dispatch queue, we always give a preference to reads. However, we
@@ -73,6 +64,6 @@ that comes at basically 0 cost we leave that on. We simply disable the
 rbtree front sector lookup when the io scheduler merge function is called.
 
 
-Nov 11 2002, Jens Axboe <axboe@suse.de>
+Nov 11 2002, Jens Axboe <jens.axboe@oracle.com>
 
 
diff --git a/Documentation/block/ioprio.txt b/Documentation/block/ioprio.txt
index 35e516b0b8a9935b91f2c15ab56685b111f1a1d1..8ed8c59380b4056155bbd9a113e455ff038c1ca9 100644
@@ -180,4 +180,4 @@ int main(int argc, char *argv[])
 ---> snip ionice.c tool <---
 
 
-March 11 2005, Jens Axboe <axboe@suse.de>
+March 11 2005, Jens Axboe <jens.axboe@oracle.com>
diff --git a/Documentation/block/request.txt b/Documentation/block/request.txt
index fff58acb40a3b78ebf9af914a7822ccdc0d321c2..754e104ed3699452f36f636bd234d425f87dbad7 100644
@@ -1,7 +1,7 @@
 
 struct request documentation
 
-Jens Axboe <axboe@suse.de> 27/05/02
+Jens Axboe <jens.axboe@oracle.com> 27/05/02
 
 1.0
 Index
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index 5fa130a675312fe9c59a2f0135411ca2bab785d5..634c952e19648b6e5474ff938208852a89a254cb 100644
@@ -1,3 +1,18 @@
+To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
+'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
+assigned globally at boot time only presently.
+
+Each io queue has a set of io scheduler tunables associated with it. These
+tunables control how the io scheduler works. You can find these entries
+in:
+
+/sys/block/<device>/queue/iosched
+
+assuming that you have sysfs mounted on /sys. If you don't have sysfs mounted,
+you can do so by typing:
+
+# mount none /sys -t sysfs
+
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
@@ -20,3 +35,9 @@ noop anticipatory deadline [cfq]
 # echo anticipatory > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
 noop [anticipatory] deadline cfq
+
+Each io queue has a set of io scheduler tunables associated with it. These
+tunables control how the io scheduler works. You can find these entries
+in:
+
+/sys/block/<device>/queue/iosched
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e980e7aa2306eb52ddc51632b48d97d892157704..4338f4123f31688ad44697c17f7605efeaf4e4b3 100644
@@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
                printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
                       startsg->dma_address, startsg->dma_length,
                       sba_sg_address(startsg));
-               startsg++;
+               startsg = sg_next(startsg);
        }
 }
 
@@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
        while (the_nents-- > 0) {
                if (sba_sg_address(the_sg) == 0x0UL)
                        sba_dump_sg(NULL, startsg, nents);
-               the_sg++;
+               the_sg = sg_next(the_sg);
        }
 }
 
@@ -1201,7 +1201,7 @@ sba_fill_pdir(
                        u32 pide = startsg->dma_address & ~PIDE_FLAG;
                        dma_offset = (unsigned long) pide & ~iovp_mask;
                        startsg->dma_address = 0;
-                       dma_sg++;
+                       dma_sg = sg_next(dma_sg);
                        dma_sg->dma_address = pide | ioc->ibase;
                        pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
                        n_mappings++;
@@ -1228,7 +1228,7 @@ sba_fill_pdir(
                                pdirp++;
                        } while (cnt > 0);
                }
-               startsg++;
+               startsg = sg_next(startsg);
        }
        /* force pdir update */
        wmb();
@@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
                while (--nents > 0) {
                        unsigned long vaddr;    /* tmp */
 
-                       startsg++;
+                       startsg = sg_next(startsg);
 
                        /* PARANOID */
                        startsg->dma_address = startsg->dma_length = 0;
@@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #ifdef ALLOW_IOV_BYPASS_SG
        ASSERT(to_pci_dev(dev)->dma_mask);
        if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
-               for (sg = sglist ; filled < nents ; filled++, sg++){
+               for_each_sg(sglist, sg, nents, filled) {
                        sg->dma_length = sg->length;
                        sg->dma_address = virt_to_phys(sba_sg_address(sg));
                }
@@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
        while (nents && sglist->dma_length) {
 
                sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
-               sglist++;
+               sglist = sg_next(sglist);
                nents--;
        }
 
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index d62fa76e5a7d05680a3fcf2d1dc75faee35e88dc..a3a558a0675779e602d9bb43625646cfe2cb02b6 100644
@@ -360,6 +360,7 @@ static struct scsi_host_template driver_template = {
        .max_sectors            = 1024,
        .cmd_per_lun            = SIMSCSI_REQ_QUEUE_LEN,
        .use_clustering         = DISABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 static int __init
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d79ddacfba2d282aa5c7075e1433b92c2f7deae2..ecd8a52b9b9e23a2e9987d2703976983b30af4c0 100644
@@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                     int nhwentries, int direction)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+       struct scatterlist *sg;
 
        BUG_ON(dev->bus != &pci_bus_type);
 
-       for (i = 0; i < nhwentries; i++, sg++) {
+       for_each_sg(sgl, sg, nhwentries, i) {
                provider->dma_unmap(pdev, sg->dma_address, direction);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
@@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
                  int direction)
 {
        unsigned long phys_addr;
-       struct scatterlist *saved_sg = sg;
+       struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
@@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        /*
         * Setup a DMA address for each entry in the scatterlist.
         */
-       for (i = 0; i < nhwentries; i++, sg++) {
+       for_each_sg(sgl, sg, nhwentries, i) {
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                sg->dma_address = provider->dma_map(pdev,
                                                    phys_addr, sg->length,
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 7b0e754383cf53e47affcf4a46acab9b5264761c..9001104b56b0ba983e777b66f21b83cdbdb2eeb7 100644
@@ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
 {
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction)
 {
+       struct scatterlist *sg;
        int i;
 
-       for (i = 0; i < nents; i++, sg++) {
+       for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
                        dma_direct_offset;
                sg->dma_length = sg->length;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 53bf64623bd891ce909f3cd177ab7a2e5df14c77..2e16ca5778a3ac40daf119eb357fc700debbd7b1 100644
@@ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev,
 }
 
 static int ibmebus_map_sg(struct device *dev,
-                         struct scatterlist *sg,
+                         struct scatterlist *sgl,
                          int nents, enum dma_data_direction direction)
 {
+       struct scatterlist *sg;
        int i;
 
-       for (i = 0; i < nents; i++) {
-               sg[i].dma_address = (dma_addr_t)page_address(sg[i].page)
-                       + sg[i].offset;
-               sg[i].dma_length = sg[i].length;
+       for_each_sg(sgl, sg, nents, i) {
+               sg->dma_address = (dma_addr_t)page_address(sg->page)
+                       + sg->offset;
+               sg->dma_length = sg->length;
        }
 
        return nents;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index e4ec6eee81a810d90c45faf22aa154b20f1c63a1..306a6f75b6c5a8004bf5f496cbe1d6b86ed8b589 100644
@@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
-       int outcount, incount;
+       int outcount, incount, i;
        unsigned long handle;
 
        BUG_ON(direction == DMA_NONE);
@@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
        spin_lock_irqsave(&(tbl->it_lock), flags);
 
-       for (s = outs; nelems; nelems--, s++) {
+       for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;
 
                slen = s->length;
@@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        if (novmerge || (dma_addr != dma_next)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
-                               outcount++; outs++;
+                               outcount++;
+                               outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
@@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
-               outs++;
+               outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }
@@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
        return outcount;
 
  failure:
-       for (s = &sglist[0]; s <= outs; s++) {
+       for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;
 
@@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
+               if (s == outs)
+                       break;
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
@@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction)
 {
+       struct scatterlist *sg;
        unsigned long flags;
 
        BUG_ON(direction == DMA_NONE);
@@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
        spin_lock_irqsave(&(tbl->it_lock), flags);
 
+       sg = sglist;
        while (nelems--) {
                unsigned int npages;
-               dma_addr_t dma_handle = sglist->dma_address;
+               dma_addr_t dma_handle = sg->dma_address;
 
-               if (sglist->dma_length == 0)
+               if (sg->dma_length == 0)
                        break;
-               npages = iommu_num_pages(dma_handle,sglist->dma_length);
+               npages = iommu_num_pages(dma_handle, sg->dma_length);
                __iommu_free(tbl, dma_handle, npages);
-               sglist++;
+               sg = sg_next(sg);
        }
 
        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 190ff4b59a557bd5be11d34d3e069b0425bb7572..07e64b48e7fca2d79391466a04a7da5502bf3251 100644
@@ -616,17 +616,18 @@ static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
        }
 }
 
-static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
-       enum dma_data_direction direction)
+static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
+       int nents, enum dma_data_direction direction)
 {
 #if defined(CONFIG_PS3_DYNAMIC_DMA)
        BUG_ON("do");
        return -EPERM;
 #else
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+       struct scatterlist *sg;
        int i;
 
-       for (i = 0; i < nents; i++, sg++) {
+       for_each_sg(sgl, sg, nents, i) {
                int result = ps3_dma_map(dev->d_region,
                        page_to_phys(sg->page) + sg->offset, sg->length,
                                         &sg->dma_address, 0);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 62182d2d7b0de2b1a9b64d8a7316c8cb3d47caf2..9c3ed88853f3d491d09575392bdb8163eb72eef0 100644
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>         /* struct pci_dev */
 #include <linux/proc_fs.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/vaddrs.h>
@@ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev,
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
-       for (n = 0; n < nents; n++) {
+       for_each_sg(sgl, sg, nents, n) {
                BUG_ON(page_address(sg->page) == NULL);
                sg->dvma_address =
                        virt_to_phys(page_address(sg->page)) + sg->offset;
                sg->dvma_length = sg->length;
-               sg++;
        }
        return nents;
 }
@@ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
-               for (n = 0; n < nents; n++) {
+               for_each_sg(sgl, sg, nents, n) {
                        BUG_ON(page_address(sg->page) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg->page),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                       sg++;
                }
        }
 }
@@ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
-               for (n = 0; n < nents; n++) {
+               for_each_sg(sgl, sg, nents, n) {
                        BUG_ON(page_address(sg->page) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg->page),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                       sg++;
                }
        }
 }
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
-               for (n = 0; n < nents; n++) {
+               for_each_sg(sgl, sg, nents, n) {
                        BUG_ON(page_address(sg->page) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg->page),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                       sg++;
                }
        }
 }
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 7c89893b1fe8789e71a05694660540ac943bb4c8..375b4db637046964afa002d700837da448d6674d 100644
@@ -11,8 +11,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>     /* pte_offset_map => kmap_atomic */
 #include <linux/bitops.h>
+#include <linux/scatterlist.h>
 
-#include <asm/scatterlist.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sbus.h>
@@ -144,8 +144,9 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
        spin_lock_irqsave(&iounit->lock, flags);
        while (sz != 0) {
                --sz;
-               sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
-               sg[sz].dvma_length = sg[sz].length;
+               sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+               sg->dvma_length = sg->length;
+               sg = sg_next(sg);
        }
        spin_unlock_irqrestore(&iounit->lock, flags);
 }
@@ -173,11 +174,12 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_
        spin_lock_irqsave(&iounit->lock, flags);
        while (sz != 0) {
                --sz;
-               len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
-               vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+               len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+               vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
                IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
                for (len += vaddr; vaddr < len; vaddr++)
                        clear_bit(vaddr, iounit->bmap);
+               sg = sg_next(sg);
        }
        spin_unlock_irqrestore(&iounit->lock, flags);
 }
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 52e907af9d29e7b248a09b60987432cfe017cb9a..283656d9f6ea77ae5241ea4677b05c47c5080706 100644
@@ -12,8 +12,8 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>     /* pte_offset_map => kmap_atomic */
+#include <linux/scatterlist.h>
 
-#include <asm/scatterlist.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sbus.h>
@@ -240,7 +240,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
-               sg++;
+               sg = sg_next(sg);
        }
 }
 
@@ -254,7 +254,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
-               sg++;
+               sg = sg_next(sg);
        }
 }
 
@@ -285,7 +285,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
 
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
-               sg++;
+               sg = sg_next(sg);
        }
 }
 
@@ -325,7 +325,7 @@ static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
                sg->dvma_address = 0x21212121;
-               sg++;
+               sg = sg_next(sg);
        }
 }
 
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 005a3e72d4f2dbf468b6a31a47d85c8ed507ebab..ee6708fc4492476b2d28ed7293a8fbde47c5cd94 100644
@@ -17,8 +17,8 @@
 #include <linux/highmem.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/scatterlist.h>
 
-#include <asm/scatterlist.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -1228,8 +1228,9 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
 {
        while (sz != 0) {
                --sz;
-               sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
-               sg[sz].dvma_length = sg[sz].length;
+               sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+               sg->dvma_length = sg->length;
+               sg = sg_next(sg);
        }
 }
 
@@ -1244,7 +1245,8 @@ static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
 {
        while (sz != 0) {
                --sz;
-               sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
+               sun4c_unlockarea((char *)sg->dvma_address, sg->length);
+               sg = sg_next(sg);
        }
 }
 
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b35a62167e9ce31af6f957b28902ca0259bf3b5d..db3ffcf7a12036bee200f0ca207cf52bcab95e23 100644
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           unsigned long iopte_protection)
 {
        struct scatterlist *dma_sg = sg;
-       struct scatterlist *sg_end = sg + nelems;
+       struct scatterlist *sg_end = sg_last(sg, nelems);
        int i;
 
        for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
-                               sg++;
+                               sg = sg_next(sg);
                        }
 
                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                        }
 
                        pteval = (pteval & IOPTE_PAGE) + len;
-                       sg++;
+                       sg = sg_next(sg);
 
                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
-                       while (sg < sg_end &&
+                       while (sg != sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
-                               sg++;
+                               sg = sg_next(sg);
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
-               dma_sg++;
+               dma_sg = sg_next(dma_sg);
        }
 }
 
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
-               sgtmp++;
+               sgtmp = sg_next(sgtmp);
                used--;
        }
        used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, ctx, i, npages;
+       struct scatterlist *sg, *sgprv;
        u32 bus_addr;
 
        if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
        bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-       for (i = 1; i < nelems; i++)
-               if (sglist[i].dma_length == 0)
+       sgprv = NULL;
+       for_each_sg(sglist, sg, nelems, i) {
+               if (sg->dma_length == 0)
                        break;
-       i--;
-       npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+               sgprv = sg;
+       }
+
+       npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;
 
        base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
+       struct scatterlist *sg, *sgprv;
        u32 bus_addr;
 
        iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 
        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-       for(i = 1; i < nelems; i++)
-               if (!sglist[i].dma_length)
+       sgprv = NULL;
+       for_each_sg(sglist, sg, nelems, i) {
+               if (sg->dma_length == 0)
                        break;
-       i--;
-       npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+               sgprv = sg;
+       }
+
+       npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 95de1444ee674c912a4fc51cf66a2b0b2f64754b..cacacfae5451a7ec68600a0e1438319dd7b6e4e3 100644
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/msi.h>
 #include <linux/log2.h>
+#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
                           int nused, int nelems, unsigned long prot)
 {
        struct scatterlist *dma_sg = sg;
-       struct scatterlist *sg_end = sg + nelems;
+       struct scatterlist *sg_end = sg_last(sg, nelems);
        unsigned long flags;
        int i;
 
@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
-                               sg++;
+                               sg = sg_next(sg);
                        }
 
                        pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
                        }
 
                        pteval = (pteval & IOPTE_PAGE) + len;
-                       sg++;
+                       sg = sg_next(sg);
 
                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
-                       while (sg < sg_end &&
-                              (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+                       while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
-                               sg++;
+                               if (sg == sg_end)
+                                       break;
+                               sg = sg_next(sg);
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
-               dma_sg++;
+               dma_sg = sg_next(dma_sg);
        }
 
        if (unlikely(iommu_batch_end() < 0L))
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
-               sgtmp++;
+               sgtmp = sg_next(sgtmp);
                used--;
        }
        used = nelems - used;
@@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, i, npages;
+       struct scatterlist *sg, *sgprv;
        long entry;
        u32 devhandle, bus_addr;
 
@@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
        devhandle = pbm->devhandle;
        
        bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-       for (i = 1; i < nelems; i++)
-               if (sglist[i].dma_length == 0)
+       sgprv = NULL;
+       for_each_sg(sglist, sg, nelems, i) {
+               if (sg->dma_length == 0)
                        break;
-       i--;
-       npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+
+               sgprv = sg;
+       }
+
+       npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;
 
        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 71da01e73f038162fe955181baad38f16de1126e..a50b787b3bfab3a8d9f7ba0079217f1b3a2deae1 100644
@@ -35,6 +35,7 @@
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/scatterlist.h>
 #include <asm/iommu.h>
 #include <asm/calgary.h>
 #include <asm/tce.h>
@@ -384,31 +385,32 @@ static void calgary_unmap_sg(struct device *dev,
        struct scatterlist *sglist, int nelems, int direction)
 {
        struct iommu_table *tbl = find_iommu_table(dev);
+       struct scatterlist *s;
+       int i;
 
        if (!translate_phb(to_pci_dev(dev)))
                return;
 
-       while (nelems--) {
+       for_each_sg(sglist, s, nelems, i) {
                unsigned int npages;
-               dma_addr_t dma = sglist->dma_address;
-               unsigned int dmalen = sglist->dma_length;
+               dma_addr_t dma = s->dma_address;
+               unsigned int dmalen = s->dma_length;
 
                if (dmalen == 0)
                        break;
 
                npages = num_dma_pages(dma, dmalen);
                iommu_free(tbl, dma, npages);
-               sglist++;
        }
 }
 
 static int calgary_nontranslate_map_sg(struct device* dev,
        struct scatterlist *sg, int nelems, int direction)
 {
+       struct scatterlist *s;
        int i;
 
-       for (i = 0; i < nelems; i++ ) {
-               struct scatterlist *s = &sg[i];
+       for_each_sg(sg, s, nelems, i) {
                BUG_ON(!s->page);
                s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
                s->dma_length = s->length;
@@ -420,6 +422,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
        int nelems, int direction)
 {
        struct iommu_table *tbl = find_iommu_table(dev);
+       struct scatterlist *s;
        unsigned long vaddr;
        unsigned int npages;
        unsigned long entry;
@@ -428,8 +431,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
        if (!translate_phb(to_pci_dev(dev)))
                return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
 
-       for (i = 0; i < nelems; i++ ) {
-               struct scatterlist *s = &sg[i];
+       for_each_sg(sg, s, nelems, i) {
                BUG_ON(!s->page);
 
                vaddr = (unsigned long)page_address(s->page) + s->offset;
@@ -454,9 +456,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
        return nelems;
 error:
        calgary_unmap_sg(dev, sg, nelems, direction);
-       for (i = 0; i < nelems; i++) {
-               sg[i].dma_address = bad_dma_address;
-               sg[i].dma_length = 0;
+       for_each_sg(sg, s, nelems, i) {
+               sg->dma_address = bad_dma_address;
+               sg->dma_length = 0;
        }
        return 0;
 }
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 4918c575d582cfd901ca23f47e8327a16045998f..cfcc84e6c350886f8fe19db070511ce950afa3cb 100644
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
+#include <linux/scatterlist.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
  */
 static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 {
+       struct scatterlist *s;
        int i;
 
-       for (i = 0; i < nents; i++) {
-               struct scatterlist *s = &sg[i];
+       for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
@@ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
 {
+       struct scatterlist *s;
        int i;
 
 #ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
 #endif
 
-       for (i = 0; i < nents; i++ ) {
-               struct scatterlist *s = &sg[i];
+       for_each_sg(sg, s, nents, i) {
                unsigned long addr = page_to_phys(s->page) + s->offset; 
                if (nonforced_iommu(dev, addr, s->length)) { 
                        addr = dma_map_area(dev, addr, s->length, dir);
@@ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static int __dma_map_cont(struct scatterlist *start, int nelems,
                      struct scatterlist *sout, unsigned long pages)
 {
        unsigned long iommu_start = alloc_iommu(pages);
        unsigned long iommu_page = iommu_start; 
+       struct scatterlist *s;
        int i;
 
        if (iommu_start == -1)
                return -1;
-       
-       for (i = start; i < stopat; i++) {
-               struct scatterlist *s = &sg[i];
+
+       for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;
                
-               BUG_ON(i > start && s->offset);
-               if (i == start) {
+               BUG_ON(s != start && s->offset);
+               if (s == start) {
                        *sout = *s; 
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
@@ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
        return 0;
 }
 
-static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static inline int dma_map_cont(struct scatterlist *start, int nelems,
                      struct scatterlist *sout,
                      unsigned long pages, int need)
 {
-       if (!need) { 
-               BUG_ON(stopat - start != 1);
-               *sout = sg[start]; 
-               sout->dma_length = sg[start].length; 
+       if (!need) {
+               BUG_ON(nelems != 1);
+               *sout = *start;
+               sout->dma_length = start->length;
                return 0;
-       } 
-       return __dma_map_cont(sg, start, stopat, sout, pages);
+       }
+       return __dma_map_cont(start, nelems, sout, pages);
 }
                
 /*
@@ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
        int start;
        unsigned long pages = 0;
        int need = 0, nextneed;
+       struct scatterlist *s, *ps, *start_sg, *sgmap;
 
        if (nents == 0) 
                return 0;
@@ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
        out = 0;
        start = 0;
-       for (i = 0; i < nents; i++) {
-               struct scatterlist *s = &sg[i];
+       start_sg = sgmap = sg;
+       ps = NULL; /* shut up gcc */
+       for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = page_to_phys(s->page) + s->offset;
                s->dma_address = addr;
                BUG_ON(s->length == 0); 
@@ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
                /* Handle the previous not yet processed entries */
                if (i > start) {
-                       struct scatterlist *ps = &sg[i-1];
                        /* Can only merge when the last chunk ends on a page 
                           boundary and the new one doesn't have an offset. */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
-                           (ps->offset + ps->length) % PAGE_SIZE) { 
-                               if (dma_map_cont(sg, start, i, sg+out, pages,
-                                                need) < 0)
+                           (ps->offset + ps->length) % PAGE_SIZE) {
+                               if (dma_map_cont(start_sg, i - start, sgmap,
+                                                 pages, need) < 0)
                                        goto error;
                                out++;
+                               sgmap = sg_next(sgmap);
                                pages = 0;
-                               start = i;      
+                               start = i;
+                               start_sg = s;
                        }
                }
 
                need = nextneed;
                pages += to_pages(s->offset, s->length);
+               ps = s;
        }
-       if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
+       if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
-       if (out < nents) 
-               sg[out].dma_length = 0; 
+       if (out < nents) {
+               sgmap = sg_next(sgmap);
+               sgmap->dma_length = 0;
+       }
        return out;
 
 error:
@@ -437,8 +444,8 @@ error:
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);
        iommu_full(dev, pages << PAGE_SHIFT, dir);
-       for (i = 0; i < nents; i++)
-               sg[i].dma_address = bad_dma_address;
+       for_each_sg(sg, s, nents, i)
+               s->dma_address = bad_dma_address;
        return 0;
 } 
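
One subtlety in the GART conversion above: "the previous entry" can no
longer be computed as &sg[i - 1], because consecutive list entries need
not be adjacent in memory once lists chain, so the rewrite carries an
explicit previous-element pointer (ps) through the loop. A minimal
sketch of that pattern, with a hypothetical merge test:

    #include <linux/scatterlist.h>
    #include <asm/page.h>

    /*
     * Sketch: count the hardware segments a list would need if entries
     * merge whenever the previous one ends on a page boundary. "prev"
     * must be remembered explicitly -- &sgl[i - 1] may point into the
     * wrong array segment.
     */
    static int my_count_segments(struct scatterlist *sgl, int nents)
    {
            struct scatterlist *s, *prev = NULL;
            int i, segments = 0;

            for_each_sg(sgl, s, nents, i) {
                    if (!prev || s->offset ||
                        (prev->offset + prev->length) % PAGE_SIZE)
                            segments++;     /* cannot merge with prev */
                    prev = s;
            }
            return segments;
    }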
 
diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
index 2a34c6c025a920832455dc0c32c4978ee5881ee0..e85d4360360c68fe6db2166194e8a748f709f1f2 100644
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/string.h>
 #include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 #include <asm/processor.h>
@@ -57,10 +58,10 @@ static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
               int nents, int direction)
 {
+       struct scatterlist *s;
        int i;
 
-       for (i = 0; i < nents; i++ ) {
-               struct scatterlist *s = &sg[i];
+       for_each_sg(sg, s, nents, i) {
                BUG_ON(!s->page);
                s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
                if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
diff --git a/block/bsg.c b/block/bsg.c
index b8ddfc66f210aac023479291a95ea1627f396950..8e181ab3afb9a515f38195b450145002a2be044d 100644
@@ -908,7 +908,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
 }
 
-static struct file_operations bsg_fops = {
+static const struct file_operations bsg_fops = {
        .read           =       bsg_read,
        .write          =       bsg_write,
        .poll           =       bsg_poll,
diff --git a/block/elevator.c b/block/elevator.c
index b9c518afe1f8bf61907999f56844c5583a9d0a43..952aee04a68ad7cc240b8ecb7a8a0e1590b9a33f 100644
@@ -712,6 +712,14 @@ struct request *elv_next_request(struct request_queue *q)
        int ret;
 
        while ((rq = __elv_next_request(q)) != NULL) {
+               /*
+                * Kill the empty barrier place holder, the driver must
+                * not ever see it.
+                */
+               if (blk_empty_barrier(rq)) {
+                       end_queued_request(rq, 1);
+                       continue;
+               }
                if (!(rq->cmd_flags & REQ_STARTED)) {
                        /*
                         * This is the first time the device driver
@@ -751,15 +759,8 @@ struct request *elv_next_request(struct request_queue *q)
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
-                       int nr_bytes = rq->hard_nr_sectors << 9;
-
-                       if (!nr_bytes)
-                               nr_bytes = rq->data_len;
-
-                       blkdev_dequeue_request(rq);
                        rq->cmd_flags |= REQ_QUIET;
-                       end_that_request_chunk(rq, 0, nr_bytes);
-                       end_that_request_last(rq, 0);
+                       end_queued_request(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                                                ret);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a83823fcd74f3f90a42d8a470cad9947c5cdd56f..9eabac95fbe053917cb25f7ba1e9fb208b8e94d1 100644
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
 
 /*
  * for max sense size
@@ -304,23 +305,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 EXPORT_SYMBOL(blk_queue_ordered);
 
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q:     the request queue
- * @iff:   the function to be called issuing the flush
- *
- * Description:
- *   If a driver supports issuing a flush command, the support is notified
- *   to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
-       q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
 /*
  * Cache flushing for ordered writes handling
  */
@@ -377,10 +361,12 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
        /*
         * Okay, sequence complete.
         */
-       rq = q->orig_bar_rq;
-       uptodate = q->orderr ? q->orderr : 1;
+       uptodate = 1;
+       if (q->orderr)
+               uptodate = q->orderr;
 
        q->ordseq = 0;
+       rq = q->orig_bar_rq;
 
        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
        end_that_request_last(rq, uptodate);
@@ -445,7 +431,8 @@ static inline struct request *start_ordered(struct request_queue *q,
        rq_init(q, rq);
        if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                rq->cmd_flags |= REQ_RW;
-       rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+       if (q->ordered & QUEUE_ORDERED_FUA)
+               rq->cmd_flags |= REQ_FUA;
        rq->elevator_private = NULL;
        rq->elevator_private2 = NULL;
        init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -455,9 +442,12 @@ static inline struct request *start_ordered(struct request_queue *q,
         * Queue ordered sequence.  As we stack them at the head, we
         * need to queue in reverse order.  Note that we rely on that
         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-        * request gets inbetween ordered sequence.
+        * request gets inbetween ordered sequence. If this request is
+        * an empty barrier, we don't need to do a postflush ever since
+        * there will be no data written between the pre and post flush.
+        * Hence a single flush will suffice.
         */
-       if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+       if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
                queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
@@ -481,7 +471,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 int blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
        struct request *rq = *rqp;
-       int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+       const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
        if (!q->ordseq) {
                if (!is_barrier)
@@ -1329,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
  * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-                 struct scatterlist *sg)
+                 struct scatterlist *sglist)
 {
        struct bio_vec *bvec, *bvprv;
+       struct scatterlist *next_sg, *sg;
        struct req_iterator iter;
        int nsegs, cluster;
 
@@ -1342,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         * for each bio in rq
         */
        bvprv = NULL;
+       sg = next_sg = &sglist[0];
        rq_for_each_segment(bvec, rq, iter) {
                int nbytes = bvec->bv_len;
 
                if (bvprv && cluster) {
-                       if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+                       if (sg->length + nbytes > q->max_segment_size)
                                goto new_segment;
 
                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -1354,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                                goto new_segment;
 
-                       sg[nsegs - 1].length += nbytes;
+                       sg->length += nbytes;
                } else {
 new_segment:
-                       memset(&sg[nsegs],0,sizeof(struct scatterlist));
-                       sg[nsegs].page = bvec->bv_page;
-                       sg[nsegs].length = nbytes;
-                       sg[nsegs].offset = bvec->bv_offset;
+                       sg = next_sg;
+                       next_sg = sg_next(sg);
 
+                       sg->page = bvec->bv_page;
+                       sg->length = nbytes;
+                       sg->offset = bvec->bv_offset;
                        nsegs++;
                }
                bvprv = bvec;
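
For drivers picking up this change: the table filled in by blk_rq_map_sg()
may now be chained across several scatterlist arrays, so indexing it as a
flat array (sg[i]) is no longer valid; sg_next()/for_each_sg() must be used
instead. A minimal sketch, assuming only the generic helpers from
<linux/scatterlist.h> and <linux/dma-mapping.h> (the function itself is
illustrative):

        static void example_show_segments(struct request_queue *q,
                                          struct request *rq,
                                          struct scatterlist *sglist,
                                          struct device *dev)
        {
                struct scatterlist *sg;
                int i, nsegs;

                /* fill sglist; entries may sit in chained arrays */
                nsegs = blk_rq_map_sg(q, rq, sglist);
                nsegs = dma_map_sg(dev, sglist, nsegs, DMA_TO_DEVICE);

                /* for_each_sg() advances with sg_next(), so chains just work */
                for_each_sg(sglist, sg, nsegs, i)
                        printk(KERN_DEBUG "seg %d: 0x%llx+%u\n", i,
                               (unsigned long long)sg_dma_address(sg),
                               sg_dma_len(sg));
        }
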
@@ -2660,6 +2653,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 EXPORT_SYMBOL(blk_execute_rq);
 
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+       if (err)
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+       complete(bio->bi_private);
+}
+
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:      blockdev to issue flush for
@@ -2672,7 +2673,10 @@ EXPORT_SYMBOL(blk_execute_rq);
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
+       DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
+       struct bio *bio;
+       int ret;
 
        if (bdev->bd_disk == NULL)
                return -ENXIO;
@@ -2680,10 +2684,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;
-       if (!q->issue_flush_fn)
-               return -EOPNOTSUPP;
 
-       return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+       bio = bio_alloc(GFP_KERNEL, 0);
+       if (!bio)
+               return -ENOMEM;
+
+       bio->bi_end_io = bio_end_empty_barrier;
+       bio->bi_private = &wait;
+       bio->bi_bdev = bdev;
+       submit_bio(1 << BIO_RW_BARRIER, bio);
+
+       wait_for_completion(&wait);
+
+       /*
+        * The driver must store the error location in ->bi_sector, if
+        * it supports it. For non-stacked drivers, this should be copied
+        * from rq->sector.
+        */
+       if (error_sector)
+               *error_sector = bio->bi_sector;
+
+       ret = 0;
+       if (!bio_flagged(bio, BIO_UPTODATE))
+               ret = -EIO;
+
+       bio_put(bio);
+       return ret;
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
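
With issue_flush_fn gone, blkdev_issue_flush() is the single entry point for
cache flushes and works against any queue that implements barriers. A sketch
of a typical caller, e.g. an fsync path (the function is illustrative, and
error_sector is only meaningful if the driver filled in ->bi_sector as the
comment above describes):

        static int example_flush_cache(struct block_device *bdev)
        {
                sector_t error_sector;
                int err;

                err = blkdev_issue_flush(bdev, &error_sector);
                if (err)
                        printk(KERN_ERR "flush failed (%d) near sector %llu\n",
                               err, (unsigned long long)error_sector);
                return err;
        }
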
@@ -3051,7 +3077,7 @@ static inline void blk_partition_remap(struct bio *bio)
 {
        struct block_device *bdev = bio->bi_bdev;
 
-       if (bdev != bdev->bd_contains) {
+       if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
                const int rw = bio_data_dir(bio);
 
@@ -3117,6 +3143,35 @@ static inline int should_fail_request(struct bio *bio)
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+       sector_t maxsector;
+
+       if (!nr_sectors)
+               return 0;
+
+       /* Test device or partition size, when known. */
+       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+       if (maxsector) {
+               sector_t sector = bio->bi_sector;
+
+               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+                       /*
+                        * This may well happen - the kernel calls bread()
+                        * without checking the size of the device, e.g., when
+                        * mounting a device.
+                        */
+                       handle_bad_sector(bio);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3144,27 +3199,14 @@ static inline int should_fail_request(struct bio *bio)
 static inline void __generic_make_request(struct bio *bio)
 {
        struct request_queue *q;
-       sector_t maxsector;
        sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
 
        might_sleep();
-       /* Test device or partition size, when known. */
-       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-       if (maxsector) {
-               sector_t sector = bio->bi_sector;
 
-               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-                       /*
-                        * This may well happen - the kernel calls bread()
-                        * without checking the size of the device, e.g., when
-                        * mounting a device.
-                        */
-                       handle_bad_sector(bio);
-                       goto end_io;
-               }
-       }
+       if (bio_check_eod(bio, nr_sectors))
+               goto end_io;
 
        /*
         * Resolve the mapping until finished. (drivers are
@@ -3191,7 +3233,7 @@ end_io:
                        break;
                }
 
-               if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+               if (unlikely(nr_sectors > q->max_hw_sectors)) {
                        printk("bio too big device %s (%u > %u)\n", 
                                bdevname(bio->bi_bdev, b),
                                bio_sectors(bio),
@@ -3212,7 +3254,7 @@ end_io:
                blk_partition_remap(bio);
 
                if (old_sector != -1)
-                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
+                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
 
                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
@@ -3220,21 +3262,8 @@ end_io:
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
 
-               maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-               if (maxsector) {
-                       sector_t sector = bio->bi_sector;
-
-                       if (maxsector < nr_sectors ||
-                                       maxsector - nr_sectors < sector) {
-                               /*
-                                * This may well happen - partitions are not
-                                * checked to make sure they are within the size
-                                * of the whole device.
-                                */
-                               handle_bad_sector(bio);
-                               goto end_io;
-                       }
-               }
+               if (bio_check_eod(bio, nr_sectors))
+                       goto end_io;
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
@@ -3307,23 +3336,32 @@ void submit_bio(int rw, struct bio *bio)
 {
        int count = bio_sectors(bio);
 
-       BIO_BUG_ON(!bio->bi_size);
-       BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
-       if (rw & WRITE) {
-               count_vm_events(PGPGOUT, count);
-       } else {
-               task_io_account_read(bio->bi_size);
-               count_vm_events(PGPGIN, count);
-       }
 
-       if (unlikely(block_dump)) {
-               char b[BDEVNAME_SIZE];
-               printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-                       current->comm, current->pid,
-                       (rw & WRITE) ? "WRITE" : "READ",
-                       (unsigned long long)bio->bi_sector,
-                       bdevname(bio->bi_bdev,b));
+       /*
+        * If it's a regular read/write or a barrier with data attached,
+        * go through the normal accounting stuff before submission.
+        */
+       if (!bio_empty_barrier(bio)) {
+
+               BIO_BUG_ON(!bio->bi_size);
+               BIO_BUG_ON(!bio->bi_io_vec);
+
+               if (rw & WRITE) {
+                       count_vm_events(PGPGOUT, count);
+               } else {
+                       task_io_account_read(bio->bi_size);
+                       count_vm_events(PGPGIN, count);
+               }
+
+               if (unlikely(block_dump)) {
+                       char b[BDEVNAME_SIZE];
+                       printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+                               current->comm, current->pid,
+                               (rw & WRITE) ? "WRITE" : "READ",
+                               (unsigned long long)bio->bi_sector,
+                               bdevname(bio->bi_bdev,b));
+               }
        }
 
        generic_make_request(bio);
@@ -3399,6 +3437,14 @@ static int __end_that_request_first(struct request *req, int uptodate,
        while ((bio = req->bio) != NULL) {
                int nbytes;
 
+               /*
+                * For an empty barrier request, the low level driver must
+                * store a potential error location in ->sector. We pass
+                * that back up in ->bi_sector.
+                */
+               if (blk_empty_barrier(req))
+                       bio->bi_sector = req->sector;
+
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
@@ -3564,7 +3610,7 @@ static struct notifier_block blk_cpu_notifier __cpuinitdata = {
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
  *     unless the driver actually implements this in its completion callback
- *     through requeueing. Theh actual completion happens out-of-order,
+ *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
  **/
@@ -3627,15 +3673,83 @@ void end_that_request_last(struct request *req, int uptodate)
 
 EXPORT_SYMBOL(end_that_request_last);
 
-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+                                unsigned int nr_bytes, int dequeue)
 {
-       if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-               add_disk_randomness(req->rq_disk);
-               blkdev_dequeue_request(req);
-               end_that_request_last(req, uptodate);
+       if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+               if (dequeue)
+                       blkdev_dequeue_request(rq);
+               add_disk_randomness(rq->rq_disk);
+               end_that_request_last(rq, uptodate);
        }
 }
 
+static unsigned int rq_byte_size(struct request *rq)
+{
+       if (blk_fs_request(rq))
+               return rq->hard_nr_sectors << 9;
+
+       return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq:                the request being processed
+ * @uptodate:  error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request, and removes it from the block layer queues.
+ *     Not suitable for normal IO completion, unless the driver still has
+ *     the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+       __end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq:                the request being processed
+ * @uptodate:  error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request. The request must already have been
+ *     dequeued using blkdev_dequeue_request(), as is normally the case
+ *     for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+       __end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq:                the request being processed
+ * @uptodate:  error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends I/O on the current segment of a request. If that is the only
+ *     remaining segment, the request is also completed and freed.
+ *
+ *     This is a remnant of how older block drivers handled IO completions.
+ *     Modern drivers typically end IO on the full request in one go, unless
+ *     they have a residual value to account for. For that case this function
+ *     isn't really useful, unless the residual just happens to be the
+ *     full current segment. In other words, don't use this function in new
+ *     code. Either use end_dequeued_request() or end_queued_request(), or
+ *     end_that_request_chunk() (along with end_that_request_last()) for
+ *     partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+       __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
 EXPORT_SYMBOL(end_request);
 
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
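
A sketch of where the two new helpers fit (the driver structure is
illustrative): a request_fn that dequeues before handing work to the
hardware completes with end_dequeued_request(), while errors hit before the
request ever leaves the queue go through end_queued_request(), which
dequeues internally. Here err is 0 on success or a negative errno:

        struct mydisk {
                struct request *rq;     /* in flight, dequeued in request_fn */
        };

        static void mydisk_irq_complete(struct mydisk *md, int err)
        {
                end_dequeued_request(md->rq, err ? err : 1);
        }
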
@@ -3949,7 +4063,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(q->max_phys_segments, page);
+}
+
+static ssize_t queue_max_segments_store(struct request_queue *q,
+                                       const char *page, size_t count)
+{
+       unsigned long segments;
+       ssize_t ret = queue_var_store(&segments, page, count);
+
+       spin_lock_irq(q->queue_lock);
+       q->max_phys_segments = segments;
+       spin_unlock_irq(q->queue_lock);
 
+       return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -3973,6 +4103,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .show = queue_max_hw_sectors_show,
 };
 
+static struct queue_sysfs_entry queue_max_segments_entry = {
+       .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_max_segments_show,
+       .store = queue_max_segments_store,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@ -3984,6 +4120,7 @@ static struct attribute *default_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
+       &queue_max_segments_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
 };
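
The new attribute joins the other queue attributes, so it should surface as
/sys/block/<dev>/queue/max_segments, readable and writable at runtime.
Drivers still establish the initial limit at init time, e.g.:

        /* 128 is an illustrative hardware limit */
        blk_queue_max_phys_segments(q, 128);

Note that the store path above writes straight into q->max_phys_segments
without range checking, so a writer can raise the limit past what the
driver sized its sg tables for.
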
index 1bf7414aeb9e4e212beda804cf270aa310548c5d..e56de6748b155eadf0f749c801e911a9a2d23577 100644 (file)
@@ -77,7 +77,7 @@ static int update2(struct hash_desc *desc,
 
                if (!nbytes)
                        break;
-               sg = sg_next(sg);
+               sg = scatterwalk_sg_next(sg);
        }
 
        return 0;
index 3052f6507f53f3118de2adfc818da7d56e78b3f4..d6852c33cfb78f2ac2300280f90dadc6acb3b5a7 100644 (file)
@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
                walk->offset += PAGE_SIZE - 1;
                walk->offset &= PAGE_MASK;
                if (walk->offset >= walk->sg->offset + walk->sg->length)
-                       scatterwalk_start(walk, sg_next(walk->sg));
+                       scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
        }
 }
 
index 500a220ad908cbad83bc8f1a285a03c169d71961..9c73e37a42cef9ad9eb63133cf78a35ac78ae9fd 100644 (file)
@@ -20,7 +20,7 @@
 
 #include "internal.h"
 
-static inline struct scatterlist *sg_next(struct scatterlist *sg)
+static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
 {
        return (++sg)->length ? sg : (void *)sg->page;
 }
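
The rename avoids a clash with the new generic sg_next() while preserving
the crypto layer's private chaining convention, which can be read off the
helper above: a zero-length entry terminates one array and its page field
holds a pointer to the next. A sketch of building such a chain (buffers
and lengths illustrative):

        struct scatterlist first[2], second[1];

        sg_set_buf(&second[0], buf2, len2);
        sg_set_buf(&first[0], buf1, len1);
        first[1].length = 0;                    /* terminator entry ... */
        first[1].page = (struct page *)second;  /* ... doubles as the link */

scatterwalk_sg_next(&first[0]) then yields &second[0].
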
index 68699b3e799843249086f7ae4f944b6dd893fe1a..bbaa545ea999817200366d54f3ea4b65cb6f32af 100644 (file)
@@ -1410,7 +1410,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  */
 unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
-                             int dma_dir, struct scatterlist *sg,
+                             int dma_dir, struct scatterlist *sgl,
                              unsigned int n_elem, unsigned long timeout)
 {
        struct ata_link *link = dev->link;
@@ -1472,11 +1472,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;
+               struct scatterlist *sg;
 
-               for (i = 0; i < n_elem; i++)
-                       buflen += sg[i].length;
+               for_each_sg(sgl, sg, n_elem, i)
+                       buflen += sg->length;
 
-               ata_sg_init(qc, sg, n_elem);
+               ata_sg_init(qc, sgl, n_elem);
                qc->nbytes = buflen;
        }
 
@@ -4292,7 +4293,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
                if (qc->n_elem)
                        dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
                /* restore last sg */
-               sg[qc->orig_n_elem - 1].length += qc->pad_len;
+               sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
                if (pad_buf) {
                        struct scatterlist *psg = &qc->pad_sgent;
                        void *addr = kmap_atomic(psg->page, KM_IRQ0);
@@ -4547,6 +4548,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
        qc->orig_n_elem = 1;
        qc->buf_virt = buf;
        qc->nbytes = buflen;
+       qc->cursg = qc->__sg;
 
        sg_init_one(&qc->sgent, buf, buflen);
 }
@@ -4572,6 +4574,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
        qc->__sg = sg;
        qc->n_elem = n_elem;
        qc->orig_n_elem = n_elem;
+       qc->cursg = qc->__sg;
 }
 
 /**
@@ -4661,7 +4664,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg = qc->__sg;
-       struct scatterlist *lsg = &sg[qc->n_elem - 1];
+       struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
        int n_elem, pre_n_elem, dir, trim_sg = 0;
 
        VPRINTK("ENTER, ata%u\n", ap->print_id);
@@ -4825,7 +4828,6 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-       struct scatterlist *sg = qc->__sg;
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned int offset;
@@ -4834,8 +4836,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
        if (qc->curbytes == qc->nbytes - qc->sect_size)
                ap->hsm_task_state = HSM_ST_LAST;
 
-       page = sg[qc->cursg].page;
-       offset = sg[qc->cursg].offset + qc->cursg_ofs;
+       page = qc->cursg->page;
+       offset = qc->cursg->offset + qc->cursg_ofs;
 
        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -4863,8 +4865,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
        qc->curbytes += qc->sect_size;
        qc->cursg_ofs += qc->sect_size;
 
-       if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
-               qc->cursg++;
+       if (qc->cursg_ofs == qc->cursg->length) {
+               qc->cursg = sg_next(qc->cursg);
                qc->cursg_ofs = 0;
        }
 }
@@ -4950,16 +4952,18 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
        int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct scatterlist *sg = qc->__sg;
+       struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned char *buf;
        unsigned int offset, count;
+       int no_more_sg = 0;
 
        if (qc->curbytes + bytes >= qc->nbytes)
                ap->hsm_task_state = HSM_ST_LAST;
 
 next_sg:
-       if (unlikely(qc->cursg >= qc->n_elem)) {
+       if (unlikely(no_more_sg)) {
                /*
                 * The end of qc->sg is reached and the device expects
                 * more data to transfer. In order not to overrun qc->sg
@@ -4982,7 +4986,7 @@ next_sg:
                return;
        }
 
-       sg = &qc->__sg[qc->cursg];
+       sg = qc->cursg;
 
        page = sg->page;
        offset = sg->offset + qc->cursg_ofs;
@@ -5021,7 +5025,10 @@ next_sg:
        qc->cursg_ofs += count;
 
        if (qc->cursg_ofs == sg->length) {
-               qc->cursg++;
+               if (qc->cursg == lsg)
+                       no_more_sg = 1;
+
+               qc->cursg = sg_next(qc->cursg);
                qc->cursg_ofs = 0;
        }
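
The same conversion in generic form: an integer index cannot address the
entries of a chained list, so the cursor becomes a pointer advanced with
sg_next() (bounded with sg_last() where the end matters, as above). A
sketch, with the cursor structure purely illustrative:

        struct sg_cursor {
                struct scatterlist *sg;  /* current segment */
                unsigned int ofs;        /* bytes consumed within it */
        };

        static void cursor_advance(struct sg_cursor *cur, unsigned int bytes)
        {
                cur->ofs += bytes;
                if (cur->ofs == cur->sg->length) {
                        /* sg_next() follows chain entries transparently */
                        cur->sg = sg_next(cur->sg);
                        cur->ofs = 0;
                }
        }
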
 
index 5237a491622ba021b0b3f073bc22cee09e196673..9fbb39cd0f5892414a6cb72d4378de8c99da960f 100644 (file)
@@ -801,8 +801,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 
        ata_scsi_sdev_config(sdev);
 
-       blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
-
        sdev->manage_start_stop = 1;
 
        if (dev)
index 55c3237fb1bc8792df398d4f18531a48f9da3cb1..3fb7e8bc436d1b39eb579efc5ddc7b1222d69e4b 100644 (file)
@@ -1191,7 +1191,6 @@ static inline void complete_buffers(struct bio *bio, int status)
 {
        while (bio) {
                struct bio *xbh = bio->bi_next;
-               int nr_sectors = bio_sectors(bio);
 
                bio->bi_next = NULL;
                bio_endio(bio, status ? 0 : -EIO);
@@ -2570,6 +2569,7 @@ static void do_cciss_request(struct request_queue *q)
               (int)creq->nr_sectors);
 #endif                         /* CCISS_DEBUG */
 
+       memset(tmp_sg, 0, sizeof(tmp_sg));
        seg = blk_rq_map_sg(q, creq, tmp_sg);
 
        /* get the DMA records for the setup */
index 3853c9a38d6a1dd4457ccb445b804546f07a609a..568603d3043e66d1743437095b5fb28c7008596b 100644 (file)
@@ -981,9 +981,8 @@ static void start_io(ctlr_info_t *h)
 static inline void complete_buffers(struct bio *bio, int ok)
 {
        struct bio *xbh;
-       while(bio) {
-               int nr_sectors = bio_sectors(bio);
 
+       while (bio) {
                xbh = bio->bi_next;
                bio->bi_next = NULL;
                
index 540bf3676985bc10d8fff8bff74db9dbef9a0835..a8130a4ad6d4329572b722b2947059db776fa8e2 100644 (file)
@@ -1133,16 +1133,21 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
         * Schedule reads for missing parts of the packet.
         */
        for (f = 0; f < pkt->frames; f++) {
+               struct bio_vec *vec;
+
                int p, offset;
                if (written[f])
                        continue;
                bio = pkt->r_bios[f];
+               vec = bio->bi_io_vec;
                bio_init(bio);
                bio->bi_max_vecs = 1;
                bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
                bio->bi_bdev = pd->bdev;
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;
+               bio->bi_io_vec = vec;
+               bio->bi_destructor = pkt_bio_destructor;
 
                p = (f * CD_FRAMESIZE) / PAGE_SIZE;
                offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1439,6 +1444,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
        pkt->w_bio->bi_bdev = pd->bdev;
        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;
+       pkt->w_bio->bi_io_vec = bvec;
+       pkt->w_bio->bi_destructor = pkt_bio_destructor;
        for (f = 0; f < pkt->frames; f++)
                if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
                        BUG();
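
Both pktcdvd hunks fix the same subtlety: bio_init() zeroes the whole bio,
so a driver recycling bios it allocated itself must restore the fields it
owns afterwards, and set a destructor so bio_put() hands the bio back to
the driver rather than to the bio allocator. The pattern in isolation
(my_bio_destructor stands in for pkt_bio_destructor):

        static void my_bio_destructor(struct bio *bio)
        {
                /* return the bio to the driver's own pool; body elided */
        }

        static void example_reuse_bio(struct bio *bio)
        {
                struct bio_vec *vec = bio->bi_io_vec;   /* driver-owned */

                bio_init(bio);          /* memsets the bio, dropping both */
                bio->bi_max_vecs = 1;
                bio->bi_io_vec = vec;   /* restore what bio_init() wiped */
                bio->bi_destructor = my_bio_destructor;
        }
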
index 06d0552cf49cb228b149ecb4d19d612a6e50b87b..e354bfc070e1e8d2e83ce8abd843d9ed7b965a3f 100644 (file)
@@ -414,26 +414,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
        req->cmd_type = REQ_TYPE_FLUSH;
 }
 
-static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
-                              sector_t *sector)
-{
-       struct ps3_storage_device *dev = q->queuedata;
-       struct request *req;
-       int res;
-
-       dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
-       req = blk_get_request(q, WRITE, __GFP_WAIT);
-       ps3disk_prepare_flush(q, req);
-       res = blk_execute_rq(q, gendisk, req, 0);
-       if (res)
-               dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
-                       __func__, __LINE__, res);
-       blk_put_request(req);
-       return res;
-}
-
-
 static unsigned long ps3disk_mask;
 
 static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -506,7 +486,6 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
        blk_queue_dma_alignment(queue, dev->blk_size-1);
        blk_queue_hardsect_size(queue, dev->blk_size);
 
-       blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
        blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
                          ps3disk_prepare_flush);
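
The driver side shrinks accordingly: with issue_flush_fn gone, registering
an ordered mode plus a prepare_flush_fn is all that remains, and
blkdev_issue_flush() now reaches the device through the empty-barrier path
instead. A sketch of the surviving hook (names illustrative):

        static void mydisk_prepare_flush(struct request_queue *q,
                                         struct request *rq)
        {
                rq->cmd_type = REQ_TYPE_FLUSH;
        }

        /* at probe time: */
        blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
                          mydisk_prepare_flush);
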
 
index 2b4d2a0ae5c26e1d7bf7f42b1630b0c36a6332ab..c306c9f534ab6600e665669f44d2bc05377ad2f2 100644 (file)
@@ -939,7 +939,8 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
                /* group sequential buffers into one large buffer */
                addr = page_to_phys(sg->page) + sg->offset;
                size = sg_dma_len(sg);
-               while (sg++, --i) {
+               while (--i) {
+                       sg = sg_next(sg);
                        if ((addr + size) != page_to_phys(sg->page) + sg->offset)
                                break;
                        size += sg_dma_len(sg);
index 4754769eda9781ee2531b46f8bcbf42167e41685..92177ca48b4de7edb5e828339059fcb05e854490 100644 (file)
@@ -716,32 +716,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
        rq->buffer = rq->cmd;
 }
 
-static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
-                              sector_t *error_sector)
-{
-       ide_drive_t *drive = q->queuedata;
-       struct request *rq;
-       int ret;
-
-       if (!drive->wcache)
-               return 0;
-
-       rq = blk_get_request(q, WRITE, __GFP_WAIT);
-
-       idedisk_prepare_flush(q, rq);
-
-       ret = blk_execute_rq(q, disk, rq, 0);
-
-       /*
-        * if we failed and caller wants error offset, get it
-        */
-       if (ret && error_sector)
-               *error_sector = ide_get_error_location(drive, rq->cmd);
-
-       blk_put_request(rq);
-       return ret;
-}
-
 /*
  * This is tightly woven into the driver->do_special can not touch.
  * DON'T do it again until a total personality rewrite is committed.
@@ -781,7 +755,6 @@ static void update_ordered(ide_drive_t *drive)
        struct hd_driveid *id = drive->id;
        unsigned ordered = QUEUE_ORDERED_NONE;
        prepare_flush_fn *prep_fn = NULL;
-       issue_flush_fn *issue_fn = NULL;
 
        if (drive->wcache) {
                unsigned long long capacity;
@@ -805,13 +778,11 @@ static void update_ordered(ide_drive_t *drive)
                if (barrier) {
                        ordered = QUEUE_ORDERED_DRAIN_FLUSH;
                        prep_fn = idedisk_prepare_flush;
-                       issue_fn = idedisk_issue_flush;
                }
        } else
                ordered = QUEUE_ORDERED_DRAIN;
 
        blk_queue_ordered(drive->queue, ordered, prep_fn);
-       blk_queue_issue_flush_fn(drive->queue, issue_fn);
 }
 
 static int write_cache(ide_drive_t *drive, int arg)
index b453211ee0fc1364cebe09be3bd0296bf6500207..a4cbbbaccde9d7bea55537d6b8da755996751ea0 100644 (file)
@@ -280,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
                        }
                }
 
-               sg++;
+               sg = sg_next(sg);
                i--;
        }
 
index 4cece930114cf637cbacfc239b0a6aa3f083e5b6..04273d3c147c09ecead03f5e9315740a530d1d03 100644 (file)
@@ -322,41 +322,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
        spin_unlock_irqrestore(&ide_lock, flags);
 }
 
-/*
- * FIXME: probably move this somewhere else, name is bad too :)
- */
-u64 ide_get_error_location(ide_drive_t *drive, char *args)
-{
-       u32 high, low;
-       u8 hcyl, lcyl, sect;
-       u64 sector;
-
-       high = 0;
-       hcyl = args[5];
-       lcyl = args[4];
-       sect = args[3];
-
-       if (ide_id_has_flush_cache_ext(drive->id)) {
-               low = (hcyl << 16) | (lcyl << 8) | sect;
-               HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
-               high = ide_read_24(drive);
-       } else {
-               u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
-               if (cur & 0x40) {
-                       high = cur & 0xf;
-                       low = (hcyl << 16) | (lcyl << 8) | sect;
-               } else {
-                       low = hcyl * drive->head * drive->sect;
-                       low += lcyl * drive->sect;
-                       low += sect - 1;
-               }
-       }
-
-       sector = ((u64) high << 24) | low;
-       return sector;
-}
-EXPORT_SYMBOL(ide_get_error_location);
-
 /**
  *     ide_end_drive_cmd       -       end an explicit drive command
  *     @drive: command 
@@ -881,7 +846,8 @@ void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
        ide_hwif_t *hwif = drive->hwif;
 
        hwif->nsect = hwif->nleft = rq->nr_sectors;
-       hwif->cursg = hwif->cursg_ofs = 0;
+       hwif->cursg_ofs = 0;
+       hwif->cursg = NULL;
 }
 
 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
index d1011712601c9a01913d143200c615d713d4bae2..34b1fb65bc79e3c5d396721b74859715474642d7 100644 (file)
@@ -1349,7 +1349,7 @@ static int hwif_init(ide_hwif_t *hwif)
        if (!hwif->sg_max_nents)
                hwif->sg_max_nents = PRD_ENTRIES;
 
-       hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+       hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
                                 GFP_KERNEL);
        if (!hwif->sg_table) {
                printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
index aa06dafb74acb09dd4383e5a3c7421dcc13db2b7..2a3c8d498343ff417f398d1fb4b93e8678dbf82f 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/hdreg.h>
 #include <linux/ide.h>
 #include <linux/bitops.h>
+#include <linux/scatterlist.h>
 
 #include <asm/byteorder.h>
 #include <asm/irq.h>
@@ -263,6 +264,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
+       struct scatterlist *cursg = hwif->cursg;
        struct page *page;
 #ifdef CONFIG_HIGHMEM
        unsigned long flags;
@@ -270,8 +272,14 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
        unsigned int offset;
        u8 *buf;
 
-       page = sg[hwif->cursg].page;
-       offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE;
+       cursg = hwif->cursg;
+       if (!cursg) {
+               cursg = sg;
+               hwif->cursg = sg;
+       }
+
+       page = cursg->page;
+       offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
 
        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -285,8 +293,8 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
        hwif->nleft--;
        hwif->cursg_ofs++;
 
-       if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
-               hwif->cursg++;
+       if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
+               hwif->cursg = sg_next(hwif->cursg);
                hwif->cursg_ofs = 0;
        }
 
@@ -367,6 +375,8 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
 
 static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
 {
+       HWIF(drive)->cursg = NULL;
+
        if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *task = rq->special;
 
index aebde49365d16a68b28fdce811a95052c3262e1b..892d08f61dc0b64f3200b7c8ba39c2b9bbc8fbc8 100644 (file)
@@ -296,7 +296,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
                        cur_addr += tc;
                        cur_len -= tc;
                }
-               sg++;
+               sg = sg_next(sg);
                i--;
        }
 
index 85ffaaa39b1b87eecc84e1f87a4d8911fdfa2dfd..c74fef6bbc916058cf0dc56f1c78200d59d7401a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/ioport.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
 #include <linux/ioc4.h>
 #include <asm/io.h>
 
@@ -537,7 +538,7 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
                        }
                }
 
-               sg++;
+               sg = sg_next(sg);
                i--;
        }
 
index 7d8873839e2103d8af49c7dd0fe73260c23af88a..9e86406bf44b7c6ebfc700cdbc16dc358dd50bc0 100644 (file)
@@ -1539,7 +1539,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
                        cur_len -= tc;
                        ++table;
                }
-               sg++;
+               sg = sg_next(sg);
                i--;
        }
 
index f87f003e3ef887845d6d0da293b7df24f6874da4..22709a4f8fc89304672b12de18f29d8f2a981871 100644 (file)
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/scatterlist.h>
 #include <rdma/ib_verbs.h>
 
 #include "ipath_verbs.h"
@@ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev,
        BUG_ON(!valid_dma_direction(direction));
 }
 
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
-                       enum dma_data_direction direction)
+static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+                       int nents, enum dma_data_direction direction)
 {
+       struct scatterlist *sg;
        u64 addr;
        int i;
        int ret = nents;
 
        BUG_ON(!valid_dma_direction(direction));
 
-       for (i = 0; i < nents; i++) {
-               addr = (u64) page_address(sg[i].page);
+       for_each_sg(sgl, sg, nents, i) {
+               addr = (u64) page_address(sg->page);
                /* TODO: handle highmem pages */
                if (!addr) {
                        ret = 0;
index e05690e3592ffeefc154e69009053588d361d434..f3529b6f0a337261e3eb275f5b0c48693a747054 100644 (file)
@@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
        if (cmd_dir == ISER_DIR_OUT) {
                /* copy the unaligned sg the buffer which is used for RDMA */
-               struct scatterlist *sg = (struct scatterlist *)data->buf;
+               struct scatterlist *sgl = (struct scatterlist *)data->buf;
+               struct scatterlist *sg;
                int i;
                char *p, *from;
 
-               for (p = mem, i = 0; i < data->size; i++) {
-                       from = kmap_atomic(sg[i].page, KM_USER0);
+               p = mem;
+               for_each_sg(sgl, sg, data->size, i) {
+                       from = kmap_atomic(sg->page, KM_USER0);
                        memcpy(p,
-                              from + sg[i].offset,
-                              sg[i].length);
+                              from + sg->offset,
+                              sg->length);
                        kunmap_atomic(from, KM_USER0);
-                       p += sg[i].length;
+                       p += sg->length;
                }
        }
 
@@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
        if (cmd_dir == ISER_DIR_IN) {
                char *mem;
-               struct scatterlist *sg;
+               struct scatterlist *sgl, *sg;
                unsigned char *p, *to;
                unsigned int sg_size;
                int i;
@@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                /* copy back read RDMA to unaligned sg */
                mem     = mem_copy->copy_buf;
 
-               sg      = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
+               sgl     = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
                sg_size = iser_ctask->data[ISER_DIR_IN].size;
 
-               for (p = mem, i = 0; i < sg_size; i++){
-                       to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
-                       memcpy(to + sg[i].offset,
+               p = mem;
+               for_each_sg(sgl, sg, sg_size, i) {
+                       to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+                       memcpy(to + sg->offset,
                               p,
-                              sg[i].length);
+                              sg->length);
                        kunmap_atomic(to, KM_SOFTIRQ0);
-                       p += sg[i].length;
+                       p += sg->length;
                }
        }
 
@@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
                               struct iser_page_vec *page_vec,
                               struct ib_device *ibdev)
 {
-       struct scatterlist *sg = (struct scatterlist *)data->buf;
+       struct scatterlist *sgl = (struct scatterlist *)data->buf;
+       struct scatterlist *sg;
        u64 first_addr, last_addr, page;
        int end_aligned;
        unsigned int cur_page = 0;
@@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
        int i;
 
        /* compute the offset of first element */
-       page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
+       page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-       for (i = 0; i < data->dma_nents; i++) {
-               unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+       for_each_sg(sgl, sg, data->dma_nents, i) {
+               unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
                total_sz += dma_len;
 
-               first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+               first_addr = ib_sg_dma_address(ibdev, sg);
                last_addr  = first_addr + dma_len;
 
                end_aligned   = !(last_addr  & ~MASK_4K);
 
                /* continue to collect page fragments till aligned or SG ends */
                while (!end_aligned && (i + 1 < data->dma_nents)) {
+                       sg = sg_next(sg);
                        i++;
-                       dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+                       dma_len = ib_sg_dma_len(ibdev, sg);
                        total_sz += dma_len;
-                       last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
+                       last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
                        end_aligned = !(last_addr  & ~MASK_4K);
                }
 
@@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
                                              struct ib_device *ibdev)
 {
-       struct scatterlist *sg;
+       struct scatterlist *sgl, *sg;
        u64 end_addr, next_addr;
        int i, cnt;
        unsigned int ret_len = 0;
 
-       sg = (struct scatterlist *)data->buf;
+       sgl = (struct scatterlist *)data->buf;
 
-       for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
+       cnt = 0;
+       for_each_sg(sgl, sg, data->dma_nents, i) {
                /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
                   "offset: %ld sz: %ld\n", i,
-                  (unsigned long)page_to_phys(sg[i].page),
-                  (unsigned long)sg[i].offset,
-                  (unsigned long)sg[i].length); */
-               end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
-                          ib_sg_dma_len(ibdev, &sg[i]);
+                  (unsigned long)page_to_phys(sg->page),
+                  (unsigned long)sg->offset,
+                  (unsigned long)sg->length); */
+               end_addr = ib_sg_dma_address(ibdev, sg) +
+                          ib_sg_dma_len(ibdev, sg);
                /* iser_dbg("Checking sg iobuf end address "
                       "0x%08lX\n", end_addr); */
                if (i + 1 < data->dma_nents) {
-                       next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
+                       next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
                        /* are i, i+1 fragments of the same page? */
                        if (end_addr == next_addr)
                                continue;
@@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
                               struct ib_device *ibdev)
 {
-       struct scatterlist *sg = (struct scatterlist *)data->buf;
+       struct scatterlist *sgl = (struct scatterlist *)data->buf;
+       struct scatterlist *sg;
        int i;
 
-       for (i = 0; i < data->dma_nents; i++)
+       for_each_sg(sgl, sg, data->dma_nents, i)
                iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
                         "off:0x%x sz:0x%x dma_len:0x%x\n",
-                        i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
-                        sg[i].page, sg[i].offset,
-                        sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
+                        i, (unsigned long)ib_sg_dma_address(ibdev, sg),
+                        sg->page, sg->offset,
+                        sg->length, ib_sg_dma_len(ibdev, sg));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
index 8216a6f75be57fcd234e50a21b3491c6054d9a80..64fee90bb68b92f5ac897c9ebe3fff3f0e578880 100644 (file)
@@ -441,33 +441,12 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
        return clone;
 }
 
-static void crypt_free_buffer_pages(struct crypt_config *cc,
-                                    struct bio *clone, unsigned int bytes)
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 {
-       unsigned int i, start, end;
+       unsigned int i;
        struct bio_vec *bv;
 
-       /*
-        * This is ugly, but Jens Axboe thinks that using bi_idx in the
-        * endio function is too dangerous at the moment, so I calculate the
-        * correct position using bi_vcnt and bi_size.
-        * The bv_offset and bv_len fields might already be modified but we
-        * know that we always allocated whole pages.
-        * A fix to the bi_idx issue in the kernel is in the works, so
-        * we will hopefully be able to revert to the cleaner solution soon.
-        */
-       i = clone->bi_vcnt - 1;
-       bv = bio_iovec_idx(clone, i);
-       end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
-       start = end - bytes;
-
-       start >>= PAGE_SHIFT;
-       if (!clone->bi_size)
-               end = clone->bi_vcnt;
-       else
-               end >>= PAGE_SHIFT;
-
-       for (i = start; i < end; i++) {
+       for (i = 0; i < clone->bi_vcnt; i++) {
                bv = bio_iovec_idx(clone, i);
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
@@ -519,7 +498,7 @@ static void crypt_endio(struct bio *clone, int error)
         * free the processed pages
         */
        if (!read_io) {
-               crypt_free_buffer_pages(cc, clone, clone->bi_size);
+               crypt_free_buffer_pages(cc, clone);
                goto out;
        }
 
@@ -608,7 +587,7 @@ static void process_write(struct dm_crypt_io *io)
                ctx.idx_out = 0;
 
                if (unlikely(crypt_convert(cc, &ctx) < 0)) {
-                       crypt_free_buffer_pages(cc, clone, clone->bi_size);
+                       crypt_free_buffer_pages(cc, clone);
                        bio_put(clone);
                        dec_pending(io, -EIO);
                        return;
index 2bcde5798b5a6c27dc9b07b6ea9325a88dfdf5f8..fbe477bb2c68ca4005a7f606af18a715edbfc5f9 100644 (file)
@@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
        }
 }
 
-int dm_table_flush_all(struct dm_table *t)
-{
-       struct list_head *d, *devices = dm_table_get_devices(t);
-       int ret = 0;
-       unsigned i;
-
-       for (i = 0; i < t->num_targets; i++)
-               if (t->targets[i].type->flush)
-                       t->targets[i].type->flush(&t->targets[i]);
-
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               struct request_queue *q = bdev_get_queue(dd->bdev);
-               int err;
-
-               if (!q->issue_flush_fn)
-                       err = -EOPNOTSUPP;
-               else
-                       err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
-               if (!ret)
-                       ret = err;
-       }
-
-       return ret;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
        dm_get(t->md);
@@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);
index 167765c477470b346591850e9a5d0f1b5c5e3626..d837d37f62093d1669a5bf87d160eab34eb45aa3 100644 (file)
@@ -840,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
        return 0;
 }
 
-static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
-                       sector_t *error_sector)
-{
-       struct mapped_device *md = q->queuedata;
-       struct dm_table *map = dm_get_table(md);
-       int ret = -ENXIO;
-
-       if (map) {
-               ret = dm_table_flush_all(map);
-               dm_table_put(map);
-       }
-
-       return ret;
-}
-
 static void dm_unplug_all(struct request_queue *q)
 {
        struct mapped_device *md = q->queuedata;
@@ -1003,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
        blk_queue_make_request(md->queue, dm_request);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
        md->queue->unplug_fn = dm_unplug_all;
-       md->queue->issue_flush_fn = dm_flush_all;
 
        md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
        if (!md->io_pool)
index 462ee652a89015c94490d6500b3e8f879fd8de46..4b3faa45277ec81666876110423c9c0f1cb8954e 100644 (file)
@@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
-int dm_table_flush_all(struct dm_table *t);
 
 /*-----------------------------------------------------------------
  * A registry of target types.
index 550148770bb214844d89f51060d084ba85585d76..56a11f6c127b888f0518b8da9c2dd838d70fc74c 100644 (file)
@@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
        }
 }
 
-static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
-                             sector_t *error_sector)
-{
-       mddev_t *mddev = q->queuedata;
-       linear_conf_t *conf = mddev_to_conf(mddev);
-       int i, ret = 0;
-
-       for (i=0; i < mddev->raid_disks && ret == 0; i++) {
-               struct block_device *bdev = conf->disks[i].rdev->bdev;
-               struct request_queue *r_queue = bdev_get_queue(bdev);
-
-               if (!r_queue->issue_flush_fn)
-                       ret = -EOPNOTSUPP;
-               else
-                       ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-       }
-       return ret;
-}
-
 static int linear_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)
 
        blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
        mddev->queue->unplug_fn = linear_unplug;
-       mddev->queue->issue_flush_fn = linear_issue_flush;
        mddev->queue->backing_dev_info.congested_fn = linear_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
        return 0;
index acf1b81b47cbeccf74d85cc6bc9f66ee998afbbb..0dc563d76b393208465e8209470ec982b88c4cbd 100644 (file)
@@ -3463,7 +3463,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
                        mddev->pers->stop(mddev);
                        mddev->queue->merge_bvec_fn = NULL;
                        mddev->queue->unplug_fn = NULL;
-                       mddev->queue->issue_flush_fn = NULL;
                        mddev->queue->backing_dev_info.congested_fn = NULL;
                        if (mddev->pers->sync_request)
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
index f2a63f394ad980187ba9fc319ce71b57d27f349a..b35731cceac671ee647e108174a44cacfa82c381 100644 (file)
@@ -194,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
        seq_printf (seq, "]");
 }
 
-static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
-                                sector_t *error_sector)
-{
-       mddev_t *mddev = q->queuedata;
-       multipath_conf_t *conf = mddev_to_conf(mddev);
-       int i, ret = 0;
-
-       rcu_read_lock();
-       for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       struct block_device *bdev = rdev->bdev;
-                       struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                       if (!r_queue->issue_flush_fn)
-                               ret = -EOPNOTSUPP;
-                       else {
-                               atomic_inc(&rdev->nr_pending);
-                               rcu_read_unlock();
-                               ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                             error_sector);
-                               rdev_dec_pending(rdev, mddev);
-                               rcu_read_lock();
-                       }
-               }
-       }
-       rcu_read_unlock();
-       return ret;
-}
 static int multipath_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -527,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
        mddev->array_size = mddev->size;
 
        mddev->queue->unplug_fn = multipath_unplug;
-       mddev->queue->issue_flush_fn = multipath_issue_flush;
        mddev->queue->backing_dev_info.congested_fn = multipath_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
index ef0da2d8495969de264563ce7ee6751afe002aff..e79e1a538d44cda459f00dffa89e4c93bc15b421 100644 (file)
@@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
        }
 }
 
-static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
-                            sector_t *error_sector)
-{
-       mddev_t *mddev = q->queuedata;
-       raid0_conf_t *conf = mddev_to_conf(mddev);
-       mdk_rdev_t **devlist = conf->strip_zone[0].dev;
-       int i, ret = 0;
-
-       for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-               struct block_device *bdev = devlist[i]->bdev;
-               struct request_queue *r_queue = bdev_get_queue(bdev);
-
-               if (!r_queue->issue_flush_fn)
-                       ret = -EOPNOTSUPP;
-               else
-                       ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-       }
-       return ret;
-}
-
 static int raid0_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)
 
        mddev->queue->unplug_fn = raid0_unplug;
 
-       mddev->queue->issue_flush_fn = raid0_issue_flush;
        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
index 6d03bea6fa58cb0239db4356bf8e814be6dfdeba..0bcefad8241334bcd7a83358ccdd1901cca04254 100644 (file)
@@ -567,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
        md_wakeup_thread(mddev->thread);
 }
 
-static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
-                            sector_t *error_sector)
-{
-       mddev_t *mddev = q->queuedata;
-       conf_t *conf = mddev_to_conf(mddev);
-       int i, ret = 0;
-
-       rcu_read_lock();
-       for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       struct block_device *bdev = rdev->bdev;
-                       struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                       if (!r_queue->issue_flush_fn)
-                               ret = -EOPNOTSUPP;
-                       else {
-                               atomic_inc(&rdev->nr_pending);
-                               rcu_read_unlock();
-                               ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                             error_sector);
-                               rdev_dec_pending(rdev, mddev);
-                               rcu_read_lock();
-                       }
-               }
-       }
-       rcu_read_unlock();
-       return ret;
-}
-
 static int raid1_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -1997,7 +1967,6 @@ static int run(mddev_t *mddev)
        mddev->array_size = mddev->size;
 
        mddev->queue->unplug_fn = raid1_unplug;
-       mddev->queue->issue_flush_fn = raid1_issue_flush;
        mddev->queue->backing_dev_info.congested_fn = raid1_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
index 25a96c42bdb0c4b922c9c48dd9ba093ded1b44eb..fc6607acb6e4734f12c4dbed23bf9e82ab5816ec 100644 (file)
@@ -611,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
        md_wakeup_thread(mddev->thread);
 }
 
-static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
-                            sector_t *error_sector)
-{
-       mddev_t *mddev = q->queuedata;
-       conf_t *conf = mddev_to_conf(mddev);
-       int i, ret = 0;
-
-       rcu_read_lock();
-       for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       struct block_device *bdev = rdev->bdev;
-                       struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                       if (!r_queue->issue_flush_fn)
-                               ret = -EOPNOTSUPP;
-                       else {
-                               atomic_inc(&rdev->nr_pending);
-                               rcu_read_unlock();
-                               ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                             error_sector);
-                               rdev_dec_pending(rdev, mddev);
-                               rcu_read_lock();
-                       }
-               }
-       }
-       rcu_read_unlock();
-       return ret;
-}
-
 static int raid10_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -2118,7 +2088,6 @@ static int run(mddev_t *mddev)
        mddev->resync_max_sectors = size << conf->chunk_shift;
 
        mddev->queue->unplug_fn = raid10_unplug;
-       mddev->queue->issue_flush_fn = raid10_issue_flush;
        mddev->queue->backing_dev_info.congested_fn = raid10_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
index caaca9e178bc2bdd15e960596c569d79aa86212a..8ee181a01f5206730bd0ad23744247bf51fb289a 100644 (file)
@@ -3204,36 +3204,6 @@ static void raid5_unplug_device(struct request_queue *q)
        unplug_slaves(mddev);
 }
 
-static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
-                            sector_t *error_sector)
-{
-       mddev_t *mddev = q->queuedata;
-       raid5_conf_t *conf = mddev_to_conf(mddev);
-       int i, ret = 0;
-
-       rcu_read_lock();
-       for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       struct block_device *bdev = rdev->bdev;
-                       struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                       if (!r_queue->issue_flush_fn)
-                               ret = -EOPNOTSUPP;
-                       else {
-                               atomic_inc(&rdev->nr_pending);
-                               rcu_read_unlock();
-                               ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                             error_sector);
-                               rdev_dec_pending(rdev, mddev);
-                               rcu_read_lock();
-                       }
-               }
-       }
-       rcu_read_unlock();
-       return ret;
-}
-
 static int raid5_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -4263,7 +4233,6 @@ static int run(mddev_t *mddev)
                       mdname(mddev));
 
        mddev->queue->unplug_fn = raid5_unplug_device;
-       mddev->queue->issue_flush_fn = raid5_issue_flush;
        mddev->queue->backing_dev_info.congested_data = mddev;
        mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 
index 822a3aa4fae5a8defd6176ef133ae7dbdb33427d..626bb3c9af2b0d6614d2313753b4c9796e64bb5e 100644 (file)
@@ -293,7 +293,7 @@ nextSGEset:
        for (ii=0; ii < (numSgeThisFrame-1); ii++) {
                thisxfer = sg_dma_len(sg);
                if (thisxfer == 0) {
-                       sg ++; /* Get next SG element from the OS */
+                       sg = sg_next(sg); /* Get next SG element from the OS */
                        sg_done++;
                        continue;
                }
@@ -301,7 +301,7 @@ nextSGEset:
                v2 = sg_dma_address(sg);
                mptscsih_add_sge(psge, sgflags | thisxfer, v2);
 
-               sg++;           /* Get next SG element from the OS */
+               sg = sg_next(sg);       /* Get next SG element from the OS */
                psge += (sizeof(u32) + sizeof(dma_addr_t));
                sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
                sg_done++;
@@ -322,7 +322,7 @@ nextSGEset:
                v2 = sg_dma_address(sg);
                mptscsih_add_sge(psge, sgflags | thisxfer, v2);
                /*
-               sg++;
+               sg = sg_next(sg);
                psge += (sizeof(u32) + sizeof(dma_addr_t));
                */
                sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
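
Replacing 'sg++' with sg_next() is the heart of these driver conversions:
once scatterlist chaining is enabled, an sg table is no longer guaranteed to
be one contiguous array, and a chain entry must be followed rather than
stepped over. A simplified sketch of the helper this series adds to
include/linux/scatterlist.h, shown here for reference:

	#define sg_is_chain(sg)		((unsigned long) (sg)->page & 0x01)
	#define sg_chain_ptr(sg)	\
		((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01))

	static inline struct scatterlist *sg_next(struct scatterlist *sg)
	{
		sg++;
		/* the low bit of ->page marks a link to the next sg table */
		if (unlikely(sg_is_chain(sg)))
			sg = sg_chain_ptr(sg);
		return sg;
	}
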
index 50b2c73344105b3f867d4b322864af9d540573ca..d602ba6d5417670fcd72f6719aa12114d9c40b91 100644 (file)
@@ -148,29 +148,6 @@ static int i2o_block_device_flush(struct i2o_device *dev)
        return i2o_msg_post_wait(dev->iop, msg, 60);
 };
 
-/**
- *     i2o_block_issue_flush - device-flush interface for block-layer
- *     @queue: the request queue of the device which should be flushed
- *     @disk: gendisk
- *     @error_sector: error offset
- *
- *     Helper function to provide flush functionality to block-layer.
- *
- *     Returns 0 on success or negative error code on failure.
- */
-
-static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
-                                sector_t * error_sector)
-{
-       struct i2o_block_device *i2o_blk_dev = queue->queuedata;
-       int rc = -ENODEV;
-
-       if (likely(i2o_blk_dev))
-               rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
-
-       return rc;
-}
-
 /**
  *     i2o_block_device_mount - Mount (load) the media of device dev
  *     @dev: I2O device which should receive the mount request
@@ -1009,7 +986,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
        }
 
        blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
-       blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
 
        gd->major = I2O_MAJOR;
        gd->queue = queue;
index b0abc7d928051274f5168fe4d1e85fcbabc31b9b..a5d0354bbbda808e45cdd9594f6c45e75f2e7022 100644 (file)
@@ -153,14 +153,14 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                        blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);
 
-                       mq->sg = kmalloc(sizeof(struct scatterlist),
+                       mq->sg = kzalloc(sizeof(struct scatterlist),
                                GFP_KERNEL);
                        if (!mq->sg) {
                                ret = -ENOMEM;
                                goto cleanup_queue;
                        }
 
-                       mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
+                       mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
                                bouncesz / 512, GFP_KERNEL);
                        if (!mq->bounce_sg) {
                                ret = -ENOMEM;
@@ -177,7 +177,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-               mq->sg = kmalloc(sizeof(struct scatterlist) *
+               mq->sg = kzalloc(sizeof(struct scatterlist) *
                        host->max_phys_segs, GFP_KERNEL);
                if (!mq->sg) {
                        ret = -ENOMEM;
index 16e5563e0c651959aad3bd4e7cf7c90aacf0bfaf..57cac7008e0b4cbf6db25636e6921b7a958a4886 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/mempool.h>
 #include <linux/syscalls.h>
+#include <linux/scatterlist.h>
 #include <linux/ioctl.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_tcq.h>
index 3f105fdcf239610b18e14a9617b90630b0444302..51d92b196ee716848268a79d4c8a7653b282e775 100644 (file)
@@ -590,7 +590,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
  */
 int
 zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-                        struct scatterlist *sg,        int sg_count, int max_sbals)
+                        struct scatterlist *sgl, int sg_count, int max_sbals)
 {
        int sg_index;
        struct scatterlist *sg_segment;
@@ -606,9 +606,7 @@ zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
        sbale->flags |= sbtype;
 
        /* process all segments of scatter-gather list */
-       for (sg_index = 0, sg_segment = sg, bytes = 0;
-            sg_index < sg_count;
-            sg_index++, sg_segment++) {
+       for_each_sg(sgl, sg_segment, sg_count, sg_index) {
                retval = zfcp_qdio_sbals_from_segment(
                                fsf_req,
                                sbtype,
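
The open-coded three-clause loop collapses into for_each_sg(), which hides
the sg_next() traversal behind an iterator. Its definition in this series,
from include/linux/scatterlist.h, shown for reference:

	#define for_each_sg(sglist, sg, nr, __i)	\
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
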
index efd9d8d3a890c60ead2c5b5d547ff636cde73cfb..fb14014ee16e190f6688daba25684a19db216082 100644 (file)
@@ -1990,6 +1990,7 @@ static struct scsi_host_template driver_template = {
        .max_sectors            = TW_MAX_SECTORS,
        .cmd_per_lun            = TW_MAX_CMDS_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .shost_attrs            = twa_host_attrs,
        .emulated               = 1
 };
index c7995fc216e8ed43a572ea9e41f482fc7c6a88c8..a64153b960344d84d3b742905fa9132d3bb092c5 100644 (file)
@@ -2261,6 +2261,7 @@ static struct scsi_host_template driver_template = {
        .max_sectors            = TW_MAX_SECTORS,
        .cmd_per_lun            = TW_MAX_CMDS_PER_LUN,  
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .shost_attrs            = tw_host_attrs,
        .emulated               = 1
 };
index 9b206176f717773fce56674a5fc8f4da5508354e..49e1ffa4b2ffbf700f2c1c22b43ebd7c59f8776a 100644 (file)
@@ -3575,6 +3575,7 @@ static struct scsi_host_template Bus_Logic_template = {
        .unchecked_isa_dma = 1,
        .max_sectors = 128,
        .use_clustering = ENABLE_CLUSTERING,
+       .use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 /*
index eda8c48f6be7713ae0e4597e14ff7a07a8de4ac1..3168a1794849657c5447df861331e1329a845e91 100644 (file)
@@ -1066,7 +1066,8 @@ static struct scsi_host_template driver_template =
      .sg_tablesize             = 32                    /*SG_ALL*/ /*SG_NONE*/, 
      .cmd_per_lun              = 1                     /* commands per lun */, 
      .unchecked_isa_dma        = 1                     /* unchecked_isa_dma */,
-     .use_clustering           = ENABLE_CLUSTERING                               
+     .use_clustering           = ENABLE_CLUSTERING,
+     .use_sg_chaining          = ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"
index f608d4a1d6daeaa8b44ca46d25cb3fe2b3cc2276..d3a6d15fb77af90ead7ee78bfd1deed94e5bef8f 100644 (file)
@@ -1071,6 +1071,7 @@ static struct scsi_host_template inia100_template = {
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = 1,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 static int __devinit inia100_probe_one(struct pci_dev *pdev,
index a7f42a17b5c708ba331b59874690f8a84cdab920..038980be763d18f75ef37902662ceb766b72a530 100644 (file)
@@ -944,6 +944,7 @@ static struct scsi_host_template aac_driver_template = {
        .cmd_per_lun                    = AAC_NUM_IO_FIB, 
 #endif 
        .use_clustering                 = ENABLE_CLUSTERING,
+       .use_sg_chaining                = ENABLE_SG_CHAINING,
        .emulated                       = 1,
 };
 
index cbbfbc9f3e0fad2f272ad2e3ffb2e0c8a0a9d283..961a1882cb7eb0741ea26659c67ac8a557fb6516 100644 (file)
@@ -61,15 +61,15 @@ static void BAD_DMA(void *address, unsigned int length)
 }
 
 static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
-                      struct scatterlist *sgpnt,
+                      struct scatterlist *sgp,
                       int nseg,
                       int badseg)
 {
        printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
               badseg, nseg,
-              page_address(sgpnt[badseg].page) + sgpnt[badseg].offset,
-              (unsigned long long)SCSI_SG_PA(&sgpnt[badseg]),
-              sgpnt[badseg].length);
+              page_address(sgp->page) + sgp->offset,
+              (unsigned long long)SCSI_SG_PA(sgp),
+              sgp->length);
 
        /*
         * Not safe to continue.
@@ -691,7 +691,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
        memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
 
        if (SCpnt->use_sg) {
-               struct scatterlist *sgpnt;
+               struct scatterlist *sg;
                struct chain *cptr;
 #ifdef DEBUG
                unsigned char *ptr;
@@ -699,23 +699,21 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
                int i;
                ccb[mbo].op = 2;        /* SCSI Initiator Command  w/scatter-gather */
                SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
-               sgpnt = (struct scatterlist *) SCpnt->request_buffer;
                cptr = (struct chain *) SCpnt->host_scribble;
                if (cptr == NULL) {
                        /* free the claimed mailbox slot */
                        HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
                        return SCSI_MLQUEUE_HOST_BUSY;
                }
-               for (i = 0; i < SCpnt->use_sg; i++) {
-                       if (sgpnt[i].length == 0 || SCpnt->use_sg > 16 ||
-                           (((int) sgpnt[i].offset) & 1) || (sgpnt[i].length & 1)) {
+               scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
+                       if (sg->length == 0 || SCpnt->use_sg > 16 ||
+                           (((int) sg->offset) & 1) || (sg->length & 1)) {
                                unsigned char *ptr;
                                printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
-                               for (i = 0; i < SCpnt->use_sg; i++) {
+                               scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
                                        printk(KERN_CRIT "%d: %p %d\n", i,
-                                              (page_address(sgpnt[i].page) +
-                                               sgpnt[i].offset),
-                                              sgpnt[i].length);
+                                              (page_address(sg->page) +
+                                               sg->offset), sg->length);
                                };
                                printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
                                ptr = (unsigned char *) &cptr[i];
@@ -723,10 +721,10 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
                                        printk("%02x ", ptr[i]);
                                panic("Foooooooood fight!");
                        };
-                       any2scsi(cptr[i].dataptr, SCSI_SG_PA(&sgpnt[i]));
-                       if (SCSI_SG_PA(&sgpnt[i]) + sgpnt[i].length - 1 > ISA_DMA_THRESHOLD)
-                               BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i);
-                       any2scsi(cptr[i].datalen, sgpnt[i].length);
+                       any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
+                       if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
+                               BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i);
+                       any2scsi(cptr[i].datalen, sg->length);
                };
                any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
                any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
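
scsi_for_each_sg(), used in the aha1542 conversion above, is a thin wrapper
that starts the generic iterator at the command's scatterlist (from
include/scsi/scsi_cmnd.h in this series, shown for reference):

	#define scsi_for_each_sg(cmd, sg, nseg, __i)	\
		for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
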
index e4a4f3a965d9a9ce3f4b79c7f140c8e42ba63870..f6722fd46008a2f451ceb645411e93f6b4115fae 100644 (file)
@@ -563,6 +563,7 @@ static struct scsi_host_template aha1740_template = {
        .sg_tablesize     = AHA1740_SCATTER,
        .cmd_per_lun      = AHA1740_CMDLUN,
        .use_clustering   = ENABLE_CLUSTERING,
+       .use_sg_chaining  = ENABLE_SG_CHAINING,
        .eh_abort_handler = aha1740_eh_abort_handler,
 };
 
index a055a96e3ad34e9185f10a214f798788928e6260..42c0f14a262cbf8f3b4e3e44c80046b009a9c7b1 100644 (file)
@@ -766,6 +766,7 @@ struct scsi_host_template aic79xx_driver_template = {
        .max_sectors            = 8192,
        .cmd_per_lun            = 2,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .slave_alloc            = ahd_linux_slave_alloc,
        .slave_configure        = ahd_linux_slave_configure,
        .target_alloc           = ahd_linux_target_alloc,
index 2e9c38f2e8a68d1065be272baa7174fa2eb06e90..7770befbf50c17171a03077a91e5794070c8217f 100644 (file)
@@ -747,6 +747,7 @@ struct scsi_host_template aic7xxx_driver_template = {
        .max_sectors            = 8192,
        .cmd_per_lun            = 2,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .slave_alloc            = ahc_linux_slave_alloc,
        .slave_configure        = ahc_linux_slave_configure,
        .target_alloc           = ahc_linux_target_alloc,
index 1a71b0236c974ef8ed3006f9a82756c163d0d3c6..4025608d6964cf3ee019708d3ae4ee067e1094ac 100644 (file)
@@ -11142,6 +11142,7 @@ static struct scsi_host_template driver_template = {
        .max_sectors            = 2048,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"
index f2b23e01401ad95b825b73c8c0648edc2c09b874..ee0a98bffcd4c0fec572afd6e67e707c4ddb1cca 100644 (file)
@@ -94,7 +94,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
                        res = -ENOMEM;
                        goto err_unmap;
                }
-               for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
+               for_each_sg(task->scatter, sc, num_sg, i) {
                        struct sg_el *sg =
                                &((struct sg_el *)ascb->sg_arr->vaddr)[i];
                        sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
@@ -103,7 +103,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
                                sg->flags |= ASD_SG_EL_LIST_EOL;
                }
 
-               for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
+               for_each_sg(task->scatter, sc, 2, i) {
                        sg_arr[i].bus_addr =
                                cpu_to_le64((u64)sg_dma_address(sc));
                        sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
@@ -115,7 +115,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
                sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
        } else {
                int i;
-               for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
+               for_each_sg(task->scatter, sc, num_sg, i) {
                        sg_arr[i].bus_addr =
                                cpu_to_le64((u64)sg_dma_address(sc));
                        sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
index cfcf40159eab96e0d512dabec9b39c4c3c19296a..f81777586b8f8ea77b7a2ee8abeae4b13f1e9523 100644 (file)
@@ -122,6 +122,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
        .max_sectors            = ARCMSR_MAX_XFER_SECTORS,
        .cmd_per_lun            = ARCMSR_MAX_CMD_PERLUN,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .shost_attrs            = arcmsr_host_attrs,
 };
 #ifdef CONFIG_SCSI_ARCMSR_AER
index 1591824cf4b3b7ed6c8c01202426b717032590e9..fd42d47892021487aa916588b99d87a55c76a048 100644 (file)
@@ -4765,6 +4765,7 @@ static struct scsi_host_template dc395x_driver_template = {
        .eh_bus_reset_handler   = dc395x_eh_bus_reset,
        .unchecked_isa_dma      = 0,
        .use_clustering         = DISABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 
index bea9d659af1549e0dd2e5a8cd1f996eb78d65c25..8258506ba7d793d8a4fe2fd8e0e7fe4a3fb26245 100644 (file)
@@ -3295,6 +3295,7 @@ static struct scsi_host_template adpt_template = {
        .this_id                = 7,
        .cmd_per_lun            = 1,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 static s32 adpt_scsi_register(adpt_hba* pHba)
index ec2233114bc9b6bbaf74d9281e5d4e9b53376653..7ead5210de968dfb0ece36e1d81058a018875d14 100644 (file)
@@ -523,7 +523,8 @@ static struct scsi_host_template driver_template = {
        .slave_configure = eata2x_slave_configure,
        .this_id = 7,
        .unchecked_isa_dma = 1,
-       .use_clustering = ENABLE_CLUSTERING
+       .use_clustering = ENABLE_CLUSTERING,
+       .use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
index adc9559cb6f40b20e4666d93550bc0b8b6fc8e83..112ab6abe62bf513cd7f2b25926fdab9571f0304 100644 (file)
@@ -343,6 +343,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        shost->use_clustering = sht->use_clustering;
        shost->ordered_tag = sht->ordered_tag;
        shost->active_mode = sht->supported_mode;
+       shost->use_sg_chaining = sht->use_sg_chaining;
 
        if (sht->max_host_blocked)
                shost->max_host_blocked = sht->max_host_blocked;
index 8b384fa7f048c96a92d555e74377ab3d1a4bb22d..8515054cdf703ef079f7525334c24865276fa11b 100644 (file)
@@ -655,6 +655,7 @@ static struct scsi_host_template driver_template = {
        .unchecked_isa_dma          = 0,
        .emulated                   = 0,
        .use_clustering             = ENABLE_CLUSTERING,
+       .use_sg_chaining            = ENABLE_SG_CHAINING,
        .proc_name                  = driver_name,
        .shost_attrs                = hptiop_attrs,
        .this_id                    = -1,
index 1a924e9b02718f405dac5e9be65a13979b4a502a..714e6273a70d0444c28e938dbcb864ee677cdb87 100644 (file)
@@ -1501,6 +1501,7 @@ static struct scsi_host_template ibmmca_driver_template = {
           .sg_tablesize   = 16,
           .cmd_per_lun    = 1,
           .use_clustering = ENABLE_CLUSTERING,
+          .use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int ibmmca_probe(struct device *dev)
index cda0cc3d182fde457f4b15259d8a10eb476489af..22d91ee173c594f1d101f6f1d97e9395aac00ccb 100644 (file)
@@ -1548,6 +1548,7 @@ static struct scsi_host_template driver_template = {
        .this_id = -1,
        .sg_tablesize = SG_ALL,
        .use_clustering = ENABLE_CLUSTERING,
+       .use_sg_chaining = ENABLE_SG_CHAINING,
        .shost_attrs = ibmvscsi_attrs,
 };
 
index d81bb076a15a887b062f8325061cb2f9d46b6430..d297f64cd4325f3d4e0d2f1fff74a9f3db3d46cc 100644 (file)
@@ -70,6 +70,7 @@ typedef struct idescsi_pc_s {
        u8 *buffer;                             /* Data buffer */
        u8 *current_position;                   /* Pointer into the above buffer */
        struct scatterlist *sg;                 /* Scatter gather table */
+       struct scatterlist *last_sg;            /* Last sg element */
        int b_count;                            /* Bytes transferred from current entry */
        struct scsi_cmnd *scsi_cmd;             /* SCSI command */
        void (*done)(struct scsi_cmnd *);       /* Scsi completion routine */
@@ -173,12 +174,6 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
        char *buf;
 
        while (bcount) {
-               if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-                                                scsi_sg_count(pc->scsi_cmd)) {
-                       printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
-                       idescsi_discard_data (drive, bcount);
-                       return;
-               }
                count = min(pc->sg->length - pc->b_count, bcount);
                if (PageHighMem(pc->sg->page)) {
                        unsigned long flags;
@@ -197,10 +192,17 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
                }
                bcount -= count; pc->b_count += count;
                if (pc->b_count == pc->sg->length) {
-                       pc->sg++;
+                       if (pc->sg == pc->last_sg)
+                               break;
+                       pc->sg = sg_next(pc->sg);
                        pc->b_count = 0;
                }
        }
+
+       if (bcount) {
+               printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
+               idescsi_discard_data (drive, bcount);
+       }
 }
 
 static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
@@ -209,12 +211,6 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
        char *buf;
 
        while (bcount) {
-               if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-                                                scsi_sg_count(pc->scsi_cmd)) {
-                       printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
-                       idescsi_output_zeros (drive, bcount);
-                       return;
-               }
                count = min(pc->sg->length - pc->b_count, bcount);
                if (PageHighMem(pc->sg->page)) {
                        unsigned long flags;
@@ -233,10 +229,17 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
                }
                bcount -= count; pc->b_count += count;
                if (pc->b_count == pc->sg->length) {
-                       pc->sg++;
+                       if (pc->sg == pc->last_sg)
+                               break;
+                       pc->sg = sg_next(pc->sg);
                        pc->b_count = 0;
                }
        }
+
+       if (bcount) {
+               printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
+               idescsi_output_zeros (drive, bcount);
+       }
 }
 
 static void hexdump(u8 *x, int len)
@@ -804,6 +807,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
        memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
        pc->buffer = NULL;
        pc->sg = scsi_sglist(cmd);
+       pc->last_sg = sg_last(pc->sg, cmd->use_sg);
        pc->b_count = 0;
        pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd);
        pc->scsi_cmd = cmd;
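
Caching sg_last() replaces ide-scsi's old pointer-arithmetic bounds check,
which is meaningless across a chain boundary; the overrun test accordingly
moves to after the copy loop, keyed on leftover bcount. A sketch of sg_last()
consistent with its use here (the in-tree helper also keeps a direct
&sgl[nents - 1] fast path for architectures without sg chaining):

	static inline struct scatterlist *sg_last(struct scatterlist *sgl,
						  unsigned int nents)
	{
		struct scatterlist *sg, *ret = NULL;
		unsigned int i;

		/* walk the possibly chained list, remember the last entry */
		for_each_sg(sgl, sg, nents, i)
			ret = sg;

		return ret;
	}
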
index d9dfb69ae031c0c5804b336a62cc6b2f9d549d63..22d40fd5845b68b30e22fe06dc56d35633ffa501 100644 (file)
@@ -2831,6 +2831,7 @@ static struct scsi_host_template initio_template = {
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = 1,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 static int initio_probe_one(struct pci_dev *pdev,
index 2ed099e2c20d50ae6d91dacd3832c57f61de6c24..edaac2714c5abc2ecbcfbf711f94ece48a00509a 100644 (file)
@@ -3252,7 +3252,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
                 */
                if ((scb->breakup) || (scb->sg_break)) {
                         struct scatterlist *sg;
-                        int sg_dma_index, ips_sg_index = 0;
+                        int i, sg_dma_index, ips_sg_index = 0;
 
                        /* we had a data breakup */
                        scb->data_len = 0;
@@ -3261,20 +3261,22 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 
                         /* Spin forward to last dma chunk */
                         sg_dma_index = scb->breakup;
+                        for (i = 0; i < scb->breakup; i++)
+                                sg = sg_next(sg);
 
                        /* Take care of possible partial on last chunk */
                         ips_fill_scb_sg_single(ha,
-                                               sg_dma_address(&sg[sg_dma_index]),
+                                               sg_dma_address(sg),
                                                scb, ips_sg_index++,
-                                               sg_dma_len(&sg[sg_dma_index]));
+                                               sg_dma_len(sg));
 
                         for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
-                             sg_dma_index++) {
+                             sg_dma_index++, sg = sg_next(sg)) {
                                 if (ips_fill_scb_sg_single
                                     (ha,
-                                     sg_dma_address(&sg[sg_dma_index]),
+                                     sg_dma_address(sg),
                                      scb, ips_sg_index++,
-                                     sg_dma_len(&sg[sg_dma_index])) < 0)
+                                     sg_dma_len(sg)) < 0)
                                         break;
                         }
 
index cd674938ccd53ab2c5f7ebe7a7e563930f6dcd68..c0755565fae9da39f9173fbfcd5d92e85db5229e 100644 (file)
@@ -1438,6 +1438,7 @@ struct scsi_host_template lpfc_template = {
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_hba_attrs,
@@ -1460,6 +1461,7 @@ struct scsi_host_template lpfc_vport_template = {
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .shost_attrs            = lpfc_vport_attrs,
        .max_sectors            = 0xFFFF,
 };
index b12ad7c7c6736908419987896a2402898ba2620e..a035001f44386a36e49abcd3d292fb31239cf85e 100644 (file)
@@ -402,6 +402,7 @@ static struct scsi_host_template mac53c94_template = {
        .sg_tablesize   = SG_ALL,
        .cmd_per_lun    = 1,
        .use_clustering = DISABLE_CLUSTERING,
+       .use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
index e7e11f282c8f4357d419e8508746871cdda1a492..10d1aff9938a1938b79ec54e92cb1a5bb6bc7ecd 100644 (file)
@@ -4492,6 +4492,7 @@ static struct scsi_host_template megaraid_template = {
        .sg_tablesize                   = MAX_SGLIST,
        .cmd_per_lun                    = DEF_CMD_PER_LUN,
        .use_clustering                 = ENABLE_CLUSTERING,
+       .use_sg_chaining                = ENABLE_SG_CHAINING,
        .eh_abort_handler               = megaraid_abort,
        .eh_device_reset_handler        = megaraid_reset,
        .eh_bus_reset_handler           = megaraid_reset,
index c6a53dccc16a84d52b6aa3d041630a57421a58ee..e4e4c6a39ed6368d090cef0dea29fff4f491e757 100644 (file)
@@ -361,6 +361,7 @@ static struct scsi_host_template megaraid_template_g = {
        .eh_host_reset_handler          = megaraid_reset_handler,
        .change_queue_depth             = megaraid_change_queue_depth,
        .use_clustering                 = ENABLE_CLUSTERING,
+       .use_sg_chaining                = ENABLE_SG_CHAINING,
        .sdev_attrs                     = megaraid_sdev_attrs,
        .shost_attrs                    = megaraid_shost_attrs,
 };
index ebb948c016bbfa871db78ebc3ce5508562440585..e3c5c52822030f42283b6c9baa357f1921238336 100644 (file)
@@ -1110,6 +1110,7 @@ static struct scsi_host_template megasas_template = {
        .eh_timed_out = megasas_reset_timer,
        .bios_param = megasas_bios_param,
        .use_clustering = ENABLE_CLUSTERING,
+       .use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 /**
index 651d09b08f2a22eb6283da21830736271d1d4cd8..7470ff39ab22f59bd09e8140ba42811730b50230 100644 (file)
@@ -1843,6 +1843,7 @@ static struct scsi_host_template mesh_template = {
        .sg_tablesize                   = SG_ALL,
        .cmd_per_lun                    = 2,
        .use_clustering                 = DISABLE_CLUSTERING,
+       .use_sg_chaining                = ENABLE_SG_CHAINING,
 };
 
 static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
index 7fed35372150afe68d619d61d4faa4a2a4588437..28161dc95e0d7107613933da5186e50f265a0ded 100644 (file)
@@ -281,6 +281,7 @@ static struct scsi_host_template nsp32_template = {
        .cmd_per_lun                    = 1,
        .this_id                        = NSP32_HOST_SCSIID,
        .use_clustering                 = DISABLE_CLUSTERING,
+       .use_sg_chaining                = ENABLE_SG_CHAINING,
        .eh_abort_handler               = nsp32_eh_abort,
        .eh_bus_reset_handler           = nsp32_eh_bus_reset,
        .eh_host_reset_handler          = nsp32_eh_host_reset,
index 961839ecfe868ed487b69ceb1000330f903bb496..190e2a7d706748a10275bde1c40842415b459948 100644 (file)
@@ -694,6 +694,7 @@ static struct scsi_host_template sym53c500_driver_template = {
      .sg_tablesize             = 32,
      .cmd_per_lun              = 1,
      .use_clustering           = ENABLE_CLUSTERING,
+     .use_sg_chaining          = ENABLE_SG_CHAINING,
      .shost_attrs              = SYM53C500_shost_attrs
 };
 
index fba8aa8a81b58c8f689d136cd65435d5873f5192..76089cf55f4e956f857d7e3e123460d94c7923fe 100644 (file)
@@ -2775,7 +2775,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
        struct device_reg __iomem *reg = ha->iobase;
        struct scsi_cmnd *cmd = sp->cmd;
        cmd_a64_entry_t *pkt;
-       struct scatterlist *sg = NULL;
+       struct scatterlist *sg = NULL, *s;
        __le32 *dword_ptr;
        dma_addr_t dma_handle;
        int status = 0;
@@ -2889,13 +2889,16 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
         * Load data segments.
         */
        if (seg_cnt) {  /* If data transfer. */
+               int remseg = seg_cnt;
                /* Setup packet address segment pointer. */
                dword_ptr = (u32 *)&pkt->dseg_0_address;
 
                if (cmd->use_sg) {      /* If scatter gather */
                        /* Load command entry data segments. */
-                       for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) {
-                               dma_handle = sg_dma_address(sg);
+                       for_each_sg(sg, s, seg_cnt, cnt) {
+                               if (cnt == 2)
+                                       break;
+                               dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
                                if (ha->flags.use_pci_vchannel)
                                        sn_pci_set_vchan(ha->pdev,
@@ -2906,12 +2909,12 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                                        cpu_to_le32(pci_dma_lo32(dma_handle));
                                *dword_ptr++ =
                                        cpu_to_le32(pci_dma_hi32(dma_handle));
-                               *dword_ptr++ = cpu_to_le32(sg_dma_len(sg));
-                               sg++;
+                               *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
                                dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
                                        cpu_to_le32(pci_dma_hi32(dma_handle)),
                                        cpu_to_le32(pci_dma_lo32(dma_handle)),
-                                       cpu_to_le32(sg_dma_len(sg)));
+                                       cpu_to_le32(sg_dma_len(s)));
+                               remseg--;
                        }
                        dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
                                "command packet data - b %i, t %i, l %i \n",
@@ -2926,7 +2929,9 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                        dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
                                "remains\n", seg_cnt);
 
-                       while (seg_cnt > 0) {
+                       while (remseg > 0) {
+                               /* Update sg start */
+                               sg = s;
                                /* Adjust ring index. */
                                ha->req_ring_index++;
                                if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -2952,9 +2957,10 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                                        (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
 
                                /* Load continuation entry data segments. */
-                               for (cnt = 0; cnt < 5 && seg_cnt;
-                                    cnt++, seg_cnt--) {
-                                       dma_handle = sg_dma_address(sg);
+                               for_each_sg(sg, s, remseg, cnt) {
+                                       if (cnt == 5)
+                                               break;
+                                       dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
                                if (ha->flags.use_pci_vchannel)
                                        sn_pci_set_vchan(ha->pdev, 
@@ -2966,13 +2972,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                                        *dword_ptr++ =
                                                cpu_to_le32(pci_dma_hi32(dma_handle));
                                        *dword_ptr++ =
-                                               cpu_to_le32(sg_dma_len(sg));
+                                               cpu_to_le32(sg_dma_len(s));
                                        dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
                                                cpu_to_le32(pci_dma_hi32(dma_handle)),
                                                cpu_to_le32(pci_dma_lo32(dma_handle)),
-                                               cpu_to_le32(sg_dma_len(sg)));
-                                       sg++;
+                                               cpu_to_le32(sg_dma_len(s)));
                                }
+                               remseg -= cnt;
                                dprintk(5, "qla1280_64bit_start_scsi: "
                                        "continuation packet data - b %i, t "
                                        "%i, l %i \n", SCSI_BUS_32(cmd),
@@ -3062,7 +3068,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
        struct device_reg __iomem *reg = ha->iobase;
        struct scsi_cmnd *cmd = sp->cmd;
        struct cmd_entry *pkt;
-       struct scatterlist *sg = NULL;
+       struct scatterlist *sg = NULL, *s;
        __le32 *dword_ptr;
        int status = 0;
        int cnt;
@@ -3188,6 +3194,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
         * Load data segments.
         */
        if (seg_cnt) {
+               int remseg = seg_cnt;
                /* Setup packet address segment pointer. */
                dword_ptr = &pkt->dseg_0_address;
 
@@ -3196,22 +3203,25 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                        qla1280_dump_buffer(1, (char *)sg, 4 * 16);
 
                        /* Load command entry data segments. */
-                       for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) {
+                       for_each_sg(sg, s, seg_cnt, cnt) {
+                               if (cnt == 4)
+                                       break;
                                *dword_ptr++ =
-                                       cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
-                               *dword_ptr++ =
-                                       cpu_to_le32(sg_dma_len(sg));
+                                       cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+                               *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
                                dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
-                                       (pci_dma_lo32(sg_dma_address(sg))),
-                                       (sg_dma_len(sg)));
-                               sg++;
+                                       (pci_dma_lo32(sg_dma_address(s))),
+                                       (sg_dma_len(s)));
+                               remseg--;
                        }
                        /*
                         * Build continuation packets.
                         */
                        dprintk(3, "S/G Building Continuation"
                                "...seg_cnt=0x%x remains\n", seg_cnt);
-                       while (seg_cnt > 0) {
+                       while (remseg > 0) {
+                               /* Continue from end point */
+                               sg = s;
                                /* Adjust ring index. */
                                ha->req_ring_index++;
                                if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -3239,19 +3249,20 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                                        &((struct cont_entry *) pkt)->dseg_0_address;
 
                                /* Load continuation entry data segments. */
-                               for (cnt = 0; cnt < 7 && seg_cnt;
-                                    cnt++, seg_cnt--) {
+                               for_each_sg(sg, s, remseg, cnt) {
+                                       if (cnt == 7)
+                                               break;
                                        *dword_ptr++ =
-                                               cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
+                                               cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
                                        *dword_ptr++ =
-                                               cpu_to_le32(sg_dma_len(sg));
+                                               cpu_to_le32(sg_dma_len(s));
                                        dprintk(1,
                                                "S/G Segment Cont. phys_addr=0x%x, "
                                                "len=0x%x\n",
-                                               cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))),
-                                               cpu_to_le32(sg_dma_len(sg)));
-                                       sg++;
+                                               cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
+                                               cpu_to_le32(sg_dma_len(s)));
                                }
+                               remseg -= cnt;
                                dprintk(5, "qla1280_32bit_start_scsi: "
                                        "continuation packet data - "
                                        "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
@@ -4248,6 +4259,7 @@ static struct scsi_host_template qla1280_driver_template = {
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = 1,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 
index a6bb8d0ecf139fe46de567eb773abd8fead11dba..0351d380c2d7b1790c93d5655ff856b66d59cb74 100644 (file)
@@ -132,6 +132,7 @@ struct scsi_host_template qla2x00_driver_template = {
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .sg_tablesize           = SG_ALL,
 
        /*
@@ -163,6 +164,7 @@ struct scsi_host_template qla24xx_driver_template = {
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .sg_tablesize           = SG_ALL,
 
        .max_sectors            = 0xFFFF,
index b1d565c12c5b9af0da0350f909c11e95f9617b59..03b68d4f3bd0e1821f84aa5d75af404e8170e16c 100644 (file)
@@ -94,6 +94,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .sg_tablesize           = SG_ALL,
 
        .max_sectors            = 0xFFFF,
index 1e874f1fb5c64a403fd6e7da8365edce51b929df..1769f965eedf496e9e0602365220641c5caa2fd1 100644 (file)
@@ -197,6 +197,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = 1,
        .use_clustering         = DISABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 static __init int qlogicfas_init(void)
index e93f80316a19369d7d3416c887ece96bed1117c6..7a2e7986b038f16185875fe95bc8e2a32acc80a4 100644 (file)
@@ -868,7 +868,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
                           struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
 {
        struct dataseg *ds;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *s;
        int i, n;
 
        if (Cmnd->use_sg) {
@@ -884,11 +884,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
                n = sg_count;
                if (n > 4)
                        n = 4;
-               for (i = 0; i < n; i++, sg++) {
-                       ds[i].d_base = sg_dma_address(sg);
-                       ds[i].d_count = sg_dma_len(sg);
+               for_each_sg(sg, s, n, i) {
+                       ds[i].d_base = sg_dma_address(s);
+                       ds[i].d_count = sg_dma_len(s);
                }
                sg_count -= 4;
+               sg = s;
                while (sg_count > 0) {
                        struct Continuation_Entry *cont;
 
@@ -907,9 +908,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
                        n = sg_count;
                        if (n > 7)
                                n = 7;
-                       for (i = 0; i < n; i++, sg++) {
-                               ds[i].d_base = sg_dma_address(sg);
-                               ds[i].d_count = sg_dma_len(sg);
+                       for_each_sg(sg, s, n, i) {
+                               ds[i].d_base = sg_dma_address(s);
+                               ds[i].d_count = sg_dma_len(s);
                        }
 			sg_count -= n;
+			sg = s;
                }
index 4947dfe625a6f694546b3862c9f4d92b9b332f7f..72ee4c9cfb1ac0329fa8d413b5559dac46ee4a26 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/proc_fs.h>
 #include <linux/vmalloc.h>
 #include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
 
 #include <linux/blkdev.h>
 #include "scsi.h"
@@ -600,7 +601,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
        int k, req_len, act_len, len, active;
        void * kaddr;
        void * kaddr_off;
-       struct scatterlist * sgpnt;
+       struct scatterlist * sg;
 
        if (0 == scp->request_bufflen)
                return 0;
@@ -619,16 +620,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
                        scp->resid = req_len - act_len;
                return 0;
        }
-       sgpnt = (struct scatterlist *)scp->request_buffer;
        active = 1;
-       for (k = 0, req_len = 0, act_len = 0; k < scp->use_sg; ++k, ++sgpnt) {
+       req_len = act_len = 0;
+       scsi_for_each_sg(scp, sg, scp->use_sg, k) {
                if (active) {
                        kaddr = (unsigned char *)
-                               kmap_atomic(sgpnt->page, KM_USER0);
+                               kmap_atomic(sg->page, KM_USER0);
                        if (NULL == kaddr)
                                return (DID_ERROR << 16);
-                       kaddr_off = (unsigned char *)kaddr + sgpnt->offset;
-                       len = sgpnt->length;
+                       kaddr_off = (unsigned char *)kaddr + sg->offset;
+                       len = sg->length;
                        if ((req_len + len) > arr_len) {
                                active = 0;
                                len = arr_len - req_len;
@@ -637,7 +638,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
                        kunmap_atomic(kaddr, KM_USER0);
                        act_len += len;
                }
-               req_len += sgpnt->length;
+               req_len += sg->length;
        }
        if (scp->resid)
                scp->resid -= act_len;
@@ -653,7 +654,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
        int k, req_len, len, fin;
        void * kaddr;
        void * kaddr_off;
-       struct scatterlist * sgpnt;
+       struct scatterlist * sg;
 
        if (0 == scp->request_bufflen)
                return 0;
@@ -668,13 +669,14 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
                memcpy(arr, scp->request_buffer, len);
                return len;
        }
-       sgpnt = (struct scatterlist *)scp->request_buffer;
-       for (k = 0, req_len = 0, fin = 0; k < scp->use_sg; ++k, ++sgpnt) {
-               kaddr = (unsigned char *)kmap_atomic(sgpnt->page, KM_USER0);
+       sg = scsi_sglist(scp);
+       req_len = fin = 0;
+       for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
+               kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0);
                if (NULL == kaddr)
                        return -1;
-               kaddr_off = (unsigned char *)kaddr + sgpnt->offset;
-               len = sgpnt->length;
+               kaddr_off = (unsigned char *)kaddr + sg->offset;
+               len = sg->length;
                if ((req_len + len) > max_arr_len) {
                        len = max_arr_len - req_len;
                        fin = 1;
@@ -683,7 +685,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
                kunmap_atomic(kaddr, KM_USER0);
                if (fin)
                        return req_len + len;
-               req_len += sgpnt->length;
+               req_len += sg->length;
        }
        return req_len;
 }
index 207f1aa08869f209be7ba1bde8f727534796701b..aac8a02cbe8040348d8383d8551514a619598b2d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/scatterlist.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #define SG_MEMPOOL_NR          ARRAY_SIZE(scsi_sg_pools)
 #define SG_MEMPOOL_SIZE                2
 
+/*
+ * The maximum number of SG segments that we will put inside a scatterlist
+ * (unless chaining is used). Should ideally fit inside a single page, to
+ * avoid a higher order allocation.
+ */
+#define SCSI_MAX_SG_SEGMENTS   128
+
 struct scsi_host_sg_pool {
        size_t          size;
-       char            *name; 
+       char            *name;
        struct kmem_cache       *slab;
        mempool_t       *pool;
 };
 
-#if (SCSI_MAX_PHYS_SEGMENTS < 32)
-#error SCSI_MAX_PHYS_SEGMENTS is too small
-#endif
-
-#define SP(x) { x, "sgpool-" #x } 
+#define SP(x) { x, "sgpool-" #x }
 static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
+#if (SCSI_MAX_SG_SEGMENTS > 16)
        SP(32),
-#if (SCSI_MAX_PHYS_SEGMENTS > 32)
+#if (SCSI_MAX_SG_SEGMENTS > 32)
        SP(64),
-#if (SCSI_MAX_PHYS_SEGMENTS > 64)
+#if (SCSI_MAX_SG_SEGMENTS > 64)
        SP(128),
-#if (SCSI_MAX_PHYS_SEGMENTS > 128)
-       SP(256),
-#if (SCSI_MAX_PHYS_SEGMENTS > 256)
-#error SCSI_MAX_PHYS_SEGMENTS is too large
-#endif
 #endif
 #endif
 #endif
-};     
+};
 #undef SP
 
 static void scsi_run_queue(struct request_queue *q);
@@ -289,14 +289,16 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
        struct request_queue *q = rq->q;
        int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int data_len = bufflen, len, bytes, off;
+       struct scatterlist *sg;
        struct page *page;
        struct bio *bio = NULL;
        int i, err, nr_vecs = 0;
 
-       for (i = 0; i < nsegs; i++) {
-               page = sgl[i].page;
-               off = sgl[i].offset;
-               len = sgl[i].length;
+       for_each_sg(sgl, sg, nsegs, i) {
+               page = sg->page;
+               off = sg->offset;
+               len = sg->length;
+               data_len += len;
 
                while (len > 0 && data_len > 0) {
                        /*
@@ -695,56 +697,170 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
        return NULL;
 }
 
-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
-{
-       struct scsi_host_sg_pool *sgp;
-       struct scatterlist *sgl;
+/*
+ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
+ * is arbitrary; a setting of 2048 allows I/Os of at least 8MB (2048 4KB pages).
+ */
+#define SCSI_MAX_SG_CHAIN_SEGMENTS     2048
 
-       BUG_ON(!cmd->use_sg);
+static inline unsigned int scsi_sgtable_index(unsigned short nents)
+{
+       unsigned int index;
 
-       switch (cmd->use_sg) {
+       switch (nents) {
        case 1 ... 8:
-               cmd->sglist_len = 0;
+               index = 0;
                break;
        case 9 ... 16:
-               cmd->sglist_len = 1;
+               index = 1;
                break;
+#if (SCSI_MAX_SG_SEGMENTS > 16)
        case 17 ... 32:
-               cmd->sglist_len = 2;
+               index = 2;
                break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 32)
+#if (SCSI_MAX_SG_SEGMENTS > 32)
        case 33 ... 64:
-               cmd->sglist_len = 3;
+               index = 3;
                break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 64)
+#if (SCSI_MAX_SG_SEGMENTS > 64)
        case 65 ... 128:
-               cmd->sglist_len = 4;
-               break;
-#if (SCSI_MAX_PHYS_SEGMENTS  > 128)
-       case 129 ... 256:
-               cmd->sglist_len = 5;
+               index = 4;
                break;
 #endif
 #endif
 #endif
        default:
-               return NULL;
+               printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
+               BUG();
        }
 
-       sgp = scsi_sg_pools + cmd->sglist_len;
-       sgl = mempool_alloc(sgp->pool, gfp_mask);
-       return sgl;
+       return index;
+}
+
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+       struct scsi_host_sg_pool *sgp;
+       struct scatterlist *sgl, *prev, *ret;
+       unsigned int index;
+       int this, left;
+
+       BUG_ON(!cmd->use_sg);
+
+       left = cmd->use_sg;
+       ret = prev = NULL;
+       do {
+               this = left;
+               if (this > SCSI_MAX_SG_SEGMENTS) {
+                       this = SCSI_MAX_SG_SEGMENTS - 1;
+                       index = SG_MEMPOOL_NR - 1;
+               } else
+                       index = scsi_sgtable_index(this);
+
+               left -= this;
+
+               sgp = scsi_sg_pools + index;
+
+               sgl = mempool_alloc(sgp->pool, gfp_mask);
+               if (unlikely(!sgl))
+                       goto enomem;
+
+               memset(sgl, 0, sizeof(*sgl) * sgp->size);
+
+               /*
+                * first loop through, set initial index and return value
+                */
+               if (!ret)
+                       ret = sgl;
+
+               /*
+                * chain previous sglist, if any. we know the previous
+                * sglist must be the biggest one, or we would not have
+                * ended up doing another loop.
+                */
+               if (prev)
+                       sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+
+               /*
+                * don't allow subsequent mempool allocs to sleep, it would
+                * violate the mempool principle.
+                */
+               gfp_mask &= ~__GFP_WAIT;
+               gfp_mask |= __GFP_HIGH;
+               prev = sgl;
+       } while (left);
+
+       /*
+        * DMA mapping may shrink the number of segments and thus modify
+        * ->use_sg, so keep a copy of the original count for the free path.
+        */
+       cmd->__use_sg = cmd->use_sg;
+       return ret;
+enomem:
+       if (ret) {
+               /*
+                * Free entries chained off ret. Since we were trying to
+                * allocate another sglist, we know that all entries are of
+                * the max size.
+                */
+               sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
+               prev = ret;
+               ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
+
+               while ((sgl = sg_chain_ptr(ret)) != NULL) {
+                       ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
+                       mempool_free(sgl, sgp->pool);
+               }
+
+               mempool_free(prev, sgp->pool);
+       }
+       return NULL;
 }
 
 EXPORT_SYMBOL(scsi_alloc_sgtable);
 
-void scsi_free_sgtable(struct scatterlist *sgl, int index)
+void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
+       struct scatterlist *sgl = cmd->request_buffer;
        struct scsi_host_sg_pool *sgp;
 
-       BUG_ON(index >= SG_MEMPOOL_NR);
+       /*
+        * if the sglist exceeds the largest pool size it was chained;
+        * walk and free the chained chunks first
+        */
+       if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
+               unsigned short this, left;
+               struct scatterlist *next;
+               unsigned int index;
+
+               left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
+               next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
+               while (left && next) {
+                       sgl = next;
+                       this = left;
+                       if (this > SCSI_MAX_SG_SEGMENTS) {
+                               this = SCSI_MAX_SG_SEGMENTS - 1;
+                               index = SG_MEMPOOL_NR - 1;
+                       } else
+                               index = scsi_sgtable_index(this);
+
+                       left -= this;
+
+                       sgp = scsi_sg_pools + index;
+
+                       if (left)
+                               next = sg_chain_ptr(&sgl[sgp->size - 1]);
+
+                       mempool_free(sgl, sgp->pool);
+               }
+
+               /*
+                * Restore the original head; it is freed below
+                */
+               sgl = cmd->request_buffer;
+               sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
+       } else
+               sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
 
-       sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
 }
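
The allocation above builds large tables piecewise: each mempool chunk holds
at most SCSI_MAX_SG_SEGMENTS entries, and sg_chain() turns the last slot of a
chunk into a link to the next one, which is why a chained chunk carries only
SCSI_MAX_SG_SEGMENTS - 1 data entries. A minimal sketch of the same pattern
with plain kcalloc() in place of the mempools (sg_table_sketch and CHUNK are
illustrative names, not kernel API):

#include <linux/scatterlist.h>
#include <linux/slab.h>

#define CHUNK	8	/* stand-in for SCSI_MAX_SG_SEGMENTS */

static struct scatterlist *sg_table_sketch(int nents)
{
	struct scatterlist *head = NULL, *prev = NULL, *sgl;
	int this, left = nents;

	do {
		this = left;
		if (this > CHUNK)
			this = CHUNK - 1;	/* last slot becomes the link */

		/* kcalloc() zeroes the chunk, like the memset above */
		sgl = kcalloc(CHUNK, sizeof(*sgl), GFP_KERNEL);
		if (!sgl)
			return NULL;	/* sketch only: leaks earlier chunks */

		if (!head)
			head = sgl;
		if (prev)
			sg_chain(prev, CHUNK, sgl);

		prev = sgl;
		left -= this;
	} while (left);

	return head;
}

Freeing mirrors the same walk, as scsi_free_sgtable() above shows: follow
sg_chain_ptr() out of the last slot of each full chunk until the remainder
fits in a single piece.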
 
@@ -770,7 +886,7 @@ EXPORT_SYMBOL(scsi_free_sgtable);
 static void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
        if (cmd->use_sg)
-               scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+               scsi_free_sgtable(cmd);
 
        /*
         * Zero these out.  They now point to freed memory, and it is
@@ -984,7 +1100,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 static int scsi_init_io(struct scsi_cmnd *cmd)
 {
        struct request     *req = cmd->request;
-       struct scatterlist *sgpnt;
        int                count;
 
        /*
@@ -997,14 +1112,13 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
        /*
         * If sg table allocation fails, requeue request later.
         */
-       sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-       if (unlikely(!sgpnt)) {
+       cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
+       if (unlikely(!cmd->request_buffer)) {
                scsi_unprep_request(req);
                return BLKPREP_DEFER;
        }
 
        req->buffer = NULL;
-       cmd->request_buffer = (char *) sgpnt;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        else
@@ -1529,8 +1643,25 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
        if (!q)
                return NULL;
 
+       /*
+        * this limit is imposed by the host adapter hardware
+        */
        blk_queue_max_hw_segments(q, shost->sg_tablesize);
-       blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
+
+       /*
+        * In the future, sg chaining support will be mandatory and this
+        * ifdef can then go away. Right now not all archs are converted,
+        * so play it safe.
+        */
+#ifdef ARCH_HAS_SG_CHAIN
+       if (shost->use_sg_chaining)
+               blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+       else
+               blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+#else
+       blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+#endif
+
        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
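
On the driver side, opting in is a single host template field; the long tail
of this series (stex, sym53c8xx, u14-34f and the others below) is exactly
that. Illustratively, with a placeholder template name:

static struct scsi_host_template example_template = {
	/* ... */
	.use_clustering		= ENABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,	/* opt in to chained sg */
};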
@@ -2193,18 +2324,19 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
  *
  * Returns virtual address of the start of the mapped page
  */
-void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
+void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
                          size_t *offset, size_t *len)
 {
        int i;
        size_t sg_len = 0, len_complete = 0;
+       struct scatterlist *sg;
        struct page *page;
 
        WARN_ON(!irqs_disabled());
 
-       for (i = 0; i < sg_count; i++) {
+       for_each_sg(sgl, sg, sg_count, i) {
                len_complete = sg_len; /* Complete sg-entries */
-               sg_len += sg[i].length;
+               sg_len += sg->length;
                if (sg_len > *offset)
                        break;
        }
@@ -2218,10 +2350,10 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
        }
 
        /* Offset starting from the beginning of first page in this sg-entry */
-       *offset = *offset - len_complete + sg[i].offset;
+       *offset = *offset - len_complete + sg->offset;
 
        /* Assumption: contiguous pages can be accessed as "page + i" */
-       page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT));
+       page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
        *offset &= ~PAGE_MASK;
 
        /* Bytes in this sg-entry from *offset to the end of the page */
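
This conversion is the recurring pattern of the series: once a table may be
chained, sg[i] array indexing is only valid within a single chunk, so walkers
must advance with sg_next(), which follows chain links, typically through the
for_each_sg() helper. Schematically, with sgl and sg_count assumed in scope
and total as an illustrative use:

	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	/* old: array arithmetic, breaks across a chain boundary */
	for (i = 0; i < sg_count; i++)
		total += sgl[i].length;

	/* new: sg_next() hops from chunk to chunk transparently */
	for_each_sg(sgl, sg, sg_count, i)
		total += sg->length;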
index 66c692ffa3054a2ddafa8c8435bbe642e3737e5c..a91761c3645f4d06b17237aa26deb5fb086a9e39 100644
@@ -332,7 +332,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
        scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
 
        if (cmd->request_buffer)
-               scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+               scsi_free_sgtable(cmd);
 
        queue_work(scsi_tgtd, &tcmd->work);
 }
@@ -373,7 +373,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
        }
 
        eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
-       scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+       scsi_free_sgtable(cmd);
        return -EINVAL;
 }
 
index 0a3a528212c2f3fca24f39a9951caba2f2d4e2d0..69f542c4923ce5be9c9575cc3ec30f0c82a6b92f 100644
@@ -826,27 +826,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
        return 0;
 }
 
-static int sd_issue_flush(struct request_queue *q, struct gendisk *disk,
-                         sector_t *error_sector)
-{
-       int ret = 0;
-       struct scsi_device *sdp = q->queuedata;
-       struct scsi_disk *sdkp;
-
-       if (sdp->sdev_state != SDEV_RUNNING)
-               return -ENXIO;
-
-       sdkp = scsi_disk_get_from_dev(&sdp->sdev_gendev);
-
-       if (!sdkp)
-               return -ENODEV;
-
-       if (sdkp->WCE)
-               ret = sd_sync_cache(sdkp);
-       scsi_disk_put(sdkp);
-       return ret;
-}
-
 static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
        memset(rq->cmd, 0, sizeof(rq->cmd));
@@ -1697,7 +1676,6 @@ static int sd_probe(struct device *dev)
        sd_revalidate_disk(gd);
 
        blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
-       blk_queue_issue_flush_fn(sdp->request_queue, sd_issue_flush);
 
        gd->driverfs_dev = &sdp->sdev_gendev;
        gd->flags = GENHD_FL_DRIVERFS;
index f6f5fc7d0ceeed80a55e7c1a53d95928c767efcf..7238b2dfc4975d9a53a82f07dfd55ff427b27e19 100644
@@ -1165,7 +1165,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
        sg = rsv_schp->buffer;
        sa = vma->vm_start;
        for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
-            ++k, ++sg) {
+            ++k, sg = sg_next(sg)) {
                len = vma->vm_end - sa;
                len = (len < sg->length) ? len : sg->length;
                if (offset < len) {
@@ -1209,7 +1209,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
        sa = vma->vm_start;
        sg = rsv_schp->buffer;
        for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
-            ++k, ++sg) {
+            ++k, sg = sg_next(sg)) {
                len = vma->vm_end - sa;
                len = (len < sg->length) ? len : sg->length;
                sa += len;
@@ -1840,7 +1840,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
        }
        for (k = 0, sg = schp->buffer, rem_sz = blk_size;
             (rem_sz > 0) && (k < mx_sc_elems);
-            ++k, rem_sz -= ret_sz, ++sg) {
+            ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
                
                num = (rem_sz > scatter_elem_sz_prev) ?
                      scatter_elem_sz_prev : rem_sz;
@@ -1913,7 +1913,7 @@ sg_write_xfer(Sg_request * srp)
                if (res)
                        return res;
 
-               for (; p; ++sg, ksglen = sg->length,
+               for (; p; sg = sg_next(sg), ksglen = sg->length,
                     p = page_address(sg->page)) {
                        if (usglen <= 0)
                                break;
@@ -1992,7 +1992,7 @@ sg_remove_scat(Sg_scatter_hold * schp)
                        int k;
 
                        for (k = 0; (k < schp->k_use_sg) && sg->page;
-                            ++k, ++sg) {
+                            ++k, sg = sg_next(sg)) {
                                SCSI_LOG_TIMEOUT(5, printk(
                                    "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
                                    k, sg->page, sg->length));
@@ -2045,7 +2045,7 @@ sg_read_xfer(Sg_request * srp)
                if (res)
                        return res;
 
-               for (; p; ++sg, ksglen = sg->length,
+               for (; p; sg = sg_next(sg), ksglen = sg->length,
                     p = page_address(sg->page)) {
                        if (usglen <= 0)
                                break;
@@ -2092,7 +2092,7 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
        if ((!outp) || (num_read_xfer <= 0))
                return 0;
 
-       for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) {
+       for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) {
                num = sg->length;
                if (num > num_read_xfer) {
                        if (__copy_to_user(outp, page_address(sg->page),
@@ -2142,7 +2142,7 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
        SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
        rem = size;
 
-       for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
+       for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
                num = sg->length;
                if (rem <= num) {
                        sfp->save_scat_len = num;
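
sg's open-coded loops keep their multi-variable update clauses and simply
swap ++sg for sg = sg_next(sg). Roughly (a sketch of the idea, not the
verbatim kernel helper), sg_next() detects a chain entry by the low bit of
its page pointer, which sg_chain_ptr() masks off to recover the address of
the next chunk:

static struct scatterlist *sg_next_sketch(struct scatterlist *sg)
{
	sg++;					/* next slot in this chunk */
	if ((unsigned long) sg->page & 0x01)	/* chain marker? */
		sg = sg_chain_ptr(sg);		/* jump to the next chunk */
	return sg;
}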
index 72f6d8015358d6f52c967f51876d53cac2597c29..e3fab3a6aed79f843e44de0720ff3dea485b18b2 100644
@@ -1123,6 +1123,7 @@ static struct scsi_host_template driver_template = {
        .this_id                        = -1,
        .sg_tablesize                   = ST_MAX_SG,
        .cmd_per_lun                    = ST_CMD_PER_LUN,
+       .use_sg_chaining                = ENABLE_SG_CHAINING,
 };
 
 static int stex_set_dma_mask(struct pci_dev * pdev)
index 92bfaeafe30da0fece863c0b39e7fb60568d7bfc..8befab7e98397b759c9c3a80989b5f4edb5f42ee 100644
@@ -854,5 +854,6 @@ static struct scsi_host_template driver_template = {
        .cmd_per_lun =          1,
        .unchecked_isa_dma =    1,
        .use_clustering =       ENABLE_CLUSTERING,
+       .use_sg_chaining =      ENABLE_SG_CHAINING,
 };
 #include "scsi_module.c"
index 3db22325ea2cdf55a76b0dca34e59a2f579495e6..db03c4c8ec1e55fe21fb9dc31db910ea69d80767 100644
@@ -1808,6 +1808,7 @@ static struct scsi_host_template sym2_template = {
        .eh_host_reset_handler  = sym53c8xx_eh_host_reset_handler,
        .this_id                = 7,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
        .max_sectors            = 0xFFFF,
 #ifdef SYM_LINUX_PROC_INFO_SUPPORT
        .proc_info              = sym53c8xx_proc_info,
index fc9f51818e8f3fb2b6e4a0d48027250d67a4eeaa..7edd6ceb13b26ba8447f8d4b9096fdfde94ee07a 100644
@@ -450,7 +450,8 @@ static struct scsi_host_template driver_template = {
                 .slave_configure         = u14_34f_slave_configure,
                 .this_id                 = 7,
                 .unchecked_isa_dma       = 1,
-                .use_clustering          = ENABLE_CLUSTERING
+                .use_clustering          = ENABLE_CLUSTERING,
+                .use_sg_chaining         = ENABLE_SG_CHAINING,
                 };
 
 #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
index c08235d5afc9c648fc2a29c0aad22515149da419..ea72bbeb8f9d54ae4a02cf505480e4bca463b29c 100644
@@ -1197,5 +1197,6 @@ static struct scsi_host_template driver_template = {
        .cmd_per_lun       = ULTRASTOR_MAX_CMDS_PER_LUN,
        .unchecked_isa_dma = 1,
        .use_clustering    = ENABLE_CLUSTERING,
+       .use_sg_chaining   = ENABLE_SG_CHAINING,
 };
 #include "scsi_module.c"
index d6fd4259c56bde5614d22fd8ffb532e2fdb18a15..255c611e78b8469a8ee2b2951c18f3b357d123c5 100644
@@ -1671,6 +1671,7 @@ static struct scsi_host_template driver_template = {
        .cmd_per_lun            = 1,
        .unchecked_isa_dma      = 1,
        .use_clustering         = ENABLE_CLUSTERING,
+       .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"
index 4d3cbb12b7131ff2bbfa153fa81db2452991b976..8d3711a7ff0655aa519811a295b6b57c2b8b0253 100644
@@ -798,12 +798,13 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
 {
        unsigned char *buffer;
        u16 lba, max_lba;
-       unsigned int page, len, index, offset;
+       unsigned int page, len, offset;
        unsigned int blockshift = MEDIA_INFO(us).blockshift;
        unsigned int pageshift = MEDIA_INFO(us).pageshift;
        unsigned int blocksize = MEDIA_INFO(us).blocksize;
        unsigned int pagesize = MEDIA_INFO(us).pagesize;
        unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
+       struct scatterlist *sg;
        int result;
 
        /*
@@ -827,7 +828,8 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
        max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift);
 
        result = USB_STOR_TRANSPORT_GOOD;
-       index = offset = 0;
+       offset = 0;
+       sg = NULL;
 
        while (sectors > 0) {
                unsigned int zone = lba / uzonesize; /* integer division */
@@ -873,7 +875,7 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
 
                /* Store the data in the transfer buffer */
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &index, &offset, TO_XFER_BUF);
+                               &sg, &offset, TO_XFER_BUF);
 
                page = 0;
                lba++;
@@ -891,11 +893,12 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
                unsigned int sectors)
 {
        unsigned char *buffer, *blockbuffer;
-       unsigned int page, len, index, offset;
+       unsigned int page, len, offset;
        unsigned int blockshift = MEDIA_INFO(us).blockshift;
        unsigned int pageshift = MEDIA_INFO(us).pageshift;
        unsigned int blocksize = MEDIA_INFO(us).blocksize;
        unsigned int pagesize = MEDIA_INFO(us).pagesize;
+       struct scatterlist *sg;
        u16 lba, max_lba;
        int result;
 
@@ -929,7 +932,8 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
        max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift);
 
        result = USB_STOR_TRANSPORT_GOOD;
-       index = offset = 0;
+       offset = 0;
+       sg = NULL;
 
        while (sectors > 0) {
                /* Write as many sectors as possible in this block */
@@ -946,7 +950,7 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
 
                /* Get the data from the transfer buffer */
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &index, &offset, FROM_XFER_BUF);
+                               &sg, &offset, FROM_XFER_BUF);
 
                result = alauda_write_lba(us, lba, page, pages, buffer,
                        blockbuffer);
index c87ad1bae1d6327b3f131af80cf04033ebbf78cf..579e9f52053adac18b5720271b4e443cc3e18821 100644
@@ -98,7 +98,8 @@ static int datafab_read_data(struct us_data *us,
        unsigned char  thistime;
        unsigned int totallen, alloclen;
        int len, result;
-       unsigned int sg_idx = 0, sg_offset = 0;
+       unsigned int sg_offset = 0;
+       struct scatterlist *sg = NULL;
 
        // we're working in LBA mode.  according to the ATA spec, 
        // we can support up to 28-bit addressing.  I don't know if Datafab
@@ -155,7 +156,7 @@ static int datafab_read_data(struct us_data *us,
 
                // Store the data in the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                &sg_idx, &sg_offset, TO_XFER_BUF);
+                                &sg, &sg_offset, TO_XFER_BUF);
 
                sector += thistime;
                totallen -= len;
@@ -181,7 +182,8 @@ static int datafab_write_data(struct us_data *us,
        unsigned char thistime;
        unsigned int totallen, alloclen;
        int len, result;
-       unsigned int sg_idx = 0, sg_offset = 0;
+       unsigned int sg_offset = 0;
+       struct scatterlist *sg = NULL;
 
        // we're working in LBA mode.  according to the ATA spec, 
        // we can support up to 28-bit addressing.  I don't know if Datafab
@@ -217,7 +219,7 @@ static int datafab_write_data(struct us_data *us,
 
                // Get the data from the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &sg_idx, &sg_offset, FROM_XFER_BUF);
+                               &sg, &sg_offset, FROM_XFER_BUF);
 
                command[0] = 0;
                command[1] = thistime;
index 003fcf5458882d3ec90f07251ea5f7aa2f437d34..61097cbb1585047e6893a8f59aec52de4771afdc 100644
@@ -119,7 +119,8 @@ static int jumpshot_read_data(struct us_data *us,
        unsigned char  thistime;
        unsigned int totallen, alloclen;
        int len, result;
-       unsigned int sg_idx = 0, sg_offset = 0;
+       unsigned int sg_offset = 0;
+       struct scatterlist *sg = NULL;
 
        // we're working in LBA mode.  according to the ATA spec, 
        // we can support up to 28-bit addressing.  I don't know if Jumpshot
@@ -170,7 +171,7 @@ static int jumpshot_read_data(struct us_data *us,
 
                // Store the data in the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                &sg_idx, &sg_offset, TO_XFER_BUF);
+                                &sg, &sg_offset, TO_XFER_BUF);
 
                sector += thistime;
                totallen -= len;
@@ -195,7 +196,8 @@ static int jumpshot_write_data(struct us_data *us,
        unsigned char  thistime;
        unsigned int totallen, alloclen;
        int len, result, waitcount;
-       unsigned int sg_idx = 0, sg_offset = 0;
+       unsigned int sg_offset = 0;
+       struct scatterlist *sg = NULL;
 
        // we're working in LBA mode.  according to the ATA spec, 
        // we can support up to 28-bit addressing.  I don't know if Jumpshot
@@ -225,7 +227,7 @@ static int jumpshot_write_data(struct us_data *us,
 
                // Get the data from the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &sg_idx, &sg_offset, FROM_XFER_BUF);
+                               &sg, &sg_offset, FROM_XFER_BUF);
 
                command[0] = 0;
                command[1] = thistime;
index 9ad30428d2dd5f1fb223fd5ad2a229faf551049b..cc8f7c52c7292400525305c026f2145d536d3ccc 100644
@@ -157,7 +157,7 @@ void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
  * pick up from where this one left off. */
 
 unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
-       unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+       unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr,
        unsigned int *offset, enum xfer_buf_dir dir)
 {
        unsigned int cnt;
@@ -184,16 +184,17 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
         * located in high memory -- then kmap() will map it to a temporary
         * position in the kernel's virtual address space. */
        } else {
-               struct scatterlist *sg =
-                               (struct scatterlist *) srb->request_buffer
-                               + *index;
+               struct scatterlist *sg = *sgptr;
+
+               if (!sg)
+                       sg = (struct scatterlist *) srb->request_buffer;
 
                /* This loop handles a single s-g list entry, which may
                 * include multiple pages.  Find the initial page structure
                 * and the starting offset within the page, and update
-                * the *offset and *index values for the next loop. */
+                * the *offset and *sgptr values for the next loop. */
                cnt = 0;
-               while (cnt < buflen && *index < srb->use_sg) {
+               while (cnt < buflen) {
                        struct page *page = sg->page +
                                        ((sg->offset + *offset) >> PAGE_SHIFT);
                        unsigned int poff =
@@ -209,8 +210,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 
                                /* Transfer continues to next s-g entry */
                                *offset = 0;
-                               ++*index;
-                               ++sg;
+                               sg = sg_next(sg);
                        }
 
                        /* Transfer the data for all the pages in this
@@ -234,6 +234,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
                                sglen -= plen;
                        }
                }
+               *sgptr = sg;
        }
 
        /* Return the amount actually transferred */
@@ -245,9 +246,10 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 void usb_stor_set_xfer_buf(unsigned char *buffer,
        unsigned int buflen, struct scsi_cmnd *srb)
 {
-       unsigned int index = 0, offset = 0;
+       unsigned int offset = 0;
+       struct scatterlist *sg = NULL;
 
-       usb_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
+       usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
                        TO_XFER_BUF);
        if (buflen < srb->request_bufflen)
                srb->resid = srb->request_bufflen - buflen;
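
Callers thus move from an integer index to a scatterlist cursor: a NULL
cursor tells usb_stor_access_xfer_buf() to start at srb->request_buffer, and
the updated position comes back through sgptr for the next call. The
resulting idiom, repeated across the media drivers below (the loop condition
and buffer fill are placeholders):

	struct scatterlist *sg = NULL;	/* NULL: begin at the head of the list */
	unsigned int offset = 0;

	while (sectors > 0) {		/* placeholder condition */
		/* ... read 'len' bytes of media data into 'buffer' ... */
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &offset, TO_XFER_BUF);
		/* sg and offset now point just past the bytes consumed */
	}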
index 845bed4b80317fb336cbe14ca227f8fa41907a17..8737a36891caa6514d6ab8f01f04600774f7d731 100644
@@ -52,7 +52,7 @@ extern void usb_stor_transparent_scsi_command(struct scsi_cmnd*,
 enum xfer_buf_dir      {TO_XFER_BUF, FROM_XFER_BUF};
 
 extern unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
-       unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+       unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **,
        unsigned int *offset, enum xfer_buf_dir dir);
 
 extern void usb_stor_set_xfer_buf(unsigned char *buffer,
index b2ed2a3e6fcae4499bc5d930169bdf16af93ad64..b12202c5da2d058a074864b4e3f00959ea9e0c24 100644
@@ -705,7 +705,8 @@ sddr09_read_data(struct us_data *us,
        unsigned char *buffer;
        unsigned int lba, maxlba, pba;
        unsigned int page, pages;
-       unsigned int len, index, offset;
+       unsigned int len, offset;
+       struct scatterlist *sg;
        int result;
 
        // Figure out the initial LBA and page
@@ -730,7 +731,8 @@ sddr09_read_data(struct us_data *us,
        // contiguous LBA's. Another exercise left to the student.
 
        result = 0;
-       index = offset = 0;
+       offset = 0;
+       sg = NULL;
 
        while (sectors > 0) {
 
@@ -777,7 +779,7 @@ sddr09_read_data(struct us_data *us,
 
                // Store the data in the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &index, &offset, TO_XFER_BUF);
+                               &sg, &offset, TO_XFER_BUF);
 
                page = 0;
                lba++;
@@ -931,7 +933,8 @@ sddr09_write_data(struct us_data *us,
        unsigned int pagelen, blocklen;
        unsigned char *blockbuffer;
        unsigned char *buffer;
-       unsigned int len, index, offset;
+       unsigned int len, offset;
+       struct scatterlist *sg;
        int result;
 
        // Figure out the initial LBA and page
@@ -968,7 +971,8 @@ sddr09_write_data(struct us_data *us,
        }
 
        result = 0;
-       index = offset = 0;
+       offset = 0;
+       sg = NULL;
 
        while (sectors > 0) {
 
@@ -987,7 +991,7 @@ sddr09_write_data(struct us_data *us,
 
                // Get the data from the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &index, &offset, FROM_XFER_BUF);
+                               &sg, &offset, FROM_XFER_BUF);
 
                result = sddr09_write_lba(us, lba, page, pages,
                                buffer, blockbuffer);
index 0b1b5b59ca7b6d2423a002a143ef3cf2f1f207a5..d43a3415e12f1d3aa105cfe0b2ff9cdd70c5c62c 100644
@@ -167,7 +167,8 @@ static int sddr55_read_data(struct us_data *us,
        unsigned long address;
 
        unsigned short pages;
-       unsigned int len, index, offset;
+       unsigned int len, offset;
+       struct scatterlist *sg;
 
        // Since we only read in one block at a time, we have to create
        // a bounce buffer and move the data a piece at a time between the
@@ -178,7 +179,8 @@ static int sddr55_read_data(struct us_data *us,
        buffer = kmalloc(len, GFP_NOIO);
        if (buffer == NULL)
                return USB_STOR_TRANSPORT_ERROR; /* out of memory */
-       index = offset = 0;
+       offset = 0;
+       sg = NULL;
 
        while (sectors>0) {
 
@@ -255,7 +257,7 @@ static int sddr55_read_data(struct us_data *us,
 
                // Store the data in the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &index, &offset, TO_XFER_BUF);
+                               &sg, &offset, TO_XFER_BUF);
 
                page = 0;
                lba++;
@@ -287,7 +289,8 @@ static int sddr55_write_data(struct us_data *us,
 
        unsigned short pages;
        int i;
-       unsigned int len, index, offset;
+       unsigned int len, offset;
+       struct scatterlist *sg;
 
        /* check if we are allowed to write */
        if (info->read_only || info->force_read_only) {
@@ -304,7 +307,8 @@ static int sddr55_write_data(struct us_data *us,
        buffer = kmalloc(len, GFP_NOIO);
        if (buffer == NULL)
                return USB_STOR_TRANSPORT_ERROR;
-       index = offset = 0;
+       offset = 0;
+       sg = NULL;
 
        while (sectors > 0) {
 
@@ -322,7 +326,7 @@ static int sddr55_write_data(struct us_data *us,
 
                // Get the data from the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                               &index, &offset, FROM_XFER_BUF);
+                               &sg, &offset, FROM_XFER_BUF);
 
                US_DEBUGP("Write %02X pages, to PBA %04X"
                        " (LBA %04X) page %02X\n",
index 17ca4d73577b851fdf33eb91dfe2514c97c8ec46..cb22a9ad16943cfbccdef9356651c1fa4f499342 100644
@@ -993,7 +993,8 @@ static int usbat_flash_read_data(struct us_data *us,
        unsigned char  thistime;
        unsigned int totallen, alloclen;
        int len, result;
-       unsigned int sg_idx = 0, sg_offset = 0;
+       unsigned int sg_offset = 0;
+       struct scatterlist *sg = NULL;
 
        result = usbat_flash_check_media(us, info);
        if (result != USB_STOR_TRANSPORT_GOOD)
@@ -1047,7 +1048,7 @@ static int usbat_flash_read_data(struct us_data *us,
        
                /* Store the data in the transfer buffer */
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                        &sg_idx, &sg_offset, TO_XFER_BUF);
+                                        &sg, &sg_offset, TO_XFER_BUF);
 
                sector += thistime;
                totallen -= len;
@@ -1083,7 +1084,8 @@ static int usbat_flash_write_data(struct us_data *us,
        unsigned char  thistime;
        unsigned int totallen, alloclen;
        int len, result;
-       unsigned int sg_idx = 0, sg_offset = 0;
+       unsigned int sg_offset = 0;
+       struct scatterlist *sg = NULL;
 
        result = usbat_flash_check_media(us, info);
        if (result != USB_STOR_TRANSPORT_GOOD)
@@ -1122,7 +1124,7 @@ static int usbat_flash_write_data(struct us_data *us,
 
                /* Get the data from the transfer buffer */
                usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                        &sg_idx, &sg_offset, FROM_XFER_BUF);
+                                        &sg, &sg_offset, FROM_XFER_BUF);
 
                /* ATA command 0x30 (WRITE SECTORS) */
                usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30);
@@ -1162,8 +1164,8 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
        unsigned char *buffer;
        unsigned int len;
        unsigned int sector;
-       unsigned int sg_segment = 0;
        unsigned int sg_offset = 0;
+       struct scatterlist *sg = NULL;
 
        US_DEBUGP("handle_read10: transfersize %d\n",
                srb->transfersize);
@@ -1220,9 +1222,6 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
        sector |= short_pack(data[7+5], data[7+4]);
        transferred = 0;
 
-       sg_segment = 0; /* for keeping track of where we are in */
-       sg_offset = 0;  /* the scatter/gather list */
-
        while (transferred != srb->request_bufflen) {
 
                if (len > srb->request_bufflen - transferred)
@@ -1255,7 +1254,7 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
 
                /* Store the data in the transfer buffer */
                usb_stor_access_xfer_buf(buffer, len, srb,
-                                &sg_segment, &sg_offset, TO_XFER_BUF);
+                                &sg, &sg_offset, TO_XFER_BUF);
 
                /* Update the amount transferred and the sector number */
 
index 5f604f269dfa87e26d24b97dbb945b333697f675..d59ddbf79626cd0ca546a15bac0196544445fafd 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -109,11 +109,14 @@ static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned lon
 
 void bio_free(struct bio *bio, struct bio_set *bio_set)
 {
-       const int pool_idx = BIO_POOL_IDX(bio);
+       if (bio->bi_io_vec) {
+               const int pool_idx = BIO_POOL_IDX(bio);
 
-       BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+               BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+
+               mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+       }
 
-       mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
        mempool_free(bio, bio_set->bio_pool);
 }
 
@@ -127,21 +130,9 @@ static void bio_fs_destructor(struct bio *bio)
 
 void bio_init(struct bio *bio)
 {
-       bio->bi_next = NULL;
-       bio->bi_bdev = NULL;
+       memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
-       bio->bi_rw = 0;
-       bio->bi_vcnt = 0;
-       bio->bi_idx = 0;
-       bio->bi_phys_segments = 0;
-       bio->bi_hw_segments = 0;
-       bio->bi_hw_front_size = 0;
-       bio->bi_hw_back_size = 0;
-       bio->bi_size = 0;
-       bio->bi_max_vecs = 0;
-       bio->bi_end_io = NULL;
        atomic_set(&bio->bi_cnt, 1);
-       bio->bi_private = NULL;
 }
 
 /**
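
bio_init() now zeroes the whole bio in one memset instead of clearing fields
one by one, so bi_io_vec starts out NULL; the bio_free() hunk above pairs
with that by skipping the biovec pools for bios that never allocated a vec.
A hedged sketch of such a caller (a zero-vec bio, e.g. as an empty barrier
would use):

	struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* zero biovecs */

	if (bio) {
		/* bi_io_vec is NULL here, courtesy of bio_init()'s memset */
		bio_put(bio);	/* ends in bio_free(), which now tolerates it */
	}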
index a7568bcc0f9959b22b559ce02b259e8f843d1a3b..59a941d404d9c60b46a83ced4472ee915a7fe93c 100644
@@ -1335,10 +1335,10 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
        if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
                ret = -EFAULT;
 
+       buf->ops->unmap(pipe, buf, src);
 out:
        if (ret > 0)
                sd->u.userptr += ret;
-       buf->ops->unmap(pipe, buf, src);
        return ret;
 }
 
index 3ca6d5c14b2e729ca772db529e70c7e479449ab2..f1735a22d0ea28cd08ce5be1413c78cd1e397061 100644
@@ -6,7 +6,7 @@
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #define dma_alloc_coherent     platform_dma_alloc_coherent
 /* coherent mem. is cheap */
index a452ea24205a70b1fd271d47158706f7809d6957..7d5234d50312d4cbf784c6d3b51a6eda93923194 100644
@@