Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
author	Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 16 Oct 2007 17:09:16 +0000 (10:09 -0700)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 16 Oct 2007 17:09:16 +0000 (10:09 -0700)
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
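
Most of the commits above convert architectures and drivers from indexing scatterlists as flat arrays to the sg chaining helpers. A minimal, illustrative sketch of the new iteration pattern, assuming the sg_next()/for_each_sg() API this series introduces (the helper name below is made up and not part of this merge):

    #include <linux/scatterlist.h>

    /*
     * Hypothetical helper, for illustration only: sum the byte length of a
     * possibly chained scatterlist.  Plain sg++ would walk off the end at a
     * chain entry; for_each_sg() follows the chain via sg_next() instead.
     */
    static unsigned int total_sg_length(struct scatterlist *sglist, int nents)
    {
            struct scatterlist *sg;
            unsigned int total = 0;
            int i;

            for_each_sg(sglist, sg, nents, i)
                    total += sg->length;

            return total;
    }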

block/ll_rw_blk.c
drivers/ata/libata-scsi.c
drivers/message/fusion/mptscsih.c
fs/splice.c
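
The block/ll_rw_blk.c diff below removes ->issue_flush_fn and reimplements blkdev_issue_flush() on top of an empty barrier bio. A caller-side sketch, assuming only the exported signature shown in the hunk (the wrapper name is hypothetical):

    #include <linux/blkdev.h>

    /* Hypothetical wrapper, for illustration only. */
    static int flush_backing_device(struct block_device *bdev)
    {
            /*
             * Passing NULL for error_sector is fine when the caller does
             * not care where a failed flush was detected.
             */
            return blkdev_issue_flush(bdev, NULL);
    }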

diff --combined block/ll_rw_blk.c
index a83823fcd74f3f90a42d8a470cad9947c5cdd56f,527bd8d4db501d79b75aad799f309686c6ab8490..9eabac95fbe053917cb25f7ba1e9fb208b8e94d1
@@@ -30,6 -30,7 +30,7 @@@
  #include <linux/cpu.h>
  #include <linux/blktrace_api.h>
  #include <linux/fault-inject.h>
+ #include <linux/scatterlist.h>
  
  /*
   * for max sense size
@@@ -304,23 -305,6 +305,6 @@@ int blk_queue_ordered(struct request_qu
  
  EXPORT_SYMBOL(blk_queue_ordered);
  
- /**
-  * blk_queue_issue_flush_fn - set function for issuing a flush
-  * @q:     the request queue
-  * @iff:   the function to be called issuing the flush
-  *
-  * Description:
-  *   If a driver supports issuing a flush command, the support is notified
-  *   to the block layer by defining it through this call.
-  *
-  **/
- void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
- {
-       q->issue_flush_fn = iff;
- }
- EXPORT_SYMBOL(blk_queue_issue_flush_fn);
  /*
   * Cache flushing for ordered writes handling
   */
@@@ -377,10 -361,12 +361,12 @@@ void blk_ordered_complete_seq(struct re
        /*
         * Okay, sequence complete.
         */
-       rq = q->orig_bar_rq;
-       uptodate = q->orderr ? q->orderr : 1;
+       uptodate = 1;
+       if (q->orderr)
+               uptodate = q->orderr;
  
        q->ordseq = 0;
+       rq = q->orig_bar_rq;
  
        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
        end_that_request_last(rq, uptodate);
@@@ -445,7 -431,8 +431,8 @@@ static inline struct request *start_ord
        rq_init(q, rq);
        if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                rq->cmd_flags |= REQ_RW;
-       rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+       if (q->ordered & QUEUE_ORDERED_FUA)
+               rq->cmd_flags |= REQ_FUA;
        rq->elevator_private = NULL;
        rq->elevator_private2 = NULL;
        init_request_from_bio(rq, q->orig_bar_rq->bio);
         * Queue ordered sequence.  As we stack them at the head, we
         * need to queue in reverse order.  Note that we rely on that
         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-        * request gets inbetween ordered sequence.
+        * request gets inbetween ordered sequence. If this request is
+        * an empty barrier, we don't need to do a postflush ever since
+        * there will be no data written between the pre and post flush.
+        * Hence a single flush will suffice.
         */
-       if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+       if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
                queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
  int blk_do_ordered(struct request_queue *q, struct request **rqp)
  {
        struct request *rq = *rqp;
-       int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+       const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
  
        if (!q->ordseq) {
                if (!is_barrier)
@@@ -1329,9 -1319,10 +1319,10 @@@ static int blk_hw_contig_segment(struc
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-                 struct scatterlist *sg)
+                 struct scatterlist *sglist)
  {
        struct bio_vec *bvec, *bvprv;
+       struct scatterlist *next_sg, *sg;
        struct req_iterator iter;
        int nsegs, cluster;
  
         * for each bio in rq
         */
        bvprv = NULL;
+       sg = next_sg = &sglist[0];
        rq_for_each_segment(bvec, rq, iter) {
                int nbytes = bvec->bv_len;
  
                if (bvprv && cluster) {
-                       if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+                       if (sg->length + nbytes > q->max_segment_size)
                                goto new_segment;
  
                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                                goto new_segment;
  
-                       sg[nsegs - 1].length += nbytes;
+                       sg->length += nbytes;
                } else {
  new_segment:
-                       memset(&sg[nsegs],0,sizeof(struct scatterlist));
-                       sg[nsegs].page = bvec->bv_page;
-                       sg[nsegs].length = nbytes;
-                       sg[nsegs].offset = bvec->bv_offset;
+                       sg = next_sg;
+                       next_sg = sg_next(sg);
  
+                       sg->page = bvec->bv_page;
+                       sg->length = nbytes;
+                       sg->offset = bvec->bv_offset;
                        nsegs++;
                }
                bvprv = bvec;
@@@ -2660,6 -2653,14 +2653,14 @@@ int blk_execute_rq(struct request_queu
  
  EXPORT_SYMBOL(blk_execute_rq);
  
+ static void bio_end_empty_barrier(struct bio *bio, int err)
+ {
+       if (err)
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+       complete(bio->bi_private);
+ }
  /**
   * blkdev_issue_flush - queue a flush
   * @bdev:     blockdev to issue flush for
   */
  int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
  {
+       DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
+       struct bio *bio;
+       int ret;
  
        if (bdev->bd_disk == NULL)
                return -ENXIO;
        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;
-       if (!q->issue_flush_fn)
-               return -EOPNOTSUPP;
  
-       return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+       bio = bio_alloc(GFP_KERNEL, 0);
+       if (!bio)
+               return -ENOMEM;
+       bio->bi_end_io = bio_end_empty_barrier;
+       bio->bi_private = &wait;
+       bio->bi_bdev = bdev;
+       submit_bio(1 << BIO_RW_BARRIER, bio);
+       wait_for_completion(&wait);
+       /*
+        * The driver must store the error location in ->bi_sector, if
+        * it supports it. For non-stacked drivers, this should be copied
+        * from rq->sector.
+        */
+       if (error_sector)
+               *error_sector = bio->bi_sector;
+       ret = 0;
+       if (!bio_flagged(bio, BIO_UPTODATE))
+               ret = -EIO;
+       bio_put(bio);
+       return ret;
  }
  
  EXPORT_SYMBOL(blkdev_issue_flush);
@@@ -3051,7 -3077,7 +3077,7 @@@ static inline void blk_partition_remap(
  {
        struct block_device *bdev = bio->bi_bdev;
  
-       if (bdev != bdev->bd_contains) {
+       if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
                const int rw = bio_data_dir(bio);
  
@@@ -3117,6 -3143,35 +3143,35 @@@ static inline int should_fail_request(s
  
  #endif /* CONFIG_FAIL_MAKE_REQUEST */
  
+ /*
+  * Check whether this bio extends beyond the end of the device.
+  */
+ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+ {
+       sector_t maxsector;
+       if (!nr_sectors)
+               return 0;
+       /* Test device or partition size, when known. */
+       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+       if (maxsector) {
+               sector_t sector = bio->bi_sector;
+               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+                       /*
+                        * This may well happen - the kernel calls bread()
+                        * without checking the size of the device, e.g., when
+                        * mounting a device.
+                        */
+                       handle_bad_sector(bio);
+                       return 1;
+               }
+       }
+       return 0;
+ }
  /**
   * generic_make_request: hand a buffer to its device driver for I/O
   * @bio:  The bio describing the location in memory and on the device.
  static inline void __generic_make_request(struct bio *bio)
  {
        struct request_queue *q;
-       sector_t maxsector;
        sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
  
        might_sleep();
-       /* Test device or partition size, when known. */
-       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-       if (maxsector) {
-               sector_t sector = bio->bi_sector;
  
-               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-                       /*
-                        * This may well happen - the kernel calls bread()
-                        * without checking the size of the device, e.g., when
-                        * mounting a device.
-                        */
-                       handle_bad_sector(bio);
-                       goto end_io;
-               }
-       }
+       if (bio_check_eod(bio, nr_sectors))
+               goto end_io;
  
        /*
         * Resolve the mapping until finished. (drivers are
@@@ -3191,7 -3233,7 +3233,7 @@@ end_io
                        break;
                }
  
-               if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+               if (unlikely(nr_sectors > q->max_hw_sectors)) {
                        printk("bio too big device %s (%u > %u)\n", 
                                bdevname(bio->bi_bdev, b),
                                bio_sectors(bio),
                blk_partition_remap(bio);
  
                if (old_sector != -1)
-                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
+                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
  
                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
  
-               maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-               if (maxsector) {
-                       sector_t sector = bio->bi_sector;
-                       if (maxsector < nr_sectors ||
-                                       maxsector - nr_sectors < sector) {
-                               /*
-                                * This may well happen - partitions are not
-                                * checked to make sure they are within the size
-                                * of the whole device.
-                                */
-                               handle_bad_sector(bio);
-                               goto end_io;
-                       }
-               }
+               if (bio_check_eod(bio, nr_sectors))
+                       goto end_io;
  
                ret = q->make_request_fn(q, bio);
        } while (ret);
@@@ -3307,23 -3336,32 +3336,32 @@@ void submit_bio(int rw, struct bio *bio
  {
        int count = bio_sectors(bio);
  
-       BIO_BUG_ON(!bio->bi_size);
-       BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
-       if (rw & WRITE) {
-               count_vm_events(PGPGOUT, count);
-       } else {
-               task_io_account_read(bio->bi_size);
-               count_vm_events(PGPGIN, count);
-       }
  
-       if (unlikely(block_dump)) {
-               char b[BDEVNAME_SIZE];
-               printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-                       current->comm, current->pid,
-                       (rw & WRITE) ? "WRITE" : "READ",
-                       (unsigned long long)bio->bi_sector,
-                       bdevname(bio->bi_bdev,b));
+       /*
+        * If it's a regular read/write or a barrier with data attached,
+        * go through the normal accounting stuff before submission.
+        */
+       if (!bio_empty_barrier(bio)) {
+               BIO_BUG_ON(!bio->bi_size);
+               BIO_BUG_ON(!bio->bi_io_vec);
+               if (rw & WRITE) {
+                       count_vm_events(PGPGOUT, count);
+               } else {
+                       task_io_account_read(bio->bi_size);
+                       count_vm_events(PGPGIN, count);
+               }
+               if (unlikely(block_dump)) {
+                       char b[BDEVNAME_SIZE];
+                       printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+                               current->comm, current->pid,
+                               (rw & WRITE) ? "WRITE" : "READ",
+                               (unsigned long long)bio->bi_sector,
+                               bdevname(bio->bi_bdev,b));
+               }
        }
  
        generic_make_request(bio);
@@@ -3399,6 -3437,14 +3437,14 @@@ static int __end_that_request_first(str
        while ((bio = req->bio) != NULL) {
                int nbytes;
  
+               /*
+                * For an empty barrier request, the low level driver must
+                * store a potential error location in ->sector. We pass
+                * that back up in ->bi_sector.
+                */
+               if (blk_empty_barrier(req))
+                       bio->bi_sector = req->sector;
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
@@@ -3564,7 -3610,7 +3610,7 @@@ static struct notifier_block blk_cpu_no
   * Description:
   *     Ends all I/O on a request. It does not handle partial completions,
   *     unless the driver actually implements this in its completion callback
-  *     through requeueing. Theh actual completion happens out-of-order,
+  *     through requeueing. The actual completion happens out-of-order,
   *     through a softirq handler. The user must have registered a completion
   *     callback through blk_queue_softirq_done().
   **/
@@@ -3627,15 -3673,83 +3673,83 @@@ void end_that_request_last(struct reque
  
  EXPORT_SYMBOL(end_that_request_last);
  
- void end_request(struct request *req, int uptodate)
+ static inline void __end_request(struct request *rq, int uptodate,
+                                unsigned int nr_bytes, int dequeue)
  {
-       if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-               add_disk_randomness(req->rq_disk);
-               blkdev_dequeue_request(req);
-               end_that_request_last(req, uptodate);
+       if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+               if (dequeue)
+                       blkdev_dequeue_request(rq);
+               add_disk_randomness(rq->rq_disk);
+               end_that_request_last(rq, uptodate);
        }
  }
  
+ static unsigned int rq_byte_size(struct request *rq)
+ {
+       if (blk_fs_request(rq))
+               return rq->hard_nr_sectors << 9;
+       return rq->data_len;
+ }
+ /**
+  * end_queued_request - end all I/O on a queued request
+  * @rq:               the request being processed
+  * @uptodate: error value or 0/1 uptodate flag
+  *
+  * Description:
+  *     Ends all I/O on a request, and removes it from the block layer queues.
+  *     Not suitable for normal IO completion, unless the driver still has
+  *     the request attached to the block layer.
+  *
+  **/
+ void end_queued_request(struct request *rq, int uptodate)
+ {
+       __end_request(rq, uptodate, rq_byte_size(rq), 1);
+ }
+ EXPORT_SYMBOL(end_queued_request);
+ /**
+  * end_dequeued_request - end all I/O on a dequeued request
+  * @rq:               the request being processed
+  * @uptodate: error value or 0/1 uptodate flag
+  *
+  * Description:
+  *     Ends all I/O on a request. The request must already have been
+  *     dequeued using blkdev_dequeue_request(), as is normally the case
+  *     for most drivers.
+  *
+  **/
+ void end_dequeued_request(struct request *rq, int uptodate)
+ {
+       __end_request(rq, uptodate, rq_byte_size(rq), 0);
+ }
+ EXPORT_SYMBOL(end_dequeued_request);
+ /**
+  * end_request - end I/O on the current segment of the request
+  * @rq:               the request being processed
+  * @uptodate: error value or 0/1 uptodate flag
+  *
+  * Description:
+  *     Ends I/O on the current segment of a request. If that is the only
+  *     remaining segment, the request is also completed and freed.
+  *
+  *     This is a remnant of how older block drivers handled IO completions.
+  *     Modern drivers typically end IO on the full request in one go, unless
+  *     they have a residual value to account for. For that case this function
+  *     isn't really useful, unless the residual just happens to be the
+  *     full current segment. In other words, don't use this function in new
+  *     code. Either use end_request_completely(), or the
+  *     end_that_request_chunk() (along with end_that_request_last()) for
+  *     partial completions.
+  *
+  **/
+ void end_request(struct request *req, int uptodate)
+ {
+       __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+ }
  EXPORT_SYMBOL(end_request);
  
  static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
@@@ -3928,6 -4042,7 +4042,6 @@@ queue_max_sectors_store(struct request_
                        max_hw_sectors_kb = q->max_hw_sectors >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 -      int ra_kb;
  
        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
         * values synchronously:
         */
        spin_lock_irq(q->queue_lock);
 -      /*
 -       * Trim readahead window as well, if necessary:
 -       */
 -      ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
 -      if (ra_kb > max_sectors_kb)
 -              q->backing_dev_info.ra_pages =
 -                              max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
 -
        q->max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);
  
@@@ -3949,7 -4072,23 +4063,23 @@@ static ssize_t queue_max_hw_sectors_sho
        return queue_var_show(max_hw_sectors_kb, (page));
  }
  
+ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+ {
+       return queue_var_show(q->max_phys_segments, page);
+ }
+ static ssize_t queue_max_segments_store(struct request_queue *q,
+                                       const char *page, size_t count)
+ {
+       unsigned long segments;
+       ssize_t ret = queue_var_store(&segments, page, count);
+       spin_lock_irq(q->queue_lock);
+       q->max_phys_segments = segments;
+       spin_unlock_irq(q->queue_lock);
  
+       return ret;
+ }
  static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@@ -3973,6 -4112,12 +4103,12 @@@ static struct queue_sysfs_entry queue_m
        .show = queue_max_hw_sectors_show,
  };
  
+ static struct queue_sysfs_entry queue_max_segments_entry = {
+       .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_max_segments_show,
+       .store = queue_max_segments_store,
+ };
  static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@@ -3984,6 -4129,7 +4120,7 @@@ static struct attribute *default_attrs[
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
+       &queue_max_segments_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
  };
index 5237a491622ba021b0b3f073bc22cee09e196673,ba62d534f32b91c401f99939a7193a7a2c975bdf..9fbb39cd0f5892414a6cb72d4378de8c99da960f
@@@ -801,8 -801,6 +801,6 @@@ int ata_scsi_slave_config(struct scsi_d
  
        ata_scsi_sdev_config(sdev);
  
-       blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
        sdev->manage_start_stop = 1;
  
        if (dev)
@@@ -3240,7 -3238,7 +3238,7 @@@ static void ata_scsi_handle_link_detach
  
  /**
   *    ata_scsi_media_change_notify - send media change event
 - *    @atadev: Pointer to the disk device with media change event
 + *    @dev: Pointer to the disk device with media change event
   *
   *    Tell the block layer to send a media change notification
   *    event.
index 822a3aa4fae5a8defd6176ef133ae7dbdb33427d,3918ebf01e8d14942bd0f3639d691e5947908e7c..626bb3c9af2b0d6614d2313753b4c9796e64bb5e
@@@ -293,7 -293,7 +293,7 @@@ nextSGEset
        for (ii=0; ii < (numSgeThisFrame-1); ii++) {
                thisxfer = sg_dma_len(sg);
                if (thisxfer == 0) {
-                       sg ++; /* Get next SG element from the OS */
+                       sg = sg_next(sg); /* Get next SG element from the OS */
                        sg_done++;
                        continue;
                }
                v2 = sg_dma_address(sg);
                mptscsih_add_sge(psge, sgflags | thisxfer, v2);
  
-               sg++;           /* Get next SG element from the OS */
+               sg = sg_next(sg);       /* Get next SG element from the OS */
                psge += (sizeof(u32) + sizeof(dma_addr_t));
                sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
                sg_done++;
                v2 = sg_dma_address(sg);
                mptscsih_add_sge(psge, sgflags | thisxfer, v2);
                /*
-               sg++;
+               sg = sg_next(sg);
                psge += (sizeof(u32) + sizeof(dma_addr_t));
                */
                sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
@@@ -2605,10 -2605,14 +2605,10 @@@ mptscsih_set_scsi_lookup(MPT_ADAPTER *i
  }
  
  /**
 - * SCPNT_TO_LOOKUP_IDX
 - *
 - * search's for a given scmd in the ScsiLookup[] array list
 - *
 + * SCPNT_TO_LOOKUP_IDX - searches for a given scmd in the ScsiLookup[] array list
   * @ioc: Pointer to MPT_ADAPTER structure
 - * @scmd: scsi_cmnd pointer
 - *
 - **/
 + * @sc: scsi_cmnd pointer
 + */
  static int
  SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *sc)
  {
diff --combined fs/splice.c
index a7568bcc0f9959b22b559ce02b259e8f843d1a3b,02c39ae719b69a365ba4392f2ebfed4ae6e59651..59a941d404d9c60b46a83ced4472ee915a7fe93c
@@@ -447,7 -447,7 +447,7 @@@ fill_it
         */
        while (page_nr < nr_pages)
                page_cache_release(pages[page_nr++]);
 -      in->f_ra.prev_index = index;
 +      in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
  
        if (spd.nr_pages)
                return splice_to_pipe(pipe, &spd);
@@@ -563,7 -563,7 +563,7 @@@ static int pipe_to_file(struct pipe_ino
        struct address_space *mapping = file->f_mapping;
        unsigned int offset, this_len;
        struct page *page;
 -      pgoff_t index;
 +      void *fsdata;
        int ret;
  
        /*
        if (unlikely(ret))
                return ret;
  
 -      index = sd->pos >> PAGE_CACHE_SHIFT;
        offset = sd->pos & ~PAGE_CACHE_MASK;
  
        this_len = sd->len;
        if (this_len + offset > PAGE_CACHE_SIZE)
                this_len = PAGE_CACHE_SIZE - offset;
  
 -find_page:
 -      page = find_lock_page(mapping, index);
 -      if (!page) {
 -              ret = -ENOMEM;
 -              page = page_cache_alloc_cold(mapping);
 -              if (unlikely(!page))
 -                      goto out_ret;
 -
 -              /*
 -               * This will also lock the page
 -               */
 -              ret = add_to_page_cache_lru(page, mapping, index,
 -                                          GFP_KERNEL);
 -              if (unlikely(ret))
 -                      goto out_release;
 -      }
 -
 -      ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
 -      if (unlikely(ret)) {
 -              loff_t isize = i_size_read(mapping->host);
 -
 -              if (ret != AOP_TRUNCATED_PAGE)
 -                      unlock_page(page);
 -              page_cache_release(page);
 -              if (ret == AOP_TRUNCATED_PAGE)
 -                      goto find_page;
 -
 -              /*
 -               * prepare_write() may have instantiated a few blocks
 -               * outside i_size.  Trim these off again.
 -               */
 -              if (sd->pos + this_len > isize)
 -                      vmtruncate(mapping->host, isize);
 -
 -              goto out_ret;
 -      }
 +      ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
 +                              AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
 +      if (unlikely(ret))
 +              goto out;
  
        if (buf->page != page) {
                /*
                kunmap_atomic(dst, KM_USER1);
                buf->ops->unmap(pipe, buf, src);
        }
 -
 -      ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
 -      if (ret) {
 -              if (ret == AOP_TRUNCATED_PAGE) {
 -                      page_cache_release(page);
 -                      goto find_page;
 -              }
 -              if (ret < 0)
 -                      goto out;
 -              /*
 -               * Partial write has happened, so 'ret' already initialized by
 -               * number of bytes written, Where is nothing we have to do here.
 -               */
 -      } else
 -              ret = this_len;
 -      /*
 -       * Return the number of bytes written and mark page as
 -       * accessed, we are now done!
 -       */
 -      mark_page_accessed(page);
 +      ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
 +                              page, fsdata);
  out:
 -      unlock_page(page);
 -out_release:
 -      page_cache_release(page);
 -out_ret:
        return ret;
  }
  
@@@ -1335,10 -1390,10 +1335,10 @@@ static int pipe_to_user(struct pipe_ino
        if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
                ret = -EFAULT;
  
+       buf->ops->unmap(pipe, buf, src);
  out:
        if (ret > 0)
                sd->u.userptr += ret;
-       buf->ops->unmap(pipe, buf, src);
        return ret;
  }
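
The fs/splice.c hunks above replace the open-coded find_lock_page()/prepare_write()/commit_write() sequence with pagecache_write_begin()/pagecache_write_end(). A stripped-down sketch of that pattern with simplified error handling and a hypothetical helper name (not a drop-in replacement for pipe_to_file(); the caller must keep offset + len within one page):

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/highmem.h>

    static int copy_to_pagecache(struct file *file, loff_t pos,
                                 const char *src, unsigned int len)
    {
            struct address_space *mapping = file->f_mapping;
            unsigned int offset = pos & ~PAGE_CACHE_MASK;
            struct page *page;
            void *fsdata;
            char *dst;
            int ret;

            /* write_begin hands back a locked page ready to be written */
            ret = pagecache_write_begin(file, mapping, pos, len,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
            if (unlikely(ret))
                    return ret;

            dst = kmap_atomic(page, KM_USER0);
            memcpy(dst + offset, src, len);
            kunmap_atomic(dst, KM_USER0);

            /* write_end unlocks and releases the page */
            return pagecache_write_end(file, mapping, pos, len, len,
                                       page, fsdata);
    }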