diff --git a/mm/filemap.c b/mm/filemap.c
index 557fd887254f6cb2b891d0fd11a4c09e79283e3f..89ce6fe5f8be152e71218085af5396dcb72d315c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
 #include <linux/uio.h>
 #include <linux/hash.h>
 #include <linux/writeback.h>
+#include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
-#include "filemap.h"
+#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include "internal.h"
 
 /*
@@ -63,6 +65,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->mapping->tree_lock
+ *          ->zone.lock
  *
  *  ->i_mutex
  *    ->i_mmap_lock            (truncate->unmap_mapping_range)
@@ -121,6 +124,18 @@ void __remove_from_page_cache(struct page *page)
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        BUG_ON(page_mapped(page));
+
+       /*
+        * Some filesystems seem to re-dirty the page even after
+        * the VM has canceled the dirty bit (eg ext3 journaling).
+        *
+        * Fix it up by doing a final dirty accounting check after
+        * having removed the page entirely.
+        */
+       if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+               dec_zone_page_state(page, NR_FILE_DIRTY);
+               dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+       }
 }
 
 void remove_from_page_cache(struct page *page)
@@ -170,6 +185,12 @@ static int sync_page(void *word)
        return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+       sync_page(word);
+       return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -574,6 +595,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+       return __wait_on_bit_lock(page_waitqueue(page), &wait,
+                                       sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
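Note that lock_page_killable(), used by the read path further down, is the pagemap.h counterpart of the new __lock_page_killable() above: it takes the slow path only when the lock is contended, and returns -EINTR if the sleeping task receives a fatal signal. A minimal sketch of that wrapper, assuming the TestSetPageLocked() primitive of this era (the wrapper itself is not part of this file):

static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))		/* contended: sleep killably */
		return __lock_page_killable(page);
	return 0;				/* uncontended fast path */
}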
@@ -839,7 +868,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 /**
  * do_generic_mapping_read - generic file read routine
  * @mapping:   address_space to be read
- * @_ra:       file's readahead state
+ * @ra:                file's readahead state
  * @filp:      the file to read
  * @ppos:      current file position
  * @desc:      read_descriptor
@@ -965,7 +994,8 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               lock_page(page);
+               if (lock_page_killable(page))
+                       goto readpage_eio;
 
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
@@ -993,7 +1023,8 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       lock_page(page);
+                       if (lock_page_killable(page))
+                               goto readpage_eio;
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
@@ -1004,15 +1035,16 @@ readpage:
                                        goto find_page;
                                }
                                unlock_page(page);
-                               error = -EIO;
                                shrink_readahead_size_eio(filp, ra);
-                               goto readpage_error;
+                               goto readpage_eio;
                        }
                        unlock_page(page);
                }
 
                goto page_ok;
 
+readpage_eio:
+               error = -EIO;
 readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
@@ -1297,7 +1329,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (vmf->pgoff >= size)
-               goto outside_data_content;
+               return VM_FAULT_SIGBUS;
 
        /* If we don't want any read-ahead, don't bother */
        if (VM_RandomReadHint(vma))
@@ -1374,7 +1406,7 @@ retry_find:
        if (unlikely(vmf->pgoff >= size)) {
                unlock_page(page);
                page_cache_release(page);
-               goto outside_data_content;
+               return VM_FAULT_SIGBUS;
        }
 
        /*
@@ -1385,15 +1417,6 @@ retry_find:
        vmf->page = page;
        return ret | VM_FAULT_LOCKED;
 
-outside_data_content:
-       /*
-        * An external ptracer can access pages that normally aren't
-        * accessible..
-        */
-       if (vma->vm_mm == current->mm)
-               return VM_FAULT_SIGBUS;
-
-       /* Fall through to the non-read-ahead case */
 no_cached_page:
        /*
         * We're only likely to ever get here if MADV_RANDOM is in
@@ -1626,17 +1649,22 @@ int __remove_suid(struct dentry *dentry, int kill)
 
 int remove_suid(struct dentry *dentry)
 {
-       int kill = should_remove_suid(dentry);
+       int killsuid = should_remove_suid(dentry);
+       int killpriv = security_inode_need_killpriv(dentry);
+       int error = 0;
 
-       if (unlikely(kill))
-               return __remove_suid(dentry, kill);
+       if (killpriv < 0)
+               return killpriv;
+       if (killpriv)
+               error = security_inode_killpriv(dentry);
+       if (!error && killsuid)
+               error = __remove_suid(dentry, killsuid);
 
-       return 0;
+       return error;
 }
 EXPORT_SYMBOL(remove_suid);
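The two killpriv hooks consulted above are LSM entry points; with file capabilities enabled they amount to detecting and stripping the security.capability xattr when the file is written. A rough sketch of the capability-side check, assuming a commoncap-style implementation of this era (the exact shape is illustrative and not taken from this diff):

int cap_inode_need_killpriv(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* no xattr methods: nothing that could need stripping */
	if (!inode->i_op || !inode->i_op->getxattr)
		return 0;

	/* probe for security.capability; >0 means it is present */
	error = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, NULL, 0);
	return error > 0;
}

security_inode_killpriv() then removes that xattr, while __remove_suid() keeps handling the classic setuid/setgid clearing as before.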
 
-size_t
-__filemap_copy_from_user_iovec_inatomic(char *vaddr,
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
 {
        size_t copied = 0, left = 0;
@@ -1658,6 +1686,124 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr,
        return copied - left;
 }
 
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied before the fault.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page, KM_USER0);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = i->iov->iov_base + i->iov_offset;
+               left = __copy_from_user_inatomic_nocache(kaddr + offset,
+                                                       buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+                                               i->iov, i->iov_offset, bytes);
+       }
+       kunmap_atomic(kaddr, KM_USER0);
+
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same side effects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = i->iov->iov_base + i->iov_offset;
+               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+                                               i->iov, i->iov_offset, bytes);
+       }
+       kunmap(page);
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
+
+static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
+{
+       if (likely(i->nr_segs == 1)) {
+               i->iov_offset += bytes;
+       } else {
+               const struct iovec *iov = i->iov;
+               size_t base = i->iov_offset;
+
+               while (bytes) {
+                       int copy = min(bytes, iov->iov_len - base);
+
+                       bytes -= copy;
+                       base += copy;
+                       if (iov->iov_len == base) {
+                               iov++;
+                               base = 0;
+                       }
+               }
+               i->iov = iov;
+               i->iov_offset = base;
+       }
+}
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+       BUG_ON(i->count < bytes);
+
+       __iov_iter_advance_iov(i, bytes);
+       i->count -= bytes;
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       char __user *buf = i->iov->iov_base + i->iov_offset;
+       bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+       return fault_in_pages_readable(buf, bytes);
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(struct iov_iter *i)
+{
+       const struct iovec *iov = i->iov;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
+
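The iov_iter itself and its trivial helpers are declared on the header side of this series; roughly, the include/linux/fs.h additions look like the following sketch (reproduced here only for context, since generic_file_buffered_write() below relies on iov_iter_init() and iov_iter_count()):

struct iov_iter {
	const struct iovec *iov;	/* current iovec */
	unsigned long nr_segs;		/* iovecs remaining */
	size_t iov_offset;		/* offset within current iovec */
	size_t count;			/* total bytes remaining */
};

static inline void iov_iter_init(struct iov_iter *i,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count, size_t written)
{
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count + written;
	/* skip whatever an earlier (e.g. partial direct IO) write consumed */
	iov_iter_advance(i, written);
}

static inline size_t iov_iter_count(struct iov_iter *i)
{
	return i->count;
}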
 /*
  * Performs necessary checks before doing a write
  *
@@ -1740,6 +1886,91 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
 }
 EXPORT_SYMBOL(generic_write_checks);
 
+int pagecache_write_begin(struct file *file, struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned flags,
+                               struct page **pagep, void **fsdata)
+{
+       const struct address_space_operations *aops = mapping->a_ops;
+
+       if (aops->write_begin) {
+               return aops->write_begin(file, mapping, pos, len, flags,
+                                                       pagep, fsdata);
+       } else {
+               int ret;
+               pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+               unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+               struct inode *inode = mapping->host;
+               struct page *page;
+again:
+               page = __grab_cache_page(mapping, index);
+               *pagep = page;
+               if (!page)
+                       return -ENOMEM;
+
+               if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
+                       /*
+                        * There is no way to resolve a short write situation
+                        * for a !Uptodate page (except by double copying in
+                        * the caller, as done by generic_perform_write_2copy).
+                        *
+                        * Instead, we have to bring it uptodate here.
+                        */
+                       ret = aops->readpage(file, page);
+                       page_cache_release(page);
+                       if (ret) {
+                               if (ret == AOP_TRUNCATED_PAGE)
+                                       goto again;
+                               return ret;
+                       }
+                       goto again;
+               }
+
+               ret = aops->prepare_write(file, page, offset, offset+len);
+               if (ret) {
+                       unlock_page(page);
+                       page_cache_release(page);
+                       if (pos + len > inode->i_size)
+                               vmtruncate(inode, inode->i_size);
+               }
+               return ret;
+       }
+}
+EXPORT_SYMBOL(pagecache_write_begin);
+
+int pagecache_write_end(struct file *file, struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned copied,
+                               struct page *page, void *fsdata)
+{
+       const struct address_space_operations *aops = mapping->a_ops;
+       int ret;
+
+       if (aops->write_end) {
+               mark_page_accessed(page);
+               ret = aops->write_end(file, mapping, pos, len, copied,
+                                                       page, fsdata);
+       } else {
+               unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+               struct inode *inode = mapping->host;
+
+               flush_dcache_page(page);
+               ret = aops->commit_write(file, page, offset, offset+len);
+               unlock_page(page);
+               mark_page_accessed(page);
+               page_cache_release(page);
+
+               if (ret < 0) {
+                       if (pos + len > inode->i_size)
+                               vmtruncate(inode, inode->i_size);
+               } else if (ret > 0)
+                       ret = min_t(size_t, copied, ret);
+               else
+                       ret = copied;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(pagecache_write_end);
+
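Together, these two helpers let code outside the normal write(2) path fill the pagecache without knowing whether the filesystem has been converted to ->write_begin/->write_end or still provides ->prepare_write/->commit_write. A minimal usage sketch, assuming a kernel-space source buffer that fits within one page (the function name is hypothetical):

/* needs linux/fs.h, linux/pagemap.h, linux/highmem.h */
static int example_write_kernel_buf(struct file *file, loff_t pos,
				    const char *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;
	void *fsdata;
	char *kaddr;
	int status;

	/* a kernel source cannot fault, so ask for the uninterruptible variant */
	status = pagecache_write_begin(file, mapping, pos, len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (status)
		return status;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + offset, buf, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);

	/* returns the number of bytes committed, or a negative error */
	status = pagecache_write_end(file, mapping, pos, len, len,
				page, fsdata);
	return status < 0 ? status : 0;
}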
 ssize_t
 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long *nr_segs, loff_t pos, loff_t *ppos,
@@ -1783,8 +2014,7 @@ EXPORT_SYMBOL(generic_file_direct_write);
  * Find or create a page at the given pagecache position. Return the locked
  * page. This function is specifically for buffered writes.
  */
-static struct page *__grab_cache_page(struct address_space *mapping,
-                                                       pgoff_t index)
+struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
 {
        int status;
        struct page *page;
@@ -1805,41 +2035,29 @@ repeat:
        }
        return page;
 }
+EXPORT_SYMBOL(__grab_cache_page);
 
-ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, ssize_t written)
+static ssize_t generic_perform_write_2copy(struct file *file,
+                               struct iov_iter *i, loff_t pos)
 {
-       struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
-       struct inode    *inode = mapping->host;
-       long            status = 0;
-       const struct iovec *cur_iov = iov; /* current iovec */
-       size_t          iov_offset = 0;    /* offset in the current iovec */
-       char __user     *buf;
-
-       /*
-        * handle partial DIO write.  Adjust cur_iov if needed.
-        */
-       filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, written);
+       struct inode *inode = mapping->host;
+       long status = 0;
+       ssize_t written = 0;
 
        do {
                struct page *src_page;
                struct page *page;
                pgoff_t index;          /* Pagecache index for current page */
                unsigned long offset;   /* Offset into pagecache page */
-               unsigned long seglen;   /* Bytes remaining in current iovec */
                unsigned long bytes;    /* Bytes to write to page */
                size_t copied;          /* Bytes copied from user */
 
-               buf = cur_iov->iov_base + iov_offset;
                offset = (pos & (PAGE_CACHE_SIZE - 1));
                index = pos >> PAGE_CACHE_SHIFT;
-               bytes = PAGE_CACHE_SIZE - offset;
-               if (bytes > count)
-                       bytes = count;
+               bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+                                               iov_iter_count(i));
 
                /*
                 * a non-NULL src_page indicates that we're doing the
@@ -1847,10 +2065,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                 */
                src_page = NULL;
 
-               seglen = cur_iov->iov_len - iov_offset;
-               if (seglen > bytes)
-                       seglen = bytes;
-
                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
@@ -1861,7 +2075,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                 * to check that the address is actually valid, when atomic
                 * usercopies are used, below.
                 */
-               if (unlikely(fault_in_pages_readable(buf, seglen))) {
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }
@@ -1877,7 +2091,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                 * cannot take a pagefault with the destination page locked.
                 * So pin the source page to copy it.
                 */
-               if (!PageUptodate(page)) {
+               if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
                        unlock_page(page);
 
                        src_page = alloc_page(GFP_KERNEL);
@@ -1892,8 +2106,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                         * same reason as we can't take a page fault with a
                         * page locked (as explained below).
                         */
-                       copied = filemap_copy_from_user(src_page, offset,
-                                       cur_iov, nr_segs, iov_offset, bytes);
+                       copied = iov_iter_copy_from_user(src_page, i,
+                                                               offset, bytes);
                        if (unlikely(copied == 0)) {
                                status = -EFAULT;
                                page_cache_release(page);
@@ -1917,7 +2131,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                                page_cache_release(src_page);
                                continue;
                        }
-
                }
 
                status = a_ops->prepare_write(file, page, offset, offset+bytes);
@@ -1939,8 +2152,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                         * really matter.
                         */
                        pagefault_disable();
-                       copied = filemap_copy_from_user_atomic(page, offset,
-                                       cur_iov, nr_segs, iov_offset, bytes);
+                       copied = iov_iter_copy_from_user_atomic(page, i,
+                                                               offset, bytes);
                        pagefault_enable();
                } else {
                        void *src, *dst;
@@ -1954,7 +2167,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                flush_dcache_page(page);
 
                status = a_ops->commit_write(file, page, offset, offset+bytes);
-               if (unlikely(status < 0 || status == AOP_TRUNCATED_PAGE))
+               if (unlikely(status < 0))
                        goto fs_write_aop_error;
                if (unlikely(status > 0)) /* filesystem did partial write */
                        copied = min_t(size_t, copied, status);
@@ -1965,18 +2178,16 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                if (src_page)
                        page_cache_release(src_page);
 
-               written += copied;
-               count -= copied;
+               iov_iter_advance(i, copied);
                pos += copied;
-               filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, copied);
+               written += copied;
 
                balance_dirty_pages_ratelimited(mapping);
                cond_resched();
                continue;
 
 fs_write_aop_error:
-               if (status != AOP_TRUNCATED_PAGE)
-                       unlock_page(page);
+               unlock_page(page);
                page_cache_release(page);
                if (src_page)
                        page_cache_release(src_page);
@@ -1988,17 +2199,125 @@ fs_write_aop_error:
                 */
                if (pos + bytes > inode->i_size)
                        vmtruncate(inode, inode->i_size);
-               if (status == AOP_TRUNCATED_PAGE)
-                       continue;
-               else
-                       break;
-       } while (count);
-       *ppos = pos;
+               break;
+       } while (iov_iter_count(i));
+
+       return written ? written : status;
+}
+
+static ssize_t generic_perform_write(struct file *file,
+                               struct iov_iter *i, loff_t pos)
+{
+       struct address_space *mapping = file->f_mapping;
+       const struct address_space_operations *a_ops = mapping->a_ops;
+       long status = 0;
+       ssize_t written = 0;
+       unsigned int flags = 0;
 
        /*
-        * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
+        * Copies from kernel address space cannot fail (NFSD is a big user).
         */
+       if (segment_eq(get_fs(), KERNEL_DS))
+               flags |= AOP_FLAG_UNINTERRUPTIBLE;
+
+       do {
+               struct page *page;
+               pgoff_t index;          /* Pagecache index for current page */
+               unsigned long offset;   /* Offset into pagecache page */
+               unsigned long bytes;    /* Bytes to write to page */
+               size_t copied;          /* Bytes copied from user */
+               void *fsdata;
+
+               offset = (pos & (PAGE_CACHE_SIZE - 1));
+               index = pos >> PAGE_CACHE_SHIFT;
+               bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+                                               iov_iter_count(i));
+
+again:
+
+               /*
+                * Bring in the user page that we will copy from _first_.
+                * Otherwise there's a nasty deadlock on copying from the
+                * same page as we're writing to, without it being marked
+                * up-to-date.
+                *
+                * Not only is this an optimisation, but it is also required
+                * to check that the address is actually valid, when atomic
+                * usercopies are used, below.
+                */
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
+
+               status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+                                               &page, &fsdata);
+               if (unlikely(status))
+                       break;
+
+               pagefault_disable();
+               copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+               pagefault_enable();
+               flush_dcache_page(page);
+
+               status = a_ops->write_end(file, mapping, pos, bytes, copied,
+                                               page, fsdata);
+               if (unlikely(status < 0))
+                       break;
+               copied = status;
+
+               cond_resched();
+
+               if (unlikely(copied == 0)) {
+                       /*
+                        * If we were unable to copy any data at all, we must
+                        * fall back to a single segment length write.
+                        *
+                        * If we didn't fall back here, we could livelock
+                        * because not all segments in the iov can be copied at
+                        * once without a pagefault.
+                        */
+                       bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+                                               iov_iter_single_seg_count(i));
+                       goto again;
+               }
+               iov_iter_advance(i, copied);
+               pos += copied;
+               written += copied;
+
+               balance_dirty_pages_ratelimited(mapping);
+
+       } while (iov_iter_count(i));
+
+       return written ? written : status;
+}
+
+ssize_t
+generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, ssize_t written)
+{
+       struct file *file = iocb->ki_filp;
+       struct address_space *mapping = file->f_mapping;
+       const struct address_space_operations *a_ops = mapping->a_ops;
+       struct inode *inode = mapping->host;
+       ssize_t status;
+       struct iov_iter i;
+
+       iov_iter_init(&i, iov, nr_segs, count, written);
+       if (a_ops->write_begin)
+               status = generic_perform_write(file, &i, pos);
+       else
+               status = generic_perform_write_2copy(file, &i, pos);
+
        if (likely(status >= 0)) {
+               written += status;
+               *ppos = pos + status;
+
+               /*
+                * For now, when the user asks for O_SYNC, we'll actually give
+                * O_DSYNC
+                */
                if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
                        if (!a_ops->writepage || !is_sync_kiocb(iocb))
                                status = generic_osync_inode(inode, mapping,
@@ -2212,21 +2531,17 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        }
 
        retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-       if (retval)
-               goto out;
 
        /*
         * Finally, try again to invalidate clean pages which might have been
-        * faulted in by get_user_pages() if the source of the write was an
-        * mmap()ed region of the file we're writing.  That's a pretty crazy
-        * thing to do, so we don't support it 100%.  If this invalidation
-        * fails and we have -EIOCBQUEUED we ignore the failure.
+        * cached by non-direct readahead, or faulted in by get_user_pages()
+        * if the source of the write was an mmap'ed region of the file
+        * we're writing.  Either one is a pretty crazy thing to do,
+        * so we don't support it 100%.  If this invalidation
+        * fails, tough, the write still worked...
         */
        if (rw == WRITE && mapping->nrpages) {
-               int err = invalidate_inode_pages2_range(mapping,
-                                             offset >> PAGE_CACHE_SHIFT, end);
-               if (err && retval >= 0)
-                       retval = err;
+               invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
        }
 out:
        return retval;
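As the dispatch in generic_file_buffered_write() above shows, a filesystem opts into the new single-copy write path simply by filling in ->write_begin/->write_end in its address_space_operations; the old ->prepare_write/->commit_write pair keeps working through the 2copy fallback. A sketch of the typical conversion, assuming the block_write_begin()/generic_write_end() buffer-head helpers introduced alongside this series (the myfs_* names are hypothetical):

static int myfs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	/* grab the pagecache page and map buffers via the fs get_block */
	return block_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,	/* commit, unlock, release */
	/* ... */
};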