Merge branch 'akpm' (patches from Andrew)
diff --git a/mm/filemap.c b/mm/filemap.c
index e3b8987153e67d8e4a92fb5781d8e34a9a506bfd..1a6beaf69f49f3d9e58497e5719e71627111072c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -249,7 +249,7 @@ static void page_cache_free_page(struct address_space *mapping,
                freepage(page);
 
        if (PageTransHuge(page) && !PageHuge(page)) {
-               page_ref_sub(page, HPAGE_PMD_NR);
+               page_ref_sub(page, thp_nr_pages(page));
                VM_BUG_ON_PAGE(page_count(page) <= 0, page);
        } else {
                put_page(page);
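
Note: the hunk above replaces the hard-coded HPAGE_PMD_NR with thp_nr_pages(), so the
reference drop matches the page's actual size once the page cache can hold THPs of
arbitrary order. A simplified sketch of what the helper computes (illustrative only;
see include/linux/huge_mm.h for the real definition):

	/* Sketch: number of base pages backing @page. */
	static inline int thp_nr_pages_sketch(struct page *page)
	{
		if (PageHead(page))	/* head of a compound page */
			return 1 << compound_order(page);	/* 2^order base pages */
		return 1;		/* ordinary order-0 page */
	}
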
@@ -829,13 +829,12 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
 noinline int __add_to_page_cache_locked(struct page *page,
                                        struct address_space *mapping,
-                                       pgoff_t offset, gfp_t gfp_mask,
+                                       pgoff_t offset, gfp_t gfp,
                                        void **shadowp)
 {
        XA_STATE(xas, &mapping->i_pages, offset);
        int huge = PageHuge(page);
        int error;
-       void *old;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -846,25 +845,46 @@ noinline int __add_to_page_cache_locked(struct page *page,
        page->index = offset;
 
        if (!huge) {
-               error = mem_cgroup_charge(page, current->mm, gfp_mask);
+               error = mem_cgroup_charge(page, current->mm, gfp);
                if (error)
                        goto error;
        }
 
+       gfp &= GFP_RECLAIM_MASK;
+
        do {
+               unsigned int order = xa_get_order(xas.xa, xas.xa_index);
+               void *entry, *old = NULL;
+
+               if (order > thp_order(page))
+                       xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+                                       order, gfp);
                xas_lock_irq(&xas);
-               old = xas_load(&xas);
-               if (old && !xa_is_value(old))
-                       xas_set_err(&xas, -EEXIST);
+               xas_for_each_conflict(&xas, entry) {
+                       old = entry;
+                       if (!xa_is_value(entry)) {
+                               xas_set_err(&xas, -EEXIST);
+                               goto unlock;
+                       }
+               }
+
+               if (old) {
+                       if (shadowp)
+                               *shadowp = old;
+                       /* entry may have been split before we acquired the lock */
+                       order = xa_get_order(xas.xa, xas.xa_index);
+                       if (order > thp_order(page)) {
+                               xas_split(&xas, old, order);
+                               xas_reset(&xas);
+                       }
+               }
+
                xas_store(&xas, page);
                if (xas_error(&xas))
                        goto unlock;
 
-               if (xa_is_value(old)) {
+               if (old)
                        mapping->nrexceptional--;
-                       if (shadowp)
-                               *shadowp = old;
-               }
                mapping->nrpages++;
 
                /* hugetlb pages do not participate in page cache accounting */
@@ -872,7 +892,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
                        __inc_lruvec_page_state(page, NR_FILE_PAGES);
 unlock:
                xas_unlock_irq(&xas);
-       } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+       } while (xas_nomem(&xas, gfp));
 
        if (xas_error(&xas)) {
                error = xas_error(&xas);
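
Note: two idioms in the rewritten insertion loop deserve a comment. xas_split_alloc()
runs before xas_lock_irq() because the allocation may sleep, and the order is re-read
under the lock before xas_split() since a racing task may already have split the
shadow entry. The loop also keeps the usual xas_nomem() retry pattern, sketched below
with illustrative names (see lib/xarray.c and Documentation/core-api/xarray.rst for
the real API):

	/* Sketch of the XArray store-with-retry idiom, not the code above verbatim. */
	static int store_retry_sketch(struct xarray *xa, unsigned long index,
				      void *item, gfp_t gfp)
	{
		XA_STATE(xas, xa, index);

		do {
			xas_lock_irq(&xas);
			xas_store(&xas, item);	/* may record -ENOMEM in xas */
			xas_unlock_irq(&xas);
			/* xas_nomem() allocates outside the lock and asks us to retry */
		} while (xas_nomem(&xas, gfp));

		return xas_error(&xas);
	}
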
@@ -1425,7 +1445,7 @@ static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem
  * unlock_page - unlock a locked page
  * @page: the page
  *
- * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
+ * Unlocks the page and wakes up sleepers in wait_on_page_locked().
  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
  * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
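
Note: the shared wakeup described above works because PG_locked and PG_writeback
waiters sleep on the same hashed per-page waitqueue; roughly (illustrative, modelled
on page_waitqueue() earlier in this file):

	/* Sketch: one hashed queue serves both lock and writeback waiters. */
	static wait_queue_head_t *page_waitqueue_sketch(struct page *page)
	{
		return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
	}
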
@@ -2568,8 +2588,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        struct file *file = vmf->vma->vm_file;
        struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
+       DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff);
        struct file *fpin = NULL;
-       pgoff_t offset = vmf->pgoff;
        unsigned int mmap_miss;
 
        /* If we don't want any read-ahead, don't bother */
@@ -2580,8 +2600,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 
        if (vmf->vma->vm_flags & VM_SEQ_READ) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-               page_cache_sync_readahead(mapping, ra, file, offset,
-                                         ra->ra_pages);
+               page_cache_sync_ra(&ractl, ra, ra->ra_pages);
                return fpin;
        }
 
@@ -2601,10 +2620,11 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
         * mmap read-around
         */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-       ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
+       ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
        ra->size = ra->ra_pages;
        ra->async_size = ra->ra_pages / 4;
-       ra_submit(ra, mapping, file);
+       ractl._index = ra->start;
+       do_page_cache_ra(&ractl, ra->size, ra->async_size);
        return fpin;
 }
 
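Note: DEFINE_READAHEAD() bundles the file, mapping and start index into a
struct readahead_control, which is what lets the local offset variable and the
ra_submit() wrapper disappear above. An approximate expansion (illustrative; see
include/linux/pagemap.h for the real macro):

	struct readahead_control ractl = {
		.file = file,
		.mapping = mapping,
		._index = vmf->pgoff,	/* start readahead at the faulting page */
	};
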
@@ -2984,7 +3004,7 @@ filler:
                goto out;
 
        /*
-        * Page is not up to date and may be locked due one of the following
+        * Page is not up to date and may be locked due to one of the following
         * case a: Page is being filled and the page lock is held
         * case b: Read/write error clearing the page uptodate status
         * case c: Truncation in progress (page locked)
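
Note: these cases are why the surrounding do_read_cache_page() code waits and then
re-checks the uptodate flag instead of blocking on the lock unconditionally; in
sketch form (illustrative, mirroring the logic that follows this comment):

	wait_on_page_locked(page);	/* cases a/c: let the current holder finish */
	if (PageUptodate(page))
		goto out;		/* the page was filled while we waited */
	lock_page(page);		/* serialize against a-c, then re-check */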