mm: move clearing of page->mapping to page_cache_tree_delete()
[sfrench/cifs-2.6.git] / mm / filemap.c
index 594d73fef8b43bae852f4f7ace1e8cfc46b23690..c649624d386c17281e9e7a6ebe58b343a2fc0163 100644 (file)
@@ -165,6 +165,9 @@ static void page_cache_tree_delete(struct address_space *mapping,
                                     workingset_update_node, mapping);
        }
 
+       page->mapping = NULL;
+       /* Leave page->index set: truncation lookup relies upon it */
+
        if (shadow) {
                mapping->nrexceptional += nr;
                /*
@@ -224,34 +227,49 @@ void __delete_from_page_cache(struct page *page, void *shadow)
                }
        }
 
+       /* hugetlb pages do not participate in page cache accounting. */
+       if (!PageHuge(page)) {
+               __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+               if (PageSwapBacked(page)) {
+                       __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+                       if (PageTransHuge(page))
+                               __dec_node_page_state(page, NR_SHMEM_THPS);
+               } else {
+                       VM_BUG_ON_PAGE(PageTransHuge(page), page);
+               }
+
+               /*
+                * At this point page must be either written or cleaned by
+                * truncate.  Dirty page here signals a bug and loss of
+                * unwritten data.
+                *
+                * This fixes dirty accounting after removing the page entirely
+                * but leaves PageDirty set: it has no effect for truncated
+                * page and anyway will be cleared before returning page into
+                * buddy allocator.
+                */
+               if (WARN_ON_ONCE(PageDirty(page)))
+                       account_page_cleaned(page, mapping,
+                                            inode_to_wb(mapping->host));
+       }
        page_cache_tree_delete(mapping, page, shadow);
+}
 
-       page->mapping = NULL;
-       /* Leave page->index set: truncation lookup relies upon it */
+static void page_cache_free_page(struct address_space *mapping,
+                               struct page *page)
+{
+       void (*freepage)(struct page *);
 
-       /* hugetlb pages do not participate in page cache accounting. */
-       if (PageHuge(page))
-               return;
+       freepage = mapping->a_ops->freepage;
+       if (freepage)
+               freepage(page);
 
-       __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
-       if (PageSwapBacked(page)) {
-               __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
-               if (PageTransHuge(page))
-                       __dec_node_page_state(page, NR_SHMEM_THPS);
+       if (PageTransHuge(page) && !PageHuge(page)) {
+               page_ref_sub(page, HPAGE_PMD_NR);
+               VM_BUG_ON_PAGE(page_count(page) <= 0, page);
        } else {
-               VM_BUG_ON_PAGE(PageTransHuge(page), page);
+               put_page(page);
        }
-
-       /*
-        * At this point page must be either written or cleaned by truncate.
-        * Dirty page here signals a bug and loss of unwritten data.
-        *
-        * This fixes dirty accounting after removing the page entirely but
-        * leaves PageDirty set: it has no effect for truncated page and
-        * anyway will be cleared before returning page into buddy allocator.
-        */
-       if (WARN_ON_ONCE(PageDirty(page)))
-               account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
 }
 
 /**
@@ -266,25 +284,13 @@ void delete_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;
-       void (*freepage)(struct page *);
 
        BUG_ON(!PageLocked(page));
-
-       freepage = mapping->a_ops->freepage;
-
        spin_lock_irqsave(&mapping->tree_lock, flags);
        __delete_from_page_cache(page, NULL);
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
 
-       if (freepage)
-               freepage(page);
-
-       if (PageTransHuge(page) && !PageHuge(page)) {
-               page_ref_sub(page, HPAGE_PMD_NR);
-               VM_BUG_ON_PAGE(page_count(page) <= 0, page);
-       } else {
-               put_page(page);
-       }
+       page_cache_free_page(mapping, page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
@@ -420,19 +426,17 @@ static void __filemap_fdatawait_range(struct address_space *mapping,
                return;
 
        pagevec_init(&pvec, 0);
-       while ((index <= end) &&
-                       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                       PAGECACHE_TAG_WRITEBACK,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
+       while (index <= end) {
                unsigned i;
 
+               nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
+                               end, PAGECACHE_TAG_WRITEBACK);
+               if (!nr_pages)
+                       break;
+
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
 
-                       /* until radix tree lookup accepts end_index */
-                       if (page->index > end)
-                               continue;
-
                        wait_on_page_writeback(page);
                        ClearPageError(page);
                }
@@ -1754,9 +1758,10 @@ repeat:
 EXPORT_SYMBOL(find_get_pages_contig);
 
 /**
- * find_get_pages_tag - find and return pages that match @tag
+ * find_get_pages_range_tag - find and return pages in given range matching @tag
  * @mapping:   the address_space to search
  * @index:     the starting page index
+ * @end:       the final page index (inclusive)
  * @tag:       the tag index
  * @nr_pages:  the maximum number of pages
  * @pages:     where the resulting pages are placed
@@ -1764,8 +1769,9 @@ EXPORT_SYMBOL(find_get_pages_contig);
  * Like find_get_pages, except we only return pages which are tagged with
  * @tag.   We update @index to index the next page for the traversal.
  */
-unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
-                       int tag, unsigned int nr_pages, struct page **pages)
+unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
+                       pgoff_t end, int tag, unsigned int nr_pages,
+                       struct page **pages)
 {
        struct radix_tree_iter iter;
        void **slot;
@@ -1778,6 +1784,9 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
        radix_tree_for_each_tagged(slot, &mapping->page_tree,
                                   &iter, *index, tag) {
                struct page *head, *page;
+
+               if (iter.index > end)
+                       break;
 repeat:
                page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
@@ -1819,18 +1828,28 @@ repeat:
                }
 
                pages[ret] = page;
-               if (++ret == nr_pages)
-                       break;
+               if (++ret == nr_pages) {
+                       *index = pages[ret - 1]->index + 1;
+                       goto out;
+               }
        }
 
+       /*
+        * We come here when we got to @end. We take care to not overflow the
+        * index @index as it confuses some of the callers. This breaks the
+        * iteration when there is a page at index -1 but that is already
+        * broken anyway.
+        */
+       if (end == (pgoff_t)-1)
+               *index = (pgoff_t)-1;
+       else
+               *index = end + 1;
+out:
        rcu_read_unlock();
 
-       if (ret)
-               *index = pages[ret - 1]->index + 1;
-
        return ret;
 }
-EXPORT_SYMBOL(find_get_pages_tag);
+EXPORT_SYMBOL(find_get_pages_range_tag);
 
 /**
  * find_get_entries_tag - find and return entries that match @tag