Merge git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml
diff --git a/mm/filemap.c b/mm/filemap.c
index 693f62212a59a704dec99516d1d8a975b59bc7ee..ab77e19ab09c0f447e932c1d63c1261b481289e0 100644
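
These hunks are part of the page-cache locking rework that renames mapping->page_tree to mapping->i_pages and retires the separate mapping->tree_lock in favour of a spinlock embedded in the tree root. As orientation, a sketch of the accompanying header changes (field layout and macro bodies paraphrased from the underlying patch, not quoted verbatim):

/* Paraphrased sketch of the header side of this conversion. */
struct address_space {
	struct inode		*host;		/* owning inode */
	struct radix_tree_root	i_pages;	/* was page_tree + tree_lock */
	/* ... remaining fields unchanged ... */
};

/* The spinlock now lives in the tree root as xa_lock and is taken
 * through wrappers instead of the old mapping->tree_lock:
 */
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
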
@@ -66,7 +66,7 @@
  *  ->i_mmap_rwsem             (truncate_pagecache)
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock            (exclusive_swap_page, others)
- *        ->mapping->tree_lock
+ *        ->i_pages lock
  *
  *  ->i_mutex
  *    ->i_mmap_rwsem           (truncate->unmap_mapping_range)
@@ -74,7 +74,7 @@
  *  ->mmap_sem
  *    ->i_mmap_rwsem
  *      ->page_table_lock or pte_lock  (various, mainly in memory.c)
- *        ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
+ *        ->i_pages lock       (arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
  *    ->lock_page              (access_process_vm)
@@ -84,7 +84,7 @@
  *
  *  bdi->wb.list_lock
  *    sb_lock                  (fs/fs-writeback.c)
- *    ->mapping->tree_lock     (__sync_single_inode)
+ *    ->i_pages lock           (__sync_single_inode)
  *
  *  ->i_mmap_rwsem
  *    ->anon_vma.lock          (vma_adjust)
@@ -98,11 +98,11 @@
  *  ->page_table_lock or pte_lock
  *    ->swap_lock              (try_to_unmap_one)
  *    ->private_lock           (try_to_unmap_one)
- *    ->tree_lock              (try_to_unmap_one)
+ *    ->i_pages lock           (try_to_unmap_one)
  *    ->zone_lru_lock(zone)    (follow_page->mark_page_accessed)
  *    ->zone_lru_lock(zone)    (check_pte_range->isolate_lru_page)
  *    ->private_lock           (page_remove_rmap->set_page_dirty)
- *    ->tree_lock              (page_remove_rmap->set_page_dirty)
+ *    ->i_pages lock           (page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock                (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock          (page_remove_rmap->set_page_dirty)
  *    ->memcg->move_lock       (page_remove_rmap->lock_page_memcg)
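
The hierarchy above fixes the nesting when several of these locks are needed at once: ->private_lock sits outside the i_pages lock, which is taken innermost with interrupts disabled. A minimal sketch of what that implies (hypothetical function, not from this patch; assumes this file's existing includes):

/* Hypothetical illustration of the documented lock order; not a
 * real kernel function. ->private_lock outer, i_pages lock innermost.
 */
static void example_nested_locking(struct address_space *mapping)
{
	unsigned long flags;

	spin_lock(&mapping->private_lock);		/* outer lock */
	xa_lock_irqsave(&mapping->i_pages, flags);	/* innermost */
	/* ... update the page-cache entry and private state ... */
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	spin_unlock(&mapping->private_lock);
}
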
@@ -118,14 +118,15 @@ static int page_cache_tree_insert(struct address_space *mapping,
        void **slot;
        int error;
 
-       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+       error = __radix_tree_create(&mapping->i_pages, page->index, 0,
                                    &node, &slot);
        if (error)
                return error;
        if (*slot) {
                void *p;
 
-               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+               p = radix_tree_deref_slot_protected(slot,
+                                                   &mapping->i_pages.xa_lock);
                if (!radix_tree_exceptional_entry(p))
                        return -EEXIST;
 
@@ -133,7 +134,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
                if (shadowp)
                        *shadowp = p;
        }
-       __radix_tree_replace(&mapping->page_tree, node, slot, page,
+       __radix_tree_replace(&mapping->i_pages, node, slot, page,
                             workingset_lookup_update(mapping));
        mapping->nrpages++;
        return 0;
@@ -155,13 +156,13 @@ static void page_cache_tree_delete(struct address_space *mapping,
                struct radix_tree_node *node;
                void **slot;
 
-               __radix_tree_lookup(&mapping->page_tree, page->index + i,
+               __radix_tree_lookup(&mapping->i_pages, page->index + i,
                                    &node, &slot);
 
                VM_BUG_ON_PAGE(!node && nr != 1, page);
 
-               radix_tree_clear_tags(&mapping->page_tree, node, slot);
-               __radix_tree_replace(&mapping->page_tree, node, slot, shadow,
+               radix_tree_clear_tags(&mapping->i_pages, node, slot);
+               __radix_tree_replace(&mapping->i_pages, node, slot, shadow,
                                workingset_lookup_update(mapping));
        }
 
@@ -253,7 +254,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold the mapping's tree_lock.
+ * is safe.  The caller must hold the i_pages lock.
  */
 void __delete_from_page_cache(struct page *page, void *shadow)
 {
@@ -296,9 +297,9 @@ void delete_from_page_cache(struct page *page)
        unsigned long flags;
 
        BUG_ON(!PageLocked(page));
-       spin_lock_irqsave(&mapping->tree_lock, flags);
+       xa_lock_irqsave(&mapping->i_pages, flags);
        __delete_from_page_cache(page, NULL);
-       spin_unlock_irqrestore(&mapping->tree_lock, flags);
+       xa_unlock_irqrestore(&mapping->i_pages, flags);
 
        page_cache_free_page(mapping, page);
 }
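
The split of responsibilities here: __delete_from_page_cache() requires the i_pages lock to be held, while delete_from_page_cache() takes it internally and only asks the caller to hold the page lock and a reference. A hedged caller sketch (hypothetical function):

/* Hypothetical sketch: evict one page we hold a reference to. */
static void example_evict_page(struct page *page)
{
	lock_page(page);
	delete_from_page_cache(page);	/* cache's ref dropped inside */
	unlock_page(page);
	put_page(page);			/* drop the caller's reference */
}
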
@@ -309,14 +310,14 @@ EXPORT_SYMBOL(delete_from_page_cache);
  * @mapping: the mapping to which pages belong
  * @pvec: pagevec with pages to delete
  *
- * The function walks over mapping->page_tree and removes pages passed in @pvec
- * from the radix tree. The function expects @pvec to be sorted by page index.
- * It tolerates holes in @pvec (radix tree entries at those indices are not
+ * The function walks over mapping->i_pages and removes pages passed in @pvec
+ * from the mapping. The function expects @pvec to be sorted by page index.
+ * It tolerates holes in @pvec (mapping entries at those indices are not
  * modified). The function expects only THP head pages to be present in the
- * @pvec and takes care to delete all corresponding tail pages from the radix
- * tree as well.
+ * @pvec and takes care to delete all corresponding tail pages from the
+ * mapping as well.
  *
- * The function expects mapping->tree_lock to be held.
+ * The function expects the i_pages lock to be held.
  */
 static void
 page_cache_tree_delete_batch(struct address_space *mapping,
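
A hedged sketch of a caller honouring that contract (hypothetical helper; assumes nr fits in one pagevec). Note the exported delete_from_page_cache_batch() below takes the i_pages lock around this static helper:

/* Hypothetical sketch: pages[] already locked, index-sorted, and
 * containing only THP head pages, per the kerneldoc above.
 */
static void example_drop_pages(struct address_space *mapping,
			       struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < nr; i++)	/* nr <= PAGEVEC_SIZE assumed */
		pagevec_add(&pvec, pages[i]);
	delete_from_page_cache_batch(mapping, &pvec);
}
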
@@ -330,11 +331,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
        pgoff_t start;
 
        start = pvec->pages[0]->index;
-       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+       radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                if (i >= pagevec_count(pvec) && !tail_pages)
                        break;
                page = radix_tree_deref_slot_protected(slot,
-                                                      &mapping->tree_lock);
+                                                      &mapping->i_pages.xa_lock);
                if (radix_tree_exceptional_entry(page))
                        continue;
                if (!tail_pages) {
@@ -357,8 +358,8 @@ page_cache_tree_delete_batch(struct address_space *mapping,
                } else {
                        tail_pages--;
                }
-               radix_tree_clear_tags(&mapping->page_tree, iter.node, slot);
-               __radix_tree_replace(&mapping->page_tree, iter.node, slot, NULL,
+               radix_tree_clear_tags(&mapping->i_pages, iter.node, slot);
+               __radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL,
                                workingset_lookup_update(mapping));
                total_pages++;
        }
@@ -374,14 +375,14 @@ void delete_from_page_cache_batch(struct address_space *mapping,
        if (!pagevec_count(pvec))
                return;
 
-       spin_lock_irqsave(&mapping->tree_lock, flags);
+       xa_lock_irqsave(&mapping->i_pages, flags);
        for (i = 0; i < pagevec_count(pvec); i++) {
                trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
 
                unaccount_page_cache_page(mapping, pvec->pages[i]);
        }
        page_cache_tree_delete_batch(mapping, pvec);
-       spin_unlock_irqrestore(&mapping->tree_lock, flags);
+       xa_unlock_irqrestore(&mapping->i_pages, flags);
 
        for (i = 0; i < pagevec_count(pvec); i++)
                page_cache_free_page(mapping, pvec->pages[i]);
@@ -798,7 +799,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                new->mapping = mapping;
                new->index = offset;
 
-               spin_lock_irqsave(&mapping->tree_lock, flags);
+               xa_lock_irqsave(&mapping->i_pages, flags);
                __delete_from_page_cache(old, NULL);
                error = page_cache_tree_insert(mapping, new, NULL);
                BUG_ON(error);
@@ -810,7 +811,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                        __inc_node_page_state(new, NR_FILE_PAGES);
                if (PageSwapBacked(new))
                        __inc_node_page_state(new, NR_SHMEM);
-               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               xa_unlock_irqrestore(&mapping->i_pages, flags);
                mem_cgroup_migrate(old, new);
                radix_tree_preload_end();
                if (freepage)
@@ -852,7 +853,7 @@ static int __add_to_page_cache_locked(struct page *page,
        page->mapping = mapping;
        page->index = offset;
 
-       spin_lock_irq(&mapping->tree_lock);
+       xa_lock_irq(&mapping->i_pages);
        error = page_cache_tree_insert(mapping, page, shadowp);
        radix_tree_preload_end();
        if (unlikely(error))
@@ -861,7 +862,7 @@ static int __add_to_page_cache_locked(struct page *page,
        /* hugetlb pages do not participate in page cache accounting. */
        if (!huge)
                __inc_node_page_state(page, NR_FILE_PAGES);
-       spin_unlock_irq(&mapping->tree_lock);
+       xa_unlock_irq(&mapping->i_pages);
        if (!huge)
                mem_cgroup_commit_charge(page, memcg, false, false);
        trace_mm_filemap_add_to_page_cache(page);
@@ -869,7 +870,7 @@ static int __add_to_page_cache_locked(struct page *page,
 err_insert:
        page->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
-       spin_unlock_irq(&mapping->tree_lock);
+       xa_unlock_irq(&mapping->i_pages);
        if (!huge)
                mem_cgroup_cancel_charge(page, memcg, false);
        put_page(page);
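
For context, most callers reach __add_to_page_cache_locked() via add_to_page_cache_lru(); a hedged sketch (hypothetical function, error handling trimmed):

/* Hypothetical sketch: allocate and insert one page; on success the
 * page is in the cache and locked.
 */
static struct page *example_add_page(struct address_space *mapping,
				     pgoff_t index)
{
	struct page *page = __page_cache_alloc(GFP_KERNEL);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
		put_page(page);		/* insertion failed */
		return NULL;
	}
	return page;
}
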
@@ -1353,7 +1354,7 @@ pgoff_t page_cache_next_hole(struct address_space *mapping,
        for (i = 0; i < max_scan; i++) {
                struct page *page;
 
-               page = radix_tree_lookup(&mapping->page_tree, index);
+               page = radix_tree_lookup(&mapping->i_pages, index);
                if (!page || radix_tree_exceptional_entry(page))
                        break;
                index++;
@@ -1394,7 +1395,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
        for (i = 0; i < max_scan; i++) {
                struct page *page;
 
-               page = radix_tree_lookup(&mapping->page_tree, index);
+               page = radix_tree_lookup(&mapping->i_pages, index);
                if (!page || radix_tree_exceptional_entry(page))
                        break;
                index--;
@@ -1427,7 +1428,7 @@ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
        rcu_read_lock();
 repeat:
        page = NULL;
-       pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
+       pagep = radix_tree_lookup_slot(&mapping->i_pages, offset);
        if (pagep) {
                page = radix_tree_deref_slot(pagep);
                if (unlikely(!page))
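
Lookups stay lockless: find_get_entry() runs under rcu_read_lock() and can return exceptional (shadow/DAX) entries. Most callers go through find_get_page(), which filters those out; a hedged sketch (hypothetical function):

/* Hypothetical sketch: probe the cache without the i_pages lock. */
static bool example_page_is_cached(struct address_space *mapping,
				   pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return false;
	put_page(page);		/* drop the reference we took */
	return true;
}
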
@@ -1633,7 +1634,7 @@ unsigned find_get_entries(struct address_space *mapping,
                return 0;
 
        rcu_read_lock();
-       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+       radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                struct page *head, *page;
 repeat:
                page = radix_tree_deref_slot(slot);
@@ -1710,7 +1711,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                return 0;
 
        rcu_read_lock();
-       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, *start) {
+       radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, *start) {
                struct page *head, *page;
 
                if (iter.index > end)
@@ -1795,7 +1796,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                return 0;
 
        rcu_read_lock();
-       radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
+       radix_tree_for_each_contig(slot, &mapping->i_pages, &iter, index) {
                struct page *head, *page;
 repeat:
                page = radix_tree_deref_slot(slot);
@@ -1875,8 +1876,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                return 0;
 
        rcu_read_lock();
-       radix_tree_for_each_tagged(slot, &mapping->page_tree,
-                                  &iter, *index, tag) {
+       radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) {
                struct page *head, *page;
 
                if (iter.index > end)
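
Tagged iteration is usually consumed through the pagevec layer; a hedged sketch of a writeback-style walk over dirty pages (hypothetical function; pagevec_lookup_range_tag() wraps find_get_pages_range_tag()):

/* Hypothetical sketch: visit dirty pages one pagevec at a time. */
static void example_walk_dirty(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned i, nr;

	pagevec_init(&pvec);
	while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index,
					      (pgoff_t)-1,
					      PAGECACHE_TAG_DIRTY))) {
		for (i = 0; i < nr; i++) {
			/* ... write back pvec.pages[i] ... */
		}
		pagevec_release(&pvec);
	}
}
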
@@ -1969,8 +1969,7 @@ unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                return 0;
 
        rcu_read_lock();
-       radix_tree_for_each_tagged(slot, &mapping->page_tree,
-                                  &iter, start, tag) {
+       radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, tag) {
                struct page *head, *page;
 repeat:
                page = radix_tree_deref_slot(slot);
@@ -2624,8 +2623,7 @@ void filemap_map_pages(struct vm_fault *vmf,
        struct page *head, *page;
 
        rcu_read_lock();
-       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
-                       start_pgoff) {
+       radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start_pgoff) {
                if (iter.index > end_pgoff)
                        break;
 repeat: