mm/rmap.c: remove redundant variable cend
diff --git a/mm/rmap.c b/mm/rmap.c
index c570f82e6827153316465b9e18f0fca376a1c1a1..6b5a0f219ac0821f9298e6ae6fcd266762ddb3b5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -63,6 +63,7 @@
 #include <linux/hugetlb.h>
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
+#include <linux/memremap.h>
 
 #include <asm/tlbflush.h>
 
@@ -390,7 +391,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
                 * Leave empty anon_vmas on the list - we'll need
                 * to free them outside the lock.
                 */
-               if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+               if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
                        anon_vma->parent->degree--;
                        continue;
                }
@@ -424,7 +425,7 @@ static void anon_vma_ctor(void *data)
 
        init_rwsem(&anon_vma->rwsem);
        atomic_set(&anon_vma->refcount, 0);
-       anon_vma->rb_root = RB_ROOT;
+       anon_vma->rb_root = RB_ROOT_CACHED;
 }
 
 void __init anon_vma_init(void)
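
The switch from RB_ROOT to RB_ROOT_CACHED above means the anon_vma interval tree now also caches its leftmost node, so the emptiness test has to dereference the embedded plain root (&anon_vma->rb_root.rb_root). A minimal userspace sketch of that layout, using simplified stand-in types rather than the kernel's <linux/rbtree.h>, is:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's rbtree types, for illustration only. */
struct rb_node { struct rb_node *rb_left, *rb_right, *rb_parent; };
struct rb_root { struct rb_node *rb_node; };
struct rb_root_cached {
	struct rb_root rb_root;		/* the tree itself */
	struct rb_node *rb_leftmost;	/* cached leftmost node */
};

#define RB_ROOT_CACHED (struct rb_root_cached) { { NULL }, NULL }

static bool rb_empty(const struct rb_root *root)
{
	return root->rb_node == NULL;
}

int main(void)
{
	struct rb_root_cached rb_root = RB_ROOT_CACHED;

	/* With a cached root, the empty check must look at the embedded .rb_root. */
	printf("tree empty: %d\n", rb_empty(&rb_root.rb_root));
	return 0;
}
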
@@ -898,7 +899,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
-               unsigned long cstart, cend;
+               unsigned long cstart;
                int ret = 0;
 
                cstart = address = pvmw.address;
@@ -914,7 +915,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
                        set_pte_at(vma->vm_mm, address, pte, entry);
-                       cend = cstart + PAGE_SIZE;
                        ret = 1;
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -930,7 +930,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        entry = pmd_mkclean(entry);
                        set_pmd_at(vma->vm_mm, address, pmd, entry);
                        cstart &= PMD_MASK;
-                       cend = cstart + PMD_SIZE;
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -938,10 +937,15 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 #endif
                }
 
-               if (ret) {
-                       mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
+               /*
+                * No need to call mmu_notifier_invalidate_range() as we are
+                * only downgrading page table protection, not changing it to
+                * point to a new page.
+                *
+                * See Documentation/vm/mmu_notifier.txt
+                */
+               if (ret)
                        (*cleaned)++;
-               }
        }
 
        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
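
The per-pte invalidate dropped above (and with it the cend variable) is safe because page_mkclean_one() only write-protects and cleans the entry: the pte keeps mapping the same pfn, and the invalidate_range_start()/end() bracket around the walk covers the secondary MMUs. A toy model of that "same pfn, weaker permissions" point, with a made-up pte layout (PTE_WRITE, PTE_DIRTY and PTE_PFN_SHIFT are inventions for this sketch, not any architecture's real bits), is:

#include <assert.h>
#include <stdio.h>

/* Toy PTE layout for illustration: pfn in the high bits, flag bits below. */
#define PTE_WRITE	(1UL << 1)
#define PTE_DIRTY	(1UL << 2)
#define PTE_PFN_SHIFT	12

typedef unsigned long pte_t;

static pte_t pte_wrprotect(pte_t pte) { return pte & ~PTE_WRITE; }
static pte_t pte_mkclean(pte_t pte)   { return pte & ~PTE_DIRTY; }
static unsigned long pte_pfn(pte_t pte) { return pte >> PTE_PFN_SHIFT; }

int main(void)
{
	pte_t pte = (0x1234UL << PTE_PFN_SHIFT) | PTE_WRITE | PTE_DIRTY;
	pte_t clean = pte_mkclean(pte_wrprotect(pte));

	/* Protection was downgraded, but the PTE still maps the same pfn. */
	assert(pte_pfn(clean) == pte_pfn(pte));
	printf("pfn %#lx: write and dirty bits cleared\n", pte_pfn(clean));
	return 0;
}
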
@@ -1346,9 +1350,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
                return true;
 
+       if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
+           is_zone_device_page(page) && !is_device_private_page(page))
+               return true;
+
        if (flags & TTU_SPLIT_HUGE_PMD) {
                split_huge_pmd_address(vma, address,
-                               flags & TTU_MIGRATION, page);
+                               flags & TTU_SPLIT_FREEZE, page);
        }
 
        /*
@@ -1360,6 +1368,19 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+               /* PMD-mapped THP migration entry */
+               if (!pvmw.pte && (flags & TTU_MIGRATION)) {
+                       VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
+
+                       if (!PageAnon(page))
+                               continue;
+
+                       set_pmd_migration_entry(&pvmw, page);
+                       continue;
+               }
+#endif
+
                /*
                 * If the page is mlock()d, we cannot swap it out.
                 * If it's recently referenced (perhaps page_referenced
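
In the mapped-walk state above, a NULL pvmw.pte combined with a valid pmd denotes a PMD-mapped THP, which is why the new branch can freeze the whole PMD as a migration entry instead of falling through to the per-pte path. A hypothetical, stripped-down version of that dispatch (struct pvmw_state, handle_mapping() and the flag value are inventions for this sketch, not kernel APIs) might look like:

#include <stdio.h>

/* Hypothetical, stripped-down view of the mapped-walk state. */
struct pvmw_state {
	unsigned long *pte;	/* non-NULL: PTE-level mapping found */
	unsigned long *pmd;	/* non-NULL: PMD-level mapping found */
};

#define TTU_MIGRATION (1 << 0)

static void handle_mapping(const struct pvmw_state *pvmw, int flags)
{
	if (!pvmw->pte && (flags & TTU_MIGRATION)) {
		/* PMD-mapped THP: freeze the whole PMD as a migration entry. */
		printf("install PMD migration entry\n");
		return;
	}
	/* Otherwise fall through to the normal per-PTE unmap path. */
	printf("unmap one PTE\n");
}

int main(void)
{
	unsigned long pte = 0, pmd = 0;
	struct pvmw_state thp  = { .pte = NULL, .pmd = &pmd };
	struct pvmw_state base = { .pte = &pte, .pmd = &pmd };

	handle_mapping(&thp, TTU_MIGRATION);
	handle_mapping(&base, TTU_MIGRATION);
	return 0;
}
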
@@ -1390,6 +1411,31 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                address = pvmw.address;
 
 
+               if (IS_ENABLED(CONFIG_MIGRATION) &&
+                   (flags & TTU_MIGRATION) &&
+                   is_zone_device_page(page)) {
+                       swp_entry_t entry;
+                       pte_t swp_pte;
+
+                       pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
+
+                       /*
+                        * Store the pfn of the page in a special migration
+                        * pte. do_swap_page() will wait until the migration
+                        * pte is removed and then restart fault handling.
+                        */
+                       entry = make_migration_entry(page, 0);
+                       swp_pte = swp_entry_to_pte(entry);
+                       if (pte_soft_dirty(pteval))
+                               swp_pte = pte_swp_mksoft_dirty(swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                       /*
+                        * No need to invalidate here as it will synchronize
+                        * against the special swap migration pte.
+                        */
+                       goto discard;
+               }
+
                if (!(flags & TTU_IGNORE_ACCESS)) {
                        if (ptep_clear_flush_young_notify(vma, address,
                                                pvmw.pte)) {
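
The "special migration pte" stored in the device-private branch above is a swap-format, non-present pte whose type field marks it as a migration entry and whose offset field carries the page's pfn; do_swap_page() recognizes the type and waits until migration finishes. A simplified userspace model of that encoding, with a made-up bit layout and an arbitrary SWP_MIGRATION_READ value rather than the kernel's real swap pte format, is:

#include <assert.h>
#include <stdio.h>

/*
 * Made-up layout for illustration: low 6 bits hold the swap "type",
 * the rest holds the offset (here: the page's pfn). Real kernels derive
 * this from the architecture's swap PTE format.
 */
#define SWP_TYPE_BITS		6
#define SWP_MIGRATION_READ	30	/* arbitrary type value for this mock */

typedef struct { unsigned long val; } swp_entry_t;

static swp_entry_t make_migration_entry(unsigned long pfn)
{
	return (swp_entry_t){ (pfn << SWP_TYPE_BITS) | SWP_MIGRATION_READ };
}

static unsigned long swp_type(swp_entry_t e)   { return e.val & ((1UL << SWP_TYPE_BITS) - 1); }
static unsigned long swp_offset(swp_entry_t e) { return e.val >> SWP_TYPE_BITS; }

int main(void)
{
	swp_entry_t e = make_migration_entry(0x1234);

	/* A fault handler can detect the migration type and recover the pfn. */
	assert(swp_type(e) == SWP_MIGRATION_READ);
	printf("migration entry for pfn %#lx\n", swp_offset(e));
	return 0;
}
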
@@ -1444,8 +1490,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * will take care of the rest.
                         */
                        dec_mm_counter(mm, mm_counter(page));
+                       /* We have to invalidate as we cleared the pte */
+                       mmu_notifier_invalidate_range(mm, address,
+                                                     address + PAGE_SIZE);
                } else if (IS_ENABLED(CONFIG_MIGRATION) &&
-                               (flags & TTU_MIGRATION)) {
+                               (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
                        swp_entry_t entry;
                        pte_t swp_pte;
                        /*
@@ -1459,6 +1508,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
                        set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       /*
+                        * No need to invalidate here as it will synchronize
+                        * against the special swap migration pte.
+                        */
                } else if (PageAnon(page)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1470,6 +1523,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                WARN_ON_ONCE(1);
                                ret = false;
                                /* We have to invalidate as we cleared the pte */
+                               mmu_notifier_invalidate_range(mm, address,
+                                                       address + PAGE_SIZE);
                                page_vma_mapped_walk_done(&pvmw);
                                break;
                        }
@@ -1477,6 +1532,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        /* MADV_FREE page check */
                        if (!PageSwapBacked(page)) {
                                if (!PageDirty(page)) {
+                                       /* Invalidate as we cleared the pte */
+                                       mmu_notifier_invalidate_range(mm,
+                                               address, address + PAGE_SIZE);
                                        dec_mm_counter(mm, MM_ANONPAGES);
                                        goto discard;
                                }
@@ -1510,13 +1568,39 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
                        set_pte_at(mm, address, pvmw.pte, swp_pte);
-               } else
+                       /* Invalidate as we cleared the pte */
+                       mmu_notifier_invalidate_range(mm, address,
+                                                     address + PAGE_SIZE);
+               } else {
+                       /*
+                        * We should not need to notify here: we reach this
+                        * case only from freeze_page(), which is itself only
+                        * called from split_huge_page_to_list(), so both of
+                        * the following must be true:
+                        *   - the page is not anonymous
+                        *   - the page is locked
+                        *
+                        * Because it is a locked file-backed page, it cannot
+                        * be removed from the page cache and replaced by a new
+                        * page before mmu_notifier_invalidate_range_end(), so
+                        * no concurrent thread can update its page table to
+                        * point at a new page while a device is still using
+                        * this one.
+                        *
+                        * See Documentation/vm/mmu_notifier.txt
+                        */
                        dec_mm_counter(mm, mm_counter_file(page));
+               }
 discard:
+               /*
+                * No need to call mmu_notifier_invalidate_range() here: it has
+                * been done above for all cases that require it to happen under
+                * the page table lock, before mmu_notifier_invalidate_range_end().
+                *
+                * See Documentation/vm/mmu_notifier.txt
+                */
                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
-               mmu_notifier_invalidate_range(mm, address,
-                                             address + PAGE_SIZE);
        }
 
        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
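
The blanket mmu_notifier_invalidate_range() removed after put_page() can go because every branch above now either leaves behind something the secondary MMU will synchronize against (a migration entry, or a locked file-backed page that cannot be replaced before range_end()) or issues the invalidation itself while the page table lock is still held. A schematic of that rule, using stub helpers rather than the kernel's mmu_notifier API, is:

#include <stdio.h>

enum unmap_case { PTE_CLEARED, SWAP_ENTRY, MIGRATION_ENTRY, LOCKED_FILE_FREEZE };

/* Stub standing in for mmu_notifier_invalidate_range(). */
static void invalidate_range(unsigned long addr)
{
	printf("  invalidate_range(%#lx) under the page table lock\n", addr);
}

/* One walk iteration: decide whether an explicit notification is needed. */
static void unmap_one(enum unmap_case c, unsigned long addr)
{
	switch (c) {
	case PTE_CLEARED:
	case SWAP_ENTRY:
		/* The pte was cleared (or replaced by a swap pte): notify now. */
		invalidate_range(addr);
		break;
	case MIGRATION_ENTRY:
		/* Faults will block on the migration entry: no notify needed. */
		printf("  %#lx: migration entry, skip notify\n", addr);
		break;
	case LOCKED_FILE_FREEZE:
		/* A locked file page cannot be replaced before range_end(). */
		printf("  %#lx: locked file page, skip notify\n", addr);
		break;
	}
}

int main(void)
{
	printf("mmu_notifier_invalidate_range_start\n");
	unmap_one(PTE_CLEARED,        0x100000);
	unmap_one(SWAP_ENTRY,         0x101000);
	unmap_one(MIGRATION_ENTRY,    0x102000);
	unmap_one(LOCKED_FILE_FREEZE, 0x103000);
	printf("mmu_notifier_invalidate_range_end\n");
	return 0;
}
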
@@ -1575,7 +1659,8 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
         * locking requirements of exec(), migration skips
         * temporary VMAs until after exec() completes.
         */
-       if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
+       if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
+           && !PageKsm(page) && PageAnon(page))
                rwc.invalid_vma = invalid_migration_vma;
 
        if (flags & TTU_RMAP_LOCKED)