mm/rmap.c: remove redundant variable cend
index b874c4761e8422829610d9a1173c56139db5823b..6b5a0f219ac0821f9298e6ae6fcd266762ddb3b5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -899,7 +899,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
-               unsigned long cstart, cend;
+               unsigned long cstart;
                int ret = 0;
 
                cstart = address = pvmw.address;
@@ -915,7 +915,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
                        set_pte_at(vma->vm_mm, address, pte, entry);
-                       cend = cstart + PAGE_SIZE;
                        ret = 1;
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -931,7 +930,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        entry = pmd_mkclean(entry);
                        set_pmd_at(vma->vm_mm, address, pmd, entry);
                        cstart &= PMD_MASK;
-                       cend = cstart + PMD_SIZE;
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -939,10 +937,15 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 #endif
                }
 
-               if (ret) {
-                       mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
+               /*
+                * No need to call mmu_notifier_invalidate_range() as we are
+                * downgrading page table protection, not changing it to point
+                * to a new page.
+                *
+                * See Documentation/vm/mmu_notifier.txt
+                */
+               if (ret)
                        (*cleaned)++;
-               }
        }
 
        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
@@ -1426,6 +1429,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
                        set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                       /*
+                        * No need to invalidate here; it will synchronize
+                        * against the special swap migration pte.
+                        */
                        goto discard;
                }
 
@@ -1483,6 +1490,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * will take care of the rest.
                         */
                        dec_mm_counter(mm, mm_counter(page));
+                       /* We have to invalidate as we cleared the pte */
+                       mmu_notifier_invalidate_range(mm, address,
+                                                     address + PAGE_SIZE);
                } else if (IS_ENABLED(CONFIG_MIGRATION) &&
                                (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
                        swp_entry_t entry;
@@ -1498,6 +1508,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
                        set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       /*
+                        * No need to invalidate here; it will synchronize
+                        * against the special swap migration pte.
+                        */
                } else if (PageAnon(page)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1509,6 +1523,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                WARN_ON_ONCE(1);
                                ret = false;
                                /* We have to invalidate as we cleared the pte */
+                               mmu_notifier_invalidate_range(mm, address,
+                                                       address + PAGE_SIZE);
                                page_vma_mapped_walk_done(&pvmw);
                                break;
                        }
@@ -1516,6 +1532,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        /* MADV_FREE page check */
                        if (!PageSwapBacked(page)) {
                                if (!PageDirty(page)) {
+                                       /* Invalidate as we cleared the pte */
+                                       mmu_notifier_invalidate_range(mm,
+                                               address, address + PAGE_SIZE);
                                        dec_mm_counter(mm, MM_ANONPAGES);
                                        goto discard;
                                }
@@ -1549,13 +1568,39 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
                        set_pte_at(mm, address, pvmw.pte, swp_pte);
-               } else
+                       /* Invalidate as we cleared the pte */
+                       mmu_notifier_invalidate_range(mm, address,
+                                                     address + PAGE_SIZE);
+               } else {
+                       /*
+                        * We should not need to notify here as we reach this
+                        * case only from freeze_page(), itself only called
+                        * from split_huge_page_to_list(), so everything below
+                        * must be true:
+                        *   - page is not anonymous
+                        *   - page is locked
+                        *
+                        * As it is a locked file-backed page, it cannot be
+                        * removed from the page cache and replaced by a new
+                        * page before mmu_notifier_invalidate_range_end(), so
+                        * no concurrent thread can update its page table to
+                        * point at a new page while a device is still using
+                        * this page.
+                        *
+                        * See Documentation/vm/mmu_notifier.txt
+                        */
                        dec_mm_counter(mm, mm_counter_file(page));
+               }
 discard:
+               /*
+                * No need to call mmu_notifier_invalidate_range(); it has been
+                * done above for all cases requiring it to happen under the
+                * page table lock, before mmu_notifier_invalidate_range_end().
+                *
+                * See Documentation/vm/mmu_notifier.txt
+                */
                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
-               mmu_notifier_invalidate_range(mm, address,
-                                             address + PAGE_SIZE);
        }
 
        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
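
The comments added above all encode one rule: inside the
mmu_notifier_invalidate_range_start()/_end() bracket, an explicit
mmu_notifier_invalidate_range() under the page table lock is only needed when
a pte is cleared or repointed so the old page could be freed or replaced
before _end() runs; merely downgrading protection (write-protect, clean) can
rely on the deferred flush done by _end(). What follows is a minimal,
hypothetical user-space C sketch, not kernel code: every function below is a
stand-in stub, and it only models when the extra per-pte call is required.

	/* Hypothetical stubs standing in for the mmu_notifier callbacks. */
	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static void invalidate_range_start(unsigned long start, unsigned long end)
	{
		printf("range_start  [%#lx, %#lx)\n", start, end);
	}

	static void invalidate_range(unsigned long start, unsigned long end)
	{
		printf("  invalidate [%#lx, %#lx)\n", start, end);
	}

	static void invalidate_range_end(unsigned long start, unsigned long end)
	{
		printf("range_end    [%#lx, %#lx)\n", start, end);
	}

	/*
	 * Walk one range of ptes.  'unmapping' mimics try_to_unmap_one()
	 * clearing the pte so the old page may be freed; the other case
	 * mimics page_mkclean_one(), which only downgrades protection.
	 */
	static void walk(unsigned long start, unsigned long end, bool unmapping)
	{
		unsigned long addr;

		invalidate_range_start(start, end);
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			if (unmapping) {
				/*
				 * The pte was cleared and the page can be freed
				 * or replaced before range_end(), so the secondary
				 * TLB must be flushed now, while the page table
				 * lock would still be held in the real code.
				 */
				invalidate_range(addr, addr + PAGE_SIZE);
			}
			/*
			 * When only write-protecting/cleaning the pte, the page
			 * it points to does not change, so deferring the flush
			 * to invalidate_range_end() is enough.
			 */
		}
		invalidate_range_end(start, end);
	}

	int main(void)
	{
		walk(0x1000, 0x4000, false);	/* page_mkclean_one()-like case */
		walk(0x1000, 0x4000, true);	/* try_to_unmap_one()-like case */
		return 0;
	}

Running the sketch prints a per-page invalidate line only for the unmap-like
walk, mirroring how the patch keeps the per-pte flushes in try_to_unmap_one()
where the pte is cleared or repointed, while page_mkclean_one() drops them and
relies on mmu_notifier_invalidate_range_end().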