rmap: extend try_to_unmap() to be usable by split_huge_page()
author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
		Thu, 17 Mar 2016 21:20:04 +0000 (14:20 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 17 Mar 2016 22:09:34 +0000 (15:09 -0700)
Add support for two ttu_flags:

  - TTU_SPLIT_HUGE_PMD splits the huge PMD, if one is present, before
    trying to unmap the page;

  - TTU_RMAP_LOCKED indicates that the caller already holds the relevant
    rmap lock.

Also, change the rwc->done callback to check !page_mapcount() instead of
!page_mapped().  try_to_unmap() works at the pte level, so we are really
interested in whether this small page is still mapped, rather than the
compound page it is part of.
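
As a rough usage sketch (hypothetical caller, not part of this patch):
a split_huge_page() freeze path that already holds the anon_vma lock
could combine the two new flags like this.  The freeze_page_sketch()
helper and the exact flag set are illustrative assumptions:

	/*
	 * Illustrative sketch only; not part of this patch.  The caller
	 * already holds the anon_vma (rmap) lock, so it passes
	 * TTU_RMAP_LOCKED, and TTU_SPLIT_HUGE_PMD makes try_to_unmap()
	 * split any huge PMD it encounters before unmapping the page.
	 */
	static void freeze_page_sketch(struct page *page)
	{
		enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
					   TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
		int ret;

		VM_BUG_ON_PAGE(!PageHead(page), page);

		ret = try_to_unmap(page, ttu_flags);
		VM_BUG_ON_PAGE(ret != SWAP_SUCCESS, page);
	}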

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/huge_mm.h
include/linux/rmap.h
mm/huge_memory.c
mm/rmap.c

index a4cecb4801ec78cbb051edbcbea3f0e6d42d3837..01ad22e938b010656bcbda1d5b33b4c4db040077 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -106,6 +106,9 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        __split_huge_pmd(__vma, __pmd, __address);      \
        }  while (0)
 
+
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address);
+
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -173,6 +176,10 @@ static inline int split_huge_page(struct page *page)
 static inline void deferred_split_huge_page(struct page *page) {}
 #define split_huge_pmd(__vma, __pmd, __address)        \
        do { } while (0)
+
+static inline void split_huge_pmd_address(struct vm_area_struct *vma,
+               unsigned long address) {}
+
 static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
 {
index a5875e9b4a27d4ba7869bb7e48fd261d4ca4166e..3d975e2252d4ffea3a11d92324e96db279a44842 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -86,6 +86,7 @@ enum ttu_flags {
        TTU_MIGRATION = 2,              /* migration mode */
        TTU_MUNLOCK = 4,                /* munlock mode */
        TTU_LZFREE = 8,                 /* lazy free mode */
+       TTU_SPLIT_HUGE_PMD = 16,        /* split huge PMD if any */
 
        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
@@ -93,6 +94,8 @@ enum ttu_flags {
        TTU_BATCH_FLUSH = (1 << 11),    /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
+       TTU_RMAP_LOCKED = (1 << 12)     /* do not grab rmap lock:
+                                        * caller holds it */
 };
 
 #ifdef CONFIG_MMU
index e1a177c2079170deeefc62fb66c724584ce42452..11d15674ff38d819b25f3cc1f0a1a03681ab1c4b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3006,15 +3006,12 @@ out:
        }
 }
 
-static void split_huge_pmd_address(struct vm_area_struct *vma,
-                                   unsigned long address)
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
 
-       VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
-
        pgd = pgd_offset(vma->vm_mm, address);
        if (!pgd_present(*pgd))
                return;
index 30b739ce0ffa2de3f212035a3ef6cf8a2d14cac5..945933a01010eedef1e803e76dcba966c234c26c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1431,6 +1431,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
                goto out;
 
+       if (flags & TTU_SPLIT_HUGE_PMD)
+               split_huge_pmd_address(vma, address);
        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                goto out;
@@ -1576,10 +1578,10 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
        return is_vma_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_mapcount_is_zero(struct page *page)
 {
-       return !page_mapped(page);
-};
+       return !page_mapcount(page);
+}
 
 /**
  * try_to_unmap - try to remove all page table mappings to a page
@@ -1606,12 +1608,10 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
        struct rmap_walk_control rwc = {
                .rmap_one = try_to_unmap_one,
                .arg = &rp,
-               .done = page_not_mapped,
+               .done = page_mapcount_is_zero,
                .anon_lock = page_lock_anon_vma_read,
        };
 
-       VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
-
        /*
         * During exec, a temporary VMA is setup and later moved.
         * The VMA is moved under the anon_vma lock but not the
@@ -1623,9 +1623,12 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
        if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
                rwc.invalid_vma = invalid_migration_vma;
 
-       ret = rmap_walk(page, &rwc);
+       if (flags & TTU_RMAP_LOCKED)
+               ret = rmap_walk_locked(page, &rwc);
+       else
+               ret = rmap_walk(page, &rwc);
 
-       if (ret != SWAP_MLOCK && !page_mapped(page)) {
+       if (ret != SWAP_MLOCK && !page_mapcount(page)) {
                ret = SWAP_SUCCESS;
                if (rp.lazyfreed && !PageDirty(page))
                        ret = SWAP_LZFREE;
@@ -1633,6 +1636,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
        return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+       return !page_mapped(page);
+};
+
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked