mm/swapfile.c: unify normal/huge code path in swap_page_trans_huge_swapped()
diff --git a/mm/memory.c b/mm/memory.c
index 348279ff6e51283770f4aea735bfa8092cfc669d..19f47d7b9b86795ed8b116cffceedd1bb625675d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -859,6 +859,10 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                                return NULL;
                        }
                }
+
+               if (pte_devmap(pte))
+                       return NULL;
+
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
@@ -923,6 +927,8 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
+       if (pmd_devmap(pmd))
+               return NULL;
        if (is_zero_pfn(pfn))
                return NULL;
        if (unlikely(pfn > highest_memmap_pfn))
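
The two hunks above teach the "normal page" helpers to bail out on device-map entries, so callers never get a struct page back for memory mapped with pte_devmap()/pmd_devmap(). The sketch below is a stand-alone user-space model of the rejection order only; PTE_SPECIAL, PTE_DEVMAP and is_normal_pfn() are invented stand-ins, not kernel APIs.

/*
 * User-space model of the rejection order in the hunks above; the flag
 * names and is_normal_pfn() are made up.  Devmap entries are rejected
 * before the zero-pfn / range sanity checks, matching the new
 * pte_devmap()/pmd_devmap() tests.
 */
#include <stdbool.h>
#include <stdio.h>

#define PTE_SPECIAL	0x1	/* stand-in for pte_special() / special mappings */
#define PTE_DEVMAP	0x2	/* stand-in for pte_devmap() / pmd_devmap()      */

static const unsigned long zero_pfn = 0;
static const unsigned long highest_memmap_pfn = 1UL << 20;

/* Return true when the entry should yield an ordinary struct page. */
static bool is_normal_pfn(unsigned long flags, unsigned long pfn)
{
	if (flags & PTE_SPECIAL)
		return false;	/* special mapping: no backing struct page */
	if (flags & PTE_DEVMAP)
		return false;	/* device memory: never treated as normal  */
	if (pfn == zero_pfn)
		return false;
	if (pfn > highest_memmap_pfn)
		return false;
	return true;
}

int main(void)
{
	printf("plain pfn 42  -> %d\n", is_normal_pfn(0, 42));
	printf("devmap pfn 42 -> %d\n", is_normal_pfn(PTE_DEVMAP, 42));
	return 0;
}
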
@@ -1607,20 +1613,8 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, start, end);
-       for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
+       for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
                unmap_single_vma(&tlb, vma, start, end, NULL);
-
-               /*
-                * zap_page_range does not specify whether mmap_sem should be
-                * held for read or write. That allows parallel zap_page_range
-                * operations to unmap a PTE and defer a flush meaning that
-                * this call observes pte_none and fails to flush the TLB.
-                * Rather than adding a complex API, ensure that no stale
-                * TLB entries exist when this call returns.
-                */
-               flush_tlb_range(vma, start, end);
-       }
-
        mmu_notifier_invalidate_range_end(mm, start, end);
        tlb_finish_mmu(&tlb, start, end);
 }
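
The hunk above drops the per-VMA flush_tlb_range() workaround: with the mmu_gather changes, tlb_finish_mmu() is guaranteed to flush the whole zapped range, so one deferred flush now covers every VMA instead of one flush per loop iteration. A minimal user-space sketch of that control flow, assuming made-up helpers (struct gather, unmap_one_vma(), finish_and_flush()) rather than the kernel's mmu_gather API:

/*
 * User-space sketch only: models the structure of the loop above, where a
 * single deferred flush at the end covers every VMA in [start, end).
 */
#include <stdio.h>

struct gather {
	unsigned long start, end;	/* range being torn down       */
	int pending;			/* were any entries unmapped?  */
	int flushes;			/* TLB flushes actually issued */
};

static void unmap_one_vma(struct gather *g, unsigned long s, unsigned long e)
{
	/* pretend we unmapped something in [s, e) and deferred the flush */
	(void)s; (void)e;
	g->pending = 1;
}

static void finish_and_flush(struct gather *g)
{
	if (g->pending)
		g->flushes++;	/* one flush for the whole gathered range */
}

int main(void)
{
	struct gather g = { .start = 0x1000, .end = 0xa000 };
	unsigned long vma_starts[] = { 0x1000, 0x4000, 0x7000 };

	for (unsigned int i = 0; i < 3; i++)
		unmap_one_vma(&g, vma_starts[i], vma_starts[i] + 0x3000);
	finish_and_flush(&g);

	printf("flushed [%#lx, %#lx) with %d flush(es)\n",
	       g.start, g.end, g.flushes);	/* 1, not 3 */
	return 0;
}
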
@@ -3394,7 +3388,7 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page)
        if (write)
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
-       add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
+       add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
        page_add_file_rmap(page, true);
        /*
         * deposit and withdraw with pmd lock held
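
The one-liner above charges a file-backed huge page to the per-mm counter that matches its backing store instead of hard-coding MM_FILEPAGES; in the kernel, mm_counter_file() returns MM_SHMEMPAGES for swap-backed (shmem/tmpfs) pages. A tiny stand-alone model of that choice, with counter_for_file_page() and the swap_backed flag as made-up stand-ins for PageSwapBacked()/mm_counter_file():

/* Stand-alone model of the counter selection; names are invented. */
#include <stdbool.h>
#include <stdio.h>

enum mm_counter { MM_FILEPAGES, MM_ANONPAGES, MM_SWAPENTS, MM_SHMEMPAGES };

static enum mm_counter counter_for_file_page(bool swap_backed)
{
	return swap_backed ? MM_SHMEMPAGES : MM_FILEPAGES;
}

int main(void)
{
	printf("regular file THP -> counter %d\n", counter_for_file_page(false));
	printf("shmem/tmpfs THP  -> counter %d\n", counter_for_file_page(true));
	return 0;
}
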
@@ -4147,7 +4141,7 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
         * space.  Kernel faults are handled more gracefully.
         */
        if (flags & FAULT_FLAG_USER)
-               mem_cgroup_oom_enable();
+               mem_cgroup_enter_user_fault();
 
        if (unlikely(is_vm_hugetlb_page(vma)))
                ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
@@ -4155,7 +4149,7 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                ret = __handle_mm_fault(vma, address, flags);
 
        if (flags & FAULT_FLAG_USER) {
-               mem_cgroup_oom_disable();
+               mem_cgroup_exit_user_fault();
                /*
                 * The task may have entered a memcg OOM situation but
                 * if the allocation error was handled gracefully (no
@@ -4593,71 +4587,93 @@ EXPORT_SYMBOL(__might_fault);
 #endif
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-static void clear_gigantic_page(struct page *page,
-                               unsigned long addr,
-                               unsigned int pages_per_huge_page)
-{
-       int i;
-       struct page *p = page;
-
-       might_sleep();
-       for (i = 0; i < pages_per_huge_page;
-            i++, p = mem_map_next(p, page, i)) {
-               cond_resched();
-               clear_user_highpage(p, addr + i * PAGE_SIZE);
-       }
-}
-void clear_huge_page(struct page *page,
-                    unsigned long addr_hint, unsigned int pages_per_huge_page)
+/*
+ * Process all subpages of the specified huge page with the specified
+ * operation.  The target subpage will be processed last to keep its
+ * cache lines hot.
+ */
+static inline void process_huge_page(
+       unsigned long addr_hint, unsigned int pages_per_huge_page,
+       void (*process_subpage)(unsigned long addr, int idx, void *arg),
+       void *arg)
 {
        int i, n, base, l;
        unsigned long addr = addr_hint &
                ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
-       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-               clear_gigantic_page(page, addr, pages_per_huge_page);
-               return;
-       }
-
-       /* Clear sub-page to access last to keep its cache lines hot */
+       /* Process target subpage last to keep its cache lines hot */
        might_sleep();
        n = (addr_hint - addr) / PAGE_SIZE;
        if (2 * n <= pages_per_huge_page) {
-               /* If sub-page to access in first half of huge page */
+               /* If target subpage in first half of huge page */
                base = 0;
                l = n;
-               /* Clear sub-pages at the end of huge page */
+               /* Process subpages at the end of huge page */
                for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
                        cond_resched();
-                       clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+                       process_subpage(addr + i * PAGE_SIZE, i, arg);
                }
        } else {
-               /* If sub-page to access in second half of huge page */
+               /* If target subpage in second half of huge page */
                base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
                l = pages_per_huge_page - n;
-               /* Clear sub-pages at the begin of huge page */
+       /* Process subpages at the beginning of huge page */
                for (i = 0; i < base; i++) {
                        cond_resched();
-                       clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+                       process_subpage(addr + i * PAGE_SIZE, i, arg);
                }
        }
        /*
-        * Clear remaining sub-pages in left-right-left-right pattern
-        * towards the sub-page to access
+        * Process remaining subpages in left-right-left-right pattern
+        * towards the target subpage
         */
        for (i = 0; i < l; i++) {
                int left_idx = base + i;
                int right_idx = base + 2 * l - 1 - i;
 
                cond_resched();
-               clear_user_highpage(page + left_idx,
-                                   addr + left_idx * PAGE_SIZE);
+               process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
                cond_resched();
-               clear_user_highpage(page + right_idx,
-                                   addr + right_idx * PAGE_SIZE);
+               process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
        }
 }
 
+static void clear_gigantic_page(struct page *page,
+                               unsigned long addr,
+                               unsigned int pages_per_huge_page)
+{
+       int i;
+       struct page *p = page;
+
+       might_sleep();
+       for (i = 0; i < pages_per_huge_page;
+            i++, p = mem_map_next(p, page, i)) {
+               cond_resched();
+               clear_user_highpage(p, addr + i * PAGE_SIZE);
+       }
+}
+
+static void clear_subpage(unsigned long addr, int idx, void *arg)
+{
+       struct page *page = arg;
+
+       clear_user_highpage(page + idx, addr);
+}
+
+void clear_huge_page(struct page *page,
+                    unsigned long addr_hint, unsigned int pages_per_huge_page)
+{
+       unsigned long addr = addr_hint &
+               ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+
+       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+               clear_gigantic_page(page, addr, pages_per_huge_page);
+               return;
+       }
+
+       process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+}
+
 static void copy_user_gigantic_page(struct page *dst, struct page *src,
                                    unsigned long addr,
                                    struct vm_area_struct *vma,
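
process_huge_page() above factors the cache-friendly ordering out of clear_huge_page() so that copy_user_huge_page() below can reuse it through a callback plus an opaque argument. The stand-alone user-space sketch that follows (process_order() and print_idx() are made-up names) reproduces just that ordering: for 8 subpages with target index 5 it visits 0 1 2 7 3 6 4 5, touching the faulting subpage last so its cache lines stay hot for the access that triggered the fault.

/*
 * User-space model of the ordering only.  The index arithmetic mirrors
 * process_huge_page(): handle the far end (or the head) of the huge page
 * first, then converge on the target from both sides so the target
 * subpage is processed last.
 */
#include <stdio.h>

static void process_order(int nr, int target,
			  void (*cb)(int idx, void *arg), void *arg)
{
	int i, n = target, base, l;

	if (2 * n <= nr) {
		/* target in first half: walk the tail of the page first */
		base = 0;
		l = n;
		for (i = nr - 1; i >= 2 * n; i--)
			cb(i, arg);
	} else {
		/* target in second half: walk the head of the page first */
		base = nr - 2 * (nr - n);
		l = nr - n;
		for (i = 0; i < base; i++)
			cb(i, arg);
	}
	/* left-right-left-right towards the target, target index last */
	for (i = 0; i < l; i++) {
		cb(base + i, arg);
		cb(base + 2 * l - 1 - i, arg);
	}
}

static void print_idx(int idx, void *arg)
{
	(void)arg;
	printf("%d ", idx);
}

int main(void)
{
	process_order(8, 5, print_idx, NULL);	/* prints: 0 1 2 7 3 6 4 5 */
	putchar('\n');
	return 0;
}
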
@@ -4677,11 +4693,31 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
        }
 }
 
+struct copy_subpage_arg {
+       struct page *dst;
+       struct page *src;
+       struct vm_area_struct *vma;
+};
+
+static void copy_subpage(unsigned long addr, int idx, void *arg)
+{
+       struct copy_subpage_arg *copy_arg = arg;
+
+       copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
+                          addr, copy_arg->vma);
+}
+
 void copy_user_huge_page(struct page *dst, struct page *src,
-                        unsigned long addr, struct vm_area_struct *vma,
+                        unsigned long addr_hint, struct vm_area_struct *vma,
                         unsigned int pages_per_huge_page)
 {
-       int i;
+       unsigned long addr = addr_hint &
+               ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+       struct copy_subpage_arg arg = {
+               .dst = dst,
+               .src = src,
+               .vma = vma,
+       };
 
        if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
                copy_user_gigantic_page(dst, src, addr, vma,
@@ -4689,11 +4725,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
                return;
        }
 
-       might_sleep();
-       for (i = 0; i < pages_per_huge_page; i++) {
-               cond_resched();
-               copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
-       }
+       process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
 long copy_huge_page_from_user(struct page *dst_page,