Merge branch 'akpm' (patches from Andrew)
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Sep 2020 20:28:40 +0000 (13:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Sep 2020 20:28:40 +0000 (13:28 -0700)
Merge misc fixes from Andrew Morton:
 "19 patches.

  Subsystems affected by this patch series: MAINTAINERS, ipc, fork,
  checkpatch, lib, and mm (memcg, slub, pagemap, madvise, migration,
  hugetlb)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  include/linux/log2.h: add missing () around n in roundup_pow_of_two()
  mm/khugepaged.c: fix khugepaged's request size in collapse_file
  mm/hugetlb: fix a race between hugetlb sysctl handlers
  mm/hugetlb: try preferred node first when alloc gigantic page from cma
  mm/migrate: preserve soft dirty in remove_migration_pte()
  mm/migrate: remove unnecessary is_zone_device_page() check
  mm/rmap: fixup copying of soft dirty and uffd ptes
  mm/migrate: fixup setting UFFD_WP flag
  mm: madvise: fix vma use-after-free
  checkpatch: fix the usage of capture group ( ... )
  fork: adjust sysctl_max_threads definition to match prototype
  ipc: adjust proc_ipc_sem_dointvec definition to match prototype
  mm: track page table modifications in __apply_to_page_range()
  MAINTAINERS: IA64: mark Status as Odd Fixes only
  MAINTAINERS: add LLVM maintainers
  MAINTAINERS: update Cavium/Marvell entries
  mm: slub: fix conversion of freelist_corrupted()
  mm: memcg: fix memcg reclaim soft lockup
  memcg: fix use-after-free in uncharge_batch

mm/memory.c

diff --combined mm/memory.c
index 148eafb8cbb1b871d64983d1ef4c33cecd97521c,547b81a1405912d288c6d5adfdadac0a1788c5da..469af373ae76e16c5879687e3db1964c29491169
@@@ -73,6 -73,7 +73,7 @@@
  #include <linux/numa.h>
  #include <linux/perf_event.h>
  #include <linux/ptrace.h>
+ #include <linux/vmalloc.h>
  
  #include <trace/events/kmem.h>
  
@@@ -83,6 -84,7 +84,7 @@@
  #include <asm/tlb.h>
  #include <asm/tlbflush.h>
  
+ #include "pgalloc-track.h"
  #include "internal.h"
  
  #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
@@@ -2206,7 -2208,8 +2208,8 @@@ EXPORT_SYMBOL(vm_iomap_memory)
  
  static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data, bool create)
+                                    pte_fn_t fn, void *data, bool create,
+                                    pgtbl_mod_mask *mask)
  {
        pte_t *pte;
        int err = 0;
  
        if (create) {
                pte = (mm == &init_mm) ?
-                       pte_alloc_kernel(pmd, addr) :
+                       pte_alloc_kernel_track(pmd, addr, mask) :
                        pte_alloc_map_lock(mm, pmd, addr, &ptl);
                if (!pte)
                        return -ENOMEM;
                                break;
                }
        } while (addr += PAGE_SIZE, addr != end);
+       *mask |= PGTBL_PTE_MODIFIED;
  
        arch_leave_lazy_mmu_mode();
  
  
  static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data, bool create)
+                                    pte_fn_t fn, void *data, bool create,
+                                    pgtbl_mod_mask *mask)
  {
        pmd_t *pmd;
        unsigned long next;
        BUG_ON(pud_huge(*pud));
  
        if (create) {
-               pmd = pmd_alloc(mm, pud, addr);
+               pmd = pmd_alloc_track(mm, pud, addr, mask);
                if (!pmd)
                        return -ENOMEM;
        } else {
                next = pmd_addr_end(addr, end);
                if (create || !pmd_none_or_clear_bad(pmd)) {
                        err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
-                                                create);
+                                                create, mask);
                        if (err)
                                break;
                }
  
  static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data, bool create)
+                                    pte_fn_t fn, void *data, bool create,
+                                    pgtbl_mod_mask *mask)
  {
        pud_t *pud;
        unsigned long next;
        int err = 0;
  
        if (create) {
-               pud = pud_alloc(mm, p4d, addr);
+               pud = pud_alloc_track(mm, p4d, addr, mask);
                if (!pud)
                        return -ENOMEM;
        } else {
                next = pud_addr_end(addr, end);
                if (create || !pud_none_or_clear_bad(pud)) {
                        err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
-                                                create);
+                                                create, mask);
                        if (err)
                                break;
                }
  
  static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data, bool create)
+                                    pte_fn_t fn, void *data, bool create,
+                                    pgtbl_mod_mask *mask)
  {
        p4d_t *p4d;
        unsigned long next;
        int err = 0;
  
        if (create) {
-               p4d = p4d_alloc(mm, pgd, addr);
+               p4d = p4d_alloc_track(mm, pgd, addr, mask);
                if (!p4d)
                        return -ENOMEM;
        } else {
                next = p4d_addr_end(addr, end);
                if (create || !p4d_none_or_clear_bad(p4d)) {
                        err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
-                                                create);
+                                                create, mask);
                        if (err)
                                break;
                }
@@@ -2331,8 -2338,9 +2338,9 @@@ static int __apply_to_page_range(struc
                                 void *data, bool create)
  {
        pgd_t *pgd;
-       unsigned long next;
+       unsigned long start = addr, next;
        unsigned long end = addr + size;
+       pgtbl_mod_mask mask = 0;
        int err = 0;
  
        if (WARN_ON(addr >= end))
                next = pgd_addr_end(addr, end);
                if (!create && pgd_none_or_clear_bad(pgd))
                        continue;
-               err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
+               err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
  
+       if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+               arch_sync_kernel_mappings(start, start + size);
        return err;
  }
  
@@@ -2622,7 -2633,6 +2633,7 @@@ static inline void wp_page_reuse(struc
        if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
                update_mmu_cache(vma, vmf->address, vmf->pte);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
 +      count_vm_event(PGREUSE);
  }
  
  /*
@@@ -2928,25 -2938,50 +2939,25 @@@ static vm_fault_t do_wp_page(struct vm_
         * not dirty accountable.
         */
        if (PageAnon(vmf->page)) {
 -              int total_map_swapcount;
 -              if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
 -                                         page_count(vmf->page) != 1))
 +              struct page *page = vmf->page;
 +
 +              /* PageKsm() doesn't necessarily raise the page refcount */
 +              if (PageKsm(page) || page_count(page) != 1)
 +                      goto copy;
 +              if (!trylock_page(page))
 +                      goto copy;
 +              if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
 +                      unlock_page(page);
                        goto copy;
 -              if (!trylock_page(vmf->page)) {
 -                      get_page(vmf->page);
 -                      pte_unmap_unlock(vmf->pte, vmf->ptl);
 -                      lock_page(vmf->page);
 -                      vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 -                                      vmf->address, &vmf->ptl);
 -                      if (!pte_same(*vmf->pte, vmf->orig_pte)) {
 -                              update_mmu_tlb(vma, vmf->address, vmf->pte);
 -                              unlock_page(vmf->page);
 -                              pte_unmap_unlock(vmf->pte, vmf->ptl);
 -                              put_page(vmf->page);
 -                              return 0;
 -                      }
 -                      put_page(vmf->page);
 -              }
 -              if (PageKsm(vmf->page)) {
 -                      bool reused = reuse_ksm_page(vmf->page, vmf->vma,
 -                                                   vmf->address);
 -                      unlock_page(vmf->page);
 -                      if (!reused)
 -                              goto copy;
 -                      wp_page_reuse(vmf);
 -                      return VM_FAULT_WRITE;
 -              }
 -              if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
 -                      if (total_map_swapcount == 1) {
 -                              /*
 -                               * The page is all ours. Move it to
 -                               * our anon_vma so the rmap code will
 -                               * not search our parent or siblings.
 -                               * Protected against the rmap code by
 -                               * the page lock.
 -                               */
 -                              page_move_anon_rmap(vmf->page, vma);
 -                      }
 -                      unlock_page(vmf->page);
 -                      wp_page_reuse(vmf);
 -                      return VM_FAULT_WRITE;
                }
 -              unlock_page(vmf->page);
 +              /*
 +               * Ok, we've got the only map reference, and the only
 +               * page count reference, and the page is locked,
 +               * it's dark out, and we're wearing sunglasses. Hit it.
 +               */
 +              wp_page_reuse(vmf);
 +              unlock_page(page);
 +              return VM_FAULT_WRITE;
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
                return wp_page_shared(vmf);
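
The mm/memory.c hunks above come from "mm: track page table modifications in __apply_to_page_range()": each page-table helper now takes a pgtbl_mod_mask pointer, ORs in a PGTBL_*_MODIFIED bit whenever it installs or touches a level, and the top-level walk calls arch_sync_kernel_mappings(start, start + size) once at the end if any bit in ARCH_PAGE_TABLE_SYNC_MASK was set. As a rough, userspace-only sketch of that accumulate-then-sync pattern (the names tbl_mod_mask, MOD_*, sync_mappings() and apply_range() are illustrative stand-ins, not kernel APIs):

#include <stdio.h>

/* Illustrative mask bits, mirroring the PGTBL_*_MODIFIED idea. */
typedef unsigned int tbl_mod_mask;
#define MOD_LEAF   0x1u
#define MOD_TABLE  0x2u
/* Pretend only table-level changes need a sync, like ARCH_PAGE_TABLE_SYNC_MASK. */
#define SYNC_MASK  MOD_TABLE

/* Hypothetical stand-in for arch_sync_kernel_mappings(). */
static void sync_mappings(unsigned long start, unsigned long end)
{
	printf("sync [%lx, %lx)\n", start, end);
}

/* Inner level: modifies leaf entries and records that fact in *mask. */
static int apply_leaf_range(unsigned long addr, unsigned long end,
			    tbl_mod_mask *mask)
{
	for (; addr != end; addr += 0x1000)
		;		/* ... write a leaf entry here ... */
	*mask |= MOD_LEAF;
	return 0;
}

/* Outer level: pretend we had to install an intermediate table, then recurse. */
static int apply_range(unsigned long addr, unsigned long size)
{
	tbl_mod_mask mask = 0;
	int err;

	mask |= MOD_TABLE;	/* a new intermediate table was installed */
	err = apply_leaf_range(addr, addr + size, &mask);

	/* One sync for the whole walk, and only if a tracked level changed. */
	if (mask & SYNC_MASK)
		sync_mappings(addr, addr + size);
	return err;
}

int main(void)
{
	return apply_range(0x100000, 0x4000);
}

The point of threading the mask through the walk, rather than syncing at each level, is that the potentially expensive sync runs at most once per __apply_to_page_range() call, and not at all when nothing relevant was modified.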