mm: memcontrol: delete unused lrucare handling
author Johannes Weiner <hannes@cmpxchg.org>
Wed, 3 Jun 2020 23:02:24 +0000 (16:02 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 4 Jun 2020 03:09:48 +0000 (20:09 -0700)
Swapin faults were the last event to charge pages after they had already
been put on the LRU list.  Now that we charge directly on swapin, the
lrucare portion of the charge code is unused.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Shakeel Butt <shakeelb@google.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-19-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
kernel/events/uprobes.c
mm/filemap.c
mm/huge_memory.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/shmem.c
mm/swap_state.c
mm/userfaultfd.c

index 96257f995caa918c61c344abc568e3b6f134ed04..d5bf3b5bfe6d393816e6f71a6ac39a3ba97f7fd7 100644 (file)
@@ -355,8 +355,7 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                                                struct mem_cgroup *memcg);
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
-                     bool lrucare);
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
 
 void mem_cgroup_uncharge(struct page *page);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -839,7 +838,7 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(
 }
 
 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-                                   gfp_t gfp_mask, bool lrucare)
+                                   gfp_t gfp_mask)
 {
        return 0;
 }
index 4253c153e985eb94d774da303ae637c3f2ee7f44..eddc8db96027949cb526da4e7348157908c498a3 100644 (file)
@@ -167,8 +167,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                addr + PAGE_SIZE);
 
        if (new_page) {
-               err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL,
-                                       false);
+               err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
                if (err)
                        return err;
        }
index f08b0ca34e311a097f7197508b5b6eb68d1bd03e..45599062198980647c0a20a0266d4b6ce435c30e 100644 (file)
@@ -845,7 +845,7 @@ static int __add_to_page_cache_locked(struct page *page,
        page->index = offset;
 
        if (!huge) {
-               error = mem_cgroup_charge(page, current->mm, gfp_mask, false);
+               error = mem_cgroup_charge(page, current->mm, gfp_mask);
                if (error)
                        goto error;
        }
index e9201a88157e30667048e9ee63abd52224448e05..6df182a18d2cc9369014f3aa3890145fdc8e26a5 100644 (file)
@@ -593,7 +593,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-       if (mem_cgroup_charge(page, vma->vm_mm, gfp, false)) {
+       if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                count_vm_event(THP_FAULT_FALLBACK_CHARGE);
index 32c85b81837a101c6d0b2762f37daee94c384a74..f29038c485e09f6c9e185d31519d8364546b919a 100644 (file)
@@ -1059,7 +1059,7 @@ static void collapse_huge_page(struct mm_struct *mm,
                goto out_nolock;
        }
 
-       if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
+       if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
                result = SCAN_CGROUP_CHARGE_FAIL;
                goto out_nolock;
        }
@@ -1632,7 +1632,7 @@ static void collapse_file(struct mm_struct *mm,
                goto out;
        }
 
-       if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
+       if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
                result = SCAN_CGROUP_CHARGE_FAIL;
                goto out;
        }
index 425a265dc2a009ab58e27cae8a1fadf27f62c27c..316a84025090dbbf5bf796a35f7a889c6ad5e9b2 100644 (file)
@@ -2655,51 +2655,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 }
 #endif
 
-static void lock_page_lru(struct page *page, int *isolated)
+static void commit_charge(struct page *page, struct mem_cgroup *memcg)
 {
-       pg_data_t *pgdat = page_pgdat(page);
-
-       spin_lock_irq(&pgdat->lru_lock);
-       if (PageLRU(page)) {
-               struct lruvec *lruvec;
-
-               lruvec = mem_cgroup_page_lruvec(page, pgdat);
-               ClearPageLRU(page);
-               del_page_from_lru_list(page, lruvec, page_lru(page));
-               *isolated = 1;
-       } else
-               *isolated = 0;
-}
-
-static void unlock_page_lru(struct page *page, int isolated)
-{
-       pg_data_t *pgdat = page_pgdat(page);
-
-       if (isolated) {
-               struct lruvec *lruvec;
-
-               lruvec = mem_cgroup_page_lruvec(page, pgdat);
-               VM_BUG_ON_PAGE(PageLRU(page), page);
-               SetPageLRU(page);
-               add_page_to_lru_list(page, lruvec, page_lru(page));
-       }
-       spin_unlock_irq(&pgdat->lru_lock);
-}
-
-static void commit_charge(struct page *page, struct mem_cgroup *memcg,
-                         bool lrucare)
-{
-       int isolated;
-
        VM_BUG_ON_PAGE(page->mem_cgroup, page);
-
-       /*
-        * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
-        * may already be on some other mem_cgroup's LRU.  Take care of it.
-        */
-       if (lrucare)
-               lock_page_lru(page, &isolated);
-
        /*
         * Nobody should be changing or seriously looking at
         * page->mem_cgroup at this point:
@@ -2715,9 +2673,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
         *   have the page locked
         */
        page->mem_cgroup = memcg;
-
-       if (lrucare)
-               unlock_page_lru(page, isolated);
 }
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -6503,22 +6458,18 @@ out:
  * @page: page to charge
  * @mm: mm context of the victim
  * @gfp_mask: reclaim mode
- * @lrucare: page might be on the LRU already
  *
  * Try to charge @page to the memcg that @mm belongs to, reclaiming
  * pages according to @gfp_mask if necessary.
  *
  * Returns 0 on success. Otherwise, an error code is returned.
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
-                     bool lrucare)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
        unsigned int nr_pages = hpage_nr_pages(page);
        struct mem_cgroup *memcg = NULL;
        int ret = 0;
 
-       VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
-
        if (mem_cgroup_disabled())
                goto out;
 
@@ -6552,7 +6503,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
        if (ret)
                goto out_put;
 
-       commit_charge(page, memcg, lrucare);
+       commit_charge(page, memcg);
 
        local_irq_disable();
        mem_cgroup_charge_statistics(memcg, page, nr_pages);
@@ -6753,7 +6704,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
                page_counter_charge(&memcg->memsw, nr_pages);
        css_get_many(&memcg->css, nr_pages);
 
-       commit_charge(newpage, memcg, false);
+       commit_charge(newpage, memcg);
 
        local_irq_save(flags);
        mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
index 9c886e4207a2829307d65653648025c64fda86a3..d50d8b498af51dbca1a27c8814a7ce7f420be664 100644 (file)
@@ -2675,7 +2675,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
        }
 
-       if (mem_cgroup_charge(new_page, mm, GFP_KERNEL, false))
+       if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
        cgroup_throttle_swaprate(new_page, GFP_KERNEL);
 
@@ -3134,7 +3134,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                /* Tell memcg to use swap ownership records */
                                SetPageSwapCache(page);
                                err = mem_cgroup_charge(page, vma->vm_mm,
-                                                       GFP_KERNEL, false);
+                                                       GFP_KERNEL);
                                ClearPageSwapCache(page);
                                if (err)
                                        goto out_page;
@@ -3358,7 +3358,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        if (!page)
                goto oom;
 
-       if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false))
+       if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
                goto oom_free_page;
        cgroup_throttle_swaprate(page, GFP_KERNEL);
 
@@ -3854,7 +3854,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
        if (!vmf->cow_page)
                return VM_FAULT_OOM;
 
-       if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL, false)) {
+       if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
                put_page(vmf->cow_page);
                return VM_FAULT_OOM;
        }
index 44cee40221ecdebf6273dfda405ead5acd02b918..7bfd0962149e2072296849922a5a8fb5ce6faf87 100644 (file)
@@ -2786,7 +2786,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
        if (unlikely(anon_vma_prepare(vma)))
                goto abort;
-       if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false))
+       if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
                goto abort;
 
        /*
index b791161850460e8087f9c628d06f139487db6e2f..e83de27ce8f4ac7365d263be3586db3d39021c80 100644 (file)
@@ -624,7 +624,7 @@ static int shmem_add_to_page_cache(struct page *page,
        page->index = index;
 
        if (!PageSwapCache(page)) {
-               error = mem_cgroup_charge(page, charge_mm, gfp, false);
+               error = mem_cgroup_charge(page, charge_mm, gfp);
                if (error) {
                        if (PageTransHuge(page)) {
                                count_vm_event(THP_FILE_FALLBACK);
index f841257a3014cc27a36bbeeab923008ca645a1e7..ab0462819a5b974a849143d12f0585852e284070 100644 (file)
@@ -435,7 +435,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                goto fail_unlock;
        }
 
-       if (mem_cgroup_charge(page, NULL, gfp_mask, false)) {
+       if (mem_cgroup_charge(page, NULL, gfp_mask)) {
                delete_from_swap_cache(page);
                goto fail_unlock;
        }
index 2745489415cc0e4f0ecf52fd8dd838cb096ac463..7f5194046b01be1b239446c4a6dc29e768aa7d08 100644 (file)
@@ -96,7 +96,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
        __SetPageUptodate(page);
 
        ret = -ENOMEM;
-       if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL, false))
+       if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
                goto out_release;
 
        _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));