mm: convert mm_counter_file() to take a folio
author		Kefeng Wang <wangkefeng.wang@huawei.com>
Thu, 11 Jan 2024 15:24:29 +0000 (15:24 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 00:00:04 +0000 (16:00 -0800)
Now that all callers of mm_counter_file() have a folio, convert
mm_counter_file() to take a folio.  This saves a call to compound_head()
hidden inside PageSwapBacked().

Link: https://lkml.kernel.org/r/20240111152429.3374566-11-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
kernel/events/uprobes.c
mm/huge_memory.c
mm/khugepaged.c
mm/memory.c
mm/rmap.c

index 22e597b36b388718d469ca24cef6e699b1653b1b..ac6b71cbdffbfaaf64290ff74d8c61cf35abfc7c 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2595,10 +2595,10 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member)
        mm_trace_rss_stat(mm, member);
 }
 
-/* Optimized variant when page is already known not to be PageAnon */
-static inline int mm_counter_file(struct page *page)
+/* Optimized variant when folio is already known not to be anon */
+static inline int mm_counter_file(struct folio *folio)
 {
-       if (PageSwapBacked(page))
+       if (folio_test_swapbacked(folio))
                return MM_SHMEMPAGES;
        return MM_FILEPAGES;
 }
@@ -2607,7 +2607,7 @@ static inline int mm_counter(struct folio *folio)
 {
        if (folio_test_anon(folio))
                return MM_ANONPAGES;
-       return mm_counter_file(&folio->page);
+       return mm_counter_file(folio);
 }
 
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
index 929e98c629652a0fef1b71e6c002cca41936c4b4..e4834d23e1d1a209dd6f7ff34c7c9e30a56334fe 100644 (file)
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -188,7 +188,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                dec_mm_counter(mm, MM_ANONPAGES);
 
        if (!folio_test_anon(old_folio)) {
-               dec_mm_counter(mm, mm_counter_file(old_page));
+               dec_mm_counter(mm, mm_counter_file(old_folio));
                inc_mm_counter(mm, MM_ANONPAGES);
        }
 
index 7a28a7db08ea0d0a9df538495ff9fb05bdbf72e7..f005f04247355fdc9b038cd4858e50909e1c37cd 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1931,7 +1931,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                } else {
                        if (arch_needs_pgtable_deposit())
                                zap_deposited_table(tlb->mm, pmd);
-                       add_mm_counter(tlb->mm, mm_counter_file(&folio->page),
+                       add_mm_counter(tlb->mm, mm_counter_file(folio),
                                       -HPAGE_PMD_NR);
                }
 
@@ -2456,7 +2456,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        folio_remove_rmap_pmd(folio, page, vma);
                        folio_put(folio);
                }
-               add_mm_counter(mm, mm_counter_file(&folio->page), -HPAGE_PMD_NR);
+               add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
                return;
        }
 
index 2b219acb528e25fd7f16b9f58d85a81048355bd4..fe43fbc44525399ec523f8e5ef8626a49ac690cc 100644 (file)
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1634,7 +1634,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
        /* step 3: set proper refcount and mm_counters. */
        if (nr_ptes) {
                folio_ref_sub(folio, nr_ptes);
-               add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
+               add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
        }
 
        /* step 4: remove empty page table */
@@ -1665,7 +1665,7 @@ abort:
        if (nr_ptes) {
                flush_tlb_mm(mm);
                folio_ref_sub(folio, nr_ptes);
-               add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
+               add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
        }
        if (start_pte)
                pte_unmap_unlock(start_pte, ptl);
index 87ef9809984728f89e1bdd76313b8f35d759ca1b..5e608edfe330c522c92dcb62e71bcb09500e5ea4 100644 (file)
@@ -966,7 +966,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        } else if (page) {
                folio_get(folio);
                folio_dup_file_rmap_pte(folio, page);
-               rss[mm_counter_file(page)]++;
+               rss[mm_counter_file(folio)]++;
        }
 
        /*
@@ -1873,7 +1873,7 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
                return -EBUSY;
        /* Ok, finally just insert the thing.. */
        folio_get(folio);
-       inc_mm_counter(vma->vm_mm, mm_counter_file(page));
+       inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
        folio_add_file_rmap_pte(folio, page, vma);
        set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
        return 0;
@@ -3178,7 +3178,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
        if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
                if (old_folio) {
                        if (!folio_test_anon(old_folio)) {
-                               dec_mm_counter(mm, mm_counter_file(&old_folio->page));
+                               dec_mm_counter(mm, mm_counter_file(old_folio));
                                inc_mm_counter(mm, MM_ANONPAGES);
                        }
                } else {
@@ -4483,7 +4483,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
        if (write)
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
-       add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
+       add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
        folio_add_file_rmap_pmd(folio, page, vma);
 
        /*
@@ -4546,7 +4546,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
                folio_add_new_anon_rmap(folio, vma, addr);
                folio_add_lru_vma(folio, vma);
        } else {
-               add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+               add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
                folio_add_file_rmap_ptes(folio, page, nr, vma);
        }
        set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
index 4648cf1d8178b5b3e5e8edc66a4f2582dca0f1d9..1cf2bffa48ed87ba9f763a63d1863f5cef9d5b9b 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1903,7 +1903,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         *
                         * See Documentation/mm/mmu_notifier.rst
                         */
-                       dec_mm_counter(mm, mm_counter_file(&folio->page));
+                       dec_mm_counter(mm, mm_counter_file(folio));
                }
 discard:
                if (unlikely(folio_test_hugetlb(folio)))