mm/damon/paddr: rename 'damon_pa_access_chk_result->page_sz' to 'folio_sz'
author SeongJae Park <sj@kernel.org>
Mon, 9 Jan 2023 21:33:33 +0000 (21:33 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 19 Jan 2023 01:12:58 +0000 (17:12 -0800)
DAMON's physical address space monitoring operations set now uses folios.
Rename 'damon_pa_access_chk_result->page_sz' to 'folio_sz' to reflect the
fact.

Link: https://lkml.kernel.org/r/20230109213335.62525-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
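
The renamed field keeps the same meaning: the size of the mapping that
backed the access check, which is what lets __damon_pa_check_access()
reuse a single check result for every sampling address falling in the
same folio.  Below is a minimal user-space sketch of that reuse logic;
ALIGN_DOWN(), the 4KiB size, and check_access_cached() are illustrative
stand-ins for this example, not the kernel's implementation.

#include <stdbool.h>
#include <stdio.h>

/* Power-of-two round-down, mirroring the kernel's ALIGN_DOWN(). */
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

/* Stand-ins for the statics in __damon_pa_check_access(). */
static unsigned long last_addr;
static unsigned long last_folio_sz = 4096;	/* PAGE_SIZE stand-in */
static bool last_accessed;

/* Reuse the last result when addr falls in the same checked folio. */
static bool check_access_cached(unsigned long addr)
{
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(addr, last_folio_sz))
		return last_accessed;
	/* A real implementation would redo the rmap-based check here. */
	last_addr = addr;
	return last_accessed;
}

int main(void)
{
	last_addr = 0x1000;
	last_accessed = true;
	/* 0x1abc lies in the same 4KiB folio as 0x1000: cached hit. */
	printf("cached: %d\n", check_access_cached(0x1abc));
	return 0;
}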
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 99d4c357ef2b653712287001277f9624fce84b25..65c1e0f91535b635dc39bbbc574a3a1fef7254ea 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -80,7 +80,8 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 }
 
 struct damon_pa_access_chk_result {
-       unsigned long page_sz;
+       /* size of the folio for the access checked physical memory address */
+       unsigned long folio_sz;
        bool accessed;
 };
 
@@ -91,7 +92,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 
        result->accessed = false;
-       result->page_sz = PAGE_SIZE;
+       result->folio_sz = PAGE_SIZE;
        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte) {
@@ -103,7 +104,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
                        result->accessed = pmd_young(*pvmw.pmd) ||
                                !folio_test_idle(folio) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
-                       result->page_sz = HPAGE_PMD_SIZE;
+                       result->folio_sz = HPAGE_PMD_SIZE;
 #else
                        WARN_ON_ONCE(1);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -118,11 +119,11 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
        return !result->accessed;
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 {
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
        struct damon_pa_access_chk_result result = {
-               .page_sz = PAGE_SIZE,
+               .folio_sz = PAGE_SIZE,
                .accessed = false,
        };
        struct rmap_walk_control rwc = {
@@ -157,25 +158,25 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
        folio_put(folio);
 
 out:
-       *page_sz = result.page_sz;
+       *folio_sz = result.folio_sz;
        return result.accessed;
 }
 
 static void __damon_pa_check_access(struct damon_region *r)
 {
        static unsigned long last_addr;
-       static unsigned long last_page_sz = PAGE_SIZE;
+       static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;
 
        /* If the region is in the last checked page, reuse the result */
-       if (ALIGN_DOWN(last_addr, last_page_sz) ==
-                               ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
+       if (ALIGN_DOWN(last_addr, last_folio_sz) ==
+                               ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
                if (last_accessed)
                        r->nr_accesses++;
                return;
        }
 
-       last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
+       last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
        if (last_accessed)
                r->nr_accesses++;
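
For reference, the value reported through 'folio_sz' follows the mapping
level that the rmap walk found: PAGE_SIZE for PTE-mapped folios and
HPAGE_PMD_SIZE for PMD-mapped THPs, as the second and third hunks show.
The following self-contained sketch only illustrates that selection;
report_mapping_size() and the constant values are assumptions for the
example, not kernel code.

#include <stdbool.h>

#define PAGE_SIZE	4096UL
#define HPAGE_PMD_SIZE	(2UL << 20)	/* 2 MiB PMD mapping, x86-64-style */

struct damon_pa_access_chk_result {
	/* size of the folio for the access checked physical memory address */
	unsigned long folio_sz;
	bool accessed;
};

/*
 * Simplified stand-in for __damon_pa_young()'s walk: report the folio
 * size according to whether the mapping was found at PTE or PMD level.
 */
static void report_mapping_size(struct damon_pa_access_chk_result *result,
				bool pte_mapped)
{
	result->folio_sz = pte_mapped ? PAGE_SIZE : HPAGE_PMD_SIZE;
}

int main(void)
{
	struct damon_pa_access_chk_result res = { .accessed = false };

	report_mapping_size(&res, true);	/* PTE-mapped: 4 KiB */
	report_mapping_size(&res, false);	/* PMD-mapped: 2 MiB */
	return 0;
}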