Merge tag 'kbuild-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy...
diff --git a/mm/memory.c b/mm/memory.c
index 3e836fecd0354c8aa433d65ce59b8b5d62ea75a1..f456f3b5049cf1545e875436cd9138b3c2b6ead4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -625,6 +625,16 @@ out:
        return pfn_to_page(pfn);
 }
 
+struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
+                           pte_t pte)
+{
+       struct page *page = vm_normal_page(vma, addr, pte);
+
+       if (page)
+               return page_folio(page);
+       return NULL;
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                                pmd_t pmd)
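
The new vm_normal_folio() helper is a thin wrapper around vm_normal_page()
so that PTE walkers can work directly in terms of folios. A minimal sketch
of a hypothetical caller (the function name and the surrounding walk are
assumptions, not part of this patch):

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Hypothetical walker step: count anonymous folios mapped at one PTE. */
static void count_anon_at(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, unsigned long *nr_anon)
{
	/* NULL for the zero page and other special mappings. */
	struct folio *folio = vm_normal_folio(vma, addr, pte);

	if (folio && folio_test_anon(folio))
		(*nr_anon)++;
}
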
@@ -853,13 +863,13 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 static inline int
 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-                 struct page **prealloc, struct page *page)
+                 struct folio **prealloc, struct page *page)
 {
-       struct page *new_page;
+       struct folio *new_folio;
        pte_t pte;
 
-       new_page = *prealloc;
-       if (!new_page)
+       new_folio = *prealloc;
+       if (!new_folio)
                return -EAGAIN;
 
        /*
@@ -867,18 +877,18 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
         * over and copy the page & arm it.
         */
        *prealloc = NULL;
-       copy_user_highpage(new_page, page, addr, src_vma);
-       __SetPageUptodate(new_page);
-       page_add_new_anon_rmap(new_page, dst_vma, addr);
-       lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
-       rss[mm_counter(new_page)]++;
+       copy_user_highpage(&new_folio->page, page, addr, src_vma);
+       __folio_mark_uptodate(new_folio);
+       folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+       folio_add_lru_vma(new_folio, dst_vma);
+       rss[MM_ANONPAGES]++;
 
        /* All done, just insert the new page copy in the child */
-       pte = mk_pte(new_page, dst_vma->vm_page_prot);
+       pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
        pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
        if (userfaultfd_pte_wp(dst_vma, *src_pte))
                /* Uffd-wp needs to be delivered to dest pte as well */
-               pte = pte_wrprotect(pte_mkuffd_wp(pte));
+               pte = pte_mkuffd_wp(pte);
        set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
        return 0;
 }
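
The explicit pte_wrprotect() disappears here because, as of this series,
pte_mkuffd_wp() itself write-protects the PTE, so a uffd-wp PTE can never be
left writable by accident. Roughly, on architectures with a uffd-wp bit the
helper now behaves like the sketch below (simplified from the x86 variant;
an illustration, not the exact definition):

#include <linux/pgtable.h>

/* Sketch: marking a PTE uffd-wp now implies write protection (x86-like). */
static inline pte_t sketch_pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(pte_set_flags(pte, _PAGE_UFFD_WP));
}
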
@@ -890,33 +900,36 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 static inline int
 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-                struct page **prealloc)
+                struct folio **prealloc)
 {
        struct mm_struct *src_mm = src_vma->vm_mm;
        unsigned long vm_flags = src_vma->vm_flags;
        pte_t pte = *src_pte;
        struct page *page;
+       struct folio *folio;
 
        page = vm_normal_page(src_vma, addr, pte);
-       if (page && PageAnon(page)) {
+       if (page)
+               folio = page_folio(page);
+       if (page && folio_test_anon(folio)) {
                /*
                 * If this page may have been pinned by the parent process,
                 * copy the page immediately for the child so that we'll always
                 * guarantee the pinned page won't be randomly replaced in the
                 * future.
                 */
-               get_page(page);
+               folio_get(folio);
                if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
-                       /* Page maybe pinned, we have to copy. */
-                       put_page(page);
+                       /* Page may be pinned, we have to copy. */
+                       folio_put(folio);
                        return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
                                                 addr, rss, prealloc, page);
                }
-               rss[mm_counter(page)]++;
+               rss[MM_ANONPAGES]++;
        } else if (page) {
-               get_page(page);
+               folio_get(folio);
                page_dup_file_rmap(page, false);
-               rss[mm_counter(page)]++;
+               rss[mm_counter_file(page)]++;
        }
 
        /*
@@ -927,7 +940,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                ptep_set_wrprotect(src_mm, addr, src_pte);
                pte = pte_wrprotect(pte);
        }
-       VM_BUG_ON(page && PageAnon(page) && PageAnonExclusive(page));
+       VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
 
        /*
         * If it's a shared mapping, mark it clean in
@@ -944,23 +957,22 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        return 0;
 }
 
-static inline struct page *
-page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
-                  unsigned long addr)
+static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
+               struct vm_area_struct *vma, unsigned long addr)
 {
-       struct page *new_page;
+       struct folio *new_folio;
 
-       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
-       if (!new_page)
+       new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+       if (!new_folio)
                return NULL;
 
-       if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
-               put_page(new_page);
+       if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
+               folio_put(new_folio);
                return NULL;
        }
-       cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+       cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-       return new_page;
+       return new_folio;
 }
 
 static int
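
page_copy_prealloc() now hands back a charged folio instead of a bare page.
The same alloc_page_vma() to vma_alloc_folio() conversion recurs throughout
this patch; a hedged sketch of the idiom (the helper name is illustrative,
and cgroup_throttle_swaprate() is left out):

#include <linux/gfp.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>

/*
 * Allocate an order-0 folio for @vma/@addr and charge it to @mm's memcg.
 * Returns NULL on allocation or charge failure.
 */
static struct folio *sketch_alloc_charged_folio(struct mm_struct *mm,
						struct vm_area_struct *vma,
						unsigned long addr)
{
	/* Folio counterpart of alloc_page_vma(): order 0, no THP hint. */
	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
					      addr, false);

	if (!folio)
		return NULL;

	if (mem_cgroup_charge(folio, mm, GFP_KERNEL)) {
		folio_put(folio);
		return NULL;
	}
	return folio;
}
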
@@ -976,7 +988,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        int progress, ret = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
-       struct page *prealloc = NULL;
+       struct folio *prealloc = NULL;
 
 again:
        progress = 0;
@@ -1046,7 +1058,7 @@ again:
                         * will allocate page according to address).  This
                         * could only happen if one pinned pte changed.
                         */
-                       put_page(prealloc);
+                       folio_put(prealloc);
                        prealloc = NULL;
                }
                progress += 8;
@@ -1083,7 +1095,7 @@ again:
                goto again;
 out:
        if (unlikely(prealloc))
-               put_page(prealloc);
+               folio_put(prealloc);
        return ret;
 }
 
@@ -1256,7 +1268,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 
        if (is_cow) {
                mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
-                                       0, src_vma, src_mm, addr, end);
+                                       0, src_mm, addr, end);
                mmu_notifier_invalidate_range_start(&range);
                /*
                 * Disabling preemption is not needed for the write side, as
@@ -1392,8 +1404,7 @@ again:
                                                force_flush = 1;
                                        }
                                }
-                               if (pte_young(ptent) &&
-                                   likely(!(vma->vm_flags & VM_SEQ_READ)))
+                               if (pte_young(ptent) && likely(vma_has_recency(vma)))
                                        mark_page_accessed(page);
                        }
                        rss[mm_counter(page)]--;
@@ -1602,7 +1613,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 static void unmap_single_vma(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
                unsigned long end_addr,
-               struct zap_details *details)
+               struct zap_details *details, bool mm_wr_locked)
 {
        unsigned long start = max(vma->vm_start, start_addr);
        unsigned long end;
@@ -1617,7 +1628,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
                uprobe_munmap(vma, start, end);
 
        if (unlikely(vma->vm_flags & VM_PFNMAP))
-               untrack_pfn(vma, 0, 0);
+               untrack_pfn(vma, 0, 0, mm_wr_locked);
 
        if (start != end) {
                if (unlikely(is_vm_hugetlb_page(vma))) {
@@ -1664,7 +1675,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
                struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr)
+               unsigned long end_addr, bool mm_wr_locked)
 {
        struct mmu_notifier_range range;
        struct zap_details details = {
@@ -1674,45 +1685,16 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
        };
        MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
 
-       mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+       mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
                                start_addr, end_addr);
        mmu_notifier_invalidate_range_start(&range);
        do {
-               unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
+               unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+                                mm_wr_locked);
        } while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
        mmu_notifier_invalidate_range_end(&range);
 }
 
-/**
- * zap_page_range - remove user pages in a given range
- * @vma: vm_area_struct holding the applicable pages
- * @start: starting address of pages to zap
- * @size: number of bytes to zap
- *
- * Caller must protect the VMA list
- */
-void zap_page_range(struct vm_area_struct *vma, unsigned long start,
-               unsigned long size)
-{
-       struct maple_tree *mt = &vma->vm_mm->mm_mt;
-       unsigned long end = start + size;
-       struct mmu_notifier_range range;
-       struct mmu_gather tlb;
-       MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
-
-       lru_add_drain();
-       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
-                               start, start + size);
-       tlb_gather_mmu(&tlb, vma->vm_mm);
-       update_hiwater_rss(vma->vm_mm);
-       mmu_notifier_invalidate_range_start(&range);
-       do {
-               unmap_single_vma(&tlb, vma, start, range.end, NULL);
-       } while ((vma = mas_find(&mas, end - 1)) != NULL);
-       mmu_notifier_invalidate_range_end(&range);
-       tlb_finish_mmu(&tlb);
-}
-
 /**
  * zap_page_range_single - remove user pages in a given range
  * @vma: vm_area_struct holding the applicable pages
@@ -1730,7 +1712,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
        struct mmu_gather tlb;
 
        lru_add_drain();
-       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                address, end);
        if (is_vm_hugetlb_page(vma))
                adjust_range_if_pmd_sharing_possible(vma, &range.start,
@@ -1742,7 +1724,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
         * unmap 'address-end' not 'range.start-range.end' as range
         * could have been expanded for hugetlb pmd sharing.
         */
-       unmap_single_vma(&tlb, vma, address, end, details);
+       unmap_single_vma(&tlb, vma, address, end, details, false);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
 }
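
With zap_page_range() removed above, single-VMA zaps go through
zap_page_range_single(); whole-VMA zaps can use the zap_vma_pages() wrapper
added in the same series. A hedged conversion sketch (the caller and range
are illustrative):

#include <linux/mm.h>

/* Former zap_page_range(vma, addr, PAGE_SIZE) caller after conversion. */
static void sketch_zap_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	/* The range must lie within @vma; NULL details means "zap everything". */
	zap_page_range_single(vma, addr, PAGE_SIZE, NULL);
}
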
@@ -1947,7 +1929,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
                BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
-               vma->vm_flags |= VM_MIXEDMAP;
+               vm_flags_set(vma, VM_MIXEDMAP);
        }
        /* Defer page refcount checking till we're about to map that page. */
        return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
@@ -2005,7 +1987,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
                BUG_ON(mmap_read_trylock(vma->vm_mm));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
-               vma->vm_flags |= VM_MIXEDMAP;
+               vm_flags_set(vma, VM_MIXEDMAP);
        }
        return insert_page(vma, addr, page, vma->vm_page_prot);
 }
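
Direct writes to vma->vm_flags are being phased out in favor of accessors
that assert the mmap lock is held for writing; that is why the two
VM_MIXEDMAP updates above, and the VM_IO | VM_PFNMAP setup in
remap_pfn_range_notrack() below, switch to vm_flags_set(). A minimal sketch
of the accessor usage in a hypothetical driver mmap handler:

#include <linux/fs.h>
#include <linux/mm.h>

static int sketch_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* ->mmap() runs under the mmap write lock, so the asserts are happy. */
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	vm_flags_clear(vma, VM_MAYEXEC);
	return 0;
}
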
@@ -2471,7 +2453,7 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
                vma->vm_pgoff = pfn;
        }
 
-       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
@@ -2511,7 +2493,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
        err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
        if (err)
-               untrack_pfn(vma, pfn, PAGE_ALIGN(size));
+               untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
        return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -3064,8 +3046,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
        const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
        struct vm_area_struct *vma = vmf->vma;
        struct mm_struct *mm = vma->vm_mm;
-       struct page *old_page = vmf->page;
-       struct page *new_page = NULL;
+       struct folio *old_folio = NULL;
+       struct folio *new_folio = NULL;
        pte_t entry;
        int page_copied = 0;
        struct mmu_notifier_range range;
@@ -3073,21 +3055,22 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 
        delayacct_wpcopy_start();
 
+       if (vmf->page)
+               old_folio = page_folio(vmf->page);
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
 
        if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-               new_page = alloc_zeroed_user_highpage_movable(vma,
-                                                             vmf->address);
-               if (!new_page)
+               new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+               if (!new_folio)
                        goto oom;
        } else {
-               new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-                               vmf->address);
-               if (!new_page)
+               new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+                               vmf->address, false);
+               if (!new_folio)
                        goto oom;
 
-               ret = __wp_page_copy_user(new_page, old_page, vmf);
+               ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
                if (ret) {
                        /*
                         * COW failed, if the fault was solved by other,
@@ -3096,23 +3079,23 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                         * from the second attempt.
                         * The -EHWPOISON case will not be retried.
                         */
-                       put_page(new_page);
-                       if (old_page)
-                               put_page(old_page);
+                       folio_put(new_folio);
+                       if (old_folio)
+                               folio_put(old_folio);
 
                        delayacct_wpcopy_end();
                        return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
                }
-               kmsan_copy_page_meta(new_page, old_page);
+               kmsan_copy_page_meta(&new_folio->page, vmf->page);
        }
 
-       if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
+       if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
                goto oom_free_new;
-       cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+       cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-       __SetPageUptodate(new_page);
+       __folio_mark_uptodate(new_folio);
 
-       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
                                vmf->address & PAGE_MASK,
                                (vmf->address & PAGE_MASK) + PAGE_SIZE);
        mmu_notifier_invalidate_range_start(&range);
@@ -3122,16 +3105,16 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
         */
        vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
        if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
-               if (old_page) {
-                       if (!PageAnon(old_page)) {
-                               dec_mm_counter(mm, mm_counter_file(old_page));
+               if (old_folio) {
+                       if (!folio_test_anon(old_folio)) {
+                               dec_mm_counter(mm, mm_counter_file(&old_folio->page));
                                inc_mm_counter(mm, MM_ANONPAGES);
                        }
                } else {
                        inc_mm_counter(mm, MM_ANONPAGES);
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
-               entry = mk_pte(new_page, vma->vm_page_prot);
+               entry = mk_pte(&new_folio->page, vma->vm_page_prot);
                entry = pte_sw_mkyoung(entry);
                if (unlikely(unshare)) {
                        if (pte_soft_dirty(vmf->orig_pte))
@@ -3150,8 +3133,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                 * some TLBs while the old PTE remains in others.
                 */
                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
-               page_add_new_anon_rmap(new_page, vma, vmf->address);
-               lru_cache_add_inactive_or_unevictable(new_page, vma);
+               folio_add_new_anon_rmap(new_folio, vma, vmf->address);
+               folio_add_lru_vma(new_folio, vma);
                /*
                 * We call the notify macro here because, when using secondary
                 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3160,7 +3143,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                BUG_ON(unshare && pte_write(entry));
                set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
                update_mmu_cache(vma, vmf->address, vmf->pte);
-               if (old_page) {
+               if (old_folio) {
                        /*
                         * Only after switching the pte to the new page may
                         * we remove the mapcount here. Otherwise another
@@ -3183,18 +3166,18 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                         * mapcount is visible. So transitively, TLBs to
                         * old page will be flushed before it can be reused.
                         */
-                       page_remove_rmap(old_page, vma, false);
+                       page_remove_rmap(vmf->page, vma, false);
                }
 
                /* Free the old page.. */
-               new_page = old_page;
+               new_folio = old_folio;
                page_copied = 1;
        } else {
                update_mmu_tlb(vma, vmf->address, vmf->pte);
        }
 
-       if (new_page)
-               put_page(new_page);
+       if (new_folio)
+               folio_put(new_folio);
 
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        /*
@@ -3202,19 +3185,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
         * the above ptep_clear_flush_notify() did already call it.
         */
        mmu_notifier_invalidate_range_only_end(&range);
-       if (old_page) {
+       if (old_folio) {
                if (page_copied)
-                       free_swap_cache(old_page);
-               put_page(old_page);
+                       free_swap_cache(&old_folio->page);
+               folio_put(old_folio);
        }
 
        delayacct_wpcopy_end();
        return 0;
 oom_free_new:
-       put_page(new_page);
+       folio_put(new_folio);
 oom:
-       if (old_page)
-               put_page(old_page);
+       if (old_folio)
+               folio_put(old_folio);
 
        delayacct_wpcopy_end();
        return VM_FAULT_OOM;
@@ -3582,7 +3565,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 
        if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
                return VM_FAULT_RETRY;
-       mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+       mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
                                vma->vm_mm, vmf->address & PAGE_MASK,
                                (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
        mmu_notifier_invalidate_range_start(&range);
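
Every mmu_notifier_range_init() caller in this file (copy_page_range(),
unmap_vmas(), zap_page_range_single(), wp_page_copy() and this one) drops
the VMA argument; the range only ever needed the mm. A hedged sketch of the
new calling convention (names and the elided PTE work are illustrative):

#include <linux/mmu_notifier.h>

static void sketch_notify_clear(struct mm_struct *mm, unsigned long start,
				unsigned long end)
{
	struct mmu_notifier_range range;

	/* Note: no vma parameter anymore, just the mm. */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... clear PTEs in [start, end) here ... */
	mmu_notifier_invalidate_range_end(&range);
}
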
@@ -3644,9 +3627,7 @@ static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
 {
        /*
         * Just in case there're leftover special ptes even after the region
-        * got unregistered - we can simply clear them.  We can also do that
-        * proactively when e.g. when we do UFFDIO_UNREGISTER upon some uffd-wp
-        * ranges, but it should be more efficient to be done lazily here.
+        * got unregistered - we can simply clear them.
         */
        if (unlikely(!userfaultfd_wp(vmf->vma) || vma_is_anonymous(vmf->vma)))
                return pte_marker_clear(vmf);
@@ -3840,6 +3821,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                if (unlikely(!page)) {
                        ret = VM_FAULT_OOM;
                        goto out_page;
+               } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+                       ret = VM_FAULT_HWPOISON;
+                       goto out_page;
                }
                folio = page_folio(page);
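
The page returned just above (presumably from ksm_might_need_to_copy(),
which sits right before this hunk) can now encode an error with ERR_PTR(),
and -EHWPOISON is reported as VM_FAULT_HWPOISON instead of being treated as
an allocation failure. A sketch of the general ERR_PTR idiom the check
relies on (the helper is illustrative):

#include <linux/err.h>
#include <linux/mm_types.h>

/* A struct page pointer can be NULL, a real page, or an encoded errno. */
static int sketch_classify(struct page *page)
{
	if (!page)
		return -ENOMEM;		/* plain allocation failure */
	if (IS_ERR(page))
		return PTR_ERR(page);	/* e.g. -EHWPOISON */
	return 0;			/* usable page */
}
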
 
@@ -3885,10 +3869,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
         * the swap entry concurrently) for certainly exclusive pages.
         */
        if (!folio_test_ksm(folio)) {
-               /*
-                * Note that pte_swp_exclusive() == false for architectures
-                * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
-                */
                exclusive = pte_swp_exclusive(vmf->orig_pte);
                if (folio != swapcache) {
                        /*
@@ -3950,10 +3930,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        flush_icache_page(vma, page);
        if (pte_swp_soft_dirty(vmf->orig_pte))
                pte = pte_mksoft_dirty(pte);
-       if (pte_swp_uffd_wp(vmf->orig_pte)) {
+       if (pte_swp_uffd_wp(vmf->orig_pte))
                pte = pte_mkuffd_wp(pte);
-               pte = pte_wrprotect(pte);
-       }
        vmf->orig_pte = pte;
 
        /* ksm created a completely new copy */
@@ -4021,7 +3999,7 @@ out_release:
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
-       struct page *page;
+       struct folio *folio;
        vm_fault_t ret = 0;
        pte_t entry;
 
@@ -4071,22 +4049,22 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        /* Allocate our own private page. */
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
-       if (!page)
+       folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+       if (!folio)
                goto oom;
 
-       if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+       if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
                goto oom_free_page;
-       cgroup_throttle_swaprate(page, GFP_KERNEL);
+       cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);
 
        /*
-        * The memory barrier inside __SetPageUptodate makes sure that
+        * The memory barrier inside __folio_mark_uptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
-       __SetPageUptodate(page);
+       __folio_mark_uptodate(folio);
 
-       entry = mk_pte(page, vma->vm_page_prot);
+       entry = mk_pte(&folio->page, vma->vm_page_prot);
        entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4105,13 +4083,13 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        /* Deliver the page fault to userland, check inside PT lock */
        if (userfaultfd_missing(vma)) {
                pte_unmap_unlock(vmf->pte, vmf->ptl);
-               put_page(page);
+               folio_put(folio);
                return handle_userfault(vmf, VM_UFFD_MISSING);
        }
 
        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-       page_add_new_anon_rmap(page, vma, vmf->address);
-       lru_cache_add_inactive_or_unevictable(page, vma);
+       folio_add_new_anon_rmap(folio, vma, vmf->address);
+       folio_add_lru_vma(folio, vma);
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -4121,10 +4099,10 @@ unlock:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return ret;
 release:
-       put_page(page);
+       folio_put(folio);
        goto unlock;
 oom_free_page:
-       put_page(page);
+       folio_put(folio);
 oom:
        return VM_FAULT_OOM;
 }
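
do_anonymous_page() now handles the newly allocated folio end to end. The
"install a fresh anonymous folio" sequence above, condensed into one hedged
sketch (locking, pte_none() revalidation and the userfaultfd check are
omitted; the function name is illustrative):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

/* Caller holds the PTL and has verified the PTE is still empty. */
static void sketch_map_new_anon_folio(struct vm_fault *vmf, struct folio *folio)
{
	struct vm_area_struct *vma = vmf->vma;
	pte_t entry;

	__folio_mark_uptodate(folio);	/* contents visible before the PTE is */
	entry = mk_pte(&folio->page, vma->vm_page_prot);
	entry = pte_sw_mkyoung(entry);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	folio_add_new_anon_rmap(folio, vma, vmf->address);
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
	update_mmu_cache(vma, vmf->address, vmf->pte);
}
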
@@ -4296,7 +4274,7 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        if (unlikely(uffd_wp))
-               entry = pte_mkuffd_wp(pte_wrprotect(entry));
+               entry = pte_mkuffd_wp(entry);
        /* copy-on-write page */
        if (write && !(vma->vm_flags & VM_SHARED)) {
                inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
@@ -5137,8 +5115,8 @@ static inline void mm_account_fault(struct pt_regs *regs,
 #ifdef CONFIG_LRU_GEN
 static void lru_gen_enter_fault(struct vm_area_struct *vma)
 {
-       /* the LRU algorithm doesn't apply to sequential or random reads */
-       current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
+       /* the LRU algorithm only applies to accesses with recency */
+       current->in_lru_fault = vma_has_recency(vma);
 }
 
 static void lru_gen_exit_fault(void)
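
Both the mark_page_accessed() gate in zap_pte_range() (earlier in this
patch) and lru_gen_enter_fault() now defer to vma_has_recency(), which
centralizes the "does this mapping show temporal locality?" test. It
returns false for VM_SEQ_READ/VM_RAND_READ mappings and, as of this cycle,
for files opened with FMODE_NOREUSE (e.g. via POSIX_FADV_NOREUSE). The
sketch below approximates its behavior; see include/linux/mm_inline.h for
the real definition:

#include <linux/fs.h>
#include <linux/mm.h>

static bool sketch_vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;
	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;
	return true;
}
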