mm: convert collapse_huge_page() to use a folio
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index de174d049e7122fe6065e7a01a21c03c7df01721..1002e915638839d7ebfb9a282d7a310d96b8bd3c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1090,6 +1090,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        pmd_t *pmd, _pmd;
        pte_t *pte;
        pgtable_t pgtable;
+       struct folio *folio;
        struct page *hpage;
        spinlock_t *pmd_ptl, *pte_ptl;
        int result = SCAN_FAIL;
@@ -1212,13 +1213,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        if (unlikely(result != SCAN_SUCCEED))
                goto out_up_write;
 
+       folio = page_folio(hpage);
        /*
-        * spin_lock() below is not the equivalent of smp_wmb(), but
-        * the smp_wmb() inside __SetPageUptodate() can be reused to
-        * avoid the copy_huge_page writes to become visible after
-        * the set_pmd_at() write.
+        * The smp_wmb() inside __folio_mark_uptodate() ensures the
+        * copy_huge_page writes become visible before the set_pmd_at()
+        * write.
         */
-       __SetPageUptodate(hpage);
+       __folio_mark_uptodate(folio);
        pgtable = pmd_pgtable(_pmd);
 
        _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
@@ -1226,8 +1227,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
-       page_add_new_anon_rmap(hpage, vma, address);
-       lru_cache_add_inactive_or_unevictable(hpage, vma);
+       folio_add_new_anon_rmap(folio, vma, address);
+       folio_add_lru_vma(folio, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache_pmd(vma, address, pmd);
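
For readers following the conversion: the pattern above looks up the folio from the huge page once and then calls the folio-based helpers in place of the old page-based ones. Below is a minimal sketch of that sequence, not part of the patch; the wrapper name map_new_anon_thp is hypothetical, and it assumes the kernel tree this patch targets, where folio_add_new_anon_rmap() still takes only the folio, vma and address.

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/rmap.h>
	#include <linux/swap.h>

	/*
	 * Hedged sketch of the page->folio conversion pattern applied in
	 * collapse_huge_page() above; not part of the patch itself.
	 */
	static void map_new_anon_thp(struct page *hpage,
				     struct vm_area_struct *vma,
				     unsigned long address)
	{
		/*
		 * Derive the folio once, rather than letting each
		 * page-based helper resolve compound_head() internally.
		 */
		struct folio *folio = page_folio(hpage);

		/*
		 * Was __SetPageUptodate(hpage): the smp_wmb() inside
		 * orders the huge-page copy before the PMD is made
		 * visible by set_pmd_at().
		 */
		__folio_mark_uptodate(folio);

		/* Was page_add_new_anon_rmap(hpage, vma, address). */
		folio_add_new_anon_rmap(folio, vma, address);

		/* Was lru_cache_add_inactive_or_unevictable(hpage, vma). */
		folio_add_lru_vma(folio, vma);
	}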