diff --git a/mm/rmap.c b/mm/rmap.c
index f5d43edad529a76858a9aab5536a755f0a50ec67..3746a553101832164b8171c188fd9f9d6af1b6af 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1780,7 +1780,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                set_huge_pte_at(mm, address, pvmw.pte, pteval,
                                                hsz);
                        } else {
-                               dec_mm_counter(mm, mm_counter(&folio->page));
+                               dec_mm_counter(mm, mm_counter(folio));
                                set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
@@ -1795,7 +1795,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         * migration) will not expect userfaults on already
                         * copied pages.
                         */
-                       dec_mm_counter(mm, mm_counter(&folio->page));
+                       dec_mm_counter(mm, mm_counter(folio));
                } else if (folio_test_anon(folio)) {
                        swp_entry_t entry = page_swap_entry(subpage);
                        pte_t swp_pte;
@@ -1903,7 +1903,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         *
                         * See Documentation/mm/mmu_notifier.rst
                         */
-                       dec_mm_counter(mm, mm_counter_file(&folio->page));
+                       dec_mm_counter(mm, mm_counter_file(folio));
                }
 discard:
                if (unlikely(folio_test_hugetlb(folio)))
@@ -2169,7 +2169,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                        trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
-                                               compound_order(&folio->page));
+                                               folio_order(folio));
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.
@@ -2181,7 +2181,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                set_huge_pte_at(mm, address, pvmw.pte, pteval,
                                                hsz);
                        } else {
-                               dec_mm_counter(mm, mm_counter(&folio->page));
+                               dec_mm_counter(mm, mm_counter(folio));
                                set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
@@ -2196,7 +2196,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                         * migration) will not expect userfaults on already
                         * copied pages.
                         */
-                       dec_mm_counter(mm, mm_counter(&folio->page));
+                       dec_mm_counter(mm, mm_counter(folio));
                } else {
                        swp_entry_t entry;
                        pte_t swp_pte;
@@ -2261,7 +2261,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        else
                                set_pte_at(mm, address, pvmw.pte, swp_pte);
                        trace_set_migration_pte(address, pte_val(swp_pte),
-                                               compound_order(&folio->page));
+                                               folio_order(folio));
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.