mm: convert free_swap_cache() to take a folio
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Tue, 27 Feb 2024 17:42:52 +0000 (17:42 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
		Tue, 5 Mar 2024 01:01:26 +0000 (17:01 -0800)
All but one caller already has a folio, so convert free_swap_cache() to
take a folio and move the page_folio() call into the one remaining
page-based caller, free_page_and_swap_cache().
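
At the call sites the change is mechanical: pass the folio directly
instead of &folio->page.  A minimal caller-side sketch (hypothetical
helper name, not part of this patch; assumes the caller holds its own
folio reference, as wp_page_copy() does below):

	/* Hypothetical: drop a folio's swap cache entry, then our reference. */
	static void drop_folio_swap_cache(struct folio *folio)
	{
		free_swap_cache(folio);	/* was free_swap_cache(&folio->page) */
		folio_put(folio);
	}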

Link: https://lkml.kernel.org/r/20240227174254.710559-19-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swap.h
mm/khugepaged.c
mm/memory.c
mm/swap_state.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8d28f6091a320ef024597dfd6f84526702d5ec41..1ad6f63d1a52359cc13cefeb47ee1be28dd6e73d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -447,9 +447,9 @@ static inline unsigned long total_swapcache_pages(void)
        return global_node_page_state(NR_SWAPCACHE);
 }
 
-extern void free_swap_cache(struct page *page);
-extern void free_page_and_swap_cache(struct page *);
-extern void free_pages_and_swap_cache(struct encoded_page **, int);
+void free_swap_cache(struct folio *folio);
+void free_page_and_swap_cache(struct page *);
+void free_pages_and_swap_cache(struct encoded_page **, int);
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
 extern long total_swap_pages;
@@ -531,7 +531,7 @@ static inline void put_swap_device(struct swap_info_struct *si)
 /* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
 #define free_swap_and_cache(e) is_pfn_swap_entry(e)
 
-static inline void free_swap_cache(struct page *page)
+static inline void free_swap_cache(struct folio *folio)
 {
 }
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b9223e803262e39020d887a7dabe3049fc77db6b..38830174608fba663ed416ad4e2661242e484c58 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -731,7 +731,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
                node_stat_sub_folio(src, NR_ISOLATED_ANON +
                                folio_is_file_lru(src));
                folio_unlock(src);
-               free_swap_cache(&src->page);
+               free_swap_cache(src);
                folio_putback_lru(src);
        }
 }
diff --git a/mm/memory.c b/mm/memory.c
index 1c45b6a42a1b95d27c33e35467ada1553606e348..75e0abda86427c918e6e7c7c5f3a33104d49d0eb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3452,7 +3452,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                folio_put(new_folio);
        if (old_folio) {
                if (page_copied)
-                       free_swap_cache(&old_folio->page);
+                       free_swap_cache(old_folio);
                folio_put(old_folio);
        }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 96b5c585f04792b0697a674f254f888ad6809d6c..bfc7e8c58a6d34b948d73916ccd9bf0f1be14e21 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -283,10 +283,8 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
  * folio_free_swap() _with_ the lock.
  *                                     - Marcelo
  */
-void free_swap_cache(struct page *page)
+void free_swap_cache(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
            folio_trylock(folio)) {
                folio_free_swap(folio);
@@ -300,9 +298,11 @@ void free_swap_cache(struct page *page)
  */
 void free_page_and_swap_cache(struct page *page)
 {
-       free_swap_cache(page);
+       struct folio *folio = page_folio(page);
+
+       free_swap_cache(folio);
        if (!is_huge_zero_page(page))
-               put_page(page);
+               folio_put(folio);
 }
 
 /*
@@ -319,7 +319,7 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
        for (int i = 0; i < nr; i++) {
                struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
 
-               free_swap_cache(&folio->page);
+               free_swap_cache(folio);
                refs[folios.nr] = 1;
                if (unlikely(encoded_page_flags(pages[i]) &
                             ENCODED_PAGE_BIT_NR_PAGES_NEXT))
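
A caller that still holds only a struct page now converts at the
boundary, exactly as free_page_and_swap_cache() does above.  A hedged
sketch (hypothetical function, not part of this patch):

	/* Hypothetical page-based caller: obtain the folio first. */
	static void drop_page_swap_cache(struct page *page)
	{
		struct folio *folio = page_folio(page);

		free_swap_cache(folio);
	}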