mm/writeback: Add folio_account_cleaned()
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 4 May 2021 20:12:09 +0000 (16:12 -0400)
Committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 18 Oct 2021 11:49:40 +0000 (07:49 -0400)
Get the statistics right; compound pages were being accounted as a
single page.  This didn't matter before now as no filesystem which
supported compound pages did writeback.  Also move the declaration
to pagemap.h since this is part of the page cache.  Add a wrapper for
account_page_cleaned().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/mm.h
include/linux/pagemap.h
mm/page-writeback.c

index 00510b02ffe1ea77df9ec2475c75f30228fa975d..f47af4ca4873cf97593982752cc383008f0c7408 100644 (file)
@@ -39,7 +39,6 @@ struct anon_vma_chain;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct bdi_writeback;
 struct pt_regs;
 
 extern int sysctl_page_lock_unfairness;
@@ -2006,8 +2005,6 @@ extern void do_invalidatepage(struct page *page, unsigned int offset,
 
 int redirty_page_for_writepage(struct writeback_control *wbc,
                                struct page *page);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
-                         struct bdi_writeback *wb);
 bool folio_mark_dirty(struct folio *folio);
 bool set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
index aa9a083083df3bee369eb68cbb0159746e094e29..8ba7da513ac4702f911fac5c576f404789c074ba 100644 (file)
@@ -778,6 +778,13 @@ static inline void __set_page_dirty(struct page *page,
 {
        __folio_mark_dirty(page_folio(page), mapping, warn);
 }
+void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
+                         struct bdi_writeback *wb);
+static inline void account_page_cleaned(struct page *page,
+               struct address_space *mapping, struct bdi_writeback *wb)
+{
+       return folio_account_cleaned(page_folio(page), mapping, wb);
+}
 
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
index a501dad430af134e75b016cd666a4338af8bca39..82e3bc3d4eaef3062c157fe09b58f2607b7d9e96 100644 (file)
@@ -2470,14 +2470,15 @@ static void folio_account_dirtied(struct folio *folio,
  *
  * Caller must hold lock_page_memcg().
  */
-void account_page_cleaned(struct page *page, struct address_space *mapping,
+void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
                          struct bdi_writeback *wb)
 {
        if (mapping_can_writeback(mapping)) {
-               dec_lruvec_page_state(page, NR_FILE_DIRTY);
-               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-               dec_wb_stat(wb, WB_RECLAIMABLE);
-               task_io_account_cancelled_write(PAGE_SIZE);
+               long nr = folio_nr_pages(folio);
+               lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
+               zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+               wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
+               task_io_account_cancelled_write(nr * PAGE_SIZE);
        }
 }