Page lock (PG_locked bit of page->flags)
mm->page_table_lock or split pte_lock
- lock_page_memcg (memcg->move_lock)
+ folio_memcg_lock (memcg->move_lock)
mapping->i_pages lock
lruvec->lru_lock
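As a hedged sketch of what this ordering permits (the variables folio, ptl, mapping and lruvec are hypothetical context, not from this patch), one legal nesting is:

	folio_lock(folio);			/* page lock */
	spin_lock(ptl);				/* split pte_lock */
	folio_memcg_lock(folio);		/* memcg->move_lock, slow path */
	xa_lock_irq(&mapping->i_pages);		/* i_pages lock */
	spin_lock(&lruvec->lru_lock);
	/* ... modify shared state ... */
	spin_unlock(&lruvec->lru_lock);
	xa_unlock_irq(&mapping->i_pages);
	folio_memcg_unlock(folio);
	spin_unlock(ptl);
	folio_unlock(folio);

Any prefix of the nesting is legal; acquiring in the reverse order risks deadlock.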
*
* - the folio lock
* - LRU isolation
- * - lock_page_memcg()
+ * - folio_memcg_lock()
* - exclusive reference
* - mem_cgroup_trylock_pages()
*
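Any one of these suffices to pin the folio's memcg binding; for example, a minimal sketch using the folio lock (the accounting step is illustrative):

	struct mem_cgroup *memcg;

	folio_lock(folio);
	memcg = folio_memcg(folio);	/* binding is stable until unlock */
	/* ... account against memcg ... */
	folio_unlock(folio);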
void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
-void lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct page *page);
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
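A hedged usage sketch of the surviving interface, bracketing a statistics update with the folio-based lock (the stat index is illustrative, and the __-prefixed modifier assumes the caller has already dealt with preemption/interrupts):

	folio_memcg_lock(folio);
	__mod_memcg_state(folio_memcg(folio), NR_FILE_DIRTY, 1);
	folio_memcg_unlock(folio);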
{
}
-static inline void lock_page_memcg(struct page *page)
-{
-}
-
-static inline void unlock_page_memcg(struct page *page)
-{
-}
-
static inline void folio_memcg_lock(struct folio *folio)
{
}
* ->i_pages lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
* ->inode->i_lock (page_remove_rmap->set_page_dirty)
- * ->memcg->move_lock (page_remove_rmap->lock_page_memcg)
+ * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->block_dirty_folio)
* When charge migration first begins, we can have multiple
* critical sections holding the fast-path RCU lock and one
* holding the slowpath move_lock. Track the task that holds the
- * move_lock for unlock_page_memcg().
+ * move_lock for folio_memcg_unlock().
*/
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;
}
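On the lock side, the fast and slow paths pair up roughly like this (a simplified sketch of folio_memcg_lock(), eliding mem_cgroup_disabled() and the re-check loop):

	struct mem_cgroup *memcg;
	unsigned long flags;

	rcu_read_lock();		/* fast path, taken unconditionally */
	memcg = folio_memcg(folio);
	if (memcg && atomic_read(&memcg->moving_account) > 0) {
		/* slow path: a charge move is in flight */
		spin_lock_irqsave(&memcg->move_lock, flags);
		memcg->move_lock_task = current;
		memcg->move_lock_flags = flags;
	}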
-void lock_page_memcg(struct page *page)
-{
- folio_memcg_lock(page_folio(page));
-}
-
static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}

void folio_memcg_unlock(struct folio *folio)
{
	__folio_memcg_unlock(folio_memcg(folio));
}
-void unlock_page_memcg(struct page *page)
-{
- folio_memcg_unlock(page_folio(page));
-}
-
struct memcg_stock_pcp {
local_lock_t stock_lock;
struct mem_cgroup *cached; /* this is never the root cgroup */
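The stock fields are serialized by that local_lock; a hedged sketch of the consume fast path (simplified, assuming the kernel's per-CPU memcg_stock variable and its nr_pages field):

	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;	/* charge served from the cache */
		ret = true;
	}
	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);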
*
* - the folio lock
* - LRU isolation
- * - lock_page_memcg()
+ * - folio_memcg_lock()
* - exclusive reference
* - mem_cgroup_trylock_pages()
*/
* with (un)charging, migration, LRU putback, or anything else
* that would rely on a stable page's memory cgroup.
*
- * Note that lock_page_memcg is a memcg lock, not a page lock,
+ * Note that folio_memcg_lock is a memcg lock, not a page lock,
* to save space. As soon as we switch the page's memory cgroup to a
* new memcg that isn't locked, the above state can change
* concurrently again. Make sure we're truly done with it.
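In the move-account path, "truly done" is enforced by a full barrier before the binding is switched; sketched from the surrounding code (to and from being the destination and source memcgs):

	smp_mb();

	css_get(&to->css);
	css_put(&from->css);

	folio->memcg_data = (unsigned long)to;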
{
lru_add_drain_all();
/*
- * Signal lock_page_memcg() to take the memcg's move_lock
+ * Signal folio_memcg_lock() to take the memcg's move_lock
* while we're moving its pages to another memcg. Then wait
* for already started RCU-only updates to finish.
*/
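Concretely, the signal-and-wait pair is (mc.from being the source memcg in the move-charge path):

	atomic_inc(&mc.from->moving_account);	/* new lockers take move_lock */
	synchronize_rcu();			/* drain RCU-only fast paths */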
/*
* Helper function for set_page_dirty family.
*
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
*
* NOTE: This relies on being atomic wrt interrupts.
*/
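A hedged sketch of the expected calling pattern, modeled on how filemap_dirty_folio() brackets the dirty accounting:

	folio_memcg_lock(folio);
	if (!folio_test_set_dirty(folio))
		__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
	folio_memcg_unlock(folio);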
/*
* Helper function for deaccounting a dirty page without writeback.
*
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
*/
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
{
* If warn is true, then emit a warning if the folio is not uptodate and has
* not been truncated.
*
- * The caller must hold lock_page_memcg(). Most callers have the folio
+ * The caller must hold folio_memcg_lock(). Most callers have the folio
* locked. A few have the folio blocked from truncation through other
* means (eg zap_vma_pages() has it mapped and is holding the page table
* lock). This can also be called from mark_buffer_dirty(), which I