mm/zswap: stop lru list shrinking when encountering warm region
author Chengming Zhou <zhouchengming@bytedance.com>
Sun, 4 Feb 2024 03:06:01 +0000 (03:06 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 18:24:54 +0000 (10:24 -0800)
When the shrinker encounters an existing folio in the swap cache, it means
we are shrinking into the warmer region.  We should terminate shrinking if
we're in the dynamic shrinker context.

This patch adds LRU_STOP to support this and avoid overshrinking.

Link: https://lkml.kernel.org/r/20240201-b4-zswap-invalidate-entry-v2-3-99d4084260a0@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/list_lru.h
mm/list_lru.c
mm/zswap.c

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index f2882a82069027649a8228f3288335223a1fa02f..792b67ceb631b5aa992d8ac9cc2758440f03457d 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -24,6 +24,8 @@ enum lru_status {
        LRU_SKIP,               /* item cannot be locked, skip */
        LRU_RETRY,              /* item not freeable. May drop the lock
                                   internally, but has to return locked. */
+       LRU_STOP,               /* stop lru list walking. May drop the lock
+                                  internally, but has to return locked. */
 };
 
 struct list_lru_one {
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 61f3b6b1134fbe6bda2753dd809d2823f85ad309..3fd64736bc4589b87de7d37663139fcc692a5281 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -243,6 +243,9 @@ restart:
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
+               case LRU_STOP:
+                       assert_spin_locked(&nlru->lock);
+                       goto out;
                default:
                        BUG();
                }
diff --git a/mm/zswap.c b/mm/zswap.c
index ef41a7bd81f243ce2514dbd3346c46933f2b904c..f8a4ac389118a0f1abc02989faeea5dba0309e64 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1315,8 +1315,10 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
                 * into the warmer region. We should terminate shrinking (if we're in the dynamic
                 * shrinker context).
                 */
-               if (writeback_result == -EEXIST && encountered_page_in_swapcache)
+               if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
+                       ret = LRU_STOP;
                        *encountered_page_in_swapcache = true;
+               }
        } else {
                zswap_written_back_pages++;
        }
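
For context, here is a minimal sketch (not part of the patch) of how a
list_lru walk callback might use the new LRU_STOP status.  The struct
my_entry type and the is_warm() and try_evict() helpers are hypothetical
stand-ins; only the enum lru_status values, the callback signature and
list_lru_isolate() come from the kernel API touched above.

/* Hypothetical object kept on a list_lru. */
struct my_entry {
	struct list_head lru;
	/* ... */
};

/*
 * Sketch of a walk callback: once a warm entry is found, every entry
 * after it on the LRU is at least as warm, so return LRU_STOP and let
 * the walk loop in mm/list_lru.c jump to its "out" label.  The nlru
 * lock must still be held when LRU_STOP is returned, matching the
 * assert_spin_locked() in the new case above.
 */
static enum lru_status my_shrink_cb(struct list_head *item,
				    struct list_lru_one *l,
				    spinlock_t *lock, void *arg)
{
	struct my_entry *entry = container_of(item, struct my_entry, lru);

	if (is_warm(entry))		/* hypothetical warmness test */
		return LRU_STOP;	/* stop walking, lock still held */

	if (!try_evict(entry))		/* hypothetical eviction attempt */
		return LRU_SKIP;	/* cannot free this one, move on */

	list_lru_isolate(l, item);
	return LRU_REMOVED;
}

In shrink_memcg_cb() the equivalent trigger is writeback failing with
-EEXIST because the folio was found in the swap cache, which is what the
zswap hunk above wires up to LRU_STOP.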