mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
mm/zswap: stop lru list shrinking when encounter warm region
When the shrinker encounters an existing folio in the swap cache, it means we are shrinking into the warmer region. We should terminate shrinking if we're in the dynamic shrinker context. This patch adds LRU_STOP to support this, to avoid overshrinking. Link: https://lkml.kernel.org/r/20240201-b4-zswap-invalidate-entry-v2-3-99d4084260a0@bytedance.com Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Nhat Pham <nphamcs@gmail.com> Reviewed-by: Yosry Ahmed <yosryahmed@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
0827a1fb14
commit
b49547ade3
@ -24,6 +24,8 @@ enum lru_status {
|
||||
LRU_SKIP, /* item cannot be locked, skip */
|
||||
LRU_RETRY, /* item not freeable. May drop the lock
|
||||
internally, but has to return locked. */
|
||||
LRU_STOP, /* stop lru list walking. May drop the lock
|
||||
internally, but has to return locked. */
|
||||
};
|
||||
|
||||
struct list_lru_one {
|
||||
|
@ -243,6 +243,9 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
|
||||
*/
|
||||
assert_spin_locked(&nlru->lock);
|
||||
goto restart;
|
||||
case LRU_STOP:
|
||||
assert_spin_locked(&nlru->lock);
|
||||
goto out;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
@ -1315,8 +1315,10 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
|
||||
* into the warmer region. We should terminate shrinking (if we're in the dynamic
|
||||
* shrinker context).
|
||||
*/
|
||||
if (writeback_result == -EEXIST && encountered_page_in_swapcache)
|
||||
if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
|
||||
ret = LRU_STOP;
|
||||
*encountered_page_in_swapcache = true;
|
||||
}
|
||||
} else {
|
||||
zswap_written_back_pages++;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user