linux-next/include/linux/zswap.h
Kanchana P Sridhar 6e1fa555ec mm: zswap: modify zswap_stored_pages to be atomic_long_t
For zswap_store() to support large folios, we need to be able to do a
batch update of zswap_stored_pages upon successfully storing all pages
in the folio.  For this, we need to be able to add the value of
folio_nr_pages(), which returns a long, to zswap_stored_pages.

Link: https://lkml.kernel.org/r/20241001053222.6944-6-kanchana.p.sridhar@intel.com
Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
Acked-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Wajdi Feghali <wajdi.k.feghali@intel.com>
Cc: "Zou, Nanhai" <nanhai.zou@intel.com>
Cc: Barry Song <21cnbao@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2024-11-11 00:26:42 -08:00
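
In other words, the counter's type changes so that zswap_store() can account
an entire folio with a single atomic operation instead of one increment per
page. A minimal sketch of that batch update, assuming the usual atomic_long_*
helpers (the exact call site and error handling in mm/zswap.c differ):

atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);

bool zswap_store(struct folio *folio)
{
	long nr_pages = folio_nr_pages(folio);	/* folio_nr_pages() returns long */

	/* ... compress and store every page of the folio; bail out on error ... */

	/* batch update: one atomic add for the whole folio */
	atomic_long_add(nr_pages, &zswap_stored_pages);
	return true;
}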


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ZSWAP_H
#define _LINUX_ZSWAP_H

#include <linux/types.h>
#include <linux/mm_types.h>

struct lruvec;

/* Total number of pages stored in zswap, across all pools. */
extern atomic_long_t zswap_stored_pages;

#ifdef CONFIG_ZSWAP

struct zswap_lruvec_state {
	/*
	 * Number of swapped in pages from disk, i.e. not found in the zswap pool.
	 *
	 * This is consumed and subtracted from the lru size in
	 * zswap_shrinker_count() to penalize past overshrinking that led to disk
	 * swapins. The idea is that had we considered this many more pages in the
	 * LRU active/protected and not written them back, we would not have had to
	 * swap them in.
	 */
	atomic_long_t nr_disk_swapins;
};
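
/*
 * Illustrative sketch, not part of this header: nr_disk_swapins is fed by
 * zswap_folio_swapin() in mm/zswap.c, roughly along these lines, and is
 * later consumed (subtracted from the LRU size) in zswap_shrinker_count():
 *
 *	void zswap_folio_swapin(struct folio *folio)
 *	{
 *		if (folio)
 *			atomic_long_inc(&folio_lruvec(folio)->
 *					zswap_lruvec_state.nr_disk_swapins);
 *	}
 */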
unsigned long zswap_total_pages(void);
bool zswap_store(struct folio *folio);
bool zswap_load(struct folio *folio);
void zswap_invalidate(swp_entry_t swp);
int zswap_swapon(int type, unsigned long nr_pages);
void zswap_swapoff(int type);
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
void zswap_lruvec_state_init(struct lruvec *lruvec);
void zswap_folio_swapin(struct folio *folio);
bool zswap_is_enabled(void);
bool zswap_never_enabled(void);

#else

struct zswap_lruvec_state {};

static inline bool zswap_store(struct folio *folio)
{
	return false;
}

static inline bool zswap_load(struct folio *folio)
{
	return false;
}

static inline void zswap_invalidate(swp_entry_t swp) {}

static inline int zswap_swapon(int type, unsigned long nr_pages)
{
	return 0;
}

static inline void zswap_swapoff(int type) {}
static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
static inline void zswap_folio_swapin(struct folio *folio) {}

static inline bool zswap_is_enabled(void)
{
	return false;
}

static inline bool zswap_never_enabled(void)
{
	return true;
}

#endif

#endif /* _LINUX_ZSWAP_H */
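
For completeness, the !CONFIG_ZSWAP stubs above are what let callers invoke
this API unconditionally: when zswap is compiled out, zswap_store() reports
failure and the caller falls back to real swap I/O. A sketch of that
caller-side pattern (the actual logic lives in swap_writepage() in
mm/page_io.c and differs in detail):

/* Hypothetical caller; error handling and writeback bookkeeping elided. */
static void example_writepage(struct folio *folio)
{
	if (zswap_store(folio)) {
		/* Folio kept compressed in memory; no disk I/O needed. */
		folio_unlock(folio);
		return;
	}

	/*
	 * The CONFIG_ZSWAP=n stub (or a failed store) lands here: fall
	 * back to writing the folio to the backing swap device.
	 */
}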