mm/zsmalloc: use zpdesc in trylock_zspage()/lock_zspage()

Convert trylock_zspage() and lock_zspage() to use zpdesc. To achieve
that, introduce a couple of helper functions:
  - zpdesc_lock()
  - zpdesc_unlock()
  - zpdesc_trylock()
  - zpdesc_wait_locked()
  - zpdesc_get()
  - zpdesc_put()

Here we use the folio versions of these functions for two reasons.  First,
zswap.zpool currently only uses order-0 pages, and using folios could save
some compound_head checks.  Second, folio_put can bypass the devmap checking
that we don't need.

BTW, thanks to Intel LKP for finding a build warning in the patch.

Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Link: https://lkml.kernel.org/r/20241216150450.1228021-3-42.hyeyoo@gmail.com
Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Alex Shi 2024-12-17 00:04:33 +09:00 committed by Andrew Morton
parent 92bcb43ce8
commit d6611534d6
2 changed files with 73 additions and 21 deletions

View File

@ -104,4 +104,34 @@ static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
const struct page *: (const struct zpdesc *)(p), \
struct page *: (struct zpdesc *)(p)))
/* Acquire the page lock on the folio backing @zpdesc (may sleep). */
static inline void zpdesc_lock(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_lock(folio);
}
/*
 * Try to acquire the page lock on the folio backing @zpdesc without
 * sleeping.  Returns true on success.
 */
static inline bool zpdesc_trylock(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	return folio_trylock(folio);
}
/* Release the page lock on the folio backing @zpdesc. */
static inline void zpdesc_unlock(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_unlock(folio);
}
/* Sleep until the folio backing @zpdesc is no longer locked. */
static inline void zpdesc_wait_locked(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_wait_locked(folio);
}
/* Take a reference on the folio backing @zpdesc. */
static inline void zpdesc_get(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_get(folio);
}
/* Drop a reference on the folio backing @zpdesc. */
static inline void zpdesc_put(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_put(folio);
}
#endif

View File

@ -428,13 +428,17 @@ static __maybe_unused int is_first_page(struct page *page)
return PagePrivate(page);
}
/* True if @zpdesc is the head descriptor of its zspage (PG_private set). */
static inline bool is_first_zpdesc(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	return PagePrivate(page);
}
/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
	/* Snapshot of the inuse counter; caller holds class->lock. */
	int inuse = zspage->inuse;

	return inuse;
}
static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
zspage->inuse += val;
@ -448,6 +452,14 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
/* Return the head zpdesc of @zspage, asserting it carries the head marker. */
static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
{
	struct zpdesc *zpdesc = zspage->first_zpdesc;

	/* The head descriptor must be flagged as such (see is_first_zpdesc). */
	VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc));

	return zpdesc;
}
#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff
static inline unsigned int get_first_obj_offset(struct page *page)
@ -734,6 +746,16 @@ static struct page *get_next_page(struct page *page)
return (struct page *)page->index;
}
/*
 * Return the zpdesc following @zpdesc within its zspage, or NULL at the
 * end of the chain.
 */
static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
{
	struct zspage *zspage;

	zspage = get_zspage(zpdesc_page(zpdesc));
	/* A huge zspage is backed by a single zpdesc, so it has no successor. */
	if (unlikely(ZsHugePage(zspage)))
		return NULL;

	return zpdesc->next;
}
/**
* obj_to_location - get (<page>, <obj_idx>) from encoded object value
* @obj: the encoded object value
@ -803,11 +825,11 @@ static void reset_page(struct page *page)
/*
 * trylock_zspage - attempt to lock every zpdesc backing @zspage without
 * sleeping.  Returns 1 when all locks were taken; on failure, unlocks the
 * descriptors locked so far and returns 0.
 *
 * NOTE(review): this span is a unified diff rendered with its +/- markers
 * stripped — the removed struct page statements and the added struct zpdesc
 * replacements are interleaved below, and a hunk header remains embedded.
 * The text is preserved verbatim; reconstruct the patch before compiling.
 */
static int trylock_zspage(struct zspage *zspage)
{
struct page *cursor, *fail;
struct zpdesc *cursor, *fail;
for (cursor = get_first_page(zspage); cursor != NULL; cursor =
get_next_page(cursor)) {
if (!trylock_page(cursor)) {
for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor =
get_next_zpdesc(cursor)) {
if (!zpdesc_trylock(cursor)) {
fail = cursor;
goto unlock;
}
@ -815,9 +837,9 @@ static int trylock_zspage(struct zspage *zspage)
return 1;
unlock:
/* Back out: release every descriptor locked before the failure point. */
for (cursor = get_first_page(zspage); cursor != fail; cursor =
get_next_page(cursor))
unlock_page(cursor);
for (cursor = get_first_zpdesc(zspage); cursor != fail; cursor =
get_next_zpdesc(cursor))
zpdesc_unlock(cursor);
return 0;
}
@ -1635,7 +1657,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
*/
static void lock_zspage(struct zspage *zspage)
{
struct page *curr_page, *page;
struct zpdesc *curr_zpdesc, *zpdesc;
/*
* Pages we haven't locked yet can be migrated off the list while we're
@ -1647,24 +1669,24 @@ static void lock_zspage(struct zspage *zspage)
*/
while (1) {
migrate_read_lock(zspage);
page = get_first_page(zspage);
if (trylock_page(page))
zpdesc = get_first_zpdesc(zspage);
if (zpdesc_trylock(zpdesc))
break;
get_page(page);
zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
wait_on_page_locked(page);
put_page(page);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
}
curr_page = page;
while ((page = get_next_page(curr_page))) {
if (trylock_page(page)) {
curr_page = page;
curr_zpdesc = zpdesc;
while ((zpdesc = get_next_zpdesc(curr_zpdesc))) {
if (zpdesc_trylock(zpdesc)) {
curr_zpdesc = zpdesc;
} else {
get_page(page);
zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
wait_on_page_locked(page);
put_page(page);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
migrate_read_lock(zspage);
}
}