mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
mm: return the folio from __read_swap_cache_async()
Patch series "More swap folio conversions". These all seem like fairly straightforward conversions to me. A lot of compound_head() calls get removed. And page_swap_info(), which is nice. This patch (of 13): Move the folio->page conversion into the callers that actually want that. Most of the callers are happier with the folio anyway. If the page_allocated boolean is set, the folio allocated is of order-0, so it is safe to pass the page directly to swap_readpage(). Link: https://lkml.kernel.org/r/20231213215842.671461-1-willy@infradead.org Link: https://lkml.kernel.org/r/20231213215842.671461-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 8ba2f844f0
commit 96c7b0b422
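To illustrate the caller-side change the commit message describes (the helper now returns the folio, and each caller does the folio->page conversion only when it actually needs a page for I/O), here is a minimal user-space sketch. It is not the kernel code: the struct definitions and the helper names read_swap_cache() and readpage() are simplified stand-ins chosen for this example.

/*
 * Minimal sketch of the new caller pattern, NOT the kernel implementation.
 * "folio" here is just a wrapper around a single page, mirroring the
 * order-0 assumption mentioned in the commit message.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page  { int id; };
struct folio { struct page page; };	/* order-0 folio wraps one page */

/*
 * Stand-in for __read_swap_cache_async(): returns the folio and reports
 * via *new_page_allocated whether it had to allocate a fresh (order-0)
 * folio or found an existing one in the swap cache.
 */
static struct folio *read_swap_cache(bool *new_page_allocated)
{
	struct folio *folio = malloc(sizeof(*folio));

	if (!folio)
		return NULL;
	folio->page.id = 42;
	*new_page_allocated = true;	/* pretend it was freshly allocated */
	return folio;
}

/* Stand-in for swap_readpage(): reads a single page from the swap device. */
static void readpage(struct page *page)
{
	printf("reading page %d from swap\n", page->id);
}

int main(void)
{
	bool page_allocated;
	struct folio *folio = read_swap_cache(&page_allocated);

	if (!folio)
		return 1;
	/*
	 * The folio->page conversion now lives in the caller and is only
	 * done when I/O is actually issued for a newly allocated folio.
	 */
	if (page_allocated)
		readpage(&folio->page);
	free(folio);
	return 0;
}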
include/linux/zswap.h
@@ -34,7 +34,7 @@ void zswap_swapon(int type);
 void zswap_swapoff(int type);
 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
 void zswap_lruvec_state_init(struct lruvec *lruvec);
-void zswap_page_swapin(struct page *page);
+void zswap_folio_swapin(struct folio *folio);
 #else
 
 struct zswap_lruvec_state {};
@@ -54,7 +54,7 @@ static inline void zswap_swapon(int type) {}
 static inline void zswap_swapoff(int type) {}
 static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
 static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
-static inline void zswap_page_swapin(struct page *page) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
 #endif
 
 #endif /* _LINUX_ZSWAP_H */
mm/swap.h
@@ -49,10 +49,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				   struct vm_area_struct *vma,
 				   unsigned long addr,
 				   struct swap_iocb **plug);
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-		struct mempolicy *mpol, pgoff_t ilx,
-		bool *new_page_allocated,
-		bool skip_if_exists);
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
+		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+		bool skip_if_exists);
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 				    struct mempolicy *mpol, pgoff_t ilx);
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
mm/swap_state.c
@@ -410,14 +410,12 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
 	return folio;
 }
 
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-		struct mempolicy *mpol, pgoff_t ilx,
-		bool *new_page_allocated,
-		bool skip_if_exists)
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+		bool skip_if_exists)
 {
 	struct swap_info_struct *si;
 	struct folio *folio;
-	struct page *page;
 	void *shadow = NULL;
 
 	*new_page_allocated = false;
@@ -434,10 +432,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	 */
 	folio = filemap_get_folio(swap_address_space(entry),
 					swp_offset(entry));
-	if (!IS_ERR(folio)) {
-		page = folio_file_page(folio, swp_offset(entry));
-		goto got_page;
-	}
+	if (!IS_ERR(folio))
+		goto got_folio;
 
 	/*
 	 * Just skip read ahead for unused swap slot.
@@ -451,7 +447,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		goto fail_put_swap;
 
 	/*
-	 * Get a new page to read into from swap. Allocate it now,
+	 * Get a new folio to read into from swap. Allocate it now,
 	 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
 	 * cause any racers to loop around until we add it to cache.
 	 */
@@ -487,13 +483,13 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
 		 * has not yet been cleared. Or race against another
 		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
-		 * in swap_map, but not yet added its page to swap cache.
+		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
-	 * The swap entry is ours to swap in. Prepare the new page.
+	 * The swap entry is ours to swap in. Prepare the new folio.
	 */

	__folio_set_locked(folio);
@@ -514,10 +510,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	/* Caller will initiate read into locked folio */
 	folio_add_lru(folio);
 	*new_page_allocated = true;
-	page = &folio->page;
-got_page:
+got_folio:
 	put_swap_device(si);
-	return page;
+	return folio;
 
 fail_unlock:
 	put_swap_folio(folio, entry);
@@ -545,16 +540,16 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	bool page_allocated;
 	struct mempolicy *mpol;
 	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
 	mpol = get_vma_policy(vma, addr, 0, &ilx);
-	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
 	mpol_cond_put(mpol);
 
 	if (page_allocated)
-		swap_readpage(page, false, plug);
-	return page;
+		swap_readpage(&folio->page, false, plug);
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
@@ -639,7 +634,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				    struct mempolicy *mpol, pgoff_t ilx)
 {
-	struct page *page;
+	struct folio *folio;
 	unsigned long entry_offset = swp_offset(entry);
 	unsigned long offset = entry_offset;
 	unsigned long start_offset, end_offset;
@@ -664,31 +659,31 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	blk_start_plug(&plug);
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
-		page = __read_swap_cache_async(
+		folio = __read_swap_cache_async(
 			swp_entry(swp_type(entry), offset),
 			gfp_mask, mpol, ilx, &page_allocated, false);
-		if (!page)
+		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false, &splug);
+			swap_readpage(&folio->page, false, &splug);
 			if (offset != entry_offset) {
-				SetPageReadahead(page);
+				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
 			}
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 	blk_finish_plug(&plug);
 	swap_read_unplug(splug);
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
 	if (unlikely(page_allocated))
-		swap_readpage(page, false, NULL);
-	zswap_page_swapin(page);
-	return page;
+		swap_readpage(&folio->page, false, NULL);
+	zswap_folio_swapin(folio);
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -806,7 +801,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 {
 	struct blk_plug plug;
 	struct swap_iocb *splug = NULL;
-	struct page *page;
+	struct folio *folio;
 	pte_t *pte = NULL, pentry;
 	unsigned long addr;
 	swp_entry_t entry;
@@ -839,18 +834,18 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 			continue;
 		pte_unmap(pte);
 		pte = NULL;
-		page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 						&page_allocated, false);
-		if (!page)
+		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false, &splug);
+			swap_readpage(&folio->page, false, &splug);
 			if (i != ra_info.offset) {
-				SetPageReadahead(page);
+				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
 			}
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 	if (pte)
 		pte_unmap(pte);
@@ -858,13 +853,13 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	swap_read_unplug(splug);
 	lru_add_drain();
 skip:
-	/* The page was likely read above, so no need for plugging here */
-	page = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
+	/* The folio was likely read above, so no need for plugging here */
+	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
 					&page_allocated, false);
 	if (unlikely(page_allocated))
-		swap_readpage(page, false, NULL);
-	zswap_page_swapin(page);
-	return page;
+		swap_readpage(&folio->page, false, NULL);
+	zswap_folio_swapin(folio);
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 /**
mm/zswap.c (58 lines changed)
@@ -368,12 +368,12 @@ void zswap_lruvec_state_init(struct lruvec *lruvec)
 	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
 }
 
-void zswap_page_swapin(struct page *page)
+void zswap_folio_swapin(struct folio *folio)
 {
 	struct lruvec *lruvec;
 
-	if (page) {
-		lruvec = folio_lruvec(page_folio(page));
+	if (folio) {
+		lruvec = folio_lruvec(folio);
 		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 	}
 }
@@ -1383,14 +1383,14 @@ static void __zswap_load(struct zswap_entry *entry, struct page *page)
 * writeback code
 **********************************/
 /*
- * Attempts to free an entry by adding a page to the swap cache,
- * decompressing the entry data into the page, and issuing a
- * bio write to write the page back to the swap device.
+ * Attempts to free an entry by adding a folio to the swap cache,
+ * decompressing the entry data into the folio, and issuing a
+ * bio write to write the folio back to the swap device.
 *
- * This can be thought of as a "resumed writeback" of the page
+ * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
- * in the first place. After the page has been decompressed into
+ * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
@@ -1398,56 +1398,56 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 				 struct zswap_tree *tree)
 {
 	swp_entry_t swpentry = entry->swpentry;
-	struct page *page;
+	struct folio *folio;
 	struct mempolicy *mpol;
-	bool page_was_allocated;
+	bool folio_was_allocated;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 	};
 
-	/* try to allocate swap cache page */
+	/* try to allocate swap cache folio */
 	mpol = get_task_policy(current);
-	page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
-				NO_INTERLEAVE_INDEX, &page_was_allocated, true);
-	if (!page)
+	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
+				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+	if (!folio)
 		return -ENOMEM;
 
 	/*
-	 * Found an existing page, we raced with load/swapin. We generally
-	 * writeback cold pages from zswap, and swapin means the page just
-	 * became hot. Skip this page and let the caller find another one.
+	 * Found an existing folio, we raced with load/swapin. We generally
+	 * writeback cold folios from zswap, and swapin means the folio just
+	 * became hot. Skip this folio and let the caller find another one.
	 */
-	if (!page_was_allocated) {
-		put_page(page);
+	if (!folio_was_allocated) {
+		folio_put(folio);
 		return -EEXIST;
 	}
 
 	/*
-	 * Page is locked, and the swapcache is now secured against
+	 * folio is locked, and the swapcache is now secured against
 	 * concurrent swapping to and from the slot. Verify that the
 	 * swap entry hasn't been invalidated and recycled behind our
 	 * backs (our zswap_entry reference doesn't prevent that), to
-	 * avoid overwriting a new swap page with old compressed data.
+	 * avoid overwriting a new swap folio with old compressed data.
	 */
 	spin_lock(&tree->lock);
 	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
 		spin_unlock(&tree->lock);
-		delete_from_swap_cache(page_folio(page));
+		delete_from_swap_cache(folio);
 		return -ENOMEM;
 	}
 	spin_unlock(&tree->lock);
 
-	__zswap_load(entry, page);
+	__zswap_load(entry, &folio->page);
 
-	/* page is up to date */
-	SetPageUptodate(page);
+	/* folio is up to date */
+	folio_mark_uptodate(folio);
 
 	/* move it to the tail of the inactive list after end_writeback */
-	SetPageReclaim(page);
+	folio_set_reclaim(folio);
 
 	/* start writeback */
-	__swap_writepage(page, &wbc);
-	put_page(page);
+	__swap_writepage(&folio->page, &wbc);
+	folio_put(folio);
 
 	return 0;
 }
@@ -1593,7 +1593,7 @@ bool zswap_store(struct folio *folio)
 
 	dst = acomp_ctx->buffer;
 	sg_init_table(&input, 1);
-	sg_set_page(&input, page, PAGE_SIZE, 0);
+	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
 
 	/*
 	 * We need PAGE_SIZE * 2 here since there maybe over-compression case,