swap: convert add_to_swap() to take a folio

The only caller already has a folio available, so this saves a conversion.
Also convert the return type to boolean.

Link: https://lkml.kernel.org/r/20220504182857.4013401-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
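
As a quick illustration of the converted interface (a minimal sketch, not part of the patch: the try_swap_out() wrapper below is a hypothetical name; add_to_swap(), the folio-lock requirement and the boolean return come from the change itself):

	/* Hypothetical wrapper showing the new calling convention. */
	static bool try_swap_out(struct folio *folio)
	{
		/* The caller must hold the folio lock. */
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

		/*
		 * add_to_swap() now allocates swap space and adds the folio
		 * to the swap cache, returning true on success and false if
		 * no swap slot could be allocated or the cache insert failed.
		 */
		return add_to_swap(folio);
	}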


@@ -32,7 +32,7 @@ extern struct address_space *swapper_spaces[];
 		>> SWAP_ADDRESS_SPACE_SHIFT])
 void show_swap_cache_info(void);
-int add_to_swap(struct page *page);
+bool add_to_swap(struct folio *folio);
 void *get_shadow_from_swap_cache(swp_entry_t entry);
 int add_to_swap_cache(struct page *page, swp_entry_t entry,
 			gfp_t gfp, void **shadowp);
@@ -119,9 +119,9 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
 	return find_get_page(mapping, index);
 }
 
-static inline int add_to_swap(struct page *page)
+static inline bool add_to_swap(struct folio *folio)
 {
-	return 0;
+	return false;
 }
 
 static inline void *get_shadow_from_swap_cache(swp_entry_t entry)


@@ -176,24 +176,26 @@ void __delete_from_swap_cache(struct page *page,
 }
 
 /**
- * add_to_swap - allocate swap space for a page
- * @page: page we want to move to swap
+ * add_to_swap - allocate swap space for a folio
+ * @folio: folio we want to move to swap
  *
- * Allocate swap space for the page and add the page to the
- * swap cache.  Caller needs to hold the page lock.
+ * Allocate swap space for the folio and add the folio to the
+ * swap cache.
+ *
+ * Context: Caller needs to hold the folio lock.
+ * Return: Whether the folio was added to the swap cache.
  */
-int add_to_swap(struct page *page)
+bool add_to_swap(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	swp_entry_t entry;
 	int err;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageUptodate(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
 
 	entry = folio_alloc_swap(folio);
 	if (!entry.val)
-		return 0;
+		return false;
 
 	/*
 	 * XArray node allocations from PF_MEMALLOC contexts could
@@ -206,7 +208,7 @@ int add_to_swap(struct page *page)
 	/*
 	 * Add it to the swap cache.
 	 */
-	err = add_to_swap_cache(page, entry,
+	err = add_to_swap_cache(&folio->page, entry,
 			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
 	if (err)
 		/*
@@ -215,22 +217,23 @@ int add_to_swap(struct page *page)
 		 */
 		goto fail;
 	/*
-	 * Normally the page will be dirtied in unmap because its pte should be
-	 * dirty. A special case is MADV_FREE page. The page's pte could have
-	 * dirty bit cleared but the page's SwapBacked bit is still set because
-	 * clearing the dirty bit and SwapBacked bit has no lock protected. For
-	 * such page, unmap will not set dirty bit for it, so page reclaim will
-	 * not write the page out. This can cause data corruption when the page
-	 * is swap in later. Always setting the dirty bit for the page solves
-	 * the problem.
+	 * Normally the folio will be dirtied in unmap because its
+	 * pte should be dirty. A special case is MADV_FREE page. The
+	 * page's pte could have dirty bit cleared but the folio's
+	 * SwapBacked flag is still set because clearing the dirty bit
+	 * and SwapBacked flag has no lock protected. For such folio,
+	 * unmap will not set dirty bit for it, so folio reclaim will
+	 * not write the folio out. This can cause data corruption when
+	 * the folio is swapped in later. Always setting the dirty flag
+	 * for the folio solves the problem.
 	 */
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 
-	return 1;
+	return true;
 
 fail:
-	put_swap_page(page, entry);
-	return 0;
+	put_swap_page(&folio->page, entry);
+	return false;
 }
 
 /*


@@ -1731,8 +1731,8 @@ retry:
 							page_list))
 					goto activate_locked;
 			}
-			if (!add_to_swap(page)) {
-				if (!PageTransHuge(page))
+			if (!add_to_swap(folio)) {
+				if (!folio_test_large(folio))
 					goto activate_locked_split;
 				/* Fallback to swap normal pages */
 				if (split_folio_to_list(folio,
@@ -1741,7 +1741,7 @@ retry:
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 					count_vm_event(THP_SWPOUT_FALLBACK);
 #endif
-					if (!add_to_swap(page))
+					if (!add_to_swap(folio))
 						goto activate_locked_split;
 				}
 