mm: convert put_devmap_managed_page_refs() to put_devmap_managed_folio_refs()

All callers have a folio, so we can remove this use of
page_ref_sub_return().
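
A sketch of the caller-side conversion (illustrative only; the real call
sites are in the hunks below):

	/* before: callers reached through the folio for its head page */
	if (!put_devmap_managed_page_refs(&folio->page, refs))
		folio_put_refs(folio, refs);

	/* after: pass the folio directly; the implementation now uses
	 * folio_ref_sub_return() instead of page_ref_sub_return()
	 */
	if (!put_devmap_managed_folio_refs(folio, refs))
		folio_put_refs(folio, refs);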

Link: https://lkml.kernel.org/r/20240424191914.361554-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1441,17 +1441,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-bool __put_devmap_managed_page_refs(struct page *page, int refs);
-static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
+bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
+static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
 {
 	if (!static_branch_unlikely(&devmap_managed_key))
 		return false;
-	if (!is_zone_device_page(page))
+	if (!folio_is_zone_device(folio))
 		return false;
-	return __put_devmap_managed_page_refs(page, refs);
+	return __put_devmap_managed_folio_refs(folio, refs);
 }
 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
+static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
 {
 	return false;
 }
@@ -1575,7 +1575,7 @@ static inline void put_page(struct page *page)
 	 * For some devmap managed pages we need to catch refcount transition
 	 * from 2 to 1:
 	 */
-	if (put_devmap_managed_page_refs(&folio->page, 1))
+	if (put_devmap_managed_folio_refs(folio, 1))
 		return;
 	folio_put(folio);
 }

--- a/mm/gup.c
+++ b/mm/gup.c
@@ -89,7 +89,7 @@ static inline struct folio *try_get_folio(struct page *page, int refs)
 	 * belongs to this folio.
 	 */
 	if (unlikely(page_folio(page) != folio)) {
-		if (!put_devmap_managed_page_refs(&folio->page, refs))
+		if (!put_devmap_managed_folio_refs(folio, refs))
 			folio_put_refs(folio, refs);
 		goto retry;
 	}
@@ -156,7 +156,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
 	 */
 	if (unlikely((flags & FOLL_LONGTERM) &&
 		     !folio_is_longterm_pinnable(folio))) {
-		if (!put_devmap_managed_page_refs(&folio->page, refs))
+		if (!put_devmap_managed_folio_refs(folio, refs))
 			folio_put_refs(folio, refs);
 		return NULL;
 	}
@@ -198,7 +198,7 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 		refs *= GUP_PIN_COUNTING_BIAS;
 	}
 
-	if (!put_devmap_managed_page_refs(&folio->page, refs))
+	if (!put_devmap_managed_folio_refs(folio, refs))
 		folio_put_refs(folio, refs);
 }

--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -512,9 +512,9 @@ void zone_device_page_init(struct page *page)
 EXPORT_SYMBOL_GPL(zone_device_page_init);
 
 #ifdef CONFIG_FS_DAX
-bool __put_devmap_managed_page_refs(struct page *page, int refs)
+bool __put_devmap_managed_folio_refs(struct folio *folio, int refs)
 {
-	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
+	if (folio->page.pgmap->type != MEMORY_DEVICE_FS_DAX)
 		return false;
 
 	/*
@@ -522,9 +522,9 @@ bool __put_devmap_managed_page_refs(struct page *page, int refs)
 	 * refcount is 1, then the page is free and the refcount is
 	 * stable because nobody holds a reference on the page.
 	 */
-	if (page_ref_sub_return(page, refs) == 1)
-		wake_up_var(&page->_refcount);
+	if (folio_ref_sub_return(folio, refs) == 1)
+		wake_up_var(&folio->_refcount);
 	return true;
 }
-EXPORT_SYMBOL(__put_devmap_managed_page_refs);
+EXPORT_SYMBOL(__put_devmap_managed_folio_refs);
 #endif /* CONFIG_FS_DAX */

--- a/mm/swap.c
+++ b/mm/swap.c
@@ -980,7 +980,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 				unlock_page_lruvec_irqrestore(lruvec, flags);
 				lruvec = NULL;
 			}
-			if (put_devmap_managed_page_refs(&folio->page, nr_refs))
+			if (put_devmap_managed_folio_refs(folio, nr_refs))
 				continue;
 			if (folio_ref_sub_and_test(folio, nr_refs))
 				free_zone_device_folio(folio);