mm: use __page_cache_release() in folios_put()

Pass a pointer to the lruvec so we can take advantage of the
folio_lruvec_relock_irqsave().  Adjust the calling convention of
folio_lruvec_relock_irqsave() to suit and add a page_cache_release()
wrapper.

Link: https://lkml.kernel.org/r/20240227174254.710559-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit f1ee018bae, parent 24835f899c
Matthew Wilcox (Oracle), 2024-02-27 17:42:42 +00:00; committed by Andrew Morton
2 changed files with 37 additions and 41 deletions
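For background, the point of the new calling convention is that a caller can hold one lruvec lock across a whole batch and only drop and retake it when a folio belongs to a different lruvec. Below is a minimal illustrative sketch of that pattern; it is not part of the patch, and example_release_batch() is a hypothetical helper modelled on the folios_put_refs() loop in the diff:

/*
 * Illustrative sketch only, not part of this patch: batch LRU removal
 * using the new in/out lruvec convention.  example_release_batch() is
 * a hypothetical name; the real users are folios_put_refs() and
 * folio_batch_move_lru() below.
 */
static void example_release_batch(struct folio_batch *folios)
{
	struct lruvec *lruvec = NULL;
	unsigned long flags;
	unsigned int i;

	for (i = 0; i < folio_batch_count(folios); i++) {
		struct folio *folio = folios->folios[i];

		if (!folio_test_lru(folio))
			continue;

		/* Drops and retakes the lock only when the lruvec changes. */
		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
		lruvec_del_folio(lruvec, folio);
		__folio_clear_lru_flags(folio);
	}

	/* A non-NULL lruvec means its lock is still held here. */
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}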

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1705,18 +1705,18 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
 	return folio_lruvec_lock_irq(folio);
 }
 
-/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
-		struct lruvec *locked_lruvec, unsigned long *flags)
+/* Don't lock again iff folio's lruvec locked */
+static inline void folio_lruvec_relock_irqsave(struct folio *folio,
+		struct lruvec **lruvecp, unsigned long *flags)
 {
-	if (locked_lruvec) {
-		if (folio_matches_lruvec(folio, locked_lruvec))
-			return locked_lruvec;
+	if (*lruvecp) {
+		if (folio_matches_lruvec(folio, *lruvecp))
+			return;
 
-		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
 	}
 
-	return folio_lruvec_lock_irqsave(folio, flags);
+	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
 }
 
 #ifdef CONFIG_CGROUP_WRITEBACK

diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -74,22 +74,21 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
 	.lock = INIT_LOCAL_LOCK(lock),
 };
 
-/*
- * This path almost never happens for VM activity - pages are normally freed
- * in batches.  But it gets used by networking - and for compound pages.
- */
-static void __page_cache_release(struct folio *folio)
+static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
+		unsigned long *flagsp)
 {
 	if (folio_test_lru(folio)) {
-		struct lruvec *lruvec;
-		unsigned long flags;
-
-		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
-		lruvec_del_folio(lruvec, folio);
+		folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
+		lruvec_del_folio(*lruvecp, folio);
 		__folio_clear_lru_flags(folio);
-		unlock_page_lruvec_irqrestore(lruvec, flags);
 	}
-	/* See comment on folio_test_mlocked in folios_put() */
+
+	/*
+	 * In rare cases, when truncation or holepunching raced with
+	 * munlock after VM_LOCKED was cleared, Mlocked may still be
+	 * found set here.  This does not indicate a problem, unless
+	 * "unevictable_pgs_cleared" appears worryingly large.
+	 */
 	if (unlikely(folio_test_mlocked(folio))) {
 		long nr_pages = folio_nr_pages(folio);
@@ -99,9 +98,23 @@ static void __page_cache_release(struct folio *folio)
 	}
 }
 
+/*
+ * This path almost never happens for VM activity - pages are normally freed
+ * in batches.  But it gets used by networking - and for compound pages.
+ */
+static void page_cache_release(struct folio *folio)
+{
+	struct lruvec *lruvec = NULL;
+	unsigned long flags;
+
+	__page_cache_release(folio, &lruvec, &flags);
+	if (lruvec)
+		unlock_page_lruvec_irqrestore(lruvec, flags);
+}
+
 static void __folio_put_small(struct folio *folio)
 {
-	__page_cache_release(folio);
+	page_cache_release(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, 0);
 }
@@ -115,7 +128,7 @@ static void __folio_put_large(struct folio *folio)
 	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
 	 */
 	if (!folio_test_hugetlb(folio))
-		__page_cache_release(folio);
+		page_cache_release(folio);
 	destroy_large_folio(folio);
 }
@@ -216,7 +229,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
 			continue;
 
-		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
+		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
 
 		folio_set_lru(folio);
@@ -999,24 +1012,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			continue;
 		}
 
-		if (folio_test_lru(folio)) {
-			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
-					&flags);
-			lruvec_del_folio(lruvec, folio);
-			__folio_clear_lru_flags(folio);
-		}
-
-		/*
-		 * In rare cases, when truncation or holepunching raced with
-		 * munlock after VM_LOCKED was cleared, Mlocked may still be
-		 * found set here.  This does not indicate a problem, unless
-		 * "unevictable_pgs_cleared" appears worryingly large.
-		 */
-		if (unlikely(folio_test_mlocked(folio))) {
-			__folio_clear_mlocked(folio);
-			zone_stat_sub_folio(folio, NR_MLOCK);
-			count_vm_event(UNEVICTABLE_PGCLEARED);
-		}
+		__page_cache_release(folio, &lruvec, &flags);
 
 		if (j != i)
 			folios->folios[j] = folio;