mm: convert page_add_anon_rmap() to use a folio internally
The API for page_add_anon_rmap() needs to be page-based, because we can
add mappings of individual pages.  But inside the function, we want to
only call compound_head() once and then use the folio APIs instead of
the page APIs that each call compound_head().

Link: https://lkml.kernel.org/r/20230111142915.1001531-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
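To see why hoisting the head lookup matters, here is a minimal user-space
sketch of the pattern (toy types and names; nothing below is kernel code):
each page-based helper re-derives the head page, while the folio-style
version resolves it once and reuses it for every test.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins: a "folio" is the head of a compound page. */
	struct folio { bool large; bool ksm; };
	struct page  { struct folio *head; };

	/* Like the kernel's compound_head(): every call pays the lookup. */
	static struct folio *toy_compound_head(const struct page *page)
	{
		return page->head;
	}

	/* Page-based style: two tests, two head lookups. */
	static void page_style(const struct page *page)
	{
		if (toy_compound_head(page)->large)
			puts("large");
		if (!toy_compound_head(page)->ksm)
			puts("not ksm");
	}

	/* Folio-based style: one lookup, reused for every test. */
	static void folio_style(const struct page *page)
	{
		struct folio *folio = toy_compound_head(page);	/* like page_folio() */

		if (folio->large)
			puts("large");
		if (!folio->ksm)
			puts("not ksm");
	}

	int main(void)
	{
		struct folio f = { .large = true, .ksm = false };
		struct page p = { .head = &f };

		page_style(&p);
		folio_style(&p);
		return 0;
	}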
commit ee0800c2f6
parent 62beb906ef
 mm/rmap.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
@@ -1207,10 +1207,11 @@ static void __page_check_anon_rmap(struct page *page,
  * and to ensure that PageAnon is not being upgraded racily to PageKsm
  * (but PageKsm is never downgraded to PageAnon).
  */
-void page_add_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address, rmap_t flags)
+void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+		unsigned long address, rmap_t flags)
 {
-	atomic_t *mapped;
+	struct folio *folio = page_folio(page);
+	atomic_t *mapped = &folio->_nr_pages_mapped;
 	int nr = 0, nr_pmdmapped = 0;
 	bool compound = flags & RMAP_COMPOUND;
 	bool first = true;
@@ -1219,20 +1220,18 @@ void page_add_anon_rmap(struct page *page,
 	if (likely(!compound)) {
 		first = atomic_inc_and_test(&page->_mapcount);
 		nr = first;
-		if (first && PageCompound(page)) {
-			mapped = subpages_mapcount_ptr(compound_head(page));
+		if (first && folio_test_large(folio)) {
 			nr = atomic_inc_return_relaxed(mapped);
 			nr = (nr < COMPOUND_MAPPED);
 		}
-	} else if (PageTransHuge(page)) {
+	} else if (folio_test_pmd_mappable(folio)) {
 		/* That test is redundant: it's for safety or to optimize out */
 
-		first = atomic_inc_and_test(compound_mapcount_ptr(page));
+		first = atomic_inc_and_test(&folio->_entire_mapcount);
 		if (first) {
-			mapped = subpages_mapcount_ptr(page);
 			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
 			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-				nr_pmdmapped = thp_nr_pages(page);
+				nr_pmdmapped = folio_nr_pages(folio);
 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
 				/* Raced ahead of a remove and another add? */
 				if (unlikely(nr < 0))
@@ -1248,11 +1247,11 @@ void page_add_anon_rmap(struct page *page,
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 
 	if (nr_pmdmapped)
-		__mod_lruvec_page_state(page, NR_ANON_THPS, nr_pmdmapped);
+		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
 	if (nr)
-		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
+		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
 
-	if (likely(!PageKsm(page))) {
+	if (likely(!folio_test_ksm(folio))) {
 		/* address might be in next vma when migration races vma_adjust */
 		if (first)
 			__page_set_anon_rmap(page, vma, address,
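As the commit message notes, the external API stays page-based; only the
internals change.  A hedged usage sketch based on the signature above (the
rmap_t flag names are as I understand them from include/linux/rmap.h of this
era; locking and page-table setup are elided):

	/* map one small anonymous page into vma at address */
	page_add_anon_rmap(page, vma, address, RMAP_NONE);

	/* map an entire PMD-sized THP in one go */
	page_add_anon_rmap(page, vma, address, RMAP_COMPOUND);

Callers therefore need no changes; the single page_folio() call at the top
of the function is what lets every later test use the folio_* accessors.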