diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 21af80d5b711..be020d38b0a5 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -267,6 +267,7 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
  * Called by memory-failure.c to kill processes.
  */
 struct anon_vma *page_lock_anon_vma_read(struct page *page);
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 46fa179e32fb..968ad97bbffa 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -164,3 +164,10 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
+
+#ifdef CONFIG_MMU
+struct anon_vma *page_lock_anon_vma_read(struct page *page)
+{
+	return folio_lock_anon_vma_read(page_folio(page));
+}
+#endif
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 258913d5e036..aa8236848949 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -487,12 +487,13 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 				int force_early)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
 	pgoff_t pgoff;
 
-	av = page_lock_anon_vma_read(page);
+	av = folio_lock_anon_vma_read(folio);
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index c74de8af7eec..64655d345234 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -526,28 +526,28 @@ struct anon_vma *page_get_anon_vma(struct page *page)
  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
  * reference like with page_get_anon_vma() and then block on the mutex.
  */
-struct anon_vma *page_lock_anon_vma_read(struct page *page)
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio)
 {
 	struct anon_vma *anon_vma = NULL;
 	struct anon_vma *root_anon_vma;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
-	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
+	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
-	if (!page_mapped(page))
+	if (!folio_mapped(folio))
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 	root_anon_vma = READ_ONCE(anon_vma->root);
 	if (down_read_trylock(&root_anon_vma->rwsem)) {
 		/*
-		 * If the page is still mapped, then this anon_vma is still
+		 * If the folio is still mapped, then this anon_vma is still
 		 * its anon_vma, and holding the mutex ensures that it will
 		 * not go away, see anon_vma_free().
 		 */
-		if (!page_mapped(page)) {
+		if (!folio_mapped(folio)) {
 			up_read(&root_anon_vma->rwsem);
 			anon_vma = NULL;
 		}
@@ -560,7 +560,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 		goto out;
 	}
 
-	if (!page_mapped(page)) {
+	if (!folio_mapped(folio)) {
 		rcu_read_unlock();
 		put_anon_vma(anon_vma);
 		return NULL;
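
For reference, the caller-visible contract of the renamed function is unchanged by this conversion: it returns the folio's anon_vma with the root anon_vma rwsem held for read, or NULL if the folio is no longer mapped as anonymous memory, and the caller still drops the lock with page_unlock_anon_vma_read(). A minimal caller sketch follows; the function example_walk_anon is hypothetical and not part of this patch.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical caller sketch -- not part of the patch above. */
static void example_walk_anon(struct folio *folio)
{
	struct anon_vma *anon_vma;

	/*
	 * Returns NULL if the folio was unmapped in the meantime;
	 * on success the root anon_vma rwsem is held for read.
	 */
	anon_vma = folio_lock_anon_vma_read(folio);
	if (!anon_vma)
		return;	/* not actually mapped anymore */

	/* ... inspect VMAs on the anon_vma interval tree under the lock ... */

	page_unlock_anon_vma_read(anon_vma);
}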