mm: add folio_add_new_anon_rmap()
In contrast to other rmap functions, page_add_new_anon_rmap() is always
called with a freshly allocated page.  That means it can't be called with
a tail page.  Turn page_add_new_anon_rmap() into folio_add_new_anon_rmap()
and add a page_add_new_anon_rmap() wrapper.  Callers can be converted
individually.

[akpm@linux-foundation.org: fix NOMMU build.  page_add_new_anon_rmap() requires CONFIG_MMU]
[willy@infradead.org: folio-compat.c needs rmap.h]
Link: https://lkml.kernel.org/r/20230111142915.1001531-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4d510f3da4
parent eb01a2ad7e
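As a hedged illustration of the conversion this enables (not part of the
patch): the caller below is modeled on do_anonymous_page() in mm/memory.c,
and the "after" lines assume that caller has already been switched to
allocating a folio.

	/* Before: the rmap call takes a page, although a freshly
	 * allocated anonymous page can never be a tail page. */
	page_add_new_anon_rmap(page, vma, vmf->address);
	lru_cache_add_inactive_or_unevictable(page, vma);

	/* After: the tail-page case is ruled out by the type. */
	folio_add_new_anon_rmap(folio, vma, vmf->address);
	folio_add_lru_vma(folio, vma);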
include/linux/rmap.h

@@ -194,6 +194,8 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address);
+void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+		unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
mm/folio-compat.c

@@ -6,6 +6,7 @@

 #include <linux/migrate.h>
 #include <linux/pagemap.h>
+#include <linux/rmap.h>
 #include <linux/swap.h>
 #include "internal.h"

@@ -123,3 +124,13 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
+
+#ifdef CONFIG_MMU
+void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
+		unsigned long address)
+{
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
+	return folio_add_new_anon_rmap((struct folio *)page, vma, address);
+}
+#endif
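The direct cast in the wrapper is safe only because the VM_BUG_ON_PAGE()
rules out tail pages: for a non-tail page, struct folio overlays struct
page, so the cast and page_folio() agree.  A minimal sketch of the same
wrapper written with the explicit helper instead (illustrative only; the
patch uses the cheaper cast):

	void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
			unsigned long address)
	{
		/* page_folio() walks to the head page; for a new (never
		 * tail) page that walk is a no-op, so this is equivalent
		 * to the (struct folio *)page cast above. */
		folio_add_new_anon_rmap(page_folio(page), vma, address);
	}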
mm/rmap.c (37 changed lines)

@@ -1264,41 +1264,40 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 }

 /**
- * page_add_new_anon_rmap - add mapping to a new anonymous page
- * @page:	the page to add the mapping to
+ * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
+ * @folio:	The folio to add the mapping to.
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * If it's a compound page, it is accounted as a compound page. As the page
- * is new, it's assume to get mapped exclusively by a single process.
- *
- * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * Like page_add_anon_rmap() but must only be called on *new* folios.
  * This means the inc-and-test can be bypassed.
- * Page does not have to be locked.
+ * The folio does not have to be locked.
+ *
+ * If the folio is large, it is accounted as a THP.  As the folio
+ * is new, it's assumed to be mapped exclusively by a single process.
  */
-void page_add_new_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
+		unsigned long address)
 {
 	int nr;

 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
-	__SetPageSwapBacked(page);
+	__folio_set_swapbacked(folio);

-	if (likely(!PageCompound(page))) {
+	if (likely(!folio_test_pmd_mappable(folio))) {
 		/* increment count (starts at -1) */
-		atomic_set(&page->_mapcount, 0);
+		atomic_set(&folio->_mapcount, 0);
 		nr = 1;
 	} else {
-		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		/* increment count (starts at -1) */
-		atomic_set(compound_mapcount_ptr(page), 0);
-		atomic_set(subpages_mapcount_ptr(page), COMPOUND_MAPPED);
-		nr = thp_nr_pages(page);
-		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
+		atomic_set(&folio->_entire_mapcount, 0);
+		atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
+		nr = folio_nr_pages(folio);
+		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
 	}

-	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(page, vma, address, 1);
+	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
+	__page_set_anon_rmap(&folio->page, vma, address, 1);
 }

 /**
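To make the accounting above concrete, a hedged post-condition sketch for
a new PMD-mappable folio (field and constant names as in this commit; the
assertions are illustrative and do not appear in the patch):

	/* After folio_add_new_anon_rmap() on a PMD-mappable folio:
	 * one entire-folio mapping (the counter starts at -1, so 0
	 * means "mapped once"), and _nr_pages_mapped carries only the
	 * COMPOUND_MAPPED marker since no subpage is PTE-mapped yet. */
	VM_BUG_ON_FOLIO(atomic_read(&folio->_entire_mapcount) != 0, folio);
	VM_BUG_ON_FOLIO(atomic_read(&folio->_nr_pages_mapped) != COMPOUND_MAPPED, folio);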