vmscan: report vm_flags in page_referenced()
Collect vma->vm_flags of the VMAs that actually referenced the page. This prepares for more informed reclaim heuristics, e.g. to protect executable file pages more aggressively. For now only the VM_EXEC bit will be used by the caller.

Thanks to Johannes, Peter and Minchan for all the good tips.

Acked-by: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 608e8e66a1
commit 6fe6b7e357
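The commit message notes that only VM_EXEC will matter to callers for now. As a rough illustration of how a reclaim-side caller could consult the collected flags through the new signature, here is a minimal sketch; the exec-protection heuristic itself is not part of this commit, and keep_page_active() below is a hypothetical helper used only for illustration:

	unsigned long vm_flags;
	int referenced;

	/*
	 * The third argument is the target memory cgroup (may be NULL);
	 * the new fourth argument receives the OR of vma->vm_flags from
	 * every VMA that actually referenced the page.
	 */
	referenced = page_referenced(page, 0, sc->mem_cgroup, &vm_flags);

	/* Hypothetical heuristic: prefer keeping executable file pages. */
	if (referenced && (vm_flags & VM_EXEC))
		keep_page_active(page);	/* illustrative only, not a kernel API */

The diffs below add exactly this out-parameter and wire it through rmap and vmscan.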
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
+int page_referenced(struct page *, int is_locked,
+			struct mem_cgroup *cnt, unsigned long *vm_flags);
 int try_to_unmap(struct page *, int ignore_refs);
 
 /*
@@ -117,7 +118,7 @@ int try_to_munlock(struct page *);
 #define anon_vma_prepare(vma)	(0)
 #define anon_vma_link(vma)	do {} while (0)
 
-#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
+#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
mm/rmap.c (37 changed lines)
@@ -333,7 +333,9 @@ static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
 static int page_referenced_one(struct page *page,
-			struct vm_area_struct *vma, unsigned int *mapcount)
+			       struct vm_area_struct *vma,
+			       unsigned int *mapcount,
+			       unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -381,11 +383,14 @@ static int page_referenced_one(struct page *page,
 	(*mapcount)--;
 	pte_unmap_unlock(pte, ptl);
 out:
+	if (referenced)
+		*vm_flags |= vma->vm_flags;
 	return referenced;
 }
 
 static int page_referenced_anon(struct page *page,
-				struct mem_cgroup *mem_cont)
+				struct mem_cgroup *mem_cont,
+				unsigned long *vm_flags)
 {
 	unsigned int mapcount;
 	struct anon_vma *anon_vma;
@@ -405,7 +410,8 @@ static int page_referenced_anon(struct page *page,
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma, &mapcount);
+		referenced += page_referenced_one(page, vma,
+						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
 	}
@@ -418,6 +424,7 @@ static int page_referenced_anon(struct page *page,
  * page_referenced_file - referenced check for object-based rmap
  * @page: the page we're checking references on.
  * @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * For an object-based mapped page, find all the places it is mapped and
  * check/clear the referenced flag. This is done by following the page->mapping
@@ -427,7 +434,8 @@ static int page_referenced_anon(struct page *page,
  * This function is only called from page_referenced for object-based pages.
  */
 static int page_referenced_file(struct page *page,
-				struct mem_cgroup *mem_cont)
+				struct mem_cgroup *mem_cont,
+				unsigned long *vm_flags)
 {
 	unsigned int mapcount;
 	struct address_space *mapping = page->mapping;
@@ -467,7 +475,8 @@ static int page_referenced_file(struct page *page,
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma, &mapcount);
+		referenced += page_referenced_one(page, vma,
+						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
 	}
@@ -481,29 +490,35 @@ static int page_referenced_file(struct page *page,
  * @page: the page to test
  * @is_locked: caller holds lock on the page
  * @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked,
-			struct mem_cgroup *mem_cont)
+int page_referenced(struct page *page,
+		    int is_locked,
+		    struct mem_cgroup *mem_cont,
+		    unsigned long *vm_flags)
 {
 	int referenced = 0;
 
 	if (TestClearPageReferenced(page))
 		referenced++;
 
+	*vm_flags = 0;
 	if (page_mapped(page) && page->mapping) {
 		if (PageAnon(page))
-			referenced += page_referenced_anon(page, mem_cont);
+			referenced += page_referenced_anon(page, mem_cont,
+								vm_flags);
 		else if (is_locked)
-			referenced += page_referenced_file(page, mem_cont);
+			referenced += page_referenced_file(page, mem_cont,
+								vm_flags);
 		else if (!trylock_page(page))
 			referenced++;
 		else {
 			if (page->mapping)
-				referenced +=
-					page_referenced_file(page, mem_cont);
+				referenced += page_referenced_file(page,
+							mem_cont, vm_flags);
 			unlock_page(page);
 		}
 	}
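Taken together, the mm/rmap.c changes give *vm_flags simple OR-accumulation semantics: page_referenced() zeroes it once, and page_referenced_one() ORs in vma->vm_flags only for VMAs whose PTE was actually found young. A small sketch of the resulting contract, with assumed mappings for illustration (the mm/vmscan.c callers below already pass &vm_flags but do not act on it yet):

	unsigned long vm_flags;
	int refs;

	/* Caller holds the page lock here, hence is_locked = 1;
	 * a NULL mem_cgroup skips the cgroup filter. */
	refs = page_referenced(page, 1, NULL, &vm_flags);

	/*
	 * Assumed example: if the page was referenced through one VM_EXEC
	 * mapping and one VM_WRITE mapping, refs counts both and vm_flags
	 * now contains VM_EXEC | VM_WRITE; VMAs that did not reference the
	 * page contribute nothing.
	 */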
mm/vmscan.c

@@ -577,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
+	unsigned long vm_flags;
 
 	cond_resched();
 
@@ -627,7 +628,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		}
 
-		referenced = page_referenced(page, 1, sc->mem_cgroup);
+		referenced = page_referenced(page, 1,
+						sc->mem_cgroup, &vm_flags);
 		/* In active use or really unfreeable? Activate it. */
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
 					referenced && page_mapping_inuse(page))
@@ -1208,6 +1210,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 {
 	unsigned long pgmoved;
 	unsigned long pgscanned;
+	unsigned long vm_flags;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);
 	struct page *page;
@@ -1248,7 +1251,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 		/* page_referenced clears PageReferenced */
 		if (page_mapping_inuse(page) &&
-		    page_referenced(page, 0, sc->mem_cgroup))
+		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
 			pgmoved++;
 
 		list_add(&page->lru, &l_inactive);