mm: multi-gen LRU: simplify lru_gen_look_around()
Update the folio generation in place with or without
current->reclaim_state->mm_walk. The LRU lock is held for longer if
mm_walk is NULL and the number of folios to update is more than
PAGEVEC_SIZE.

This causes a measurable regression from LRU lock contention during a
microbenchmark, but avoiding such a tiny regression is not worth the
extra complexity.

Link: https://lkml.kernel.org/r/20230118001827.1040870-8-talumbau@google.com
Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
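The heart of the simplification is the per-folio loop body: with an mm_walk
the generation update is batched as before, and without one the folio is now
updated in place instead of being recorded in a bitmap for deferred
processing. A condensed sketch of the new loop body, lifted from the diff
below (surrounding kernel context such as the PTE scan and locking is
omitted):

	if (walk) {
		/* a page table walker is running: batch the update */
		old_gen = folio_update_gen(folio, new_gen);
		if (old_gen >= 0 && old_gen != new_gen)
			update_batch_size(walk, folio, old_gen, new_gen);
		continue;
	}

	/* no mm_walk: update the folio in place */
	old_gen = folio_lru_gen(folio);
	if (old_gen < 0)
		folio_set_referenced(folio);
	else if (old_gen != new_gen)
		folio_activate(folio);	/* may contend on the LRU lock */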
This commit is contained in:
parent b5ff413361
commit abf086721a
mm/vmscan.c | 73
1 file changed, 23 insertions(+), 50 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
@@ -4587,13 +4587,12 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 {
 	int i;
-	pte_t *pte;
 	unsigned long start;
 	unsigned long end;
-	unsigned long addr;
 	struct lru_gen_mm_walk *walk;
 	int young = 0;
-	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
+	pte_t *pte = pvmw->pte;
+	unsigned long addr = pvmw->address;
 	struct folio *folio = pfn_folio(pvmw->pfn);
 	struct mem_cgroup *memcg = folio_memcg(folio);
 	struct pglist_data *pgdat = folio_pgdat(folio);
@@ -4610,25 +4609,28 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 	/* avoid taking the LRU lock under the PTL when possible */
 	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
 
-	start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
-	end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+	start = max(addr & PMD_MASK, pvmw->vma->vm_start);
+	end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
 
 	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
-		if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
+		if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
 			end = start + MIN_LRU_BATCH * PAGE_SIZE;
-		else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
+		else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
 			start = end - MIN_LRU_BATCH * PAGE_SIZE;
 		else {
-			start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
-			end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
+			start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
+			end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
 		}
 	}
 
-	pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
+	/* folio_update_gen() requires stable folio_memcg() */
+	if (!mem_cgroup_trylock_pages(memcg))
+		return;
 
-	rcu_read_lock();
 	arch_enter_lazy_mmu_mode();
 
+	pte -= (addr - start) / PAGE_SIZE;
+
 	for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
 		unsigned long pfn;
 
@@ -4653,56 +4655,27 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 		      !folio_test_swapcache(folio)))
 			folio_mark_dirty(folio);
 
+		if (walk) {
+			old_gen = folio_update_gen(folio, new_gen);
+			if (old_gen >= 0 && old_gen != new_gen)
+				update_batch_size(walk, folio, old_gen, new_gen);
+
+			continue;
+		}
+
 		old_gen = folio_lru_gen(folio);
 		if (old_gen < 0)
 			folio_set_referenced(folio);
 		else if (old_gen != new_gen)
-			__set_bit(i, bitmap);
+			folio_activate(folio);
 	}
 
 	arch_leave_lazy_mmu_mode();
-	rcu_read_unlock();
+	mem_cgroup_unlock_pages();
 
 	/* feedback from rmap walkers to page table walkers */
 	if (suitable_to_scan(i, young))
 		update_bloom_filter(lruvec, max_seq, pvmw->pmd);
-
-	if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
-		for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
-			folio = pfn_folio(pte_pfn(pte[i]));
-			folio_activate(folio);
-		}
-		return;
-	}
-
-	/* folio_update_gen() requires stable folio_memcg() */
-	if (!mem_cgroup_trylock_pages(memcg))
-		return;
-
-	if (!walk) {
-		spin_lock_irq(&lruvec->lru_lock);
-		new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
-	}
-
-	for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
-		folio = pfn_folio(pte_pfn(pte[i]));
-		if (folio_memcg_rcu(folio) != memcg)
-			continue;
-
-		old_gen = folio_update_gen(folio, new_gen);
-		if (old_gen < 0 || old_gen == new_gen)
-			continue;
-
-		if (walk)
-			update_batch_size(walk, folio, old_gen, new_gen);
-		else
-			lru_gen_update_size(lruvec, folio, old_gen, new_gen);
-	}
-
-	if (!walk)
-		spin_unlock_irq(&lruvec->lru_lock);
-
-	mem_cgroup_unlock_pages();
 }
 
 /******************************************************************************