memcg-v1: no need for memcg locking for MGLRU

While updating a folio's generation, MGLRU requires that the folio's
memcg association remain stable.  With charge migration deprecated,
MGLRU no longer needs to acquire locks to keep the folio and memcg
association stable.

[yuzhao@google.com: remove !rcu_read_lock_held() assertion]
  Link: https://lkml.kernel.org/r/ZykEtcHrQRq-KrBC@google.com
  Link: https://syzkaller.appspot.com/bug?extid=24f45b8beab9788e467e
  Link: https://lore.kernel.org/lkml/67294349.050a0220.701a.0010.GAE@google.com/
[akpm@linux-foundation.org: remove now-unused local]
[shakeel.butt@linux.dev: folio_rcu() fixup, per Yu Zhao]
  Link: https://lkml.kernel.org/r/iwmabnye3nl4merealrawt3bdvfii2pwavwrddrqpraoveet7h@ezrsdhjwwej7
Link: https://lkml.kernel.org/r/20241025012304.2473312-6-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    Shakeel Butt <shakeel.butt@linux.dev>
Date:      2024-10-24 18:23:02 -07:00
Committer: Andrew Morton <akpm@linux-foundation.org>
parent 568bcf4148
commit cf4a65539c

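As context for the diff below (not part of the patch itself): a minimal
before/after sketch of the locking pattern being dropped.  walker_old(),
walker_new() and do_walk() are hypothetical stand-ins for the MGLRU
page-table walkers; mem_cgroup_trylock_pages(), mem_cgroup_unlock_pages(),
lruvec_memcg() and folio_update_gen() are the kernel symbols the diff
actually touches.

/* Illustrative sketch only -- not part of the patch. */

/* Before: pin the folio<->memcg binding against charge migration. */
static void walker_old(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
{
	struct mem_cgroup *memcg = lruvec_memcg(lruvec);

	/* folio_update_gen() required a stable folio_memcg() */
	if (!mem_cgroup_trylock_pages(memcg))
		return;

	do_walk(walk);		/* may call folio_update_gen() */

	mem_cgroup_unlock_pages();
}

/* After: the folio<->memcg binding cannot change underneath the walker. */
static void walker_new(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
{
	do_walk(walk);		/* may call folio_update_gen() */
}

With memcg v1 charge migration deprecated, a charged folio can no longer
move between memcgs behind the walker's back, so folio_memcg() stays
stable without the trylock bracketing or the rcu_read_lock assertion.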
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3137,7 +3137,6 @@ static int folio_update_gen(struct folio *folio, int gen)
 	unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
 
 	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
-	VM_WARN_ON_ONCE(!rcu_read_lock_held());
 
 	do {
 		/* lru_gen_del_folio() has isolated this page? */
@@ -3353,7 +3352,7 @@ static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
 	if (folio_nid(folio) != pgdat->node_id)
 		return NULL;
 
-	if (folio_memcg_rcu(folio) != memcg)
+	if (folio_memcg(folio) != memcg)
 		return NULL;
 
 	/* file VMAs can contain anon pages from COW */
@@ -3649,10 +3648,8 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
 		.p4d_entry = walk_pud_range,
 		.walk_lock = PGWALK_RDLOCK,
 	};
-
 	int err;
 	struct lruvec *lruvec = walk->lruvec;
-	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 
 	walk->next_addr = FIRST_USER_ADDRESS;
 
@@ -3665,10 +3662,6 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
 		if (walk->seq != max_seq)
 			break;
 
-		/* folio_update_gen() requires stable folio_memcg() */
-		if (!mem_cgroup_trylock_pages(memcg))
-			break;
-
 		/* the caller might be holding the lock for write */
 		if (mmap_read_trylock(mm)) {
 			err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
@@ -3676,8 +3669,6 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
 			mmap_read_unlock(mm);
 		}
 
-		mem_cgroup_unlock_pages();
-
 		if (walk->batched) {
 			spin_lock_irq(&lruvec->lru_lock);
 			reset_batch_size(walk);
@@ -4099,10 +4090,6 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 		}
 	}
 
-	/* folio_update_gen() requires stable folio_memcg() */
-	if (!mem_cgroup_trylock_pages(memcg))
-		return true;
-
 	arch_enter_lazy_mmu_mode();
 
 	pte -= (addr - start) / PAGE_SIZE;
@@ -4147,7 +4134,6 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 	}
 
 	arch_leave_lazy_mmu_mode();
-	mem_cgroup_unlock_pages();
 
 	/* feedback from rmap walkers to page table walkers */
 	if (mm_state && suitable_to_scan(i, young))