mm: make vma cache SLAB_TYPESAFE_BY_RCU
To enable SLAB_TYPESAFE_BY_RCU for the vma cache we need to ensure that
object reuse before the RCU grace period is over will be detected by
lock_vma_under_rcu().

The current checks are sufficient as long as the vma is detached before it
is freed. The only place this is not currently happening is in exit_mmap().
Add the missing vma_mark_detached() in exit_mmap().

Another issue which might trick lock_vma_under_rcu() during vma reuse is
vm_area_dup(), which copies the entire content of the vma into a new one,
overwriting the new vma's vm_refcnt and temporarily making it appear
attached. This might trick a racing lock_vma_under_rcu() into operating on
a reused vma if it found the vma before it got reused. To prevent this
situation, we should ensure that vm_refcnt stays at the detached state (0)
while the vma is copied and advances to the attached state only after the
vma is added into the vma tree. Introduce vm_area_init_from(), which
preserves the new vma's vm_refcnt, and use it in vm_area_dup(). Since all
vmas are in the detached state with no current readers when they are freed,
lock_vma_under_rcu() will not be able to take vm_refcnt after the vma got
detached, even if the vma is later reused.

Finally, make vm_area_cachep SLAB_TYPESAFE_BY_RCU. This will facilitate
vm_area_struct reuse and will minimize the number of call_rcu() calls.

Link: https://lkml.kernel.org/r/20250111042604.3230628-17-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: kernel test robot <oliver.sang@intel.com>
Cc: Klara Modin <klarasmodin@gmail.com>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Sourav Panda <souravpanda@google.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
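[Editor's note: the safety argument above is easier to see in code. Below is a
minimal, self-contained userspace sketch of the reader-side pattern the commit
message describes: a reader may only pin a vma whose refcount shows it
attached, and because SLAB_TYPESAFE_BY_RCU can hand the same memory back as a
different vma, the reader must revalidate identity after pinning. Every
identifier in the sketch (vma_model, try_pin, lock_vma_model, ...) is a
hypothetical stand-in, not the kernel API; the real vm_refcnt protocol in
vma_start_read()/lock_vma_under_rcu() is more involved.]

/*
 * Userspace model of the reuse check that SLAB_TYPESAFE_BY_RCU forces on
 * lockless readers. Illustrative only; all names are invented.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct mm_model;			/* opaque stand-in for struct mm_struct */

struct vma_model {
	struct mm_model *mm;
	unsigned long start, end;
	atomic_int refcnt;		/* 0 == detached; never freed while > 0 */
};

/* Pin the object only if it is attached (refcnt > 0). */
static bool try_pin(struct vma_model *vma)
{
	int old = atomic_load(&vma->refcnt);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&vma->refcnt, &old, old + 1))
			return true;	/* pinned: cannot be freed under us */
	}
	return false;			/* detached: being freed or not yet in the tree */
}

/*
 * With SLAB_TYPESAFE_BY_RCU the lookup may return memory that was freed and
 * reused as a different vma, so after pinning we re-check identity.
 */
static struct vma_model *lock_vma_model(struct mm_model *mm, unsigned long addr,
					struct vma_model *candidate)
{
	if (!try_pin(candidate))
		return NULL;
	if (candidate->mm != mm || addr < candidate->start || addr >= candidate->end) {
		atomic_fetch_sub(&candidate->refcnt, 1);	/* unpin */
		return NULL;		/* reused object: fall back to the locked path */
	}
	return candidate;
}

[This is also why vm_area_dup() below must not copy vm_refcnt: a nonzero
copied count would let a racing reader pin a clone that is not yet in the
tree.]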
This commit is contained in:
parent d747fe2b5f
commit 19c5a0d3a9
include/linux/mm.h
@@ -258,8 +258,6 @@ void setup_initial_init_mm(void *start_code, void *end_code,
 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
 void vm_area_free(struct vm_area_struct *);
-/* Use only if VMA has no other users */
-void __vm_area_free(struct vm_area_struct *vma);
 
 #ifndef CONFIG_MMU
 extern struct rb_root nommu_region_tree;
include/linux/mm_types.h
@@ -574,6 +574,12 @@ static inline void *folio_get_private(struct folio *folio)
 
 typedef unsigned long vm_flags_t;
 
+/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
 /*
  * A region containing a mapping of a non-memory backed file under NOMMU
  * conditions. These are held in a global tree and are pinned by the VMAs that
@@ -677,6 +683,9 @@ struct vma_numab_state {
  *
  * Only explicitly marked struct members may be accessed by RCU readers before
  * getting a stable reference.
+ *
+ * WARNING: when adding new members, please update vm_area_init_from() to copy
+ * them during vm_area_struct content duplication.
  */
 struct vm_area_struct {
 	/* The first cache line has the info for VMA tree walking. */
@@ -687,9 +696,7 @@ struct vm_area_struct {
 			unsigned long vm_start;
 			unsigned long vm_end;
 		};
-#ifdef CONFIG_PER_VMA_LOCK
-		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
-#endif
+		freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
 	};
 
 /*
include/linux/slab.h
@@ -234,12 +234,6 @@ enum _slab_flag_bits {
 #define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
 #endif
 
-/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
kernel/fork.c
@@ -449,6 +449,42 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
 	return vma;
 }
 
+static void vm_area_init_from(const struct vm_area_struct *src,
+			      struct vm_area_struct *dest)
+{
+	dest->vm_mm = src->vm_mm;
+	dest->vm_ops = src->vm_ops;
+	dest->vm_start = src->vm_start;
+	dest->vm_end = src->vm_end;
+	dest->anon_vma = src->anon_vma;
+	dest->vm_pgoff = src->vm_pgoff;
+	dest->vm_file = src->vm_file;
+	dest->vm_private_data = src->vm_private_data;
+	vm_flags_init(dest, src->vm_flags);
+	memcpy(&dest->vm_page_prot, &src->vm_page_prot,
+	       sizeof(dest->vm_page_prot));
+	/*
+	 * src->shared.rb may be modified concurrently when called from
+	 * dup_mmap(), but the clone will reinitialize it.
+	 */
+	data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared)));
+	memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx,
+	       sizeof(dest->vm_userfaultfd_ctx));
+#ifdef CONFIG_ANON_VMA_NAME
+	dest->anon_name = src->anon_name;
+#endif
+#ifdef CONFIG_SWAP
+	memcpy(&dest->swap_readahead_info, &src->swap_readahead_info,
+	       sizeof(dest->swap_readahead_info));
+#endif
+#ifndef CONFIG_MMU
+	dest->vm_region = src->vm_region;
+#endif
+#ifdef CONFIG_NUMA
+	dest->vm_policy = src->vm_policy;
+#endif
+}
+
 struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 {
 	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
@@ -458,11 +494,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 
 	ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
 	ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
-	/*
-	 * orig->shared.rb may be modified concurrently, but the clone
-	 * will be reinitialized.
-	 */
-	data_race(memcpy(new, orig, sizeof(*new)));
+	vm_area_init_from(orig, new);
 	vma_lock_init(new, true);
 	INIT_LIST_HEAD(&new->anon_vma_chain);
 	vma_numab_state_init(new);
@@ -471,7 +503,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 	return new;
 }
 
-void __vm_area_free(struct vm_area_struct *vma)
+void vm_area_free(struct vm_area_struct *vma)
 {
 	/* The vma should be detached while being destroyed. */
 	vma_assert_detached(vma);
@@ -480,25 +512,6 @@ void __vm_area_free(struct vm_area_struct *vma)
 	kmem_cache_free(vm_area_cachep, vma);
 }
 
-#ifdef CONFIG_PER_VMA_LOCK
-static void vm_area_free_rcu_cb(struct rcu_head *head)
-{
-	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
-						  vm_rcu);
-
-	__vm_area_free(vma);
-}
-#endif
-
-void vm_area_free(struct vm_area_struct *vma)
-{
-#ifdef CONFIG_PER_VMA_LOCK
-	call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
-#else
-	__vm_area_free(vma);
-#endif
-}
-
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
@@ -3144,6 +3157,11 @@ void __init mm_cache_init(void)
 
 void __init proc_caches_init(void)
 {
+	struct kmem_cache_args args = {
+		.use_freeptr_offset = true,
+		.freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr),
+	};
+
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -3160,8 +3178,9 @@ void __init proc_caches_init(void)
 			sizeof(struct fs_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
-	vm_area_cachep = KMEM_CACHE(vm_area_struct,
-			SLAB_HWCACHE_ALIGN|SLAB_NO_MERGE|SLAB_PANIC|
+	vm_area_cachep = kmem_cache_create("vm_area_struct",
+			sizeof(struct vm_area_struct), &args,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
 			SLAB_ACCOUNT);
 	mmap_init();
 	nsproxy_cache_init();
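[Editor's note on the proc_caches_init() hunk above: with SLAB_TYPESAFE_BY_RCU
the allocator must not scribble its freelist pointer over fields that RCU
readers may still inspect after a free, so the cache dedicates an explicit
slot via kmem_cache_args. A hedged sketch of the same pattern for a
hypothetical cache follows; my_obj and my_cachep are invented names, while
the kmem_cache_create()/kmem_cache_args usage mirrors the hunk above.]

#include <linux/slab.h>
#include <linux/mm_types.h>	/* freeptr_t, per this commit */

struct my_obj {
	refcount_t ref;		/* may be read by RCU readers after a free */
	freeptr_t free;		/* reserved for the allocator's freelist */
};

static struct kmem_cache *my_cachep;

static int __init my_cache_init(void)
{
	struct kmem_cache_args args = {
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct my_obj, free),
	};

	/* Objects may be reused without an intervening RCU grace period. */
	my_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj), &args,
				      SLAB_TYPESAFE_BY_RCU | SLAB_ACCOUNT);
	return my_cachep ? 0 : -ENOMEM;
}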
mm/mmap.c
@@ -1364,7 +1364,8 @@ void exit_mmap(struct mm_struct *mm)
 	do {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
-		remove_vma(vma, /* unreachable = */ true);
+		vma_mark_detached(vma);
+		remove_vma(vma);
 		count++;
 		cond_resched();
 		vma = vma_next(&vmi);
mm/vma.c
@@ -406,19 +406,14 @@ static bool can_vma_merge_right(struct vma_merge_struct *vmg,
 /*
  * Close a vm structure and free it.
  */
-void remove_vma(struct vm_area_struct *vma, bool unreachable)
+void remove_vma(struct vm_area_struct *vma)
 {
 	might_sleep();
 	vma_close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	mpol_put(vma_policy(vma));
-	if (unreachable) {
-		vma_mark_detached(vma);
-		__vm_area_free(vma);
-	} else {
-		vm_area_free(vma);
-	}
+	vm_area_free(vma);
 }
 
 /*
@@ -1201,7 +1196,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	/* Remove and clean up vmas */
 	mas_set(mas_detach, 0);
 	mas_for_each(mas_detach, vma, ULONG_MAX)
-		remove_vma(vma, /* unreachable = */ false);
+		remove_vma(vma);
 
 	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
mm/vma.h
@@ -170,7 +170,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 		  unsigned long start, size_t len, struct list_head *uf,
 		  bool unlock);
 
-void remove_vma(struct vm_area_struct *vma, bool unreachable);
+void remove_vma(struct vm_area_struct *vma);
 
 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct vm_area_struct *next);
tools/testing/vma/vma_internal.h
@@ -693,14 +693,9 @@ static inline void mpol_put(struct mempolicy *)
 {
 }
 
-static inline void __vm_area_free(struct vm_area_struct *vma)
-{
-	free(vma);
-}
-
 static inline void vm_area_free(struct vm_area_struct *vma)
 {
-	__vm_area_free(vma);
+	free(vma);
 }
 
 static inline void lru_add_drain(void)