mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-09 14:50:19 +00:00
Merge branch 'slab/for-6.2/fit_rcu_head' into slab/for-next
A series by myself to reorder fields in struct slab to allow the embedded rcu_head to grow (for debugging purposes). It requires changes to isolate_movable_page() to skip slab pages, which can otherwise become false-positive __PageMovable because the slab allocator reuses the low bits of page->mapping.
This commit is contained in:
commit 76537db3b9
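
For context on the false positive the commit message refers to: __PageMovable() is not a real page flag but is inferred from the low two bits of page->mapping, and struct slab overlays that word with its own data, so a slab page can accidentally carry the movable bit pattern. A minimal user-space sketch of the check; the two constants mirror include/linux/page-flags.h, everything else is made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MAPPING_MOVABLE    0x2UL
#define PAGE_MAPPING_FLAGS      0x3UL

/* Stand-in for the kernel's __PageMovable(): a pure bit test. */
static bool fake_page_movable(uintptr_t mapping_word)
{
        return (mapping_word & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE;
}

int main(void)
{
        /*
         * A slab freelist pointer overlaid on page->mapping that happens
         * to end in binary ...10 passes the test despite being no mapping
         * at all.
         */
        uintptr_t slab_overlay = 0x12345602UL;

        printf("looks movable: %d\n", fake_page_movable(slab_overlay));
        return 0;
}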
mm/migrate.c (15 lines changed)
@@ -74,13 +74,22 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	if (unlikely(!get_page_unless_zero(page)))
 		goto out;
 
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
+	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
+	smp_rmb();
 	/*
-	 * Check PageMovable before holding a PG_lock because page's owner
-	 * assumes anybody doesn't touch PG_lock of newly allocated page
-	 * so unconditionally grabbing the lock ruins page's owner side.
+	 * Check movable flag before taking the page lock because
+	 * we use non-atomic bitops on newly allocated page flags so
+	 * unconditionally grabbing the lock ruins page's owner side.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putpage;
+	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
+	smp_rmb();
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
+
 	/*
 	 * As movable pages are not isolated from LRU lists, concurrent
 	 * compaction threads can race against page migration functions
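
The smp_rmb() calls added above pair with the smp_wmb() calls added on the slab side: allocation sets the slab flag before reusing the mapping word, and freeing resets the word before clearing the flag. Below is a hedged sketch of just the allocation-side pairing, transcribed into C11 atomics with toy names (not kernel APIs; the real function checks PageSlab twice, once per barrier):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_bool slab_flag;           /* stands in for PG_slab */
static _Atomic uintptr_t mapping_word;  /* stands in for page->mapping */

/* Allocation side, as in alloc_slab_page(): flag first, then the word. */
static void publish_slab(uintptr_t overlay_bits)
{
        atomic_store_explicit(&slab_flag, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
        atomic_store_explicit(&mapping_word, overlay_bits,
                              memory_order_relaxed);
}

/* Isolation side, as in isolate_movable_page(): word, then the flag. */
static bool isolate_check(void)
{
        uintptr_t w = atomic_load_explicit(&mapping_word,
                                           memory_order_relaxed);

        if ((w & 0x3) != 0x2)
                return false;   /* not __PageMovable at all */
        atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
        /* If w was written by publish_slab(), this load sees the flag. */
        return !atomic_load_explicit(&slab_flag, memory_order_relaxed);
}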
mm/slab.c (6 lines changed)

@@ -1370,6 +1370,8 @@ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	account_slab(slab, cachep->gfporder, cachep, flags);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);
@@ -1387,9 +1389,11 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	page_mapcount_reset(folio_page(folio, 0));
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
mm/slab.h (54 lines changed)
@@ -11,37 +11,43 @@ struct slab {
 
 #if defined(CONFIG_SLAB)
 
+	struct kmem_cache *slab_cache;
 	union {
-		struct list_head slab_list;
+		struct {
+			struct list_head slab_list;
+			void *freelist;	/* array of free object indexes */
+			void *s_mem;	/* first object */
+		};
 		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache;
-	void *freelist;	/* array of free object indexes */
-	void *s_mem;	/* first object */
 	unsigned int active;
 
 #elif defined(CONFIG_SLUB)
 
-	union {
-		struct list_head slab_list;
-		struct rcu_head rcu_head;
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-		struct {
-			struct slab *next;
-			int slabs;	/* Nr of slabs left */
-		};
-#endif
-	};
 	struct kmem_cache *slab_cache;
-	/* Double-word boundary */
-	void *freelist;		/* first free object */
 	union {
-		unsigned long counters;
 		struct {
-			unsigned inuse:16;
-			unsigned objects:15;
-			unsigned frozen:1;
+			union {
+				struct list_head slab_list;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+				struct {
+					struct slab *next;
+					int slabs;	/* Nr of slabs left */
+				};
+#endif
+			};
+			/* Double-word boundary */
+			void *freelist;		/* first free object */
+			union {
+				unsigned long counters;
+				struct {
+					unsigned inuse:16;
+					unsigned objects:15;
+					unsigned frozen:1;
+				};
+			};
 		};
+		struct rcu_head rcu_head;
 	};
 	unsigned int __unused;
 
@@ -66,9 +72,10 @@ struct slab {
 #define SLAB_MATCH(pg, sl) \
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
-SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 #ifndef CONFIG_SLOB
-SLAB_MATCH(rcu_head, rcu_head);
+SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
+#else
+SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 #endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
@@ -76,6 +83,9 @@ SLAB_MATCH(memcg_data, memcg_data);
 #endif
 #undef SLAB_MATCH
 static_assert(sizeof(struct slab) <= sizeof(struct page));
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
+static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
+#endif
 
 /**
  * folio_slab - Converts from folio to slab.
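
The SLAB_MATCH assertions are what make the reordering above safe: struct slab reuses the memory of struct page, so every overlaid field must land at the same offset, and whatever aliases compound_head must keep bit 0 clear so the page is never mistaken for a tail page. A self-contained sketch of the same compile-time technique, with toy types in place of the kernel's:

#include <assert.h>
#include <stddef.h>

struct toy_page {
        unsigned long flags;
        void *compound_head;
        int _refcount;
};

struct toy_slab {
        unsigned long __page_flags;
        void *slab_cache;       /* aliases compound_head; bit 0 stays clear */
        int __page_refcount;
};

#define TOY_MATCH(pg, sl)                                               \
        static_assert(offsetof(struct toy_page, pg) ==                  \
                      offsetof(struct toy_slab, sl),                    \
                      "offset mismatch: " #pg " vs " #sl)

TOY_MATCH(flags, __page_flags);
TOY_MATCH(compound_head, slab_cache);
TOY_MATCH(_refcount, __page_refcount);
#undef TOY_MATCH

static_assert(sizeof(struct toy_slab) <= sizeof(struct toy_page),
              "the overlay must not outgrow struct page");

int main(void) { return 0; }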
mm/slub.c (26 lines changed)
@@ -1800,6 +1800,8 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 
 	slab = folio_slab(folio);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	if (page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);
 
@@ -1999,17 +2001,11 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int order = folio_order(folio);
 	int pages = 1 << order;
 
-	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
-		void *p;
-
-		slab_pad_check(s, slab);
-		for_each_object(p, s, slab_address(slab), slab->objects)
-			check_object(s, slab, p, SLUB_RED_INACTIVE);
-	}
-
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
@@ -2025,9 +2021,17 @@ static void rcu_free_slab(struct rcu_head *h)
 
 static void free_slab(struct kmem_cache *s, struct slab *slab)
 {
-	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
+	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
+		void *p;
+
+		slab_pad_check(s, slab);
+		for_each_object(p, s, slab_address(slab), slab->objects)
+			check_object(s, slab, p, SLUB_RED_INACTIVE);
+	}
+
+	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
 		call_rcu(&slab->rcu_head, rcu_free_slab);
-	} else
+	else
 		__free_slab(s, slab);
 }
 
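
free_slab() defers the actual freeing through call_rcu(), and rcu_free_slab() recovers the struct slab from the embedded rcu_head with container_of(); that is what lets the rcu_head grow freely inside the union. The consistency checks move from __free_slab() into free_slab() so they run before call_rcu(), whose use of the now-larger rcu_head can clobber fields like slab->objects that the checks walk. A toy model of the callback shape in plain C, with an immediate "grace period" standing in for real RCU:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_rcu_head {
        void (*func)(struct toy_rcu_head *head);
};

struct toy_slab {
        int order;
        struct toy_rcu_head rcu_head;   /* free to grow in the new layout */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void toy_call_rcu(struct toy_rcu_head *head,
                         void (*func)(struct toy_rcu_head *))
{
        head->func = func;
        func(head);     /* a real implementation waits out a grace period */
}

static void toy_rcu_free_slab(struct toy_rcu_head *head)
{
        struct toy_slab *slab = container_of(head, struct toy_slab, rcu_head);

        printf("freeing slab of order %d\n", slab->order);
        free(slab);
}

int main(void)
{
        struct toy_slab *slab = calloc(1, sizeof(*slab));

        if (!slab)
                return 1;
        slab->order = 3;
        toy_call_rcu(&slab->rcu_head, toy_rcu_free_slab);
        return 0;
}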