slab updates for 6.12

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEe7vIQRWZI0iWSE3xu+CwddJFiJoFAmbn5g0ACgkQu+CwddJF
 iJq+Uwf/aqnLNEpjUBzwUUhSojCpPnTtiyjv+AILTxoSTHmbu8OvN0W79+Rpbdmk
 O4QapAK+BCs+VL2VATwCCufcJ75Z78txO+buQE0DgwluFTIYZ+IwpUMPsK04ln6A
 FD1/uvP1QFx60heqcp2c4zWFBUpg4DE6ufx2A5kieO268lFcWLxyVlcdgRU79ZCt
 uAcV2yDLk3GvPGfxZwPKEmZUo/FmuSoBv0XgT+eWxmTu/R7hcpFse49OyjBH8Tvb
 8d/RCIFgXOr8dTIjtds7eenwB/is4TkRlctezEQ0jO9/JwL/BVOgXZjD1qCtNWqz
 is4TWK7VV+vdq1RD+0xC2hV/+uGEwQ==
 =+WAm
 -----END PGP SIGNATURE-----

Merge tag 'slab-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab updates from Vlastimil Babka:
 "This time it's mostly refactoring and improving APIs for slab users in
  the kernel, along with some debugging improvements.

   - kmem_cache_create() refactoring (Christian Brauner)

     Over the years, kmem_cache_create() has been growing new
     parameters, most of which are needed only by a small number of
     caches - most recently the rcu_freeptr_offset parameter.

     To avoid adding new parameters to kmem_cache_create() and adjusting
     all its callers, or creating new wrappers such as
     kmem_cache_create_rcu(), we can now pass extra parameters using the
     new struct kmem_cache_args. Not explicitly initialized fields
     default to values interpreted as unused.

     kmem_cache_create() is for now a wrapper that works both with the
     new form: kmem_cache_create(name, object_size, args, flags) and the
     legacy form: kmem_cache_create(name, object_size, align, flags,
     ctor)
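
     As an illustration only (the cache and struct names here are made
     up), the two forms the wrapper accepts look roughly like this:

       struct kmem_cache_args args = {
               .align = __alignof__(struct foo),
               .ctor  = foo_ctor,
       };

       /* new form: less common parameters passed via kmem_cache_args */
       foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
                                      &args, SLAB_ACCOUNT);

       /* legacy form: align and ctor map to the corresponding args fields */
       foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
                                      __alignof__(struct foo),
                                      SLAB_ACCOUNT, foo_ctor);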

   - kmem_cache_destroy() waits for kfree_rcu()'s in flight (Vlastimil
     Babka, Uladzislau Rezki)

     Since SLOB removal, kfree() is allowed for freeing objects
     allocated by kmem_cache_create(). By extension, kfree_rcu() is
     allowed as well, which allows converting simple call_rcu()
     callbacks that only do kmem_cache_free(), as there was never a
     kmem_cache_free_rcu() variant. However, for caches that can be
     destroyed e.g. on module removal, the cache owners knew to issue
     rcu_barrier() first to wait for the pending call_rcu()'s, and this
     is not sufficient for pending kfree_rcu()'s due to the latter's
     internal batching optimizations. Ulad has provided a new
     kvfree_rcu_barrier(), and to make its usage less error-prone,
     kmem_cache_destroy() now calls it. Additionally, destroying
     SLAB_TYPESAFE_BY_RCU caches now again issues rcu_barrier()
     synchronously instead of using an async work, because the past
     motivation for async work no longer applies. Users of custom
     call_rcu() callbacks should however keep calling rcu_barrier()
     before cache destruction.
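
     A rough sketch of the now-supported pattern (names are
     hypothetical):

       struct foo {
               long payload;
               struct rcu_head rcu;
       };

       /* p was allocated from foo_cachep via kmem_cache_alloc() */
       kfree_rcu(p, rcu);      /* replaces a call_rcu() callback that
                                  only did kmem_cache_free() */

       /*
        * Later, e.g. on module exit. kmem_cache_destroy() now calls
        * kvfree_rcu_barrier() internally, so no explicit barrier is
        * needed for the pending kfree_rcu() above.
        */
       kmem_cache_destroy(foo_cachep);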

   - Debugging use-after-free in SLAB_TYPESAFE_BY_RCU caches (Jann Horn)

     Currently, KASAN cannot catch UAFs in such caches as it is legal to
     access them within a grace period, and we only track the grace
     period when trying to free the underlying slab page. The new
     CONFIG_SLUB_RCU_DEBUG option changes the freeing of individual
     objects to be RCU-delayed, after which KASAN can poison them.
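
     Roughly the access pattern this catches, mirroring the new
     kmem_cache_rcu_uaf KUnit test included in the diff below (cache
     and pointer names shortened):

       rcu_read_lock();
       kmem_cache_free(cache, p);
       READ_ONCE(*p);  /* still legal: SLAB_TYPESAFE_BY_RCU object,
                          read section entered before the free */
       rcu_read_unlock();

       rcu_barrier();  /* let the RCU-delayed free run */
       READ_ONCE(*p);  /* use-after-free that KASAN can now report */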

   - Delayed memcg charging (Shakeel Butt)

     In some cases, the memcg is unknown at allocation time, such as
     when receiving network packets in softirq context. With
     kmem_cache_charge(), these objects can now be charged later, once
     the user and its memcg are known.
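
     A rough usage sketch (context and names invented for
     illustration):

       /* softirq context, target memcg not known yet */
       obj = kmem_cache_alloc(rx_cachep, GFP_ATOMIC);

       /* later, in process context of the task that owns the data */
       if (!kmem_cache_charge(obj, GFP_KERNEL)) {
               /* treat a failed charge like an allocation failure */
               kmem_cache_free(rx_cachep, obj);
               return -ENOMEM;
       }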

   - Misc fixes and improvements (Pedro Falcato, Axel Rasmussen,
     Christoph Lameter, Yan Zhen, Peng Fan, Xavier)"

* tag 'slab-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: (34 commits)
  mm, slab: restore kerneldoc for kmem_cache_create()
  io_uring: port to struct kmem_cache_args
  slab: make __kmem_cache_create() static inline
  slab: make kmem_cache_create_usercopy() static inline
  slab: remove kmem_cache_create_rcu()
  file: port to struct kmem_cache_args
  slab: create kmem_cache_create() compatibility layer
  slab: port KMEM_CACHE_USERCOPY() to struct kmem_cache_args
  slab: port KMEM_CACHE() to struct kmem_cache_args
  slab: remove rcu_freeptr_offset from struct kmem_cache
  slab: pass struct kmem_cache_args to do_kmem_cache_create()
  slab: pull kmem_cache_open() into do_kmem_cache_create()
  slab: pass struct kmem_cache_args to create_cache()
  slab: port kmem_cache_create_usercopy() to struct kmem_cache_args
  slab: port kmem_cache_create_rcu() to struct kmem_cache_args
  slab: port kmem_cache_create() to struct kmem_cache_args
  slab: add struct kmem_cache_args
  slab: s/__kmem_cache_create/do_kmem_cache_create/g
  memcg: add charging of already allocated slab objects
  mm/slab: Optimize the code logic in find_mergeable()
  ...
Linus Torvalds 2024-09-18 08:53:53 +02:00
commit bdf56c7580
15 changed files with 935 additions and 453 deletions


@@ -521,9 +521,14 @@ EXPORT_SYMBOL(__fput_sync);

void __init files_init(void)
{
-	filp_cachep = kmem_cache_create_rcu("filp", sizeof(struct file),
-				offsetof(struct file, f_freeptr),
-				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
+	struct kmem_cache_args args = {
+		.use_freeptr_offset = true,
+		.freeptr_offset = offsetof(struct file, f_freeptr),
+	};
+
+	filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
+				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}


@@ -175,13 +175,59 @@ static __always_inline void * __must_check kasan_init_slab_obj(
	return (void *)object;
}

-bool __kasan_slab_free(struct kmem_cache *s, void *object,
-			unsigned long ip, bool init);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s,
-						void *object, bool init)
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+			unsigned long ip);
+
+/**
+ * kasan_slab_pre_free - Check whether freeing a slab object is safe.
+ * @object: Object to be freed.
+ *
+ * This function checks whether freeing the given object is safe. It may
+ * check for double-free and invalid-free bugs and report them.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * @Return true if freeing the object is unsafe; false otherwise.
+ */
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+						void *object)
{
	if (kasan_enabled())
-		return __kasan_slab_free(s, object, _RET_IP_, init);
+		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}
bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
bool still_accessible);
/**
* kasan_slab_free - Poison, initialize, and quarantine a slab object.
* @object: Object to be freed.
* @init: Whether to initialize the object.
* @still_accessible: Whether the object contents are still accessible.
*
* This function informs that a slab object has been freed and is not
* supposed to be accessed anymore, except when @still_accessible is set
* (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
* grace period might not have passed yet).
*
* For KASAN modes that have integrated memory initialization
* (kasan_has_integrated_init() == true), this function also initializes
* the object's memory. For other modes, the @init argument is ignored.
*
* This function might also take ownership of the object to quarantine it.
* When this happens, KASAN will defer freeing the object to a later
* stage and handle it internally until then. The return value indicates
* whether KASAN took ownership of the object.
*
* This function is intended only for use by the slab allocator.
*
* @Return true if KASAN took ownership of the object; false otherwise.
*/
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
void *object, bool init,
bool still_accessible)
{
if (kasan_enabled())
return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}

@@ -371,7 +417,14 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
	return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+	return false;
+}
+
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+				   bool init, bool still_accessible)
{
	return false;
}


@@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
	kvfree(ptr);
}

+static inline void kvfree_rcu_barrier(void)
+{
+	rcu_barrier();
+}
+
#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else


@@ -35,6 +35,7 @@ static inline void rcu_virt_note_context_switch(void)
void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+void kvfree_rcu_barrier(void);
void rcu_barrier(void);
void rcu_momentary_eqs(void);


@@ -240,17 +240,173 @@ struct mem_cgroup;
 */
bool slab_is_available(void);

-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
-			unsigned int align, slab_flags_t flags,
-			void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_usercopy(const char *name,
-			unsigned int size, unsigned int align,
-			slab_flags_t flags,
-			unsigned int useroffset, unsigned int usersize,
-			void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
-			unsigned int freeptr_offset,
-			slab_flags_t flags);
+/**
+ * struct kmem_cache_args - Less common arguments for kmem_cache_create()
+ *
+ * Any uninitialized fields of the structure are interpreted as unused. The
+ * exception is @freeptr_offset where %0 is a valid value, so
+ * @use_freeptr_offset must be also set to %true in order to interpret the field
+ * as used. For @useroffset %0 is also valid, but only with non-%0
+ * @usersize.
+ *
+ * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
+ * fields unused.
+ */
struct kmem_cache_args {
/**
* @align: The required alignment for the objects.
*
* %0 means no specific alignment is requested.
*/
unsigned int align;
/**
* @useroffset: Usercopy region offset.
*
* %0 is a valid offset, when @usersize is non-%0
*/
unsigned int useroffset;
/**
* @usersize: Usercopy region size.
*
* %0 means no usercopy region is specified.
*/
unsigned int usersize;
/**
* @freeptr_offset: Custom offset for the free pointer
* in &SLAB_TYPESAFE_BY_RCU caches
*
* By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
* outside of the object. This might cause the object to grow in size.
* Cache creators that have a reason to avoid this can specify a custom
* free pointer offset in their struct where the free pointer will be
* placed.
*
* Note that placing the free pointer inside the object requires the
* caller to ensure that no fields are invalidated that are required to
* guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
* details).
*
* Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
* is specified, %use_freeptr_offset must be set %true.
*
* Note that @ctor currently isn't supported with custom free pointers
* as a @ctor requires an external free pointer.
*/
unsigned int freeptr_offset;
/**
* @use_freeptr_offset: Whether a @freeptr_offset is used.
*/
bool use_freeptr_offset;
/**
* @ctor: A constructor for the objects.
*
* The constructor is invoked for each object in a newly allocated slab
* page. It is the cache user's responsibility to free object in the
* same state as after calling the constructor, or deal appropriately
* with any differences between a freshly constructed and a reallocated
* object.
*
* %NULL means no constructor.
*/
void (*ctor)(void *);
};
struct kmem_cache *__kmem_cache_create_args(const char *name,
unsigned int object_size,
struct kmem_cache_args *args,
slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache_args kmem_args = {
.align = align,
.ctor = ctor,
};
return __kmem_cache_create_args(name, size, &kmem_args, flags);
}
/**
* kmem_cache_create_usercopy - Create a kmem cache with a region suitable
* for copying to userspace.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @useroffset: Usercopy region offset
* @usersize: Usercopy region size
* @ctor: A constructor for the objects, or %NULL.
*
* This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
* if whitelisting a single field is sufficient, or kmem_cache_create() with
* the necessary parameters passed via the args parameter (see
* &struct kmem_cache_args)
*
* Return: a pointer to the cache on success, NULL on failure.
*/
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *))
{
struct kmem_cache_args kmem_args = {
.align = align,
.ctor = ctor,
.useroffset = useroffset,
.usersize = usersize,
};
return __kmem_cache_create_args(name, size, &kmem_args, flags);
}
/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
struct kmem_cache_args *args,
slab_flags_t flags)
{
struct kmem_cache_args kmem_default_args = {};
/* Make sure we don't get passed garbage. */
if (WARN_ON_ONCE(args))
return ERR_PTR(-EINVAL);
return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}
/**
* kmem_cache_create - Create a kmem cache.
* @__name: A string which is used in /proc/slabinfo to identify this cache.
* @__object_size: The size of objects to be created in this cache.
* @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
* means defaults will be used for all the arguments.
*
* This is currently implemented as a macro using ``_Generic()`` to call
* either the new variant of the function, or a legacy one.
*
* The new variant has 4 parameters:
* ``kmem_cache_create(name, object_size, args, flags)``
*
* See __kmem_cache_create_args() which implements this.
*
* The legacy variant has 5 parameters:
* ``kmem_cache_create(name, object_size, align, flags, ctor)``
*
* The align and ctor parameters map to the respective fields of
* &struct kmem_cache_args
*
* Context: Cannot be called within a interrupt, but can be interrupted.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
#define kmem_cache_create(__name, __object_size, __args, ...) \
_Generic((__args), \
struct kmem_cache_args *: __kmem_cache_create_args, \
void *: __kmem_cache_default_args, \
default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

@@ -262,20 +418,23 @@ int kmem_cache_shrink(struct kmem_cache *s);
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
-	kmem_cache_create(#__struct, sizeof(struct __struct),		\
-			__alignof__(struct __struct), (__flags), NULL)
+	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
+			&(struct kmem_cache_args) {			\
+				.align = __alignof__(struct __struct),	\
+			}, (__flags))

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
-	kmem_cache_create_usercopy(#__struct,				\
-			sizeof(struct __struct),			\
-			__alignof__(struct __struct), (__flags),	\
-			offsetof(struct __struct, __field),		\
-			sizeof_field(struct __struct, __field), NULL)
+	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
+			&(struct kmem_cache_args) {			\
+				.align = __alignof__(struct __struct),	\
+				.useroffset = offsetof(struct __struct, __field), \
+				.usersize = sizeof_field(struct __struct, __field), \
+			}, (__flags))

/*
 * Common kmalloc functions provided by all allocators
@@ -556,6 +715,35 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
/**
* kmem_cache_charge - memcg charge an already allocated slab memory
* @objp: address of the slab object to memcg charge
* @gfpflags: describe the allocation context
*
* kmem_cache_charge allows charging a slab object to the current memcg,
* primarily in cases where charging at allocation time might not be possible
* because the target memcg is not known (i.e. softirq context)
*
* The objp should be pointer returned by the slab allocator functions like
* kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
* behavior can be controlled through gfpflags parameter, which affects how the
* necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
* that overcharging is requested instead of failure, but is not applied for the
* internal metadata allocation.
*
* There are several cases where it will return true even if the charging was
* not done:
* More specifically:
*
* 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
* 2. Already charged slab objects.
* 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
* without __GFP_ACCOUNT
* 4. Allocating internal metadata has failed
*
* Return: true if charge was successful otherwise false.
*/
bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);

kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,


@@ -3755,6 +3755,11 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,

static int __init io_uring_init(void)
{
+	struct kmem_cache_args kmem_args = {
+		.useroffset = offsetof(struct io_kiocb, cmd.data),
+		.usersize = sizeof_field(struct io_kiocb, cmd.data),
+	};
+
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \

@@ -3839,12 +3844,9 @@ static int __init io_uring_init(void)
	 * range, and HARDENED_USERCOPY will complain if we haven't
	 * correctly annotated this range.
	 */
-	req_cachep = kmem_cache_create_usercopy("io_kiocb",
-				sizeof(struct io_kiocb), 0,
-				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
-				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
-				offsetof(struct io_kiocb, cmd.data),
-				sizeof_field(struct io_kiocb, cmd.data), NULL);
+	req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
+				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
+				SLAB_TYPESAFE_BY_RCU);

	io_buf_cachep = KMEM_CACHE(io_buffer,
			  SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);


@@ -3564,18 +3564,15 @@ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
}

/*
- * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
+ * Return: %true if a work is queued, %false otherwise.
 */
-static void kfree_rcu_monitor(struct work_struct *work)
+static bool
+kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
{
-	struct kfree_rcu_cpu *krcp = container_of(work,
-		struct kfree_rcu_cpu, monitor_work.work);
	unsigned long flags;
+	bool queued = false;
	int i, j;

-	// Drain ready for reclaim.
-	kvfree_rcu_drain_ready(krcp);
-
	raw_spin_lock_irqsave(&krcp->lock, flags);

	// Attempt to start a new batch.

@@ -3614,11 +3611,27 @@ static void kfree_rcu_monitor(struct work_struct *work)
			// be that the work is in the pending state when
			// channels have been detached following by each
			// other.
-			queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
+			queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
		}
	}

	raw_spin_unlock_irqrestore(&krcp->lock, flags);
return queued;
}
/*
* This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
*/
static void kfree_rcu_monitor(struct work_struct *work)
{
struct kfree_rcu_cpu *krcp = container_of(work,
struct kfree_rcu_cpu, monitor_work.work);
// Drain ready for reclaim.
kvfree_rcu_drain_ready(krcp);
// Queue a batch for a rest.
kvfree_rcu_queue_batch(krcp);
	// If there is nothing to detach, it means that our job is
	// successfully done here. In case of having at least one

@@ -3840,6 +3853,86 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
/**
* kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
*
* Note that a single argument of kvfree_rcu() call has a slow path that
* triggers synchronize_rcu() following by freeing a pointer. It is done
* before the return from the function. Therefore for any single-argument
* call that will result in a kfree() to a cache that is to be destroyed
* during module exit, it is developer's responsibility to ensure that all
* such calls have returned before the call to kmem_cache_destroy().
*/
void kvfree_rcu_barrier(void)
{
struct kfree_rcu_cpu_work *krwp;
struct kfree_rcu_cpu *krcp;
bool queued;
int i, cpu;
/*
* Firstly we detach objects and queue them over an RCU-batch
* for all CPUs. Finally queued works are flushed for each CPU.
*
* Please note. If there are outstanding batches for a particular
* CPU, those have to be finished first following by queuing a new.
*/
for_each_possible_cpu(cpu) {
krcp = per_cpu_ptr(&krc, cpu);
/*
* Check if this CPU has any objects which have been queued for a
* new GP completion. If not(means nothing to detach), we are done
* with it. If any batch is pending/running for this "krcp", below
* per-cpu flush_rcu_work() waits its completion(see last step).
*/
if (!need_offload_krc(krcp))
continue;
while (1) {
/*
* If we are not able to queue a new RCU work it means:
* - batches for this CPU are still in flight which should
* be flushed first and then repeat;
* - no objects to detach, because of concurrency.
*/
queued = kvfree_rcu_queue_batch(krcp);
/*
* Bail out, if there is no need to offload this "krcp"
* anymore. As noted earlier it can run concurrently.
*/
if (queued || !need_offload_krc(krcp))
break;
/* There are ongoing batches. */
for (i = 0; i < KFREE_N_BATCHES; i++) {
krwp = &(krcp->krw_arr[i]);
flush_rcu_work(&krwp->rcu_work);
}
}
}
/*
* Now we guarantee that all objects are flushed.
*/
for_each_possible_cpu(cpu) {
krcp = per_cpu_ptr(&krc, cpu);
/*
* A monitor work can drain ready to reclaim objects
* directly. Wait its completion if running or pending.
*/
cancel_delayed_work_sync(&krcp->monitor_work);
for (i = 0; i < KFREE_N_BATCHES; i++) {
krwp = &(krcp->krw_arr[i]);
flush_rcu_work(&krwp->rcu_work);
}
}
}
EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{


@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/rcupdate.h>
#include "../mm/slab.h"

static struct kunit_resource resource;

@@ -157,6 +158,34 @@ static void test_kmalloc_redzone_access(struct kunit *test)
	kmem_cache_destroy(s);
}
struct test_kfree_rcu_struct {
struct rcu_head rcu;
};
static void test_kfree_rcu(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
sizeof(struct test_kfree_rcu_struct),
SLAB_NO_MERGE);
struct test_kfree_rcu_struct *p = kmem_cache_alloc(s, GFP_KERNEL);
kfree_rcu(p, rcu);
kmem_cache_destroy(s);
KUNIT_EXPECT_EQ(test, 0, slab_errors);
}
static void test_leak_destroy(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
64, SLAB_NO_MERGE);
kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_destroy(s);
KUNIT_EXPECT_EQ(test, 1, slab_errors);
}
static int test_init(struct kunit *test)
{
	slab_errors = 0;

@@ -177,6 +206,8 @@ static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_redzone_free),
	KUNIT_CASE(test_kmalloc_redzone_access),
+	KUNIT_CASE(test_kfree_rcu),
+	KUNIT_CASE(test_leak_destroy),
	{}
};


@@ -70,6 +70,38 @@ config SLUB_DEBUG_ON
	  off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
	  "slab_debug=-".
config SLUB_RCU_DEBUG
bool "Enable UAF detection in TYPESAFE_BY_RCU caches (for KASAN)"
depends on SLUB_DEBUG
# SLUB_RCU_DEBUG should build fine without KASAN, but is currently useless
# without KASAN, so mark it as a dependency of KASAN for now.
depends on KASAN
default KASAN_GENERIC || KASAN_SW_TAGS
help
Make SLAB_TYPESAFE_BY_RCU caches behave approximately as if the cache
was not marked as SLAB_TYPESAFE_BY_RCU and every caller used
kfree_rcu() instead.
This is intended for use in combination with KASAN, to enable KASAN to
detect use-after-free accesses in such caches.
(KFENCE is able to do that independent of this flag.)
This might degrade performance.
Unfortunately this also prevents a very specific bug pattern from
triggering (insufficient checks against an object being recycled
within the RCU grace period); so this option can be turned off even on
KASAN builds, in case you want to test for such a bug.
If you're using this for testing bugs / fuzzing and care about
catching all the bugs WAY more than performance, you might want to
also turn on CONFIG_RCU_STRICT_GRACE_PERIOD.
WARNING:
This is designed as a debugging feature, not a security feature.
Objects are sometimes recycled without RCU delay under memory pressure.
If unsure, say N.
config PAGE_OWNER
	bool "Track page owner"
	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT


@@ -208,15 +208,12 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
	return (void *)object;
}

-static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
-				      unsigned long ip, bool init)
+/* Returns true when freeing the object is not safe. */
+static bool check_slab_allocation(struct kmem_cache *cache, void *object,
+				  unsigned long ip)
{
-	void *tagged_object;
-
-	if (!kasan_arch_is_ready())
-		return false;
+	void *tagged_object = object;

-	tagged_object = object;
	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
@@ -224,37 +221,47 @@
		return true;
	}

-	/* RCU slabs could be legally used after free within the RCU period. */
-	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
-		return false;
-
	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

+	return false;
+}
+
+static inline void poison_slab_object(struct kmem_cache *cache, void *object,
+				      bool init, bool still_accessible)
+{
+	void *tagged_object = object;
+
+	object = kasan_reset_tag(object);
+
+	/* RCU slabs could be legally used after free within the RCU period. */
+	if (unlikely(still_accessible))
+		return;
+
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);
-
-	return false;
}

-bool __kasan_slab_free(struct kmem_cache *cache, void *object,
-				unsigned long ip, bool init)
+bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
+				unsigned long ip)
{
-	if (is_kfence_address(object))
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;
+	return check_slab_allocation(cache, object, ip);
+}
+
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
+		       bool still_accessible)
+{
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
+		return false;

-	/*
-	 * If the object is buggy, do not let slab put the object onto the
-	 * freelist. The object will thus never be allocated again and its
-	 * metadata will never get released.
-	 */
-	if (poison_slab_object(cache, object, ip, init))
-		return true;
+	poison_slab_object(cache, object, init, still_accessible);

	/*
	 * If the object is put into quarantine, do not let slab put the object
@@ -504,11 +511,16 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
		return true;
	}

-	if (is_kfence_address(ptr))
-		return false;
+	if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+		return true;

	slab = folio_slab(folio);
-	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+
+	if (check_slab_allocation(slab->slab_cache, ptr, ip))
+		return false;
+
+	poison_slab_object(slab->slab_cache, ptr, false, false);
+	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)


@@ -996,6 +996,51 @@ static void kmem_cache_invalid_free(struct kunit *test)
	kmem_cache_destroy(cache);
}
static void kmem_cache_rcu_uaf(struct kunit *test)
{
char *p;
size_t size = 200;
struct kmem_cache *cache;
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
p = kmem_cache_alloc(cache, GFP_KERNEL);
if (!p) {
kunit_err(test, "Allocation failed: %s\n", __func__);
kmem_cache_destroy(cache);
return;
}
*p = 1;
rcu_read_lock();
/* Free the object - this will internally schedule an RCU callback. */
kmem_cache_free(cache, p);
/*
* We should still be allowed to access the object at this point because
* the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
* critical section since before the kmem_cache_free().
*/
READ_ONCE(*p);
rcu_read_unlock();
/*
* Wait for the RCU callback to execute; after this, the object should
* have actually been freed from KASAN's perspective.
*/
rcu_barrier();
KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
kmem_cache_destroy(cache);
}
static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)

@@ -1937,6 +1982,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kmem_cache_rcu_uaf),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),


@@ -261,8 +261,6 @@ struct kmem_cache {
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
-	/* Specific free pointer requested (if not UINT_MAX) */
-	unsigned int rcu_freeptr_offset;
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;

@@ -424,7 +422,9 @@ kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
-int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+			 unsigned int size, struct kmem_cache_args *args,
+			 slab_flags_t flags);

void __init kmem_cache_init(void);

extern void create_boot_cache(struct kmem_cache *, const char *name,

@@ -445,6 +445,13 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
	return (s->flags & SLAB_KMALLOC);
}
static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
if (!is_kmalloc_cache(s))
return false;
return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \


@@ -40,11 +40,6 @@ LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

-static LIST_HEAD(slab_caches_to_rcu_destroy);
-static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
-static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
-		    slab_caches_to_rcu_destroy_workfn);
-
/*
 * Set of flags that will prevent slab merging
 */

@@ -88,6 +83,19 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static bool kmem_cache_is_duplicate_name(const char *name)
{
struct kmem_cache *s;
list_for_each_entry(s, &slab_caches, list) {
if (!strcmp(s->name, name))
return true;
}
return false;
}
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {

@@ -95,6 +103,10 @@ static int kmem_cache_sanity_check(const char *name, unsigned int size)
		return -EINVAL;
	}
/* Duplicate names will confuse slabtop, et al */
WARN(kmem_cache_is_duplicate_name(name),
"kmem_cache of name '%s' already exists\n", name);
	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}

@@ -169,14 +181,15 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
	if (ctor)
		return NULL;

-	size = ALIGN(size, sizeof(void *));
-	align = calculate_alignment(flags, align, size);
-	size = ALIGN(size, align);
	flags = kmem_cache_flags(flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

+	size = ALIGN(size, sizeof(void *));
+	align = calculate_alignment(flags, align, size);
+	size = ALIGN(size, align);
+
	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

@@ -202,39 +215,29 @@
	}
static struct kmem_cache *create_cache(const char *name,
-				       unsigned int object_size, unsigned int freeptr_offset,
-				       unsigned int align, slab_flags_t flags,
-				       unsigned int useroffset, unsigned int usersize,
-				       void (*ctor)(void *))
+				       unsigned int object_size,
+				       struct kmem_cache_args *args,
+				       slab_flags_t flags)
{
	struct kmem_cache *s;
	int err;

-	if (WARN_ON(useroffset + usersize > object_size))
-		useroffset = usersize = 0;
+	if (WARN_ON(args->useroffset + args->usersize > object_size))
+		args->useroffset = args->usersize = 0;

	/* If a custom freelist pointer is requested make sure it's sane. */
	err = -EINVAL;
-	if (freeptr_offset != UINT_MAX &&
-	    (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
-	     !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
+	if (args->use_freeptr_offset &&
+	    (args->freeptr_offset >= object_size ||
+	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
+	     !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
		goto out;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

-	s->name = name;
-	s->size = s->object_size = object_size;
-	s->rcu_freeptr_offset = freeptr_offset;
-	s->align = align;
-	s->ctor = ctor;
-#ifdef CONFIG_HARDENED_USERCOPY
-	s->useroffset = useroffset;
-	s->usersize = usersize;
-#endif
-	err = __kmem_cache_create(s, flags);
+	err = do_kmem_cache_create(s, name, object_size, args, flags);
	if (err)
		goto out_free_cache;

@@ -248,12 +251,25 @@
	return ERR_PTR(err);
}
-static struct kmem_cache *
-do_kmem_cache_create_usercopy(const char *name,
-		unsigned int size, unsigned int freeptr_offset,
-		unsigned int align, slab_flags_t flags,
-		unsigned int useroffset, unsigned int usersize,
-		void (*ctor)(void *))
+/**
+ * __kmem_cache_create_args - Create a kmem cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @object_size: The size of objects to be created in this cache.
+ * @args: Additional arguments for the cache creation (see
+ *        &struct kmem_cache_args).
* @flags: See %SLAB_* flags for an explanation of individual @flags.
*
* Not to be called directly, use the kmem_cache_create() wrapper with the same
* parameters.
*
* Context: Cannot be called within a interrupt, but can be interrupted.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
struct kmem_cache *__kmem_cache_create_args(const char *name,
unsigned int object_size,
struct kmem_cache_args *args,
slab_flags_t flags)
{
	struct kmem_cache *s = NULL;
	const char *cache_name;

@@ -275,7 +291,7 @@
	mutex_lock(&slab_mutex);

-	err = kmem_cache_sanity_check(name, size);
+	err = kmem_cache_sanity_check(name, object_size);
	if (err) {
		goto out_unlock;
	}

@@ -296,12 +312,14 @@
	/* Fail closed on bad usersize of useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
-	    WARN_ON(!usersize && useroffset) ||
-	    WARN_ON(size < usersize || size - usersize < useroffset))
-		usersize = useroffset = 0;
-
-	if (!usersize)
-		s = __kmem_cache_alias(name, size, align, flags, ctor);
+	    WARN_ON(!args->usersize && args->useroffset) ||
+	    WARN_ON(object_size < args->usersize ||
+		    object_size - args->usersize < args->useroffset))
+		args->usersize = args->useroffset = 0;
+
+	if (!args->usersize)
+		s = __kmem_cache_alias(name, object_size, args->align, flags,
+				       args->ctor);
	if (s)
		goto out_unlock;

@@ -311,9 +329,8 @@
		goto out_unlock;
	}

-	s = create_cache(cache_name, size, freeptr_offset,
-			 calculate_alignment(flags, align, size),
-			 flags, useroffset, usersize, ctor);
+	args->align = calculate_alignment(flags, args->align, object_size);
+	s = create_cache(cache_name, object_size, args, flags);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);

@@ -335,118 +352,7 @@
	}
	return s;
}
EXPORT_SYMBOL(__kmem_cache_create_args);
/**
* kmem_cache_create_usercopy - Create a cache with a region suitable
* for copying to userspace
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @useroffset: Usercopy region offset
* @usersize: Usercopy region size
* @ctor: A constructor for the objects.
*
* Cannot be called within a interrupt, but can be interrupted.
* The @ctor is run when new pages are allocated by the cache.
*
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
* to catch references to uninitialised memory.
*
* %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
* for buffer overruns.
*
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *))
{
return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
useroffset, usersize, ctor);
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
/**
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @ctor: A constructor for the objects.
*
* Cannot be called within a interrupt, but can be interrupted.
* The @ctor is run when new pages are allocated by the cache.
*
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
* to catch references to uninitialised memory.
*
* %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
* for buffer overruns.
*
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
0, 0, ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
/**
* kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @freeptr_offset: The offset into the memory to the free pointer
* @flags: SLAB flags
*
* Cannot be called within an interrupt, but can be interrupted.
*
* See kmem_cache_create() for an explanation of possible @flags.
*
* By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
* of the object. This might cause the object to grow in size. Callers
* that have a reason to avoid this can specify a custom free pointer
* offset in their struct where the free pointer will be placed.
*
* Note that placing the free pointer inside the object requires the
* caller to ensure that no fields are invalidated that are required to
* guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
* details.).
*
* Using zero as a value for @freeptr_offset is valid. To request no
* offset UINT_MAX must be specified.
*
* Note that @ctor isn't supported with custom free pointers as a @ctor
* requires an external free pointer.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
unsigned int freeptr_offset,
slab_flags_t flags)
{
return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
NULL);
}
EXPORT_SYMBOL(kmem_cache_create_rcu);
static struct kmem_cache *kmem_buckets_cache __ro_after_init;

@@ -534,87 +440,25 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
fail:
	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
		kmem_cache_destroy((*b)[idx]);
-	kfree(b);
+	kmem_cache_free(kmem_buckets_cache, b);

	return NULL;
}
EXPORT_SYMBOL(kmem_buckets_create);
-#ifdef SLAB_SUPPORTS_SYSFS
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
- *
- * Note that there will be a slight delay in the deletion of sysfs files
- * if kmem_cache_release() is called indrectly from a work function.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
-	if (slab_state >= FULL) {
-		sysfs_slab_unlink(s);
+	kfence_shutdown_cache(s);
+	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
		sysfs_slab_release(s);
-	} else {
-		slab_kmem_cache_release(s);
-	}
-}
-#else
-static void kmem_cache_release(struct kmem_cache *s)
-{
-	slab_kmem_cache_release(s);
+	else
+		slab_kmem_cache_release(s);
}
-#endif
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
LIST_HEAD(to_destroy);
struct kmem_cache *s, *s2;
/*
* On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
* @slab_caches_to_rcu_destroy list. The slab pages are freed
* through RCU and the associated kmem_cache are dereferenced
* while freeing the pages, so the kmem_caches should be freed only
* after the pending RCU operations are finished. As rcu_barrier()
* is a pretty slow operation, we batch all pending destructions
* asynchronously.
*/
mutex_lock(&slab_mutex);
list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
mutex_unlock(&slab_mutex);
if (list_empty(&to_destroy))
return;
rcu_barrier();
list_for_each_entry_safe(s, s2, &to_destroy, list) {
debugfs_slab_release(s);
kfence_shutdown_cache(s);
kmem_cache_release(s);
}
}
static int shutdown_cache(struct kmem_cache *s)
{
/* free asan quarantined objects */
kasan_cache_shutdown(s);
if (__kmem_cache_shutdown(s) != 0)
return -EBUSY;
list_del(&s->list);
if (s->flags & SLAB_TYPESAFE_BY_RCU) {
list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
kfence_shutdown_cache(s);
debugfs_slab_release(s);
}
return 0;
} }
void slab_kmem_cache_release(struct kmem_cache *s)

@@ -626,29 +470,63 @@ void slab_kmem_cache_release(struct kmem_cache *s)

void kmem_cache_destroy(struct kmem_cache *s)
{
-	int err = -EBUSY;
-	bool rcu_set;
+	int err;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;
/* in-flight kfree_rcu()'s may include objects from our cache */
kvfree_rcu_barrier();
if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
(s->flags & SLAB_TYPESAFE_BY_RCU)) {
/*
* Under CONFIG_SLUB_RCU_DEBUG, when objects in a
* SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
* defer their freeing with call_rcu().
* Wait for such call_rcu() invocations here before actually
* destroying the cache.
*
* It doesn't matter that we haven't looked at the slab refcount
* yet - slabs with SLAB_TYPESAFE_BY_RCU can't be merged, so
* the refcount should be 1 here.
*/
rcu_barrier();
}
	cpus_read_lock();
	mutex_lock(&slab_mutex);

-	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
	s->refcount--;
-	if (s->refcount)
-		goto out_unlock;
+	if (s->refcount) {
+		mutex_unlock(&slab_mutex);
+		cpus_read_unlock();
+		return;
+	}

-	err = shutdown_cache(s);
+	/* free asan quarantined objects */
+	kasan_cache_shutdown(s);
+
+	err = __kmem_cache_shutdown(s);
	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
-out_unlock:
+
+	list_del(&s->list);
+
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
-	if (!err && !rcu_set)
-		kmem_cache_release(s);
+
+	if (slab_state >= FULL)
+		sysfs_slab_unlink(s);
+	debugfs_slab_release(s);
+
+	if (err)
+		return;
+
+	if (s->flags & SLAB_TYPESAFE_BY_RCU)
+		rcu_barrier();
+
+	kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -760,9 +638,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;
+	struct kmem_cache_args kmem_args = {};

-	s->name = name;
-	s->size = s->object_size = size;

	/*
	 * kmalloc caches guarantee alignment of at least the largest

@@ -771,14 +647,14 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
	 */
	if (flags & SLAB_KMALLOC)
		align = max(align, 1U << (ffs(size) - 1));
-	s->align = calculate_alignment(flags, align, size);
+	kmem_args.align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
-	s->useroffset = useroffset;
-	s->usersize = usersize;
+	kmem_args.useroffset = useroffset;
+	kmem_args.usersize = usersize;
#endif

-	err = __kmem_cache_create(s, flags);
+	err = do_kmem_cache_create(s, name, size, &kmem_args, flags);
	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",

mm/slub.c

@@ -750,6 +750,50 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
	return false;
}
/*
* kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
* family will round up the real request size to these fixed ones, so
* there could be an extra area than what is requested. Save the original
* request size in the meta data area, for better debug and sanity check.
*/
static inline void set_orig_size(struct kmem_cache *s,
void *object, unsigned int orig_size)
{
void *p = kasan_reset_tag(object);
unsigned int kasan_meta_size;
if (!slub_debug_orig_size(s))
return;
/*
* KASAN can save its free meta data inside of the object at offset 0.
* If this meta data size is larger than 'orig_size', it will overlap
* the data redzone in [orig_size+1, object_size]. Thus, we adjust
* 'orig_size' to be as at least as big as KASAN's meta data.
*/
kasan_meta_size = kasan_metadata_size(s, true);
if (kasan_meta_size > orig_size)
orig_size = kasan_meta_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
*(unsigned int *)p = orig_size;
}
static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
void *p = kasan_reset_tag(object);
if (!slub_debug_orig_size(s))
return s->object_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
return *(unsigned int *)p;
}
#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

@@ -979,50 +1023,6 @@ static void print_slab_info(const struct slab *slab)
	       &slab->__page_flags);
}
/*
* kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
* family will round up the real request size to these fixed ones, so
* there could be an extra area than what is requested. Save the original
* request size in the meta data area, for better debug and sanity check.
*/
static inline void set_orig_size(struct kmem_cache *s,
void *object, unsigned int orig_size)
{
void *p = kasan_reset_tag(object);
unsigned int kasan_meta_size;
if (!slub_debug_orig_size(s))
return;
/*
* KASAN can save its free meta data inside of the object at offset 0.
* If this meta data size is larger than 'orig_size', it will overlap
* the data redzone in [orig_size+1, object_size]. Thus, we adjust
* 'orig_size' to be as at least as big as KASAN's meta data.
*/
kasan_meta_size = kasan_metadata_size(s, true);
if (kasan_meta_size > orig_size)
orig_size = kasan_meta_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
*(unsigned int *)p = orig_size;
}
static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
void *p = kasan_reset_tag(object);
if (!slub_debug_orig_size(s))
return s->object_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
return *(unsigned int *)p;
}
void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);

@@ -1888,7 +1888,6 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
					int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
					int objects) {}
-
#ifndef CONFIG_SLUB_TINY
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			void **freelist, void *nextfree)

@@ -2183,6 +2182,45 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
		__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
}
+
+static __fastpath_inline
+bool memcg_slab_post_charge(void *p, gfp_t flags)
+{
+	struct slabobj_ext *slab_exts;
+	struct kmem_cache *s;
+	struct folio *folio;
+	struct slab *slab;
+	unsigned long off;
+
+	folio = virt_to_folio(p);
+	if (!folio_test_slab(folio)) {
+		return folio_memcg_kmem(folio) ||
+			(__memcg_kmem_charge_page(folio_page(folio, 0), flags,
+						  folio_order(folio)) == 0);
+	}
+
+	slab = folio_slab(folio);
+	s = slab->slab_cache;
+
+	/*
+	 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
+	 * of slab_obj_exts being allocated from the same slab and thus the
+	 * slab becoming effectively unfreeable.
+	 */
+	if (is_kmalloc_normal(s))
+		return true;
+
+	/* Ignore already charged objects. */
+	slab_exts = slab_obj_exts(slab);
+	if (slab_exts) {
+		off = obj_to_index(s, slab, p);
+		if (unlikely(slab_exts[off].objcg))
+			return true;
+	}
+
+	return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
+}
 #else /* CONFIG_MEMCG */
 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct list_lru *lru,
@@ -2196,18 +2234,37 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					   void **p, int objects)
 {
 }
+
+static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
+{
+	return true;
+}
 #endif /* CONFIG_MEMCG */
+
+#ifdef CONFIG_SLUB_RCU_DEBUG
+static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
+
+struct rcu_delayed_free {
+	struct rcu_head head;
+	void *object;
+};
+#endif
+
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  *
  * Returns true if freeing of the object can proceed, false if its reuse
- * was delayed by KASAN quarantine, or it was returned to KFENCE.
+ * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
+ * to KFENCE.
  */
 static __always_inline
-bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
+bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
+		    bool after_rcu_delay)
 {
+	/* Are the object contents still accessible? */
+	bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
+
 	kmemleak_free_recursive(x, s->flags);
 	kmsan_slab_free(s, x);
@@ -2217,13 +2274,42 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 		debug_check_no_obj_freed(x, s->object_size);
 
 	/* Use KCSAN to help debug racy use-after-free. */
-	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
+	if (!still_accessible)
 		__kcsan_check_access(x, s->object_size,
 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 
 	if (kfence_free(x))
 		return false;
 
+	/*
+	 * Give KASAN a chance to notice an invalid free operation before we
+	 * modify the object.
+	 */
+	if (kasan_slab_pre_free(s, x))
+		return false;
+
+#ifdef CONFIG_SLUB_RCU_DEBUG
+	if (still_accessible) {
+		struct rcu_delayed_free *delayed_free;
+
+		delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
+		if (delayed_free) {
+			/*
+			 * Let KASAN track our call stack as a "related work
+			 * creation", just like if the object had been freed
+			 * normally via kfree_rcu().
			 * We have to do this manually because the rcu_head is
+			 * not located inside the object.
+			 */
+			kasan_record_aux_stack_noalloc(x);
+
+			delayed_free->object = x;
+			call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
+			return false;
+		}
+	}
+#endif /* CONFIG_SLUB_RCU_DEBUG */
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_free and initialization memset's must be
@@ -2237,17 +2323,24 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 	 */
 	if (unlikely(init)) {
 		int rsize;
-		unsigned int inuse;
+		unsigned int inuse, orig_size;
 
 		inuse = get_info_end(s);
+		orig_size = get_orig_size(s, x);
 		if (!kasan_has_integrated_init())
-			memset(kasan_reset_tag(x), 0, s->object_size);
+			memset(kasan_reset_tag(x), 0, orig_size);
 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
 		memset((char *)kasan_reset_tag(x) + inuse, 0,
 		       s->size - inuse - rsize);
+		/*
+		 * Restore orig_size, otherwise the kmalloc redzone would be
+		 * reported as overwritten.
+		 */
+		set_orig_size(s, x, orig_size);
 	}
 	/* KASAN might put x into memory quarantine, delaying its reuse. */
-	return !kasan_slab_free(s, x, init);
+	return !kasan_slab_free(s, x, init, still_accessible);
 }
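The interaction between init-on-free and the kmalloc redzone handled above is easiest to read as three steps; the block below is an editorial sketch restating what the code does, with illustrative range comments, not additional kernel code:

    /*
     * Editorial sketch of the init-on-free steps for a kmalloc object with
     * slub_debug_orig_size() enabled:
     *
     *   memset(object, 0, orig_size);         // wipe only the bytes the
     *                                         // caller asked for; the kmalloc
     *                                         // redzone in
     *                                         // [orig_size, object_size)
     *                                         // must stay intact
     *   memset(object + inuse, 0, ...);       // wipe freepointer/debug area,
     *                                         // which also clears the stored
     *                                         // orig_size value
     *   set_orig_size(s, object, orig_size);  // write orig_size back so later
     *                                         // redzone checks use the right
     *                                         // boundary
     */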
 static __fastpath_inline
@@ -2261,7 +2354,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
 	bool init;
 
 	if (is_kfence_address(next)) {
-		slab_free_hook(s, next, false);
+		slab_free_hook(s, next, false, false);
 		return false;
 	}
@@ -2276,7 +2369,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
 		next = get_freepointer(s, object);
 
 		/* If object's reuse doesn't have to be delayed */
-		if (likely(slab_free_hook(s, object, init))) {
+		if (likely(slab_free_hook(s, object, init, false))) {
 			/* Move object to the new freelist */
 			set_freepointer(s, object, *head);
 			*head = object;
@@ -2316,7 +2409,11 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 	struct slab *slab;
 	unsigned int order = oo_order(oo);
 
-	folio = (struct folio *)alloc_pages_node(node, flags, order);
+	if (node == NUMA_NO_NODE)
+		folio = (struct folio *)alloc_pages(flags, order);
+	else
+		folio = (struct folio *)__alloc_pages_node(node, flags, order);
+
 	if (!folio)
 		return NULL;
@@ -3414,14 +3511,15 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
+	int cpu = raw_smp_processor_id();
 	int node;
 	struct kmem_cache_node *n;
 
 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
 		return;
 
-	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
-		nid, gfpflags, &gfpflags);
+	pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
+		cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
 	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
 		s->name, s->object_size, s->size, oo_order(s->oo),
 		oo_order(s->min));
@@ -3920,8 +4018,7 @@ static void *__slab_alloc_node(struct kmem_cache *s,
 * If the object has been wiped upon free, make sure it's fully initialized by
 * zeroing out freelist pointer.
 *
- * Note that we also wipe custom freelist pointers specified via
- * s->rcu_freeptr_offset.
+ * Note that we also wipe custom freelist pointers.
 */
 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
						   void *obj)
@@ -4063,6 +4160,15 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
 
+bool kmem_cache_charge(void *objp, gfp_t gfpflags)
+{
+	if (!memcg_kmem_online())
+		return true;
+
+	return memcg_slab_post_charge(objp, gfpflags);
+}
+EXPORT_SYMBOL(kmem_cache_charge);
+
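kmem_cache_charge() is the public entry point for the delayed charging above: allocate first, account to the owning memcg once it is known. A minimal usage sketch follows; `foo_cache`, `struct foo` and the error handling are illustrative assumptions, not taken from this series (the series itself only converts the TCP accept path, shown at the end of this diff):

    /* Allocation path, e.g. softirq: the final owner's memcg is unknown. */
    struct foo *f = kmem_cache_alloc(foo_cache, GFP_ATOMIC);

    /* Later, in process context, once the object is handed to a user task. */
    if (f && !kmem_cache_charge(f, GFP_KERNEL)) {
    	/* Charging failed; the caller decides whether to keep the object. */
    	kmem_cache_free(foo_cache, f);
    	f = NULL;
    }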
 /**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @s: The cache to allocate from.
@@ -4471,7 +4577,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 	memcg_slab_free_hook(s, slab, &object, 1);
 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
 
-	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
+	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
 		do_slab_free(s, slab, object, object, 1, addr);
 }
@@ -4480,7 +4586,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 static noinline
 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
 {
-	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
+	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
 		do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
 }
 #endif
@@ -4499,6 +4605,33 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
 	do_slab_free(s, slab, head, tail, cnt, addr);
 }
 
+#ifdef CONFIG_SLUB_RCU_DEBUG
+static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
+{
+	struct rcu_delayed_free *delayed_free =
+			container_of(rcu_head, struct rcu_delayed_free, head);
+	void *object = delayed_free->object;
+	struct slab *slab = virt_to_slab(object);
+	struct kmem_cache *s;
+
+	kfree(delayed_free);
+
+	if (WARN_ON(is_kfence_address(object)))
+		return;
+
+	/* find the object and the cache again */
+	if (WARN_ON(!slab))
+		return;
+	s = slab->slab_cache;
+	if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
+		return;
+
+	/* resume freeing */
+	if (slab_free_hook(s, object, slab_want_init_on_free(s), true))
+		do_slab_free(s, slab, object, object, 1, _THIS_IP_);
+}
+#endif /* CONFIG_SLUB_RCU_DEBUG */
+
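What the detour through call_rcu() buys in practice: with CONFIG_SLUB_RCU_DEBUG, the real freeing (and KASAN poisoning) of a SLAB_TYPESAFE_BY_RCU object is deferred by one RCU grace period, so accesses that happen after that grace period can finally be reported. A hedged illustration with a hypothetical cache; all names and the call sequence are made up for the example:

    /* Hypothetical SLAB_TYPESAFE_BY_RCU cache; all names are illustrative. */
    struct conn {
    	int id;
    };
    static struct kmem_cache *conn_cache;	/* created with SLAB_TYPESAFE_BY_RCU */

    static void example(struct conn *c)
    {
    	kmem_cache_free(conn_cache, c);

    	/*
    	 * An access before a grace period has elapsed is not reported: for
    	 * typesafe-by-RCU caches a concurrent RCU reader could still be
    	 * looking at the (possibly recycled, but not freed) object.
    	 */
    	(void)READ_ONCE(c->id);

    	synchronize_rcu();

    	/*
    	 * After a grace period the delayed slab_free_after_rcu_debug()
    	 * callback frees and KASAN-poisons the object, so an access like
    	 * this one can now be reported as a use-after-free.
    	 */
    	(void)READ_ONCE(c->id);
    }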
 #ifdef CONFIG_KASAN_GENERIC
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 {
@@ -5145,17 +5278,11 @@ static void set_cpu_partial(struct kmem_cache *s)
 #endif
 }
 
-/* Was a valid freeptr offset requested? */
-static inline bool has_freeptr_offset(const struct kmem_cache *s)
-{
-	return s->rcu_freeptr_offset != UINT_MAX;
-}
-
 /*
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object.
 */
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
@@ -5196,7 +5323,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-	if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+	if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
 	    (flags & SLAB_POISON) || s->ctor ||
 	    ((flags & SLAB_RED_ZONE) &&
 	     (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
@@ -5218,8 +5345,8 @@ static int calculate_sizes(struct kmem_cache *s)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
-	} else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
-		s->offset = s->rcu_freeptr_offset;
+	} else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
+		s->offset = args->freeptr_offset;
 	} else {
 		/*
 		 * Store freelist pointer near middle of object to keep
@@ -5294,65 +5421,6 @@ static int calculate_sizes(struct kmem_cache *s)
 	return !!oo_objects(s->oo);
 }
-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
-	s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
-	s->random = get_random_long();
-#endif
-
-	if (!calculate_sizes(s))
-		goto error;
-	if (disable_higher_order_debug) {
-		/*
-		 * Disable debugging flags that store metadata if the min slab
-		 * order increased.
-		 */
-		if (get_order(s->size) > get_order(s->object_size)) {
-			s->flags &= ~DEBUG_METADATA_FLAGS;
-			s->offset = 0;
-			if (!calculate_sizes(s))
-				goto error;
-		}
-	}
-
-#ifdef system_has_freelist_aba
-	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
-		/* Enable fast mode */
-		s->flags |= __CMPXCHG_DOUBLE;
-	}
-#endif
-
-	/*
-	 * The larger the object size is, the more slabs we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
-	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
-	set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
-	s->remote_node_defrag_ratio = 1000;
-#endif
-
-	/* Initialize the pre-computed randomized freelist if slab is up */
-	if (slab_state >= UP) {
-		if (init_cache_random_seq(s))
-			goto error;
-	}
-
-	if (!init_kmem_cache_nodes(s))
-		goto error;
-
-	if (alloc_kmem_cache_cpus(s))
-		return 0;
-
-error:
-	__kmem_cache_release(s);
-	return -EINVAL;
-}
-
 static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
			      const char *text)
 {
@@ -5906,28 +5974,90 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 	return s;
 }
 
-int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+			 unsigned int size, struct kmem_cache_args *args,
+			 slab_flags_t flags)
 {
-	int err;
+	int err = -EINVAL;
 
-	err = kmem_cache_open(s, flags);
-	if (err)
-		return err;
+	s->name = name;
+	s->size = s->object_size = size;
+
+	s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+	s->random = get_random_long();
+#endif
+	s->align = args->align;
+	s->ctor = args->ctor;
+#ifdef CONFIG_HARDENED_USERCOPY
+	s->useroffset = args->useroffset;
+	s->usersize = args->usersize;
+#endif
+
+	if (!calculate_sizes(args, s))
+		goto out;
+	if (disable_higher_order_debug) {
+		/*
+		 * Disable debugging flags that store metadata if the min slab
+		 * order increased.
+		 */
+		if (get_order(s->size) > get_order(s->object_size)) {
+			s->flags &= ~DEBUG_METADATA_FLAGS;
+			s->offset = 0;
+			if (!calculate_sizes(args, s))
+				goto out;
+		}
+	}
+
+#ifdef system_has_freelist_aba
+	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+		/* Enable fast mode */
+		s->flags |= __CMPXCHG_DOUBLE;
+	}
+#endif
+
+	/*
+	 * The larger the object size is, the more slabs we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+	set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+	s->remote_node_defrag_ratio = 1000;
+#endif
+
+	/* Initialize the pre-computed randomized freelist if slab is up */
+	if (slab_state >= UP) {
+		if (init_cache_random_seq(s))
+			goto out;
+	}
+
+	if (!init_kmem_cache_nodes(s))
+		goto out;
+
+	if (!alloc_kmem_cache_cpus(s))
+		goto out;
 
 	/* Mutex is not taken during early boot */
-	if (slab_state <= UP)
-		return 0;
+	if (slab_state <= UP) {
+		err = 0;
+		goto out;
+	}
 
 	err = sysfs_slab_add(s);
-	if (err) {
-		__kmem_cache_release(s);
-		return err;
-	}
+	if (err)
+		goto out;
 
 	if (s->flags & SLAB_STORE_USER)
 		debugfs_slab_add(s);
 
-	return 0;
+out:
+	if (err)
+		__kmem_cache_release(s);
+
+	return err;
 }
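do_kmem_cache_create() now receives every optional parameter through struct kmem_cache_args. As a sketch of the caller side, here is how a hypothetical module might create a SLAB_TYPESAFE_BY_RCU cache with a custom free-pointer offset, exercising the args->use_freeptr_offset / args->freeptr_offset path handled in calculate_sizes() above; the struct layout, field names and `foo_cache` are illustrative, not taken from this series:

    struct foo {
    	void *freelist_slot;	/* illustrative: space reusable for the free pointer */
    	int payload;
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_init(void)
    {
    	struct kmem_cache_args args = {
    		.align			= __alignof__(struct foo),
    		.use_freeptr_offset	= true,
    		.freeptr_offset		= offsetof(struct foo, freelist_slot),
    	};

    	foo_cache = kmem_cache_create("foo", sizeof(struct foo), &args,
    				      SLAB_TYPESAFE_BY_RCU);
    	return foo_cache ? 0 : -ENOMEM;
    }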
 #ifdef SLAB_SUPPORTS_SYSFS


@@ -714,6 +714,7 @@ struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
 out:
 	release_sock(sk);
 	if (newsk && mem_cgroup_sockets_enabled) {
+		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
 		int amt = 0;
 
 		/* atomically get the memory usage, set and charge the
@@ -731,8 +732,8 @@ struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
 		}
 		if (amt)
-			mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
-						GFP_KERNEL | __GFP_NOFAIL);
+			mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
+		kmem_cache_charge(newsk, gfp);
 
 		release_sock(newsk);
 	}