Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git, synced 2024-12-28 08:42:10 +00:00
commit b1031968b1

The allocation paths that use alloc_cache duplicate the same code pattern,
sometimes in a quite convoluted way. Fold the allocation into the cache code
itself, making it just an allocator function, and keeping the cache policy
invisible to callers.

Another justification for doing this, beyond code simplicity, is that it makes
it trivial to test the impact of disabling the cache and using slab directly,
which I've used for slab improvement experiments.

One relevant detail is that we provide a callback to optionally initialize
memory only when we actually reach slab. This allows us to avoid blindly
executing the allocation with GFP_ZERO and only clean fields when they matter.

Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/20241216204615.759089-2-krisman@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
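To make the change concrete, here is a minimal caller-side sketch (not taken
from the patch; struct io_foo_data, io_foo_init_once() and io_foo_alloc() are
hypothetical names) of how an allocation path can call io_cache_alloc() from
the header below with an init_once callback, instead of open-coding a cache
lookup with a zeroing slab fallback:

/* Hypothetical illustration only; the real callers in the series differ. */
struct io_foo_data {
	struct list_head	list;	/* only needs init on a fresh slab object */
	int			result;
};

static void io_foo_init_once(void *obj)
{
	struct io_foo_data *fd = obj;

	/* Runs only when io_cache_alloc() falls back to slab, not on cache hits */
	INIT_LIST_HEAD(&fd->list);
}

static struct io_foo_data *io_foo_alloc(struct io_alloc_cache *cache)
{
	/* Cache hit returns a recycled entry; a miss does kmalloc() + init_once() */
	return io_cache_alloc(cache, GFP_KERNEL, io_foo_init_once);
}

Because init_once() only runs on the slab path, the allocation no longer needs
GFP_ZERO just to get known-good fields.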
75 lines · 1.6 KiB · C
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}

static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		kasan_mempool_unpoison_object(entry, cache->elem_size);
		return entry;
	}

	return NULL;
}

static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp,
				   void (*init_once)(void *obj))
{
	if (unlikely(!cache->nr_cached)) {
		void *obj = kmalloc(cache->elem_size, gfp);

		if (obj && init_once)
			init_once(obj);
		return obj;
	}
	return io_alloc_cache_get(cache);
}

/* returns false if the cache was initialized properly */
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
	if (cache->entries) {
		cache->nr_cached = 0;
		cache->max_cached = max_nr;
		cache->elem_size = size;
		return false;
	}
	return true;
}

static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(const void *))
{
	void *entry;

	if (!cache->entries)
		return;

	while ((entry = io_alloc_cache_get(cache)) != NULL)
		free(entry);

	kvfree(cache->entries);
	cache->entries = NULL;
}
#endif
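For completeness, a minimal lifecycle sketch under the same assumptions as the
earlier example: io_foo_cache_create(), io_foo_put() and io_foo_cache_destroy()
are hypothetical wrappers around the helpers above, and struct io_foo_data is
the hypothetical type from that example.

static int io_foo_cache_create(struct io_alloc_cache *cache)
{
	/* io_alloc_cache_init() returns true if the entries array allocation fails */
	if (io_alloc_cache_init(cache, IO_ALLOC_CACHE_MAX,
				sizeof(struct io_foo_data)))
		return -ENOMEM;
	return 0;
}

static void io_foo_put(struct io_alloc_cache *cache, struct io_foo_data *fd)
{
	/* Stash the object for reuse; free it if the cache refused it */
	if (!io_alloc_cache_put(cache, fd))
		kfree(fd);
}

static void io_foo_cache_destroy(struct io_alloc_cache *cache)
{
	/* Drain any remaining entries through kfree() and release the entries array */
	io_alloc_cache_free(cache, kfree);
}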