#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/io_uring_types.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

/*
 * Try to return an entry to the cache.  Returns true if it was cached;
 * false if there was no room (or KASAN rejected the object), in which
 * case the caller still owns the entry and must free it itself.
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}

static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		kasan_mempool_unpoison_object(entry, cache->elem_size);
		return entry;
	}

	return NULL;
}

/*
 * Take an entry from the cache, falling back to kmalloc() when the cache
 * is empty.  init_once, if given, only runs on freshly allocated objects,
 * not on entries coming out of the cache.
 */
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp,
				   void (*init_once)(void *obj))
{
	if (unlikely(!cache->nr_cached)) {
		void *obj = kmalloc(cache->elem_size, gfp);

		if (obj && init_once)
			init_once(obj);
		return obj;
	}
	return io_alloc_cache_get(cache);
}

/* returns false if the cache was initialized properly */
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
	if (cache->entries) {
		cache->nr_cached = 0;
		cache->max_cached = max_nr;
		cache->elem_size = size;
		return false;
	}
	return true;
}

/* Free every cached entry via the given callback and release the entry array. */
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(const void *))
{
	void *entry;

	if (!cache->entries)
		return;

	while ((entry = io_alloc_cache_get(cache)) != NULL)
		free(entry);

	kvfree(cache->entries);
	cache->entries = NULL;
}
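
/*
 * Usage sketch (illustrative only, not taken from the io_uring sources):
 * a caller keeping a cache of fixed-size objects would pair the helpers
 * roughly as below.  "struct foo" and the error handling are made up for
 * the example; real users embed the cache in their own context and pass a
 * free routine matching the element type, such as kfree().
 *
 *	struct io_alloc_cache cache;
 *	struct foo *f;
 *
 *	if (io_alloc_cache_init(&cache, IO_ALLOC_CACHE_MAX, sizeof(*f)))
 *		return -ENOMEM;
 *
 *	f = io_cache_alloc(&cache, GFP_KERNEL, NULL);
 *	if (!f)
 *		return -ENOMEM;
 *
 *	(use f, then try to recycle it; if the cache is full, free it)
 *	if (!io_alloc_cache_put(&cache, f))
 *		kfree(f);
 *
 *	io_alloc_cache_free(&cache, kfree);
 */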
#endif