block: manage bio slab cache by xarray
Manage the bio slab cache via an xarray, using the slab cache size as the xarray index and storing the 'struct bio_slab' instance in the xarray. This simplifies the code considerably and makes it more readable than before.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1a23e06cda
commit 49d1ec8573

 block/bio.c | 126
 1 file changed
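For readers unfamiliar with the pattern the commit message describes, the sketch below (not part of the patch) shows a refcounted lookup-by-size cache built on the real xarray API (DEFINE_XARRAY(), xa_load(), xa_store(), xa_err()); the 'size_entry' type and the function names are hypothetical.

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/mutex.h>

struct size_entry {
	unsigned int size;
	unsigned int ref;
};

static DEFINE_XARRAY(size_entries);		/* index: entry size */
static DEFINE_MUTEX(size_entries_lock);

static struct size_entry *get_size_entry(unsigned int size)
{
	struct size_entry *e;

	mutex_lock(&size_entries_lock);
	e = xa_load(&size_entries, size);	/* keyed lookup, no linear scan */
	if (e) {
		e->ref++;			/* reuse the existing entry */
	} else {
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (e) {
			e->size = size;
			e->ref = 1;
			/* xa_store() returns the old entry or an xa_err() value */
			if (xa_err(xa_store(&size_entries, size, e, GFP_KERNEL))) {
				kfree(e);
				e = NULL;
			}
		}
	}
	mutex_unlock(&size_entries_lock);
	return e;
}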
@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/sched/sysctl.h>
 #include <linux/blk-crypto.h>
+#include <linux/xarray.h>
 
 #include <trace/events/block.h>
 #include "blk.h"
@@ -58,89 +59,80 @@ struct bio_slab {
 	char name[8];
 };
 
 static DEFINE_MUTEX(bio_slab_lock);
-static struct bio_slab *bio_slabs;
-static unsigned int bio_slab_nr, bio_slab_max;
+static DEFINE_XARRAY(bio_slabs);
 
-static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+static struct bio_slab *create_bio_slab(unsigned int size)
 {
-	unsigned int sz = sizeof(struct bio) + extra_size;
-	struct kmem_cache *slab = NULL;
-	struct bio_slab *bslab, *new_bio_slabs;
-	unsigned int new_bio_slab_max;
-	unsigned int i, entry = -1;
+	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
+
+	if (!bslab)
+		return NULL;
+
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
+	bslab->slab = kmem_cache_create(bslab->name, size,
+			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
+	if (!bslab->slab)
+		goto fail_alloc_slab;
+
+	bslab->slab_ref = 1;
+	bslab->slab_size = size;
+
+	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
+		return bslab;
+
+	kmem_cache_destroy(bslab->slab);
+
+fail_alloc_slab:
+	kfree(bslab);
+	return NULL;
+}
+
+static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
+{
+	return bs->front_pad + sizeof(struct bio) +
+			BIO_INLINE_VECS * sizeof(struct bio_vec);
+}
+
+static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
+{
+	unsigned int size = bs_bio_slab_size(bs);
+	struct bio_slab *bslab;
 
 	mutex_lock(&bio_slab_lock);
-
-	i = 0;
-	while (i < bio_slab_nr) {
-		bslab = &bio_slabs[i];
-
-		if (!bslab->slab && entry == -1)
-			entry = i;
-		else if (bslab->slab_size == sz) {
-			slab = bslab->slab;
-			bslab->slab_ref++;
-			break;
-		}
-		i++;
-	}
-
-	if (slab)
-		goto out_unlock;
-
-	if (bio_slab_nr == bio_slab_max && entry == -1) {
-		new_bio_slab_max = bio_slab_max << 1;
-		new_bio_slabs = krealloc(bio_slabs,
-					 new_bio_slab_max * sizeof(struct bio_slab),
-					 GFP_KERNEL);
-		if (!new_bio_slabs)
-			goto out_unlock;
-		bio_slab_max = new_bio_slab_max;
-		bio_slabs = new_bio_slabs;
-	}
-	if (entry == -1)
-		entry = bio_slab_nr++;
-
-	bslab = &bio_slabs[entry];
-
-	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
-	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
-				 SLAB_HWCACHE_ALIGN, NULL);
-	if (!slab)
-		goto out_unlock;
-
-	bslab->slab = slab;
-	bslab->slab_ref = 1;
-	bslab->slab_size = sz;
-out_unlock:
+	bslab = xa_load(&bio_slabs, size);
+	if (bslab)
+		bslab->slab_ref++;
+	else
+		bslab = create_bio_slab(size);
 	mutex_unlock(&bio_slab_lock);
-	return slab;
+
+	if (bslab)
+		return bslab->slab;
+	return NULL;
 }
 
 static void bio_put_slab(struct bio_set *bs)
 {
 	struct bio_slab *bslab = NULL;
-	unsigned int i;
+	unsigned int slab_size = bs_bio_slab_size(bs);
 
 	mutex_lock(&bio_slab_lock);
-
-	for (i = 0; i < bio_slab_nr; i++) {
-		if (bs->bio_slab == bio_slabs[i].slab) {
-			bslab = &bio_slabs[i];
-			break;
-		}
-	}
-
+	bslab = xa_load(&bio_slabs, slab_size);
 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 		goto out;
 
+	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
+
 	WARN_ON(!bslab->slab_ref);
 	if (--bslab->slab_ref)
 		goto out;
 
+	xa_erase(&bio_slabs, slab_size);
+
 	kmem_cache_destroy(bslab->slab);
-	bslab->slab = NULL;
+	kfree(bslab);
 
 out:
 	mutex_unlock(&bio_slab_lock);
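The release side of the hypothetical sketch shown earlier would mirror bio_put_slab() above: look up by the same integer key, drop the reference, and only on the final put erase the index entry before freeing the descriptor.

static void put_size_entry(unsigned int size)
{
	struct size_entry *e;

	mutex_lock(&size_entries_lock);
	e = xa_load(&size_entries, size);
	if (!WARN_ON(!e) && !--e->ref) {
		xa_erase(&size_entries, size);	/* unpublish the entry first ... */
		kfree(e);			/* ... then free the descriptor */
	}
	mutex_unlock(&size_entries_lock);
}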
@@ -1570,15 +1562,13 @@ int bioset_init(struct bio_set *bs,
 		unsigned int front_pad,
 		int flags)
 {
-	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
-
 	bs->front_pad = front_pad;
 
 	spin_lock_init(&bs->rescue_lock);
 	bio_list_init(&bs->rescue_list);
 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
 
-	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+	bs->bio_slab = bio_find_or_create_slab(bs);
 	if (!bs->bio_slab)
 		return -ENOMEM;
 
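With this hunk, callers no longer assemble the slab size themselves: bioset_init() passes the bio_set down and bio_find_or_create_slab() derives the size via bs_bio_slab_size(). A hedged usage sketch follows; bioset_init(), BIO_POOL_SIZE and BIOSET_NEED_BVECS are the real API, while the driver names are hypothetical.

#include <linux/bio.h>

struct my_priv {			/* hypothetical per-bio driver state */
	void *cookie;
};

static struct bio_set my_bioset;

static int my_driver_init(void)
{
	/*
	 * front_pad = sizeof(struct my_priv) feeds bs_bio_slab_size(),
	 * so every bio_set with the same front_pad shares one "bio-N"
	 * slab, found via the xarray keyed on N.
	 */
	return bioset_init(&my_bioset, BIO_POOL_SIZE,
			   sizeof(struct my_priv), BIOSET_NEED_BVECS);
}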
@@ -1642,16 +1632,8 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab_max = 2;
-	bio_slab_nr = 0;
-	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
-			    GFP_KERNEL);
-
 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
 
-	if (!bio_slabs)
-		panic("bio: can't allocate bios\n");
-
 	bio_integrity_init();
 	biovec_init_slabs();
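The bootstrap in init_bio() can go because DEFINE_XARRAY() yields a statically initialized xarray: there is nothing to allocate at boot and nothing that can panic. For comparison, only an xarray embedded in dynamically allocated memory needs runtime setup (a minimal sketch; 'runtime_xa' is hypothetical):

#include <linux/xarray.h>

static DEFINE_XARRAY(static_xa);	/* ready at link time */

static void init_embedded_xarray(struct xarray *runtime_xa)
{
	xa_init(runtime_xa);		/* runtime counterpart of DEFINE_XARRAY() */
}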