mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
block: convert bounce, q->bio_split to bioset_init()/mempool_init()
Convert the core block functionality to embedded bio sets.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0b6bad7d66
commit 338aa96d56
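The pattern applied throughout the diff below: instead of allocating a bio_set or mempool_t on the heap with bioset_create()/mempool_create() and testing the returned pointer, the structure is embedded in its owning object and initialized in place with bioset_init()/mempool_init(), which return an int error code. This drops one allocation and one pointer indirection per use; the trade-off is that callers must track initialization state themselves, which the new bioset_initialized() helper (added to include/linux/bio.h below) supports. A minimal sketch of the conversion; the my_queue type and function names are illustrative, not from the patch:

	#include <linux/bio.h>

	/* Sketch only: a container embedding its bio_set. */
	struct my_queue {
		struct bio_set bio_split;	/* was: struct bio_set *bio_split; */
	};

	static int my_queue_init(struct my_queue *q)
	{
		/* was: q->bio_split = bioset_create(...); if (!q->bio_split) ... */
		return bioset_init(&q->bio_split, BIO_POOL_SIZE, 0,
				   BIOSET_NEED_BVECS);
	}

	static void my_queue_exit(struct my_queue *q)
	{
		/* was: bioset_free(q->bio_split); */
		bioset_exit(&q->bio_split);
	}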
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -992,6 +992,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 					   spinlock_t *lock)
 {
 	struct request_queue *q;
+	int ret;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
 				gfp_mask | __GFP_ZERO, node_id);
@@ -1002,8 +1003,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 	if (q->id < 0)
 		goto fail_q;
 
-	q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
-	if (!q->bio_split)
+	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+	if (ret)
 		goto fail_id;
 
 	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
@@ -1075,7 +1076,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 fail_stats:
 	bdi_put(q->backing_dev_info);
 fail_split:
-	bioset_free(q->bio_split);
+	bioset_exit(&q->bio_split);
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
 fail_q:
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -188,16 +188,16 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
 	switch (bio_op(*bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
+		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
+		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
 		break;
 	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
+		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
 		break;
 	default:
-		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
 		break;
 	}
 
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -824,8 +824,7 @@ static void __blk_release_queue(struct work_struct *work)
 	if (q->mq_ops)
 		blk_mq_debugfs_unregister(q);
 
-	if (q->bio_split)
-		bioset_free(q->bio_split);
+	bioset_exit(&q->bio_split);
 
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -28,28 +28,29 @@
 #define POOL_SIZE	64
 #define ISA_POOL_SIZE	16
 
-static struct bio_set *bounce_bio_set, *bounce_bio_split;
-static mempool_t *page_pool, *isa_page_pool;
+static struct bio_set bounce_bio_set, bounce_bio_split;
+static mempool_t page_pool, isa_page_pool;
 
 #if defined(CONFIG_HIGHMEM)
 static __init int init_emergency_pool(void)
 {
+	int ret;
 #if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
 	if (max_pfn <= max_low_pfn)
 		return 0;
 #endif
 
-	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
-	BUG_ON(!page_pool);
+	ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
+	BUG_ON(ret);
 	pr_info("pool size: %d pages\n", POOL_SIZE);
 
-	bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
-	BUG_ON(!bounce_bio_set);
-	if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
+	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+	BUG_ON(ret);
+	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
 		BUG_ON(1);
 
-	bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
-	BUG_ON(!bounce_bio_split);
+	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
+	BUG_ON(ret);
 
 	return 0;
 }
@@ -91,12 +92,14 @@ static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
  */
 int init_emergency_isa_pool(void)
 {
-	if (isa_page_pool)
+	int ret;
+
+	if (mempool_initialized(&isa_page_pool))
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
-				       mempool_free_pages, (void *) 0);
-	BUG_ON(!isa_page_pool);
+	ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
+			   mempool_free_pages, (void *) 0);
+	BUG_ON(ret);
 
 	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;
@@ -163,13 +166,13 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 }
 
 static void bounce_end_io_write(struct bio *bio)
 {
-	bounce_end_io(bio, page_pool);
+	bounce_end_io(bio, &page_pool);
 }
 
 static void bounce_end_io_write_isa(struct bio *bio)
 {
-	bounce_end_io(bio, isa_page_pool);
+	bounce_end_io(bio, &isa_page_pool);
 }
 
@@ -184,12 +187,12 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 
 static void bounce_end_io_read(struct bio *bio)
 {
-	__bounce_end_io_read(bio, page_pool);
+	__bounce_end_io_read(bio, &page_pool);
 }
 
 static void bounce_end_io_read_isa(struct bio *bio)
 {
-	__bounce_end_io_read(bio, isa_page_pool);
+	__bounce_end_io_read(bio, &isa_page_pool);
 }
 
@@ -214,13 +217,13 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		return;
 
 	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
-		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
+		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
 		bio_chain(bio, *bio_orig);
 		generic_make_request(*bio_orig);
 		*bio_orig = bio;
 	}
 	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
-			bounce_bio_set);
+			&bounce_bio_set);
 
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
@@ -247,7 +250,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 
 	bio->bi_flags |= (1 << BIO_BOUNCED);
 
-	if (pool == page_pool) {
+	if (pool == &page_pool) {
 		bio->bi_end_io = bounce_end_io_write;
 		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read;
@@ -279,10 +282,10 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	if (!(q->bounce_gfp & GFP_DMA)) {
 		if (q->limits.bounce_pfn >= blk_max_pfn)
 			return;
-		pool = page_pool;
+		pool = &page_pool;
 	} else {
-		BUG_ON(!isa_page_pool);
-		pool = isa_page_pool;
+		BUG_ON(!mempool_initialized(&isa_page_pool));
+		pool = &isa_page_pool;
 	}
 
 	/*
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1582,7 +1582,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			 * won't be affected by this reassignment.
 			 */
 			struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
-							 md->queue->bio_split);
+							 &md->queue->bio_split);
 			ci.io->orig_bio = b;
 			bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
 			bio_chain(b, bio);
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -760,6 +760,11 @@ struct biovec_slab {
 	struct kmem_cache *slab;
 };
 
+static inline bool bioset_initialized(struct bio_set *bs)
+{
+	return bs->bio_slab != NULL;
+}
+
 /*
  * a small number of entries is fine, not going to be performance critical.
  * basically we just need to survive
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -652,7 +652,7 @@ struct request_queue {
 
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
-	struct bio_set		*bio_split;
+	struct bio_set		bio_split;
 
 #ifdef CONFIG_BLK_DEBUG_FS
 	struct dentry		*debugfs_dir;
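Note that the bioset_initialized() helper added to include/linux/bio.h above is not yet called by these hunks; it mirrors mempool_initialized(), which the bounce code now uses to keep init_emergency_isa_pool() idempotent. A plausible use, sketched under the assumption of a statically embedded set (the my_bio_set and my_pool_init names are illustrative):

	static struct bio_set my_bio_set;	/* illustrative */

	static int my_pool_init(void)
	{
		/* Idempotent, like the mempool_initialized() check above. */
		if (bioset_initialized(&my_bio_set))
			return 0;
		return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0, 0);
	}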