block: split bio_kmalloc from bio_alloc_bioset
bio_kmalloc shares almost no logic with the bio_set based fast path in
bio_alloc_bioset.  Split it into an entirely separate implementation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 4eb1d68904
commit 3175199ab0

 block/bio.c | 165
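What the split means for callers, as a minimal sketch (not part of the commit; the helper name alloc_bio_example is hypothetical): after this change bio_kmalloc() no longer funnels through bio_alloc_bioset() with a NULL bio_set, so the two calls below take entirely separate paths.

#include <linux/bio.h>

/* Hypothetical caller illustrating the two post-split allocation paths. */
static struct bio *alloc_bio_example(struct bio_set *bs, unsigned int nr_vecs)
{
	if (bs)		/* fast path: bio and bvecs come from @bs's mempools */
		return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
	/* slow path: plain kmalloc, no mempool forward-progress guarantee */
	return bio_kmalloc(GFP_KERNEL, nr_vecs);
}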
--- a/block/bio.c
+++ b/block/bio.c
@@ -396,123 +396,101 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
  * @nr_iovecs: number of iovecs to pre-allocate
  * @bs: the bio_set to allocate from.
  *
- * Description:
- *    If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
- *    backed by the @bs's mempool.
+ * Allocate a bio from the mempools in @bs.
  *
- *    When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
- *    always be able to allocate a bio. This is due to the mempool guarantees.
- *    To make this work, callers must never allocate more than 1 bio at a time
- *    from this pool. Callers that need to allocate more than 1 bio must always
- *    submit the previously allocated bio for IO before attempting to allocate
- *    a new one. Failure to do so can cause deadlocks under memory pressure.
+ * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
+ * allocate a bio. This is due to the mempool guarantees. To make this work,
+ * callers must never allocate more than 1 bio at a time from the general pool.
+ * Callers that need to allocate more than 1 bio must always submit the
+ * previously allocated bio for IO before attempting to allocate a new one.
+ * Failure to do so can cause deadlocks under memory pressure.
  *
- *    Note that when running under submit_bio_noacct() (i.e. any block
- *    driver), bios are not submitted until after you return - see the code in
- *    submit_bio_noacct() that converts recursion into iteration, to prevent
- *    stack overflows.
+ * Note that when running under submit_bio_noacct() (i.e. any block driver),
+ * bios are not submitted until after you return - see the code in
+ * submit_bio_noacct() that converts recursion into iteration, to prevent
+ * stack overflows.
  *
- *    This would normally mean allocating multiple bios under
- *    submit_bio_noacct() would be susceptible to deadlocks, but we have
- *    deadlock avoidance code that resubmits any blocked bios from a rescuer
- *    thread.
+ * This would normally mean allocating multiple bios under submit_bio_noacct()
+ * would be susceptible to deadlocks, but we have
+ * deadlock avoidance code that resubmits any blocked bios from a rescuer
+ * thread.
  *
- *    However, we do not guarantee forward progress for allocations from other
- *    mempools. Doing multiple allocations from the same mempool under
- *    submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
- *    for per bio allocations.
+ * However, we do not guarantee forward progress for allocations from other
+ * mempools. Doing multiple allocations from the same mempool under
+ * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
+ * for per bio allocations.
  *
- *    RETURNS:
- *    Pointer to new bio on success, NULL on failure.
+ * Returns: Pointer to new bio on success, NULL on failure.
  */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 			     struct bio_set *bs)
 {
 	gfp_t saved_gfp = gfp_mask;
-	unsigned front_pad;
-	unsigned inline_vecs;
-	struct bio_vec *bvl = NULL;
 	struct bio *bio;
 	void *p;
 
-	if (!bs) {
-		if (nr_iovecs > UIO_MAXIOV)
-			return NULL;
-
-		p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
-		front_pad = 0;
-		inline_vecs = nr_iovecs;
-	} else {
-		/* should not use nobvec bioset for nr_iovecs > 0 */
-		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
-				 nr_iovecs > 0))
-			return NULL;
-		/*
-		 * submit_bio_noacct() converts recursion to iteration; this
-		 * means if we're running beneath it, any bios we allocate and
-		 * submit will not be submitted (and thus freed) until after we
-		 * return.
-		 *
-		 * This exposes us to a potential deadlock if we allocate
-		 * multiple bios from the same bio_set() while running
-		 * underneath submit_bio_noacct(). If we were to allocate
-		 * multiple bios (say a stacking block driver that was splitting
-		 * bios), we would deadlock if we exhausted the mempool's
-		 * reserve.
-		 *
-		 * We solve this, and guarantee forward progress, with a rescuer
-		 * workqueue per bio_set. If we go to allocate and there are
-		 * bios on current->bio_list, we first try the allocation
-		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
-		 * bios we would be blocking to the rescuer workqueue before
-		 * we retry with the original gfp_flags.
-		 */
-
-		if (current->bio_list &&
-		    (!bio_list_empty(&current->bio_list[0]) ||
-		     !bio_list_empty(&current->bio_list[1])) &&
-		    bs->rescue_workqueue)
-			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
-
-		p = mempool_alloc(&bs->bio_pool, gfp_mask);
-		if (!p && gfp_mask != saved_gfp) {
-			punt_bios_to_rescuer(bs);
-			gfp_mask = saved_gfp;
-			p = mempool_alloc(&bs->bio_pool, gfp_mask);
-		}
-
-		front_pad = bs->front_pad;
-		inline_vecs = BIO_INLINE_VECS;
-	}
+	/* should not use nobvec bioset for nr_iovecs > 0 */
+	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
+		return NULL;
+
+	/*
+	 * submit_bio_noacct() converts recursion to iteration; this means if
+	 * we're running beneath it, any bios we allocate and submit will not be
+	 * submitted (and thus freed) until after we return.
+	 *
+	 * This exposes us to a potential deadlock if we allocate multiple bios
+	 * from the same bio_set() while running underneath submit_bio_noacct().
+	 * If we were to allocate multiple bios (say a stacking block driver
+	 * that was splitting bios), we would deadlock if we exhausted the
+	 * mempool's reserve.
+	 *
+	 * We solve this, and guarantee forward progress, with a rescuer
+	 * workqueue per bio_set. If we go to allocate and there are bios on
+	 * current->bio_list, we first try the allocation without
+	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
+	 * blocking to the rescuer workqueue before we retry with the original
+	 * gfp_flags.
+	 */
+	if (current->bio_list &&
+	    (!bio_list_empty(&current->bio_list[0]) ||
+	     !bio_list_empty(&current->bio_list[1])) &&
+	    bs->rescue_workqueue)
+		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+
+	p = mempool_alloc(&bs->bio_pool, gfp_mask);
+	if (!p && gfp_mask != saved_gfp) {
+		punt_bios_to_rescuer(bs);
+		gfp_mask = saved_gfp;
+		p = mempool_alloc(&bs->bio_pool, gfp_mask);
+	}
 
 	if (unlikely(!p))
 		return NULL;
 
-	bio = p + front_pad;
-	bio_init(bio, NULL, 0);
-
-	if (nr_iovecs > inline_vecs) {
+	bio = p + bs->front_pad;
+	if (nr_iovecs > BIO_INLINE_VECS) {
 		unsigned long idx = 0;
+		struct bio_vec *bvl = NULL;
 
 		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
 		if (!bvl && gfp_mask != saved_gfp) {
 			punt_bios_to_rescuer(bs);
 			gfp_mask = saved_gfp;
-			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
+			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx,
+					 &bs->bvec_pool);
 		}
 
 		if (unlikely(!bvl))
 			goto err_free;
 
 		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
-		bio->bi_max_vecs = bvec_nr_vecs(idx);
+		bio_init(bio, bvl, bvec_nr_vecs(idx));
 	} else if (nr_iovecs) {
-		bvl = bio->bi_inline_vecs;
-		bio->bi_max_vecs = inline_vecs;
+		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+	} else {
+		bio_init(bio, NULL, 0);
 	}
 
 	bio->bi_pool = bs;
-	bio->bi_io_vec = bvl;
 	return bio;
 
 err_free:
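The retry logic in the hunk above is the bio_set deadlock-avoidance pattern: try the allocation without __GFP_DIRECT_RECLAIM first, punt blocked bios to the rescuer, then retry with the caller's flags. A standalone sketch of that shape, where my_pool and punt_blocked_bios() are placeholders rather than kernel APIs (the real code only strips the flag when current->bio_list is non-empty and a rescue workqueue exists):

static void *alloc_with_rescuer(mempool_t *my_pool, gfp_t gfp_mask)
{
	gfp_t saved_gfp = gfp_mask;
	void *p;

	/*
	 * The first attempt must not block: bios parked on current->bio_list
	 * may be the very I/O that would replenish the mempool.
	 */
	gfp_mask &= ~__GFP_DIRECT_RECLAIM;
	p = mempool_alloc(my_pool, gfp_mask);
	if (!p) {
		punt_blocked_bios();	/* stands in for punt_bios_to_rescuer() */
		p = mempool_alloc(my_pool, saved_gfp);
	}
	return p;
}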
@@ -521,6 +499,31 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 }
 EXPORT_SYMBOL(bio_alloc_bioset);
 
+/**
+ * bio_kmalloc - kmalloc a bio for I/O
+ * @gfp_mask:   the GFP_* mask given to the slab allocator
+ * @nr_iovecs: number of iovecs to pre-allocate
+ *
+ * Use kmalloc to allocate and initialize a bio.
+ *
+ * Returns: Pointer to new bio on success, NULL on failure.
+ */
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+	struct bio *bio;
+
+	if (nr_iovecs > UIO_MAXIOV)
+		return NULL;
+
+	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
+	if (unlikely(!bio))
+		return NULL;
+	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+	bio->bi_pool = NULL;
+	return bio;
+}
+EXPORT_SYMBOL(bio_kmalloc);
+
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 {
 	unsigned long flags;
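A usage sketch for the new standalone bio_kmalloc(); read_one_page() is a hypothetical caller and error handling is trimmed for brevity:

static int read_one_page(struct block_device *bdev, struct page *page)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);

	if (!bio)
		return -ENOMEM;
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio_wait(bio);	/* synchronous for simplicity */
	bio_put(bio);		/* bi_pool is NULL, so the bio is kfree()d */
	return 0;
}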
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -408,6 +408,7 @@ extern int biovec_init_pool(mempool_t *pool, int pool_entries);
 extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 
 extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs);
 extern void bio_put(struct bio *);
 
 extern void __bio_clone_fast(struct bio *, struct bio *);
@@ -420,11 +421,6 @@ static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
 }
 
-static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
-
 extern blk_qc_t submit_bio(struct bio *);
 extern void bio_endio(struct bio *);