block: support large requests in blk_rq_map_user_iov
This patch adds support for larger requests in blk_rq_map_user_iov by allowing it to build multiple bios for a request. This functionality used to exist for the non-vectored blk_rq_map_user in the past, and this patch reuses the unmap-side part of it, which stuck around. Thanks to the iov_iter API, supporting multiple bios is fairly trivial, as we can just iterate the iov until we've consumed the whole iov_iter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Jeff Lien <Jeff.Lien@hgst.com>
Tested-by: Jeff Lien <Jeff.Lien@hgst.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent f21018427c
commit 4d6af73d9e
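Not part of the patch: below is a minimal sketch, assuming roughly 4.5-era block-layer APIs, of how a BLOCK_PC passthrough caller (in the spirit of sg_io() in block/scsi_ioctl.c) might map a multi-segment user iovec through blk_rq_map_user_iov(). The helper name, its parameters, and the trimmed error handling are illustrative assumptions, not code from this commit.

#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/slab.h>

/*
 * Hypothetical passthrough helper, loosely following the sg_io() pattern.
 * Error handling and result reporting are trimmed for brevity.
 */
static int example_passthrough(struct request_queue *q, struct gendisk *disk,
                               const struct iovec __user *uvec, int nr_segs,
                               size_t len, int rw)
{
        struct iovec *iov = NULL;
        struct iov_iter i;
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, rw, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);

        /* Build an iov_iter over the user iovec. */
        ret = import_iovec(rw, uvec, nr_segs, 0, &iov, &i);
        if (ret < 0)
                goto out_put;
        iov_iter_truncate(&i, len);

        /*
         * With this patch the mapping no longer fails when the iovec does
         * not fit into a single bio; multiple bios are built and appended
         * to the request instead.
         */
        ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
        kfree(iov);
        if (ret)
                goto out_put;

        bio = rq->bio;                  /* remember the head for unmapping */
        blk_execute_rq(q, disk, rq, 0);
        ret = blk_rq_unmap_user(bio);   /* unmap side walks all chained bios */
out_put:
        blk_put_request(rq);
        return ret;
}

Before this change, such a call returned -EINVAL whenever the data could not be covered by a single bio; with the do/while loop added in the diff below, the mapping path keeps consuming the iov_iter until everything is mapped.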
@@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio)
         return ret;
 }
 
+static int __blk_rq_map_user_iov(struct request *rq,
+                struct rq_map_data *map_data, struct iov_iter *iter,
+                gfp_t gfp_mask, bool copy)
+{
+        struct request_queue *q = rq->q;
+        struct bio *bio, *orig_bio;
+        int ret;
+
+        if (copy)
+                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
+        else
+                bio = bio_map_user_iov(q, iter, gfp_mask);
+
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
+
+        if (map_data && map_data->null_mapped)
+                bio_set_flag(bio, BIO_NULL_MAPPED);
+
+        iov_iter_advance(iter, bio->bi_iter.bi_size);
+        if (map_data)
+                map_data->offset += bio->bi_iter.bi_size;
+
+        orig_bio = bio;
+        blk_queue_bounce(q, &bio);
+
+        /*
+         * We link the bounce buffer in and could have to traverse it
+         * later so we have to get a ref to prevent it from being freed
+         */
+        bio_get(bio);
+
+        ret = blk_rq_append_bio(q, rq, bio);
+        if (ret) {
+                bio_endio(bio);
+                __blk_rq_unmap_user(orig_bio);
+                bio_put(bio);
+                return ret;
+        }
+
+        return 0;
+}
+
 /**
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:          request queue where request should be inserted
@@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                         struct rq_map_data *map_data,
                         const struct iov_iter *iter, gfp_t gfp_mask)
 {
-        struct bio *bio;
-        int unaligned = 0;
-        struct iov_iter i;
         struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
+        bool copy = (q->dma_pad_mask & iter->count) || map_data;
+        struct bio *bio = NULL;
+        struct iov_iter i;
+        int ret;
 
         if (!iter || !iter->count)
                 return -EINVAL;
@@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                  */
                 if ((uaddr & queue_dma_alignment(q)) ||
                     iovec_gap_to_prv(q, &prv, &iov))
-                        unaligned = 1;
+                        copy = true;
 
                 prv.iov_base = iov.iov_base;
                 prv.iov_len = iov.iov_len;
         }
 
-        if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
-                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
-        else
-                bio = bio_map_user_iov(q, iter, gfp_mask);
-
-        if (IS_ERR(bio))
-                return PTR_ERR(bio);
-
-        if (map_data && map_data->null_mapped)
-                bio_set_flag(bio, BIO_NULL_MAPPED);
-
-        if (bio->bi_iter.bi_size != iter->count) {
-                /*
-                 * Grab an extra reference to this bio, as bio_unmap_user()
-                 * expects to be able to drop it twice as it happens on the
-                 * normal IO completion path
-                 */
-                bio_get(bio);
-                bio_endio(bio);
-                __blk_rq_unmap_user(bio);
-                return -EINVAL;
-        }
+        i = *iter;
+        do {
+                ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+                if (ret)
+                        goto unmap_rq;
+                if (!bio)
+                        bio = rq->bio;
+        } while (iov_iter_count(&i));
 
         if (!bio_flagged(bio, BIO_USER_MAPPED))
                 rq->cmd_flags |= REQ_COPY_USER;
-
-        blk_queue_bounce(q, &bio);
-        bio_get(bio);
-        blk_rq_bio_prep(q, rq, bio);
         return 0;
+
+unmap_rq:
+        __blk_rq_unmap_user(bio);
+        rq->bio = NULL;
+        return -EINVAL;
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);