blk-integrity: remove seed for user mapped buffers
The seed is only used for kernel generation and verification. That
doesn't happen for user buffers, so passing the seed around doesn't
accomplish anything.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20241016201309.1090320-1-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 133008e84b
parent 826cc42adf
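To illustrate the effect on callers, here is a minimal sketch, not part of the patch: map_user_meta_example() is a hypothetical helper, while blk_rq_integrity_map_user() is the real interface changed by the diff below. After this change the call no longer carries a seed argument.

/*
 * Minimal sketch, assuming the post-patch prototype shown in the diff.
 * map_user_meta_example() is hypothetical; blk_rq_integrity_map_user()
 * is the interface this commit modifies.
 */
#include <linux/blk-integrity.h>
#include <linux/blk-mq.h>

static int map_user_meta_example(struct request *rq,
                                 void __user *meta_buf, ssize_t meta_len)
{
        /*
         * No seed argument: for user-mapped integrity buffers the kernel
         * neither generates nor verifies protection information, so a
         * reference-tag seed had nothing to act on.
         */
        return blk_rq_integrity_map_user(rq, meta_buf, meta_len);
}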
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(bio_integrity_add_page);
 
 static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
                                    int nr_vecs, unsigned int len,
-                                   unsigned int direction, u32 seed)
+                                   unsigned int direction)
 {
         bool write = direction == ITER_SOURCE;
         struct bio_integrity_payload *bip;
@@ -247,7 +247,6 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
         }
 
         bip->bip_flags |= BIP_COPY_USER;
-        bip->bip_iter.bi_sector = seed;
         bip->bip_vcnt = nr_vecs;
         return 0;
 free_bip:
@@ -258,7 +257,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
 }
 
 static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
-                                   int nr_vecs, unsigned int len, u32 seed)
+                                   int nr_vecs, unsigned int len)
 {
         struct bio_integrity_payload *bip;
 
@@ -267,7 +266,6 @@ static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
                 return PTR_ERR(bip);
 
         memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
-        bip->bip_iter.bi_sector = seed;
         bip->bip_iter.bi_size = len;
         bip->bip_vcnt = nr_vecs;
         return 0;
@@ -303,8 +301,7 @@ static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
         return nr_bvecs;
 }
 
-int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
-                           u32 seed)
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes)
 {
         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
         unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
@@ -350,9 +347,9 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
 
         if (copy)
                 ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
-                                              direction, seed);
+                                              direction);
         else
-                ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes, seed);
+                ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
         if (ret)
                 goto release_pages;
         if (bvec != stack_vec)
@@ -113,9 +113,9 @@ int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);
 
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
-                              ssize_t bytes, u32 seed)
+                              ssize_t bytes)
 {
-        int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);
+        int ret = bio_integrity_map_user(rq->bio, ubuf, bytes);
 
         if (ret)
                 return ret;
@@ -114,7 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
                 unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-                u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
+                struct io_uring_cmd *ioucmd, unsigned int flags)
 {
         struct request_queue *q = req->q;
         struct nvme_ns *ns = q->queuedata;
@@ -152,8 +152,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                 bio_set_dev(bio, bdev);
 
         if (has_metadata) {
-                ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
-                                                meta_seed);
+                ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
                 if (ret)
                         goto out_unmap;
         }
@@ -170,7 +169,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 
 static int nvme_submit_user_cmd(struct request_queue *q,
                 struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
-                void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+                void __user *meta_buffer, unsigned meta_len,
                 u64 *result, unsigned timeout, unsigned int flags)
 {
         struct nvme_ns *ns = q->queuedata;
@@ -187,7 +186,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
         req->timeout = timeout;
         if (ubuffer && bufflen) {
                 ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-                                meta_len, meta_seed, NULL, flags);
+                                meta_len, NULL, flags);
                 if (ret)
                         return ret;
         }
@@ -268,7 +267,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
         c.rw.lbatm = cpu_to_le16(io.appmask);
 
         return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
-                        meta_len, lower_32_bits(io.slba), NULL, 0, 0);
+                        meta_len, NULL, 0, 0);
 }
 
 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -323,7 +322,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
         status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                         cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
-                        cmd.metadata_len, 0, &result, timeout, 0);
+                        cmd.metadata_len, &result, timeout, 0);
 
         if (status >= 0) {
                 if (put_user(result, &ucmd->result))
@@ -370,7 +369,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
         status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                         cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
-                        cmd.metadata_len, 0, &cmd.result, timeout, flags);
+                        cmd.metadata_len, &cmd.result, timeout, flags);
 
         if (status >= 0) {
                 if (put_user(cmd.result, &ucmd->result))
@@ -504,7 +503,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
         if (d.addr && d.data_len) {
                 ret = nvme_map_user_request(req, d.addr,
                                 d.data_len, nvme_to_user_ptr(d.metadata),
-                                d.metadata_len, 0, ioucmd, vec);
+                                d.metadata_len, ioucmd, vec);
                 if (ret)
                         return ret;
         }
@@ -72,7 +72,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
                                                   unsigned int nr);
 int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
                            unsigned int offset);
-int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len);
 void bio_integrity_unmap_user(struct bio *bio);
 bool bio_integrity_prep(struct bio *bio);
 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
@@ -99,7 +99,7 @@ static inline void bioset_integrity_free(struct bio_set *bs)
 }
 
 static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
-                                         ssize_t len, u32 seed)
+                                         ssize_t len)
 {
         return -EINVAL;
 }
@@ -28,7 +28,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
 int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
-                              ssize_t bytes, u32 seed);
+                              ssize_t bytes);
 
 static inline bool
 blk_integrity_queue_supports_integrity(struct request_queue *q)
@@ -104,8 +104,7 @@ static inline int blk_rq_map_integrity_sg(struct request *q,
 }
 static inline int blk_rq_integrity_map_user(struct request *rq,
                                             void __user *ubuf,
-                                            ssize_t bytes,
-                                            u32 seed)
+                                            ssize_t bytes)
 {
         return -EINVAL;
 }