io_uring/rw: add separate prep handler for fixed read/write
Rather than sprinkle opcode checks in the generic read/write prep handler, have a separate prep handler for the fixed read/write operation.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit f688944cfb
parent 0e984ec88d
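The fixed read/write opcodes operate on buffers registered with the kernel up front, so each submission only carries a buffer index; the prep handler introduced below is what validates that index and resolves the registered buffer at prep time. For orientation, here is a minimal userspace sketch of the path being prepared, using liburing (the file path and sizes are illustrative and error handling is trimmed):

#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char *buf;

	io_uring_queue_init(8, &ring, 0);

	/* Register one fixed buffer; its index (0) is what lands in
	 * sqe->buf_index and gets validated at prep time. */
	if (posix_memalign((void **)&buf, 4096, 4096))
		return 1;
	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
	io_uring_register_buffers(&ring, &iov, 1);

	int fd = open("/tmp/testfile", O_RDONLY);	/* illustrative path */

	/* Queue an IORING_OP_READ_FIXED into the registered buffer. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0, /* buf_index */ 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	printf("read returned %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}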
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio			= 1,
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
-		.prep			= io_prep_rw,
+		.prep			= io_prep_rw_fixed,
 		.issue			= io_read,
 	},
 	[IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio			= 1,
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
-		.prep			= io_prep_rw,
+		.prep			= io_prep_rw_fixed,
 		.issue			= io_write,
 	},
 	[IORING_OP_POLL_ADD] = {
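For context, io_issue_defs[] is io_uring's per-opcode dispatch table: each entry carries capability flags plus .prep and .issue handlers, so routing the fixed variants to a dedicated prep handler is a one-pointer change per opcode. A simplified sketch of the pattern, with hypothetical types rather than the kernel's definitions:

/* Simplified per-opcode dispatch table; the real io_issue_def has many
 * more fields and flag bits. */
struct req;

struct op_def {
	int (*prep)(struct req *req);
	int (*issue)(struct req *req);
};

static int prep_rw(struct req *req)       { return 0; }	/* generic prep */
static int prep_rw_fixed(struct req *req) { return 0; }	/* generic + buffer lookup */
static int do_read(struct req *req)       { return 0; }

enum { OP_READ, OP_READ_FIXED };

static const struct op_def op_defs[] = {
	[OP_READ]       = { .prep = prep_rw,       .issue = do_read },
	[OP_READ_FIXED] = { .prep = prep_rw_fixed, .issue = do_read },	/* this commit's swap */
};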
diff --git a/io_uring/rw.c b/io_uring/rw.c
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	/* used for fixed read/write too - just read unconditionally */
 	req->buf_index = READ_ONCE(sqe->buf_index);
 
-	if (req->opcode == IORING_OP_READ_FIXED ||
-	    req->opcode == IORING_OP_WRITE_FIXED) {
-		struct io_ring_ctx *ctx = req->ctx;
-		u16 index;
-
-		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
-			return -EFAULT;
-		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-		req->imu = ctx->user_bufs[index];
-		io_req_set_rsrc_node(req, ctx, 0);
-	}
-
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
 		ret = ioprio_check_cap(ioprio);
@@ -131,6 +119,24 @@ int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	u16 index;
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
+
+	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+		return -EFAULT;
+	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+	req->imu = ctx->user_bufs[index];
+	io_req_set_rsrc_node(req, ctx, 0);
+	return 0;
+}
+
 /*
  * Multishot read is prepared just like a normal read/write request, only
  * difference is that we set the MULTISHOT flag.
diff --git a/io_uring/rw.h b/io_uring/rw.h
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -17,6 +17,7 @@ struct io_async_rw {
 
 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_read(struct io_kiocb *req, unsigned int issue_flags);
 int io_readv_prep_async(struct io_kiocb *req);
 int io_write(struct io_kiocb *req, unsigned int issue_flags);