mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2024-12-29 09:13:38 +00:00
io_uring/rsrc: add io_rsrc_node_lookup() helper
There are lots of spots open-coding this functionality; add a generic helper that does the node lookup in a speculation-safe way. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
3597f2786b
commit
b54a14041e
@ -240,10 +240,12 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
|
||||
/* fixed must be grabbed every time since we drop the uring_lock */
|
||||
if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
|
||||
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
|
||||
if (unlikely(fd >= ctx->file_table.data.nr))
|
||||
struct io_rsrc_node *node;
|
||||
|
||||
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
|
||||
if (unlikely(!node))
|
||||
return -EBADF;
|
||||
fd = array_index_nospec(fd, ctx->file_table.data.nr);
|
||||
cd->file = io_file_from_index(&ctx->file_table, fd);
|
||||
cd->file = io_slot_file(node);
|
||||
if (!cd->file)
|
||||
return -EBADF;
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
|
||||
u32 slot_index)
|
||||
__must_hold(&req->ctx->uring_lock)
|
||||
{
|
||||
struct io_rsrc_node *node;
|
||||
struct io_rsrc_node *node, *old_node;
|
||||
|
||||
if (io_is_uring_fops(file))
|
||||
return -EBADF;
|
||||
@ -71,9 +71,9 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
slot_index = array_index_nospec(slot_index, ctx->file_table.data.nr);
|
||||
if (ctx->file_table.data.nodes[slot_index])
|
||||
io_put_rsrc_node(ctx->file_table.data.nodes[slot_index]);
|
||||
old_node = io_rsrc_node_lookup(&ctx->file_table.data, slot_index);
|
||||
if (old_node)
|
||||
io_put_rsrc_node(old_node);
|
||||
else
|
||||
io_file_bitmap_set(&ctx->file_table, slot_index);
|
||||
|
||||
@ -123,15 +123,17 @@ int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
|
||||
|
||||
int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
|
||||
{
|
||||
struct io_rsrc_node *node;
|
||||
|
||||
if (unlikely(!ctx->file_table.data.nr))
|
||||
return -ENXIO;
|
||||
if (offset >= ctx->file_table.data.nr)
|
||||
return -EINVAL;
|
||||
|
||||
offset = array_index_nospec(offset, ctx->file_table.data.nr);
|
||||
if (!ctx->file_table.data.nodes[offset])
|
||||
node = io_rsrc_node_lookup(&ctx->file_table.data, offset);
|
||||
if (!node)
|
||||
return -EBADF;
|
||||
io_put_rsrc_node(ctx->file_table.data.nodes[offset]);
|
||||
io_put_rsrc_node(node);
|
||||
ctx->file_table.data.nodes[offset] = NULL;
|
||||
io_file_bitmap_clear(&ctx->file_table, offset);
|
||||
return 0;
|
||||
|
@ -52,7 +52,7 @@ static inline struct file *io_slot_file(struct io_rsrc_node *node)
|
||||
static inline struct file *io_file_from_index(struct io_file_table *table,
|
||||
int index)
|
||||
{
|
||||
struct io_rsrc_node *node = table->data.nodes[index];
|
||||
struct io_rsrc_node *node = io_rsrc_node_lookup(&table->data, index);
|
||||
|
||||
if (node)
|
||||
return io_slot_file(node);
|
||||
|
@ -1879,16 +1879,12 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
|
||||
struct file *file = NULL;
|
||||
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
if (unlikely((unsigned int)fd >= ctx->file_table.data.nr))
|
||||
goto out;
|
||||
fd = array_index_nospec(fd, ctx->file_table.data.nr);
|
||||
node = ctx->file_table.data.nodes[fd];
|
||||
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
|
||||
if (node) {
|
||||
io_req_assign_rsrc_node(req, node);
|
||||
req->flags |= io_slot_flags(node);
|
||||
file = io_slot_file(node);
|
||||
}
|
||||
out:
|
||||
io_ring_submit_unlock(ctx, issue_flags);
|
||||
return file;
|
||||
}
|
||||
|
@ -172,22 +172,24 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return __io_msg_ring_data(target_ctx, msg, issue_flags);
|
||||
}
|
||||
|
||||
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
|
||||
static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
|
||||
{
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct file *file = NULL;
|
||||
int idx = msg->src_fd;
|
||||
struct io_rsrc_node *node;
|
||||
int ret = -EBADF;
|
||||
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
if (likely(idx < ctx->file_table.data.nr)) {
|
||||
idx = array_index_nospec(idx, ctx->file_table.data.nr);
|
||||
file = io_file_from_index(&ctx->file_table, idx);
|
||||
if (file)
|
||||
get_file(file);
|
||||
node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
|
||||
if (node) {
|
||||
msg->src_file = io_slot_file(node);
|
||||
if (msg->src_file)
|
||||
get_file(msg->src_file);
|
||||
req->flags |= REQ_F_NEED_CLEANUP;
|
||||
ret = 0;
|
||||
}
|
||||
io_ring_submit_unlock(ctx, issue_flags);
|
||||
return file;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
|
||||
@ -256,7 +258,6 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
|
||||
struct io_ring_ctx *target_ctx = req->file->private_data;
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct file *src_file = msg->src_file;
|
||||
|
||||
if (msg->len)
|
||||
return -EINVAL;
|
||||
@ -264,12 +265,10 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return -EINVAL;
|
||||
if (target_ctx->flags & IORING_SETUP_R_DISABLED)
|
||||
return -EBADFD;
|
||||
if (!src_file) {
|
||||
src_file = io_msg_grab_file(req, issue_flags);
|
||||
if (!src_file)
|
||||
return -EBADF;
|
||||
msg->src_file = src_file;
|
||||
req->flags |= REQ_F_NEED_CLEANUP;
|
||||
if (!msg->src_file) {
|
||||
int ret = io_msg_grab_file(req, issue_flags);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (io_msg_need_remote(target_ctx))
|
||||
|
@ -1343,13 +1343,11 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
|
||||
if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct io_rsrc_node *node;
|
||||
int idx;
|
||||
|
||||
ret = -EFAULT;
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
if (sr->buf_index < ctx->buf_table.nr) {
|
||||
idx = array_index_nospec(sr->buf_index, ctx->buf_table.nr);
|
||||
node = ctx->buf_table.nodes[idx];
|
||||
node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
|
||||
if (node) {
|
||||
io_req_assign_rsrc_node(sr->notif, node);
|
||||
ret = 0;
|
||||
}
|
||||
|
@ -62,13 +62,11 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
|
||||
if (nop->flags & IORING_NOP_FIXED_BUFFER) {
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct io_rsrc_node *node;
|
||||
int idx;
|
||||
|
||||
ret = -EFAULT;
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
if (nop->buffer < ctx->buf_table.nr) {
|
||||
idx = array_index_nospec(nop->buffer, ctx->buf_table.nr);
|
||||
node = READ_ONCE(ctx->buf_table.nodes[idx]);
|
||||
node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
|
||||
if (node) {
|
||||
io_req_assign_rsrc_node(req, node);
|
||||
ret = 0;
|
||||
}
|
||||
|
@ -181,6 +181,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
|
||||
return -EINVAL;
|
||||
|
||||
for (done = 0; done < nr_args; done++) {
|
||||
struct io_rsrc_node *node;
|
||||
u64 tag = 0;
|
||||
|
||||
if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
|
||||
@ -195,9 +196,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
|
||||
if (fd == IORING_REGISTER_FILES_SKIP)
|
||||
continue;
|
||||
|
||||
i = array_index_nospec(up->offset + done, ctx->file_table.data.nr);
|
||||
if (ctx->file_table.data.nodes[i]) {
|
||||
io_put_rsrc_node(ctx->file_table.data.nodes[i]);
|
||||
i = up->offset + done;
|
||||
node = io_rsrc_node_lookup(&ctx->file_table.data, i);
|
||||
if (node) {
|
||||
io_put_rsrc_node(node);
|
||||
ctx->file_table.data.nodes[i] = NULL;
|
||||
io_file_bitmap_clear(&ctx->file_table, i);
|
||||
}
|
||||
@ -958,9 +960,9 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
|
||||
goto out_unlock;
|
||||
|
||||
for (i = 0; i < nbufs; i++) {
|
||||
struct io_rsrc_node *src_node = src_ctx->buf_table.nodes[i];
|
||||
struct io_rsrc_node *dst_node;
|
||||
struct io_rsrc_node *dst_node, *src_node;
|
||||
|
||||
src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
|
||||
if (src_node == rsrc_empty_node) {
|
||||
dst_node = rsrc_empty_node;
|
||||
} else {
|
||||
|
@ -70,6 +70,14 @@ int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
|
||||
extern const struct io_rsrc_node empty_node;
|
||||
#define rsrc_empty_node (struct io_rsrc_node *) &empty_node
|
||||
|
||||
static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
|
||||
int index)
|
||||
{
|
||||
if (index < data->nr)
|
||||
return data->nodes[array_index_nospec(index, data->nr)];
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void io_put_rsrc_node(struct io_rsrc_node *node)
|
||||
{
|
||||
if (node != rsrc_empty_node && !--node->refs)
|
||||
|
@ -332,17 +332,15 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct io_rsrc_node *node;
|
||||
struct io_async_rw *io;
|
||||
u16 index;
|
||||
int ret;
|
||||
|
||||
ret = io_prep_rw(req, sqe, ddir, false);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
if (unlikely(req->buf_index >= ctx->buf_table.nr))
|
||||
node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
|
||||
if (!node)
|
||||
return -EFAULT;
|
||||
index = array_index_nospec(req->buf_index, ctx->buf_table.nr);
|
||||
node = ctx->buf_table.nodes[index];
|
||||
io_req_assign_rsrc_node(req, node);
|
||||
|
||||
io = req->async_data;
|
||||
|
@ -66,17 +66,13 @@ static struct file *io_splice_get_file(struct io_kiocb *req,
|
||||
return io_file_get_normal(req, sp->splice_fd_in);
|
||||
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
if (unlikely(sp->splice_fd_in >= ctx->file_table.data.nr))
|
||||
goto out;
|
||||
sp->splice_fd_in = array_index_nospec(sp->splice_fd_in, ctx->file_table.data.nr);
|
||||
node = ctx->file_table.data.nodes[sp->splice_fd_in];
|
||||
node = io_rsrc_node_lookup(&ctx->file_table.data, sp->splice_fd_in);
|
||||
if (node) {
|
||||
node->refs++;
|
||||
sp->rsrc_node = node;
|
||||
file = io_slot_file(node);
|
||||
req->flags |= REQ_F_NEED_CLEANUP;
|
||||
}
|
||||
out:
|
||||
io_ring_submit_unlock(ctx, issue_flags);
|
||||
return file;
|
||||
}
|
||||
|
@ -209,18 +209,17 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
|
||||
if (ioucmd->flags & IORING_URING_CMD_FIXED) {
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
u16 index;
|
||||
struct io_rsrc_node *node;
|
||||
|
||||
index = READ_ONCE(sqe->buf_index);
|
||||
if (unlikely(index >= ctx->buf_table.nr))
|
||||
node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
|
||||
if (unlikely(!node))
|
||||
return -EFAULT;
|
||||
req->buf_index = array_index_nospec(index, ctx->buf_table.nr);
|
||||
/*
|
||||
* Pin node upfront, prior to io_uring_cmd_import_fixed()
|
||||
* being called. This prevents destruction of the mapped buffer
|
||||
* we'll need at actual import time.
|
||||
*/
|
||||
io_req_assign_rsrc_node(req, ctx->buf_table.nodes[req->buf_index]);
|
||||
io_req_assign_rsrc_node(req, node);
|
||||
}
|
||||
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user