io_uring: temporarily disable registered waits
Disable wait argument registration as it'll be replaced with a more
generic feature. We'll still need IORING_ENTER_EXT_ARG_REG parsing in a
few commits so leave it be.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/70b1d1d218c41ba77a76d1789c8641dab0b0563e.1731689588.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 83e041522e
parent 3730aebbda
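For context on what "disable" means here: with the IORING_REGISTER_CQWAIT_REG case removed from __io_uring_register() (see the register.c hunk below), the opcode falls through to the default branch and is rejected. A minimal user-space probe sketching this — not part of the commit; the raw syscall usage and the literal opcode value 34 (taken from the removed enum entry) are assumptions:

/*
 * Hypothetical probe, not from this commit: after this patch, opcode 34
 * (formerly IORING_REGISTER_CQWAIT_REG) is unhandled by io_uring_register(2)
 * and should fail with EINVAL.
 */
#include <errno.h>
#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;
	int fd;
	long ret;

	memset(&p, 0, sizeof(p));
	fd = (int) syscall(__NR_io_uring_setup, 4, &p);	/* tiny 4-entry ring */
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	ret = syscall(__NR_io_uring_register, fd, 34 /* old CQWAIT_REG */, NULL, 0);
	/* raw syscall(2) returns -1 and sets errno; expect EINVAL here */
	printf("register(34) = %ld, errno = %s\n", ret, strerror(errno));
	close(fd);
	return 0;
}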
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -330,14 +330,6 @@ struct io_ring_ctx {
 		atomic_t		cq_wait_nr;
 		atomic_t		cq_timeouts;
 		struct wait_queue_head	cq_wait;
-
-		/*
-		 * If registered with IORING_REGISTER_CQWAIT_REG, a single
-		 * page holds N entries, mapped in cq_wait_arg. cq_wait_index
-		 * is the maximum allowable index.
-		 */
-		struct io_uring_reg_wait	*cq_wait_arg;
-		unsigned char			cq_wait_index;
 	} ____cacheline_aligned_in_smp;
 
 	/* timeouts */
@@ -431,8 +423,6 @@ struct io_ring_ctx {
 	unsigned short			n_sqe_pages;
 	struct page			**ring_pages;
 	struct page			**sqe_pages;
-
-	struct page			**cq_wait_page;
 };
 
 struct io_tw_state {
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -627,9 +627,6 @@ enum io_uring_register_op {
 	/* resize CQ ring */
 	IORING_REGISTER_RESIZE_RINGS		= 33,
 
-	/* register fixed io_uring_reg_wait arguments */
-	IORING_REGISTER_CQWAIT_REG		= 34,
-
 	/* this goes last */
 	IORING_REGISTER_LAST,
 
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2709,7 +2709,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
 	io_futex_cache_free(ctx);
 	io_destroy_buffers(ctx);
-	io_unregister_cqwait_reg(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	if (ctx->sq_creds)
 		put_cred(ctx->sq_creds);
@@ -3195,15 +3194,6 @@ void __io_uring_cancel(bool cancel_all)
 static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx,
 			const struct io_uring_getevents_arg __user *uarg)
 {
-	struct io_uring_reg_wait *arg = READ_ONCE(ctx->cq_wait_arg);
-
-	if (arg) {
-		unsigned int index = (unsigned int) (uintptr_t) uarg;
-
-		if (index <= ctx->cq_wait_index)
-			return arg + index;
-	}
-
 	return ERR_PTR(-EFAULT);
 }
 
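A side note on the lookup removed from io_get_ext_arg_reg() above: with registered waits, the uarg pointer passed to io_uring_enter(2) did not point at a struct io_uring_getevents_arg at all; it carried a plain array index, recovered by the (unsigned int)(uintptr_t) cast. A tiny stand-alone illustration of that round-trip (user-space C, illustration only, not code from this commit):

/* Illustration only: how an index round-trips through a pointer argument,
 * mirroring the (unsigned int)(uintptr_t)uarg decode removed above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int index = 3;		/* which registered wait entry to use */
	const void *uarg = (const void *)(uintptr_t)index;	/* userspace side */
	unsigned int decoded = (unsigned int)(uintptr_t)uarg;	/* kernel side */

	printf("index %u -> uarg %p -> decoded %u\n", index, uarg, decoded);
	return 0;
}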
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -570,82 +570,6 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	return ret;
 }
 
-void io_unregister_cqwait_reg(struct io_ring_ctx *ctx)
-{
-	unsigned short npages = 1;
-
-	if (!ctx->cq_wait_page)
-		return;
-
-	io_pages_unmap(ctx->cq_wait_arg, &ctx->cq_wait_page, &npages, true);
-	ctx->cq_wait_arg = NULL;
-	if (ctx->user)
-		__io_unaccount_mem(ctx->user, 1);
-}
-
-/*
- * Register a page holding N entries of struct io_uring_reg_wait, which can
- * be used via io_uring_enter(2) if IORING_GETEVENTS_EXT_ARG_REG is set.
- * If that is set with IORING_GETEVENTS_EXT_ARG, then instead of passing
- * in a pointer for a struct io_uring_getevents_arg, an index into this
- * registered array is passed, avoiding two (arg + timeout) copies per
- * invocation.
- */
-static int io_register_cqwait_reg(struct io_ring_ctx *ctx, void __user *uarg)
-{
-	struct io_uring_cqwait_reg_arg arg;
-	struct io_uring_reg_wait *reg;
-	struct page **pages;
-	unsigned long len;
-	int nr_pages, poff;
-	int ret;
-
-	if (ctx->cq_wait_page || ctx->cq_wait_arg)
-		return -EBUSY;
-	if (copy_from_user(&arg, uarg, sizeof(arg)))
-		return -EFAULT;
-	if (!arg.nr_entries || arg.flags)
-		return -EINVAL;
-	if (arg.struct_size != sizeof(*reg))
-		return -EINVAL;
-	if (check_mul_overflow(arg.struct_size, arg.nr_entries, &len))
-		return -EOVERFLOW;
-	if (len > PAGE_SIZE)
-		return -EINVAL;
-	/* offset + len must fit within a page, and must be reg_wait aligned */
-	poff = arg.user_addr & ~PAGE_MASK;
-	if (len + poff > PAGE_SIZE)
-		return -EINVAL;
-	if (poff % arg.struct_size)
-		return -EINVAL;
-
-	pages = io_pin_pages(arg.user_addr, len, &nr_pages);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-	ret = -EINVAL;
-	if (nr_pages != 1)
-		goto out_free;
-	if (ctx->user) {
-		ret = __io_account_mem(ctx->user, 1);
-		if (ret)
-			goto out_free;
-	}
-
-	reg = vmap(pages, 1, VM_MAP, PAGE_KERNEL);
-	if (reg) {
-		ctx->cq_wait_index = arg.nr_entries - 1;
-		WRITE_ONCE(ctx->cq_wait_page, pages);
-		WRITE_ONCE(ctx->cq_wait_arg, (void *) reg + poff);
-		return 0;
-	}
-	ret = -ENOMEM;
-	if (ctx->user)
-		__io_unaccount_mem(ctx->user, 1);
-out_free:
-	io_pages_free(&pages, nr_pages);
-	return ret;
-}
-
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			       void __user *arg, unsigned nr_args)
 	__releases(ctx->uring_lock)
@@ -840,12 +764,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_register_resize_rings(ctx, arg);
 		break;
-	case IORING_REGISTER_CQWAIT_REG:
-		ret = -EINVAL;
-		if (!arg || nr_args != 1)
-			break;
-		ret = io_register_cqwait_reg(ctx, arg);
-		break;
 	default:
 		ret = -EINVAL;
 		break;
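The removed io_register_cqwait_reg() above enforced that the whole entry array fits inside a single page and starts at an entry-aligned offset. A stand-alone restatement of those checks as a sketch — the 4096-byte page size, the PAGE_MASK definition, and the 64-byte entry size in the usage example are assumptions for illustration, not values taken from this commit:

/* Stand-alone restatement of the page-fit checks from the removed
 * io_register_cqwait_reg(); 4096-byte pages are assumed for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static bool cqwait_region_valid(uint64_t user_addr, uint32_t struct_size,
				uint32_t nr_entries)
{
	/* 64-bit multiply of 32-bit operands cannot overflow, so this covers
	 * what check_mul_overflow() guarded against in the kernel version */
	uint64_t len = (uint64_t)struct_size * nr_entries;
	uint64_t poff = user_addr & ~PAGE_MASK;	/* offset within the page */

	if (!nr_entries || len > PAGE_SIZE)
		return false;
	if (len + poff > PAGE_SIZE)	/* array must not cross a page boundary */
		return false;
	if (poff % struct_size)		/* start must be entry-aligned */
		return false;
	return true;
}

int main(void)
{
	/* a page-aligned region of 64 entries x 64 bytes fills the page exactly */
	printf("%d\n", cqwait_region_valid(0x7f0000000000ULL, 64, 64));	/* 1 */
	/* misaligned start: offset 32 is not a multiple of the entry size */
	printf("%d\n", cqwait_region_valid(0x7f0000000020ULL, 64, 63));	/* 0 */
	return 0;
}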
--- a/io_uring/register.h
+++ b/io_uring/register.h
@@ -5,6 +5,5 @@
 int io_eventfd_unregister(struct io_ring_ctx *ctx);
 int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id);
 struct file *io_uring_register_get_file(unsigned int fd, bool registered);
-void io_unregister_cqwait_reg(struct io_ring_ctx *ctx);
 
 #endif