mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 17:22:07 +00:00)
io_uring/eventfd: move to more idiomatic RCU free usage
In some ways the current code just "happens to work" by using the ops
field for both the free and the signaling bit, but it depends on the
ordering of the freeing and signaling operations. Clean it up and use
the usual refs == 0 under the RCU read-side lock to determine whether
the ev_fd is still valid, and use the reference count to gate the
freeing as well.

Fixes: 21a091b970 ("io_uring: signal registered eventfd to process deferred task work")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
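The idiom the patch moves to is the usual kernel combination of a reference count with RCU: readers validate an object under the RCU read-side lock with atomic_inc_not_zero(), and whoever drops the last reference schedules the free via call_rcu(). Below is a minimal, generic sketch of that idiom for orientation; the struct and function names (obj, obj_get, obj_put, obj_slot) are illustrative and are not the io_uring code itself.

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Minimal sketch of the refs-under-RCU idiom; names are illustrative only. */
struct obj {
	atomic_t refs;
	struct rcu_head rcu;
};

static struct obj __rcu *obj_slot;	/* published pointer */

static void obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct obj, rcu));
}

/* Drop a reference; the last one frees the object after a grace period. */
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refs))
		call_rcu(&o->rcu, obj_free_rcu);
}

/* Look up the published object; refs == 0 means it is already on its way out. */
static struct obj *obj_get(void)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(obj_slot);
	if (o && !atomic_inc_not_zero(&o->refs))
		o = NULL;
	rcu_read_unlock();
	return o;
}

Unregistering then amounts to clearing the published pointer with rcu_assign_pointer(..., NULL) and dropping the initial reference, which is what the io_eventfd_unregister() hunk below does.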
commit 60b6c075e8
parent f4eaf8eda8
@@ -541,29 +541,33 @@ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
 	}
 }
 
-void io_eventfd_ops(struct rcu_head *rcu)
+void io_eventfd_free(struct rcu_head *rcu)
 {
 	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
-	int ops = atomic_xchg(&ev_fd->ops, 0);
 
-	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
-		eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
+	eventfd_ctx_put(ev_fd->cq_ev_fd);
+	kfree(ev_fd);
+}
+
+void io_eventfd_do_signal(struct rcu_head *rcu)
+{
+	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+
+	eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
 
-	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
-	 * ordering in a race but if references are 0 we know we have to free
-	 * it regardless.
-	 */
-	if (atomic_dec_and_test(&ev_fd->refs)) {
-		eventfd_ctx_put(ev_fd->cq_ev_fd);
-		kfree(ev_fd);
-	}
+	if (atomic_dec_and_test(&ev_fd->refs))
+		io_eventfd_free(rcu);
 }
 
 static void io_eventfd_signal(struct io_ring_ctx *ctx)
 {
 	struct io_ev_fd *ev_fd = NULL;
 
-	rcu_read_lock();
+	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
+		return;
+
+	guard(rcu)();
+
 	/*
 	 * rcu_dereference ctx->io_ev_fd once and use it for both for checking
 	 * and eventfd_signal
@@ -576,24 +580,23 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 	 * the function and rcu_read_lock.
 	 */
 	if (unlikely(!ev_fd))
-		goto out;
-	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
-		goto out;
+		return;
+	if (!atomic_inc_not_zero(&ev_fd->refs))
+		return;
 	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
 		goto out;
 
 	if (likely(eventfd_signal_allowed())) {
 		eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
 	} else {
-		atomic_inc(&ev_fd->refs);
-		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
-			call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
-		else
-			atomic_dec(&ev_fd->refs);
+		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
+			call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
+			return;
+		}
 	}
-
 out:
-	rcu_read_unlock();
+	if (atomic_dec_and_test(&ev_fd->refs))
+		call_rcu(&ev_fd->rcu, io_eventfd_free);
 }
 
 static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
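The rewritten io_eventfd_signal() also drops the manual rcu_read_lock()/rcu_read_unlock() pair in favour of the scope-based guard(rcu)() helper from <linux/cleanup.h>, which leaves the RCU read-side critical section automatically when the enclosing scope is exited. A rough sketch of that behaviour, reusing the illustrative struct obj from the sketch above (peek_refs() is a made-up helper, not part of this patch):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/rcupdate.h>

/* Illustrative only: the guard covers every exit path, including early returns. */
static int peek_refs(struct obj __rcu **slot)
{
	struct obj *o;

	guard(rcu)();			/* rcu_read_lock(); unlock runs at scope exit */
	o = rcu_dereference(*slot);
	if (!o)
		return -ENOENT;		/* no explicit rcu_read_unlock() needed here */
	return atomic_read(&o->refs);
}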
@@ -106,10 +106,10 @@ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 
 enum {
 	IO_EVENTFD_OP_SIGNAL_BIT,
-	IO_EVENTFD_OP_FREE_BIT,
 };
 
-void io_eventfd_ops(struct rcu_head *rcu);
+void io_eventfd_do_signal(struct rcu_head *rcu);
+void io_eventfd_free(struct rcu_head *rcu);
 void io_activate_pollwq(struct io_ring_ctx *ctx);
 
 static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
@@ -63,9 +63,9 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
 
 	ev_fd->eventfd_async = eventfd_async;
 	ctx->has_evfd = true;
-	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
 	atomic_set(&ev_fd->refs, 1);
 	atomic_set(&ev_fd->ops, 0);
+	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
 	return 0;
 }
 
@@ -78,8 +78,8 @@ int io_eventfd_unregister(struct io_ring_ctx *ctx)
 	if (ev_fd) {
 		ctx->has_evfd = false;
 		rcu_assign_pointer(ctx->io_ev_fd, NULL);
-		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
-			call_rcu(&ev_fd->rcu, io_eventfd_ops);
+		if (atomic_dec_and_test(&ev_fd->refs))
+			call_rcu(&ev_fd->rcu, io_eventfd_free);
 		return 0;
 	}
 