mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
eventfd: guard wake_up in eventfd fs calls as well
Guard wakeups that the user can trigger, and that may end up triggering a
call back into eventfd_signal. This is in addition to the current approach
that only guards in eventfd_signal.

Rename in_eventfd_signal -> in_eventfd at the same time to reflect this.

Without this there would be a deadlock in the following code using libaio:

int main() {
	struct io_context *ctx = NULL;
	struct iocb iocb;
	struct iocb *iocbs[] = { &iocb };
	int evfd;
	uint64_t val = 1;

	evfd = eventfd(0, EFD_CLOEXEC);
	assert(!io_setup(2, &ctx));
	io_prep_poll(&iocb, evfd, POLLIN);
	io_set_eventfd(&iocb, evfd);
	assert(1 == io_submit(ctx, 1, iocbs));
	write(evfd, &val, 8);
}

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Link: https://lore.kernel.org/r/20220816135959.1490641-1-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
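For reference, a standalone build of the reproducer above might look as follows; the header choices and the -laio link flag are assumptions on top of the commit message, not part of it:

/* Build sketch (assumed): gcc repro.c -o repro -laio */
#include <assert.h>
#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <libaio.h>	/* io_setup, io_prep_poll, io_set_eventfd, io_submit */

int main(void)
{
	io_context_t ctx = NULL;	/* libaio's typedef for struct io_context * */
	struct iocb iocb;
	struct iocb *iocbs[] = { &iocb };
	int evfd;
	uint64_t val = 1;

	/* eventfd that the poll iocb both polls and signals completion on */
	evfd = eventfd(0, EFD_CLOEXEC);
	assert(evfd >= 0);
	assert(!io_setup(2, &ctx));
	io_prep_poll(&iocb, evfd, POLLIN);
	io_set_eventfd(&iocb, evfd);
	assert(1 == io_submit(ctx, 1, iocbs));
	/* On kernels without this fix, this write can deadlock: the wakeup
	 * completes the poll iocb, which signals the same eventfd again. */
	write(evfd, &val, 8);
	return 0;
}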
This commit is contained in:
parent 521a547ced
commit 9f0deaa12d
 fs/eventfd.c            | 10 +++++++---
 include/linux/eventfd.h |  2 +-
 include/linux/sched.h   |  2 +-
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/fs/eventfd.c b/fs/eventfd.c
@@ -69,17 +69,17 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 	 * it returns false, the eventfd_signal() call should be deferred to a
 	 * safe context.
 	 */
-	if (WARN_ON_ONCE(current->in_eventfd_signal))
+	if (WARN_ON_ONCE(current->in_eventfd))
 		return 0;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
-	current->in_eventfd_signal = 1;
+	current->in_eventfd = 1;
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
-	current->in_eventfd_signal = 0;
+	current->in_eventfd = 0;
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
@@ -253,8 +253,10 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
 		__set_current_state(TASK_RUNNING);
 	}
 	eventfd_ctx_do_read(ctx, &ucnt);
+	current->in_eventfd = 1;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
+	current->in_eventfd = 0;
 	spin_unlock_irq(&ctx->wqh.lock);
 	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
 		return -EFAULT;
@@ -301,8 +303,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 	}
 	if (likely(res > 0)) {
 		ctx->count += ucnt;
+		current->in_eventfd = 1;
 		if (waitqueue_active(&ctx->wqh))
 			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+		current->in_eventfd = 0;
 	}
 	spin_unlock_irq(&ctx->wqh.lock);
 
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
@@ -46,7 +46,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
 static inline bool eventfd_signal_allowed(void)
 {
-	return !current->in_eventfd_signal;
+	return !current->in_eventfd;
 }
 
 #else /* CONFIG_EVENTFD */
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -936,7 +936,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_EVENTFD
 	/* Recursion prevention for eventfd_signal() */
-	unsigned			in_eventfd_signal:1;
+	unsigned			in_eventfd:1;
 #endif
 #ifdef CONFIG_IOMMU_SVA
 	unsigned			pasid_activated:1;
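As the comment kept in the first fs/eventfd.c hunk notes, callers that might signal from a nested wakeup path are expected to check eventfd_signal_allowed() and defer the signal to a safe context when it returns false. A minimal, hypothetical caller sketch of that pattern (my_dev, my_complete and the workqueue member are illustrative only, not part of this commit):

#include <linux/eventfd.h>
#include <linux/workqueue.h>

/* Hypothetical driver state: an eventfd to signal plus deferred work.
 * evfd_work is assumed to be set up elsewhere with
 * INIT_WORK(&dev->evfd_work, my_evfd_work). */
struct my_dev {
	struct eventfd_ctx *evfd_ctx;
	struct work_struct evfd_work;
};

/* Process-context work handler: always a safe place to signal from. */
static void my_evfd_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, evfd_work);

	eventfd_signal(dev->evfd_ctx, 1);
}

/* Completion path that may run from inside a wakeup handler. */
static void my_complete(struct my_dev *dev)
{
	if (eventfd_signal_allowed())
		eventfd_signal(dev->evfd_ctx, 1);
	else
		schedule_work(&dev->evfd_work);	/* defer to a safe context */
}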