mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 17:25:38 +00:00
eventfd: simplify eventfd_signal_mask()
The eventfd_signal_mask() helper was introduced for io_uring and, just as with eventfd_signal(), callers always passed 1 for @n. So don't bother with that argument at all. Link: https://lore.kernel.org/r/20231122-vfs-eventfd-signal-v2-3-bd549b14ce0c@kernel.org Reviewed-by: Jan Kara <jack@suse.cz> Reviewed-by: Jens Axboe <axboe@kernel.dk> Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
parent
3652117f85
commit
120ae58593
@ -43,9 +43,10 @@ struct eventfd_ctx {
|
||||
int id;
|
||||
};
|
||||
|
||||
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
|
||||
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
__u64 n = 1;
|
||||
|
||||
/*
|
||||
* Deadlock or stack overflow issues can happen if we recurse here
|
||||
@ -68,7 +69,7 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
|
||||
current->in_eventfd = 0;
|
||||
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
|
||||
|
||||
return n;
|
||||
return n == 1;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -84,7 +85,7 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
|
||||
*/
|
||||
__u64 eventfd_signal(struct eventfd_ctx *ctx)
|
||||
{
|
||||
return eventfd_signal_mask(ctx, 1, 0);
|
||||
return eventfd_signal_mask(ctx, 0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eventfd_signal);
|
||||
|
||||
|
@ -36,7 +36,7 @@ struct file *eventfd_fget(int fd);
|
||||
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
|
||||
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
|
||||
__u64 eventfd_signal(struct eventfd_ctx *ctx);
|
||||
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask);
|
||||
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
|
||||
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
|
||||
__u64 *cnt);
|
||||
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
|
||||
@ -63,8 +63,7 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx)
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
|
||||
unsigned mask)
|
||||
static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
@ -558,7 +558,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
|
||||
int ops = atomic_xchg(&ev_fd->ops, 0);
|
||||
|
||||
if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
|
||||
eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
|
||||
eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
|
||||
|
||||
/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
|
||||
* ordering in a race but if references are 0 we know we have to free
|
||||
@ -594,7 +594,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
|
||||
goto out;
|
||||
|
||||
if (likely(eventfd_signal_allowed())) {
|
||||
eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
|
||||
eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
|
||||
} else {
|
||||
atomic_inc(&ev_fd->refs);
|
||||
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
|
||||
|
Loading…
Reference in New Issue
Block a user