eventfd: make eventfd_signal{_mask}() void

No caller cares about the return value.

Link: https://lore.kernel.org/r/20231122-vfs-eventfd-signal-v2-4-bd549b14ce0c@kernel.org
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
Christian Brauner 2023-11-22 13:48:25 +01:00
parent 120ae58593
commit b7638ad0c7
2 changed files with 22 additions and 34 deletions

View File

@@ -43,10 +43,19 @@ struct eventfd_ctx {
int id;
};
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
/**
* eventfd_signal_mask - Increment the event counter
* @ctx: [in] Pointer to the eventfd context.
* @mask: [in] poll mask
*
* This function is supposed to be called by the kernel in paths that do not
* allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
* value, and we signal this as overflow condition by returning a EPOLLERR
* to poll(2).
*/
void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
unsigned long flags;
__u64 n = 1;
/*
* Deadlock or stack overflow issues can happen if we recurse here
@@ -57,37 +66,18 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
* safe context.
*/
if (WARN_ON_ONCE(current->in_eventfd))
return 0;
return;
spin_lock_irqsave(&ctx->wqh.lock, flags);
current->in_eventfd = 1;
if (ULLONG_MAX - ctx->count < n)
n = ULLONG_MAX - ctx->count;
ctx->count += n;
if (ctx->count < ULLONG_MAX)
ctx->count++;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
current->in_eventfd = 0;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return n == 1;
}
/**
* eventfd_signal - Increment the event counter
* @ctx: [in] Pointer to the eventfd context.
*
* This function is supposed to be called by the kernel in paths that do not
* allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
* value, and we signal this as overflow condition by returning a EPOLLERR
* to poll(2).
*
* Returns the amount by which the counter was incremented.
*/
__u64 eventfd_signal(struct eventfd_ctx *ctx)
{
return eventfd_signal_mask(ctx, 0);
}
EXPORT_SYMBOL_GPL(eventfd_signal);
EXPORT_SYMBOL_GPL(eventfd_signal_mask);
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{

View File

@@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx);
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -58,14 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
static inline int eventfd_signal(struct eventfd_ctx *ctx)
static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
return -ENOSYS;
}
static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -91,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
#endif
static inline void eventfd_signal(struct eventfd_ctx *ctx)
{
eventfd_signal_mask(ctx, 0);
}
#endif /* _LINUX_EVENTFD_H */